/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.
31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 #include "ixgbe_common.h" 36 #include "ixgbe_phy.h" 37 #include "ixgbe_api.h" 38 39 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 40 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 41 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 42 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 43 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 44 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 45 u16 count); 46 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 47 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 48 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 49 static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 50 51 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 52 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 53 u16 *san_mac_offset); 54 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 55 u16 words, u16 *data); 56 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 57 u16 words, u16 *data); 58 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 59 u16 offset); 60 61 /** 62 * ixgbe_init_ops_generic - Inits function ptrs 63 * @hw: pointer to the hardware structure 64 * 65 * Initialize the function pointers. 
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = &ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 &ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	eeprom->ops.write = &ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				  &ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;

	/* MAC
	 * NOTE: ops initialized to NULL here are device specific and are
	 * expected to be filled in by the per-MAC init routine before use.
	 */
	mac->ops.init_hw = &ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;
	mac->ops.start_hw = &ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;

	/* LEDs */
	mac->ops.led_on = &ixgbe_led_on_generic;
	mac->ops.led_off = &ixgbe_led_off_generic;
	mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = &ixgbe_set_rar_generic;
	mac->ops.clear_rar = &ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;
	mac->ops.set_vmdq = NULL;
	mac->ops.clear_vmdq = NULL;
	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;
	mac->ops.set_vfta = NULL;
	mac->ops.set_vlvf = NULL;
	mac->ops.init_uta_tables = NULL;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_generic;

	/* Link */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
 * control
 * @hw: pointer to hardware structure
 *
 * There are several phys that do not support autoneg flow control. This
 * function checks the device id to see if the associated phy supports
 * autoneg flow control.  Returns IXGBE_SUCCESS if supported, otherwise
 * IXGBE_ERR_FC_NOT_SUPPORTED.
 **/
s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{

	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM:
	case IXGBE_DEV_ID_X540T:
		return IXGBE_SUCCESS;
	default:
		return IXGBE_ERR_FC_NOT_SUPPORTED;
	}
}

/**
 * ixgbe_setup_fc - Set up flow control
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.  Programs the 1G (PCS1GANA)
 * and 10G (AUTOC or PHY) flow-control advertisement registers according to
 * hw->fc.requested_mode and restarts autonegotiation on backplane/copper.
 **/
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg = 0, reg_bp = 0;
	u16 reg_cu = 0;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_setup_fc");

	/*
	 * Validate the requested mode.  Strict IEEE mode does not allow
	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
	 */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber:
	case ixgbe_media_type_backplane:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override.  Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		/* FALLTHROUGH */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* unreachable after goto; kept for style */
	}

	if (hw->mac.type != ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper.  There is no need to set the PCS1GCTL register.
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
		 * LESM is on, likewise reset_pipeline requries the lock as
		 * it also writes AUTOC.
		 */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (ret_val != IXGBE_SUCCESS) {
				ret_val = IXGBE_ERR_SWFW_SYNC;
				goto out;
			}
			got_lock = TRUE;
		}

		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
		if (hw->mac.type == ixgbe_mac_82599EB)
			ixgbe_reset_pipeline_82599(hw);

		if (got_lock)
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		   (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
out:
	return ret_val;
}

/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and
uninitialized 341 **/ 342 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) 343 { 344 s32 ret_val; 345 u32 ctrl_ext; 346 347 DEBUGFUNC("ixgbe_start_hw_generic"); 348 349 /* Set the media type */ 350 hw->phy.media_type = hw->mac.ops.get_media_type(hw); 351 352 /* PHY ops initialization must be done in reset_hw() */ 353 354 /* Clear the VLAN filter table */ 355 hw->mac.ops.clear_vfta(hw); 356 357 /* Clear statistics registers */ 358 hw->mac.ops.clear_hw_cntrs(hw); 359 360 /* Set No Snoop Disable */ 361 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 362 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; 363 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 364 IXGBE_WRITE_FLUSH(hw); 365 366 /* Setup flow control */ 367 ret_val = ixgbe_setup_fc(hw); 368 if (ret_val != IXGBE_SUCCESS) 369 goto out; 370 371 /* Clear adapter stopped flag */ 372 hw->adapter_stopped = FALSE; 373 374 out: 375 return ret_val; 376 } 377 378 /** 379 * ixgbe_start_hw_gen2 - Init sequence for common device family 380 * @hw: pointer to hw structure 381 * 382 * Performs the init sequence common to the second generation 383 * of 10 GbE devices. 
384 * Devices in the second generation: 385 * 82599 386 * X540 387 **/ 388 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) 389 { 390 u32 i; 391 u32 regval; 392 393 /* Clear the rate limiters */ 394 for (i = 0; i < hw->mac.max_tx_queues; i++) { 395 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); 396 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); 397 } 398 IXGBE_WRITE_FLUSH(hw); 399 400 /* Disable relaxed ordering */ 401 for (i = 0; i < hw->mac.max_tx_queues; i++) { 402 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 403 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 404 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 405 } 406 407 for (i = 0; i < hw->mac.max_rx_queues; i++) { 408 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 409 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | 410 IXGBE_DCA_RXCTRL_HEAD_WRO_EN); 411 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 412 } 413 414 return IXGBE_SUCCESS; 415 } 416 417 /** 418 * ixgbe_init_hw_generic - Generic hardware initialization 419 * @hw: pointer to hardware structure 420 * 421 * Initialize the hardware by resetting the hardware, filling the bus info 422 * structure and media type, clears all on chip counters, initializes receive 423 * address registers, multicast table, VLAN filter table, calls routine to set 424 * up link and flow control settings, and leaves transmit and receive units 425 * disabled and uninitialized 426 **/ 427 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) 428 { 429 s32 status; 430 431 DEBUGFUNC("ixgbe_init_hw_generic"); 432 433 /* Reset the hardware */ 434 status = hw->mac.ops.reset_hw(hw); 435 436 if (status == IXGBE_SUCCESS) { 437 /* Start the HW */ 438 status = hw->mac.ops.start_hw(hw); 439 } 440 441 return status; 442 } 443 444 /** 445 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters 446 * @hw: pointer to hardware structure 447 * 448 * Clears all hardware statistics counters by reading them from the hardware 449 * Statistics counters are clear on read. 
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level flow control counters; 82599 and later use the
	 * *CNT variants of the Rx pause counters. */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority flow control counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx size-binned packet counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* RNBC only exists on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx size-binned packet counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540 keeps additional clear-on-read error counters in the PHY;
	 * values are read and discarded, i is reused as a scratch target. */
	if (hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
565 **/ 566 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 567 u32 pba_num_size) 568 { 569 s32 ret_val; 570 u16 data; 571 u16 pba_ptr; 572 u16 offset; 573 u16 length; 574 575 DEBUGFUNC("ixgbe_read_pba_string_generic"); 576 577 if (pba_num == NULL) { 578 DEBUGOUT("PBA string buffer was null\n"); 579 return IXGBE_ERR_INVALID_ARGUMENT; 580 } 581 582 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 583 if (ret_val) { 584 DEBUGOUT("NVM Read Error\n"); 585 return ret_val; 586 } 587 588 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); 589 if (ret_val) { 590 DEBUGOUT("NVM Read Error\n"); 591 return ret_val; 592 } 593 594 /* 595 * if data is not ptr guard the PBA must be in legacy format which 596 * means pba_ptr is actually our second data word for the PBA number 597 * and we can decode it into an ascii string 598 */ 599 if (data != IXGBE_PBANUM_PTR_GUARD) { 600 DEBUGOUT("NVM PBA number is not stored as string\n"); 601 602 /* we will need 11 characters to store the PBA */ 603 if (pba_num_size < 11) { 604 DEBUGOUT("PBA string buffer too small\n"); 605 return IXGBE_ERR_NO_SPACE; 606 } 607 608 /* extract hex string from data and pba_ptr */ 609 pba_num[0] = (data >> 12) & 0xF; 610 pba_num[1] = (data >> 8) & 0xF; 611 pba_num[2] = (data >> 4) & 0xF; 612 pba_num[3] = data & 0xF; 613 pba_num[4] = (pba_ptr >> 12) & 0xF; 614 pba_num[5] = (pba_ptr >> 8) & 0xF; 615 pba_num[6] = '-'; 616 pba_num[7] = 0; 617 pba_num[8] = (pba_ptr >> 4) & 0xF; 618 pba_num[9] = pba_ptr & 0xF; 619 620 /* put a null character on the end of our string */ 621 pba_num[10] = '\0'; 622 623 /* switch all the data but the '-' to hex char */ 624 for (offset = 0; offset < 10; offset++) { 625 if (pba_num[offset] < 0xA) 626 pba_num[offset] += '0'; 627 else if (pba_num[offset] < 0x10) 628 pba_num[offset] += 'A' - 0xA; 629 } 630 631 return IXGBE_SUCCESS; 632 } 633 634 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); 635 if (ret_val) { 636 DEBUGOUT("NVM Read Error\n"); 
637 return ret_val; 638 } 639 640 if (length == 0xFFFF || length == 0) { 641 DEBUGOUT("NVM PBA number section invalid length\n"); 642 return IXGBE_ERR_PBA_SECTION; 643 } 644 645 /* check if pba_num buffer is big enough */ 646 if (pba_num_size < (((u32)length * 2) - 1)) { 647 DEBUGOUT("PBA string buffer too small\n"); 648 return IXGBE_ERR_NO_SPACE; 649 } 650 651 /* trim pba length from start of string */ 652 pba_ptr++; 653 length--; 654 655 for (offset = 0; offset < length; offset++) { 656 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); 657 if (ret_val) { 658 DEBUGOUT("NVM Read Error\n"); 659 return ret_val; 660 } 661 pba_num[offset * 2] = (u8)(data >> 8); 662 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); 663 } 664 pba_num[offset * 2] = '\0'; 665 666 return IXGBE_SUCCESS; 667 } 668 669 /** 670 * ixgbe_read_pba_num_generic - Reads part number from EEPROM 671 * @hw: pointer to hardware structure 672 * @pba_num: stores the part number from the EEPROM 673 * 674 * Reads the part number from the EEPROM. 
675 **/ 676 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 677 { 678 s32 ret_val; 679 u16 data; 680 681 DEBUGFUNC("ixgbe_read_pba_num_generic"); 682 683 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 684 if (ret_val) { 685 DEBUGOUT("NVM Read Error\n"); 686 return ret_val; 687 } else if (data == IXGBE_PBANUM_PTR_GUARD) { 688 DEBUGOUT("NVM Not supported\n"); 689 return IXGBE_NOT_IMPLEMENTED; 690 } 691 *pba_num = (u32)(data << 16); 692 693 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 694 if (ret_val) { 695 DEBUGOUT("NVM Read Error\n"); 696 return ret_val; 697 } 698 *pba_num |= data; 699 700 return IXGBE_SUCCESS; 701 } 702 703 /** 704 * ixgbe_read_pba_raw 705 * @hw: pointer to the HW structure 706 * @eeprom_buf: optional pointer to EEPROM image 707 * @eeprom_buf_size: size of EEPROM image in words 708 * @max_pba_block_size: PBA block size limit 709 * @pba: pointer to output PBA structure 710 * 711 * Reads PBA from EEPROM image when eeprom_buf is not NULL. 712 * Reads PBA from physical EEPROM device when eeprom_buf is NULL. 
713 * 714 **/ 715 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, 716 u32 eeprom_buf_size, u16 max_pba_block_size, 717 struct ixgbe_pba *pba) 718 { 719 s32 ret_val; 720 u16 pba_block_size; 721 722 if (pba == NULL) 723 return IXGBE_ERR_PARAM; 724 725 if (eeprom_buf == NULL) { 726 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, 727 &pba->word[0]); 728 if (ret_val) 729 return ret_val; 730 } else { 731 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { 732 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; 733 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; 734 } else { 735 return IXGBE_ERR_PARAM; 736 } 737 } 738 739 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { 740 if (pba->pba_block == NULL) 741 return IXGBE_ERR_PARAM; 742 743 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf, 744 eeprom_buf_size, 745 &pba_block_size); 746 if (ret_val) 747 return ret_val; 748 749 if (pba_block_size > max_pba_block_size) 750 return IXGBE_ERR_PARAM; 751 752 if (eeprom_buf == NULL) { 753 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1], 754 pba_block_size, 755 pba->pba_block); 756 if (ret_val) 757 return ret_val; 758 } else { 759 if (eeprom_buf_size > (u32)(pba->word[1] + 760 pba->pba_block[0])) { 761 memcpy(pba->pba_block, 762 &eeprom_buf[pba->word[1]], 763 pba_block_size * sizeof(u16)); 764 } else { 765 return IXGBE_ERR_PARAM; 766 } 767 } 768 } 769 770 return IXGBE_SUCCESS; 771 } 772 773 /** 774 * ixgbe_write_pba_raw 775 * @hw: pointer to the HW structure 776 * @eeprom_buf: optional pointer to EEPROM image 777 * @eeprom_buf_size: size of EEPROM image in words 778 * @pba: pointer to PBA structure 779 * 780 * Writes PBA to EEPROM image when eeprom_buf is not NULL. 781 * Writes PBA to physical EEPROM device when eeprom_buf is NULL. 
782 * 783 **/ 784 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, 785 u32 eeprom_buf_size, struct ixgbe_pba *pba) 786 { 787 s32 ret_val; 788 789 if (pba == NULL) 790 return IXGBE_ERR_PARAM; 791 792 if (eeprom_buf == NULL) { 793 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2, 794 &pba->word[0]); 795 if (ret_val) 796 return ret_val; 797 } else { 798 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { 799 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0]; 800 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1]; 801 } else { 802 return IXGBE_ERR_PARAM; 803 } 804 } 805 806 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { 807 if (pba->pba_block == NULL) 808 return IXGBE_ERR_PARAM; 809 810 if (eeprom_buf == NULL) { 811 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1], 812 pba->pba_block[0], 813 pba->pba_block); 814 if (ret_val) 815 return ret_val; 816 } else { 817 if (eeprom_buf_size > (u32)(pba->word[1] + 818 pba->pba_block[0])) { 819 memcpy(&eeprom_buf[pba->word[1]], 820 pba->pba_block, 821 pba->pba_block[0] * sizeof(u16)); 822 } else { 823 return IXGBE_ERR_PARAM; 824 } 825 } 826 } 827 828 return IXGBE_SUCCESS; 829 } 830 831 /** 832 * ixgbe_get_pba_block_size 833 * @hw: pointer to the HW structure 834 * @eeprom_buf: optional pointer to EEPROM image 835 * @eeprom_buf_size: size of EEPROM image in words 836 * @pba_data_size: pointer to output variable 837 * 838 * Returns the size of the PBA block in words. Function operates on EEPROM 839 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical 840 * EEPROM device. 
 *
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	/* Fetch the two PBA header words from device or image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* String format: pba_word[1] points at a block whose first
		 * word is its length in words. */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block. */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_mac_addr_generic - Generic get MAC address
 * @hw: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
 * A reset of the adapter must be performed prior to calling this function
 * in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	DEBUGFUNC("ixgbe_get_mac_addr_generic");

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	/* First four octets come from RAL, last two from RAH,
	 * least-significant byte first. */
	for (i = 0; i < 4; i++)
		mac_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < 2; i++)
		mac_addr[i+4] = (u8)(rar_high >> (i*8));

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_bus_info_generic - Generic set PCI bus info
 * @hw: pointer to hardware structure
 *
 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u16 link_status;

	DEBUGFUNC("ixgbe_get_bus_info_generic");

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space */
	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);

	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 *
 * Determines the LAN function id by reading memory-mapped registers
 * and swaps the port value if requested.
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");

	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;
}

/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
1009 **/ 1010 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) 1011 { 1012 u32 reg_val; 1013 u16 i; 1014 1015 DEBUGFUNC("ixgbe_stop_adapter_generic"); 1016 1017 /* 1018 * Set the adapter_stopped flag so other driver functions stop touching 1019 * the hardware 1020 */ 1021 hw->adapter_stopped = TRUE; 1022 1023 /* Disable the receive unit */ 1024 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0); 1025 1026 /* Clear interrupt mask to stop interrupts from being generated */ 1027 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 1028 1029 /* Clear any pending interrupts, flush previous writes */ 1030 IXGBE_READ_REG(hw, IXGBE_EICR); 1031 1032 /* Disable the transmit unit. Each queue must be disabled. */ 1033 for (i = 0; i < hw->mac.max_tx_queues; i++) 1034 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); 1035 1036 /* Disable the receive unit by stopping each queue */ 1037 for (i = 0; i < hw->mac.max_rx_queues; i++) { 1038 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 1039 reg_val &= ~IXGBE_RXDCTL_ENABLE; 1040 reg_val |= IXGBE_RXDCTL_SWFLSH; 1041 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); 1042 } 1043 1044 /* flush all queues disables */ 1045 IXGBE_WRITE_FLUSH(hw); 1046 msec_delay(2); 1047 1048 /* 1049 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 1050 * access and verify no pending requests 1051 */ 1052 return ixgbe_disable_pcie_master(hw); 1053 } 1054 1055 /** 1056 * ixgbe_led_on_generic - Turns on the software controllable LEDs. 1057 * @hw: pointer to hardware structure 1058 * @index: led number to turn on 1059 **/ 1060 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) 1061 { 1062 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1063 1064 DEBUGFUNC("ixgbe_led_on_generic"); 1065 1066 /* To turn on the LED, set mode to ON. 
	 */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn off
 **/
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_led_off_generic");

	/* To turn off the LED, set mode to OFF. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	/* Only run once; later calls are no-ops */
	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* EEC.ADDR_SIZE selects 16- vs 8-bit SPI addressing */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to write
 * @words: number of word(s)
 * @data: 16 bit word(s) to write to EEPROM
 *
 * Writes 16 bit word(s) to EEPROM through bit-bang method
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode) */
			ixgbe_shift_out_eeprom_bits(hw,
						  IXGBE_EEPROM_WREN_OPCODE_SPI,
						  IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI */
			do {
				word = data[i];
				/* swap to the big-endian byte order the
				 * EEPROM expects on the wire */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}

/**
 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @data: 16 bit word to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;

	DEBUGFUNC("ixgbe_write_eeprom_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);

out:
	return status;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit words(s) from EEPROM
 * @words: number of word(s)
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |=
					IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data; swap wire (big-endian) byte order */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}

/**
 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit value from EEPROM
 *
 * Reads 16 bit value from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	s32 status;

	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);

out:
	return status;
}

/**
 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of word(s)
 * @data: 16 bit word(s) from the EEPROM
 *
 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	/* NOTE(review): only the starting offset is range-checked here;
	 * offset + words may still exceed word_size — confirm intended. */
	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* program address and START bit, then poll for DONE */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == IXGBE_SUCCESS) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			DEBUGOUT("Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}

/**
 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be used as a scratch pad
 *
 * Discover EEPROM page size by writing marching data at given offset.
 * This function is called only when we are writing a new large buffer
 * at given offset so the data would be overwritten anyway.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* marching pattern: data[i] = i */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* temporarily assume the max page size for the probing write */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX,
						    data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}

/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}

/**
 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of word(s)
 * @data: word(s) write to the EEPROM
 *
 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	u32 eewr;
	s32 status = IXGBE_SUCCESS;
	u16 i;

	/* NOTE(review): DEBUGFUNC string says "ixgbe_write_eewr_generic";
	 * runtime string left untouched. */
	DEBUGFUNC("ixgbe_write_eewr_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	/* NOTE(review): only the starting offset is range-checked here;
	 * offset + words may still exceed word_size — confirm intended. */
	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		/* wait for any previous write to finish before starting */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != IXGBE_SUCCESS) {
			DEBUGOUT("Eeprom write EEWR timed out\n");
			goto out;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		/* wait for this write to complete */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != IXGBE_SUCCESS) {
			DEBUGOUT("Eeprom write EEWR timed out\n");
			goto out;
		}
	}

out:
	return status;
}

/**
 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}

/**
 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
 * @hw: pointer to hardware structure
 * @ee_reg: EEPROM flag for polling
 *
 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
 * read or write is done respectively.
 **/
s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 i;
	u32 reg;
	s32 status = IXGBE_ERR_EEPROM;

	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");

	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(5);
	}
	return status;
}

/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* first take the SW/FW synchronization semaphore */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			DEBUGOUT("SWESMBI Software EEPROM semaphore "
				 "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		DEBUGOUT("Software semaphore SMBI between device drivers "
			 "not granted.\n");
	}

	return status;
}

/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("ixgbe_release_eeprom_semaphore");

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ready_eeprom - Polls for EEPROM ready
 * @hw: pointer to hardware structure
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 i;
	u8 spi_stat_reg;

	DEBUGFUNC("ixgbe_ready_eeprom");

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		usec_delay(5);
		ixgbe_standby_eeprom(hw);
	};

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		DEBUGOUT("SPI EEPROM Status error\n");
		status = IXGBE_ERR_EEPROM;
	}

	return status;
}

/**
 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 * @hw: pointer to hardware structure
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_standby_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.
	 * Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
		IXGBE_WRITE_FLUSH(hw);

		usec_delay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	};

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM.  Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}

/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_raise_eeprom_clk");

	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 **/
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
			DEBUGOUT("EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i
 < IXGBE_FW_PTR; i++) {
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid */
		if (pointer != 0xFFFF && pointer != 0) {
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	/* checksum word is chosen so the whole image sums to IXGBE_EEPROM_SUM */
	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum.  If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

	/*
	 * Read the first word from the EEPROM.
	 * If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == IXGBE_SUCCESS) {
		checksum = hw->eeprom.ops.calc_checksum(hw);

		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);

		/*
		 * Verify read checksum from EEPROM is the same as
		 * calculated checksum
		 */
		if (read_checksum != checksum)
			status = IXGBE_ERR_EEPROM_CHECKSUM;

		/* If the user cares, return the calculated checksum */
		if (checksum_val)
			*checksum_val = checksum;
	} else {
		DEBUGOUT("EEPROM read failed\n");
	}

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");

	/*
	 * Read the first word from the EEPROM.  If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == IXGBE_SUCCESS) {
		checksum = hw->eeprom.ops.calc_checksum(hw);
		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
					      checksum);
	} else {
		DEBUGOUT("EEPROM read failed\n");
	}

	return status;
}

/**
 * ixgbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
 *
 * Tests a MAC address to ensure it is a valid Individual Address
 **/
s32 ixgbe_validate_mac_addr(u8 *mac_addr)
{
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_validate_mac_addr");

	/* Make sure it is not a multicast address */
	if (IXGBE_IS_MULTICAST(mac_addr)) {
		DEBUGOUT("MAC address is multicast\n");
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	/* Not a broadcast address */
	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
		DEBUGOUT("MAC address is broadcast\n");
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	/* Reject the zero address */
	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
		DEBUGOUT("MAC address is all zeros\n");
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	}
	return status;
}

/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	/* Only mark the address valid when the caller asks for it */
	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] now holds the station address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_add_uc_addr - Adds a secondary unicast address.
 * @hw: pointer to hardware structure
 * @addr: new address
 * @vmdq: VMDq "set" or "pool" index for the address
 *
 * Adds it to unused receive address register or goes into promiscuous mode.
 **/
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 rar;

	DEBUGFUNC("ixgbe_add_uc_addr");

	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Place this address in the RAR if there is room,
	 * else put the controller into promiscuous mode
	 */
	if (hw->addr_ctrl.rar_used_count < rar_entries) {
		rar = hw->addr_ctrl.rar_used_count;
		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
		hw->addr_ctrl.rar_used_count++;
	} else {
		/* Out of RARs; the caller reacts to overflow_promisc */
		hw->addr_ctrl.overflow_promisc++;
	}

	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
}

/**
 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 * @hw: pointer to hardware structure
 * @addr_list: the list of new addresses
 * @addr_count: number of addresses
 * @next: iterator function to walk the address list
 *
 * The given list replaces any existing list. Clears the secondary addrs from
 * receive address registers. Uses unused receive address registers for the
 * first secondary addresses, and falls back to promiscuous mode as needed.
 *
 * Drivers using secondary unicast addresses must set user_set_promisc when
 * manually putting the device into promiscuous mode.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	/* Toggle unicast promiscuous (UPE) only on overflow transitions */
	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware
structure 2461 * @mc_addr: the multicast address 2462 * 2463 * Extracts the 12 bits, from a multicast address, to determine which 2464 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 2465 * incoming rx multicast addresses, to determine the bit-vector to check in 2466 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 2467 * by the MO field of the MCSTCTRL. The MO field is set during initialization 2468 * to mc_filter_type. 2469 **/ 2470 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 2471 { 2472 u32 vector = 0; 2473 2474 DEBUGFUNC("ixgbe_mta_vector"); 2475 2476 switch (hw->mac.mc_filter_type) { 2477 case 0: /* use bits [47:36] of the address */ 2478 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 2479 break; 2480 case 1: /* use bits [46:35] of the address */ 2481 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 2482 break; 2483 case 2: /* use bits [45:34] of the address */ 2484 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 2485 break; 2486 case 3: /* use bits [43:32] of the address */ 2487 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 2488 break; 2489 default: /* Invalid mc_filter_type */ 2490 DEBUGOUT("MC filter type param set incorrectly\n"); 2491 ASSERT(0); 2492 break; 2493 } 2494 2495 /* vector can only be 12-bits or boundary will be exceeded */ 2496 vector &= 0xFFF; 2497 return vector; 2498 } 2499 2500 /** 2501 * ixgbe_set_mta - Set bit-vector in multicast table 2502 * @hw: pointer to hardware structure 2503 * @hash_value: Multicast address hash value 2504 * 2505 * Sets the bit-vector in the multicast table. 
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	/* Only the shadow copy is updated here; the caller flushes it to HW */
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 * @clear: flag, when set clears the table beforehand
 *
 * When the clear flag is set, the given list replaces any existing list.
 * Hashes the given addresses into the multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: flush the shadow table to the hardware MTA array */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	DEBUGFUNC("ixgbe_enable_mc_generic");

	/* Set MFE only when the shadow table actually has entries */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
				hw->mac.mc_filter_type);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_disable_mc_generic - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
2610 **/ 2611 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 2612 { 2613 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2614 2615 DEBUGFUNC("ixgbe_disable_mc_generic"); 2616 2617 if (a->mta_in_use > 0) 2618 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 2619 2620 return IXGBE_SUCCESS; 2621 } 2622 2623 /** 2624 * ixgbe_fc_enable_generic - Enable flow control 2625 * @hw: pointer to hardware structure 2626 * 2627 * Enable flow control according to the current settings. 2628 **/ 2629 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) 2630 { 2631 s32 ret_val = IXGBE_SUCCESS; 2632 u32 mflcn_reg, fccfg_reg; 2633 u32 reg; 2634 u32 fcrtl, fcrth; 2635 int i; 2636 2637 DEBUGFUNC("ixgbe_fc_enable_generic"); 2638 2639 /* Validate the water mark configuration */ 2640 if (!hw->fc.pause_time) { 2641 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2642 goto out; 2643 } 2644 2645 /* Low water mark of zero causes XOFF floods */ 2646 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 2647 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2648 hw->fc.high_water[i]) { 2649 if (!hw->fc.low_water[i] || 2650 hw->fc.low_water[i] >= hw->fc.high_water[i]) { 2651 DEBUGOUT("Invalid water mark configuration\n"); 2652 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2653 goto out; 2654 } 2655 } 2656 } 2657 2658 /* Negotiate the fc mode to use */ 2659 ixgbe_fc_autoneg(hw); 2660 2661 /* Disable any previous flow control settings */ 2662 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2663 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); 2664 2665 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2666 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2667 2668 /* 2669 * The possible values of fc.current_mode are: 2670 * 0: Flow control is completely disabled 2671 * 1: Rx flow control is enabled (we can receive pause frames, 2672 * but not send pause frames). 
2673 * 2: Tx flow control is enabled (we can send pause frames but 2674 * we do not support receiving pause frames). 2675 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2676 * other: Invalid. 2677 */ 2678 switch (hw->fc.current_mode) { 2679 case ixgbe_fc_none: 2680 /* 2681 * Flow control is disabled by software override or autoneg. 2682 * The code below will actually disable it in the HW. 2683 */ 2684 break; 2685 case ixgbe_fc_rx_pause: 2686 /* 2687 * Rx Flow control is enabled and Tx Flow control is 2688 * disabled by software override. Since there really 2689 * isn't a way to advertise that we are capable of RX 2690 * Pause ONLY, we will advertise that we support both 2691 * symmetric and asymmetric Rx PAUSE. Later, we will 2692 * disable the adapter's ability to send PAUSE frames. 2693 */ 2694 mflcn_reg |= IXGBE_MFLCN_RFCE; 2695 break; 2696 case ixgbe_fc_tx_pause: 2697 /* 2698 * Tx Flow control is enabled, and Rx Flow control is 2699 * disabled by software override. 2700 */ 2701 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2702 break; 2703 case ixgbe_fc_full: 2704 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2705 mflcn_reg |= IXGBE_MFLCN_RFCE; 2706 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2707 break; 2708 default: 2709 DEBUGOUT("Flow control param set incorrectly\n"); 2710 ret_val = IXGBE_ERR_CONFIG; 2711 goto out; 2712 break; 2713 } 2714 2715 /* Set 802.3x based flow control settings. */ 2716 mflcn_reg |= IXGBE_MFLCN_DPF; 2717 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2718 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2719 2720 2721 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ 2722 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 2723 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2724 hw->fc.high_water[i]) { 2725 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; 2726 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); 2727 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 2728 } else { 2729 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); 2730 /* 2731 * In order to prevent Tx hangs when the internal Tx 2732 * switch is enabled we must set the high water mark 2733 * to the maximum FCRTH value. This allows the Tx 2734 * switch to function even under heavy Rx workloads. 2735 */ 2736 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; 2737 } 2738 2739 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); 2740 } 2741 2742 /* Configure pause time (2 TCs per register) */ 2743 reg = hw->fc.pause_time * 0x00010001; 2744 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 2745 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 2746 2747 /* Configure flow control refresh threshold value */ 2748 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 2749 2750 out: 2751 return ret_val; 2752 } 2753 2754 /** 2755 * ixgbe_negotiate_fc - Negotiate flow control 2756 * @hw: pointer to hardware structure 2757 * @adv_reg: flow control advertised settings 2758 * @lp_reg: link partner's flow control settings 2759 * @adv_sym: symmetric pause bit in advertisement 2760 * @adv_asm: asymmetric pause bit in advertisement 2761 * @lp_sym: symmetric pause bit in link partner advertisement 2762 * @lp_asm: asymmetric pause bit in link partner advertisement 2763 * 2764 * Find the intersection between advertised settings and link partner's 2765 * advertised settings 2766 **/ 2767 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 2768 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) 2769 { 2770 if ((!(adv_reg)) || (!(lp_reg))) 2771 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2772 2773 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { 2774 /* 2775 
* Now we need to check if the user selected Rx ONLY 2776 * of pause frames. In this case, we had to advertise 2777 * FULL flow control because we could not advertise RX 2778 * ONLY. Hence, we must now check to see if we need to 2779 * turn OFF the TRANSMISSION of PAUSE frames. 2780 */ 2781 if (hw->fc.requested_mode == ixgbe_fc_full) { 2782 hw->fc.current_mode = ixgbe_fc_full; 2783 DEBUGOUT("Flow Control = FULL.\n"); 2784 } else { 2785 hw->fc.current_mode = ixgbe_fc_rx_pause; 2786 DEBUGOUT("Flow Control=RX PAUSE frames only\n"); 2787 } 2788 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && 2789 (lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2790 hw->fc.current_mode = ixgbe_fc_tx_pause; 2791 DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); 2792 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && 2793 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2794 hw->fc.current_mode = ixgbe_fc_rx_pause; 2795 DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); 2796 } else { 2797 hw->fc.current_mode = ixgbe_fc_none; 2798 DEBUGOUT("Flow Control = NONE.\n"); 2799 } 2800 return IXGBE_SUCCESS; 2801 } 2802 2803 /** 2804 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber 2805 * @hw: pointer to hardware structure 2806 * 2807 * Enable flow control according on 1 gig fiber. 
 **/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On multispeed fiber at 1g, bail out if
	 * - link is up but AN did not complete, or if
	 * - link is up and AN completed but timed out
	 */

	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	/* !! normalizes the masked bits to 0/1 before comparing */
	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
		goto out;

	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

	/* Resolve the mode from our advertisement and the LP ability */
	ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
				     pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE,
				     IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE);

out:
	return ret_val;
}

/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
			goto out;
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}

/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	/* Read our advertisement and the link partner's over MDIO */
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
				  (u32)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}

/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out. Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg)
		goto out;

	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (!link_up)
		goto out;

	/* Dispatch to the media-specific negotiation helper */
	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	/* On any failure, fall back to the mode the user requested */
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = TRUE;
	} else {
		hw->fc.fc_was_autonegged = FALSE;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}

/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new master requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
		      IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}

/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;
	u32 fwmask = mask << 5;	/* FW bits sit 5 above the SW bits in GSSR */
	s32 timeout = 200;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	while (timeout) {
		/*
		 * SW EEPROM semaphore bit is used for access to all
		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask) or other software
		 * thread currently using resource (swmask)
		 */
		ixgbe_release_eeprom_semaphore(hw);
		msec_delay(5);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		return IXGBE_ERR_SWFW_SYNC;
	}

	/* Claim the resource; the EEPROM semaphore is still held here */
	gssr |= swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;

	DEBUGFUNC("ixgbe_release_swfw_sync");

	/* The EEPROM semaphore guards all GSSR read-modify-writes */
	ixgbe_get_eeprom_semaphore(hw);

	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	gssr &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
}

/**
 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
 * @hw: pointer to hardware structure
 *
 * Stops the receive data path and waits for the HW to internally empty
 * the Rx security block
 **/
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL 40

	int i;
	int secrxreg;

	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");

	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	/* Poll until the security block reports itself ready (drained) */
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			usec_delay(1000);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		DEBUGOUT("Rx unit being enabled before security "
			 "path fully disabled. Continuing with init.\n");

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
 * @hw: pointer to hardware structure
 *
 * Enables the receive data path.
 **/
s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
        int secrxreg;

        DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");

        /* Clear the Rx-disable bit and flush so the write posts before
         * the caller continues with Rx bring-up. */
        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
        secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
        IXGBE_WRITE_FLUSH(hw);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit
 **/
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
        DEBUGFUNC("ixgbe_enable_rx_dma_generic");

        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
        ixgbe_link_speed speed = 0;
        bool link_up = 0;
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
        s32 ret_val = IXGBE_SUCCESS;

        DEBUGFUNC("ixgbe_blink_led_start_generic");

        /*
         * Link must be up to auto-blink the LEDs;
         * Force it if link is down.
         */
        hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

        if (!link_up) {
                /* Need the SW/FW semaphore around AUTOC writes if 82599 and
                 * LESM is on.
                 */
                bool got_lock = FALSE;
                if ((hw->mac.type == ixgbe_mac_82599EB) &&
                    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
                        ret_val = hw->mac.ops.acquire_swfw_sync(hw,
                                                        IXGBE_GSSR_MAC_CSR_SM);
                        if (ret_val != IXGBE_SUCCESS) {
                                ret_val = IXGBE_ERR_SWFW_SYNC;
                                goto out;
                        }
                        got_lock = TRUE;
                }

                /* Force link up (FLU) and restart auto-negotiation so the
                 * LED has a link state to blink against. */
                autoc_reg |= IXGBE_AUTOC_AN_RESTART;
                autoc_reg |= IXGBE_AUTOC_FLU;
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
                IXGBE_WRITE_FLUSH(hw);

                if (got_lock)
                        hw->mac.ops.release_swfw_sync(hw,
                                                      IXGBE_GSSR_MAC_CSR_SM);
                msec_delay(10);
        }

        led_reg &= ~IXGBE_LED_MODE_MASK(index);
        led_reg |= IXGBE_LED_BLINK(index);
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);

out:
        return ret_val;
}

/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
        s32 ret_val = IXGBE_SUCCESS;
        bool got_lock = FALSE;

        DEBUGFUNC("ixgbe_blink_led_stop_generic");
        /* Need the SW/FW semaphore around AUTOC writes if 82599 and
         * LESM is on.
         */
        if ((hw->mac.type == ixgbe_mac_82599EB) &&
            ixgbe_verify_lesm_fw_enabled_82599(hw)) {
                ret_val = hw->mac.ops.acquire_swfw_sync(hw,
                                                IXGBE_GSSR_MAC_CSR_SM);
                if (ret_val != IXGBE_SUCCESS) {
                        ret_val = IXGBE_ERR_SWFW_SYNC;
                        goto out;
                }
                got_lock = TRUE;
        }


        /* Drop forced link-up and restart auto-negotiation to return to
         * normal link behavior. */
        autoc_reg &= ~IXGBE_AUTOC_FLU;
        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

        if (hw->mac.type == ixgbe_mac_82599EB)
                ixgbe_reset_pipeline_82599(hw);

        if (got_lock)
                hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

        /* Restore the LED to link-activity mode. */
        led_reg &= ~IXGBE_LED_MODE_MASK(index);
        led_reg &= ~IXGBE_LED_BLINK(index);
        led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);

out:
        return ret_val;
}

/**
 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_offset: SAN MAC address offset
 *
 * This function will read the EEPROM location for the SAN MAC address
 * pointer, and returns the value at that location.  This is used in both
 * get and set mac_addr routines.
 **/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
                                         u16 *san_mac_offset)
{
        DEBUGFUNC("ixgbe_get_san_mac_addr_offset");

        /*
         * First read the EEPROM pointer to see if the MAC addresses are
         * available.
         */
        hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address
 *
 * Reads the SAN MAC address from the EEPROM, if it's available.  This is
 * per-port, so set_lan_id() must be called before reading the addresses.
 * set_lan_id() is called by identify_sfp(), but this cannot be relied
 * upon for non-SFP connections, so we must call it here.
 **/
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
        u16 san_mac_data, san_mac_offset;
        u8 i;

        DEBUGFUNC("ixgbe_get_san_mac_addr_generic");

        /*
         * First read the EEPROM pointer to see if the MAC addresses are
         * available.  If they're not, no point in calling set_lan_id() here.
         */
        ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);

        if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
                /*
                 * No addresses available in this EEPROM.  It's not an
                 * error though, so just wipe the local address and return.
                 */
                for (i = 0; i < 6; i++)
                        san_mac_addr[i] = 0xFF;

                goto san_mac_addr_out;
        }

        /* make sure we know which port we need to program */
        hw->mac.ops.set_lan_id(hw);
        /* apply the port offset to the address offset */
        (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
                         (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
        /* The address is stored as three 16-bit EEPROM words,
         * low byte first. */
        for (i = 0; i < 3; i++) {
                hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
                san_mac_addr[i * 2] = (u8)(san_mac_data);
                san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
                san_mac_offset++;
        }

san_mac_addr_out:
        return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address
 *
 * Write a SAN MAC address to the EEPROM.
 **/
s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
        s32 status = IXGBE_SUCCESS;
        u16 san_mac_data, san_mac_offset;
        u8 i;

        DEBUGFUNC("ixgbe_set_san_mac_addr_generic");

        /* Look for SAN mac address pointer.  If not defined, return */
        ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);

        if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
                status = IXGBE_ERR_NO_SAN_ADDR_PTR;
                goto san_mac_addr_out;
        }

        /* Make sure we know which port we need to write */
        hw->mac.ops.set_lan_id(hw);
        /* Apply the port offset to the address offset */
        (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
                         (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);

        /* Pack byte pairs into 16-bit EEPROM words, low byte first. */
        for (i = 0; i < 3; i++) {
                san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
                san_mac_data |= (u16)(san_mac_addr[i * 2]);
                hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
                san_mac_offset++;
        }

san_mac_addr_out:
        return status;
}

/**
 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
 * @hw: pointer to hardware structure
 *
 * Read PCIe configuration space, and get the MSI-X vector count from
 * the capabilities table.
 **/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
        u16 msix_count = 1;
        u16 max_msix_count;
        u16 pcie_offset;

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
                max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
                max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
                break;
        default:
                /* Unknown MAC type: report the minimum of one vector. */
                return msix_count;
        }

        DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
        msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
        msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

        /* MSI-X count is zero-based in HW */
        msix_count++;

        if (msix_count > max_msix_count)
                msix_count = max_msix_count;

        return msix_count;
}

/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is already in; adds to the pool list
 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
        static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
        u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
        u32 rar;
        u32 rar_low, rar_high;
        u32 addr_low, addr_high;

        DEBUGFUNC("ixgbe_insert_mac_addr_generic");

        /* swap bytes for HW little endian */
        addr_low = addr[0] | (addr[1] << 8)
                           | (addr[2] << 16)
                           | (addr[3] << 24);
        addr_high = addr[4] | (addr[5] << 8);

        /*
         * Either find the mac_id in rar or find the first empty space.
         * rar_highwater points to just after the highest currently used
         * rar in order to shorten the search.  It grows when we add a new
         * rar to the top.
         */
        for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
                rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

                if (((IXGBE_RAH_AV & rar_high) == 0)
                    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
                        first_empty_rar = rar;
                } else if ((rar_high & 0xFFFF) == addr_high) {
                        rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
                        if (rar_low == addr_low)
                                break;  /* found it already in the rars */
                }
        }

        if (rar < hw->mac.rar_highwater) {
                /* already there so just add to the pool bits */
                ixgbe_set_vmdq(hw, rar, vmdq);
        } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
                /* stick it into first empty RAR slot we found */
                rar = first_empty_rar;
                ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
        } else if (rar == hw->mac.rar_highwater) {
                /* add it to the top of the list and inc the highwater mark */
                ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
                hw->mac.rar_highwater++;
        } else if (rar >= hw->mac.num_rar_entries) {
                return IXGBE_ERR_INVALID_MAC_ADDR;
        }

        /*
         * If we found rar[0], make sure the default pool bit (we use pool 0)
         * remains cleared to be sure default pool packets will get delivered
         */
        if (rar == 0)
                ixgbe_clear_vmdq(hw, rar, 0);

        return rar;
}

/**
 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar
 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
        u32 mpsar_lo, mpsar_hi;
        u32 rar_entries = hw->mac.num_rar_entries;

        DEBUGFUNC("ixgbe_clear_vmdq_generic");

        /* Make sure we are using a valid rar index range */
        if (rar >= rar_entries) {
                DEBUGOUT1("RAR index %d is out of range.\n", rar);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        /* Pool bits for a RAR are split across two 32-bit MPSAR halves. */
        mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
        mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

        if (!mpsar_lo && !mpsar_hi)
                goto done;

        if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
                if (mpsar_lo) {
                        IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
                        mpsar_lo = 0;
                }
                if (mpsar_hi) {
                        IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
                        mpsar_hi = 0;
                }
        } else if (vmdq < 32) {
                mpsar_lo &= ~(1 << vmdq);
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
        } else {
                mpsar_hi &= ~(1 << (vmdq - 32));
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
        }

        /* was that the last pool using this rar? */
        if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
                hw->mac.ops.clear_rar(hw, rar);
done:
        return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq pool index
 **/
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
        u32 mpsar;
        u32 rar_entries = hw->mac.num_rar_entries;

        DEBUGFUNC("ixgbe_set_vmdq_generic");

        /* Make sure we are using a valid rar index range */
        if (rar >= rar_entries) {
                DEBUGOUT1("RAR index %d is out of range.\n", rar);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        /* Set the pool bit in the appropriate MPSAR half. */
        if (vmdq < 32) {
                mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
                mpsar |= 1 << vmdq;
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
        } else {
                mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
                mpsar |= 1 << (vmdq - 32);
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
        }
        return IXGBE_SUCCESS;
}

/**
 * This function should only be involved in the IOV mode.
 * In IOV mode, Default pool is next pool after the number of
 * VFs advertized and not 0.
 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
 *
 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @vmdq: VMDq pool index
 **/
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
        u32 rar = hw->mac.san_mac_rar_index;

        DEBUGFUNC("ixgbe_set_vmdq_san_mac");

        /* Replace (not merge) the pool bits: exactly one pool bit is set
         * for the SAN MAC RAR, the other MPSAR half is cleared. */
        if (vmdq < 32) {
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
        int i;

        DEBUGFUNC("ixgbe_init_uta_tables_generic");
        DEBUGOUT(" Clearing UTA\n");

        /* The UTA is 128 32-bit registers; zero them all. */
        for (i = 0; i < 128; i++)
                IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 *
 * return the VLVF index where this VLAN id should be placed
 *
 **/
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
{
        u32 bits = 0;
        u32 first_empty_slot = 0;
        s32 regindex;

        /* short cut the special case */
        if (vlan == 0)
                return 0;

        /*
         * Search for the vlan id in the VLVF entries. Save off the first empty
         * slot found along the way
         */
        /* The scan starts at entry 1 (entry 0 is the VLAN 0 short cut
         * above), so first_empty_slot == 0 means "no empty slot found". */
        for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
                bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
                if (!bits && !(first_empty_slot))
                        first_empty_slot = regindex;
                else if ((bits & 0x0FFF) == vlan)
                        break;
        }

        /*
         * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
         * in the VLVF. Else use the first empty VLVF register for this
         * vlan id.
         */
        if (regindex >= IXGBE_VLVF_ENTRIES) {
                if (first_empty_slot)
                        regindex = first_empty_slot;
                else {
                        DEBUGOUT("No space in VLVF.\n");
                        regindex = IXGBE_ERR_NO_SPACE;
                }
        }

        return regindex;
}

/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                           bool vlan_on)
{
        s32 regindex;
        u32 bitindex;
        u32 vfta;
        u32 targetbit;
        s32 ret_val = IXGBE_SUCCESS;
        bool vfta_changed = FALSE;

        DEBUGFUNC("ixgbe_set_vfta_generic");

        if (vlan > 4095)
                return IXGBE_ERR_PARAM;

        /*
         * this is a 2 part operation - first the VFTA, then the
         * VLVF and VLVFB if VT Mode is set
         * We don't write the VFTA until we know the VLVF part succeeded.
         */

        /* Part 1
         * The VFTA is a bitstring made up of 128 32-bit registers
         * that enable the particular VLAN id, much like the MTA:
         *    bits[11-5]: which register
         *    bits[4-0]:  which bit in the register
         */
        regindex = (vlan >> 5) & 0x7F;
        bitindex = vlan & 0x1F;
        targetbit = (1 << bitindex);
        vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

        if (vlan_on) {
                if (!(vfta & targetbit)) {
                        vfta |= targetbit;
                        vfta_changed = TRUE;
                }
        } else {
                if ((vfta & targetbit)) {
                        vfta &= ~targetbit;
                        vfta_changed = TRUE;
                }
        }

        /* Part 2
         * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
         */
        ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
                                         &vfta_changed);
        if (ret_val != IXGBE_SUCCESS)
                return ret_val;

        /* vfta_changed may have been vetoed by the VLVF step if other
         * pools still use this VLAN id. */
        if (vfta_changed)
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
 * should be changed
 *
 * Turn on/off specified bit in VLVF table.
 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                           bool vlan_on, bool *vfta_changed)
{
        u32 vt;

        DEBUGFUNC("ixgbe_set_vlvf_generic");

        if (vlan > 4095)
                return IXGBE_ERR_PARAM;

        /* If VT Mode is set
         *   Either vlan_on
         *     make sure the vlan is in VLVF
         *     set the vind bit in the matching VLVFB
         *   Or !vlan_on
         *     clear the pool bit and possibly the vind
         */
        vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (vt & IXGBE_VT_CTL_VT_ENABLE) {
                s32 vlvf_index;
                u32 bits;

                vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
                if (vlvf_index < 0)
                        return vlvf_index;

                if (vlan_on) {
                        /* set the pool bit */
                        if (vind < 32) {
                                bits = IXGBE_READ_REG(hw,
                                                IXGBE_VLVFB(vlvf_index * 2));
                                bits |= (1 << vind);
                                IXGBE_WRITE_REG(hw,
                                                IXGBE_VLVFB(vlvf_index * 2),
                                                bits);
                        } else {
                                bits = IXGBE_READ_REG(hw,
                                        IXGBE_VLVFB((vlvf_index * 2) + 1));
                                bits |= (1 << (vind - 32));
                                IXGBE_WRITE_REG(hw,
                                        IXGBE_VLVFB((vlvf_index * 2) + 1),
                                        bits);
                        }
                } else {
                        /* clear the pool bit */
                        /* After clearing, "bits" is OR'd with the other
                         * VLVFB half so it reflects ALL pools still using
                         * this VLAN id, not just this half. */
                        if (vind < 32) {
                                bits = IXGBE_READ_REG(hw,
                                                IXGBE_VLVFB(vlvf_index * 2));
                                bits &= ~(1 << vind);
                                IXGBE_WRITE_REG(hw,
                                                IXGBE_VLVFB(vlvf_index * 2),
                                                bits);
                                bits |= IXGBE_READ_REG(hw,
                                        IXGBE_VLVFB((vlvf_index * 2) + 1));
                        } else {
                                bits = IXGBE_READ_REG(hw,
                                        IXGBE_VLVFB((vlvf_index * 2) + 1));
                                bits &= ~(1 << (vind - 32));
                                IXGBE_WRITE_REG(hw,
                                        IXGBE_VLVFB((vlvf_index * 2) + 1),
                                        bits);
                                bits |= IXGBE_READ_REG(hw,
                                                IXGBE_VLVFB(vlvf_index * 2));
                        }
                }

                /*
                 * If there are still bits set in the VLVFB registers
                 * for the VLAN ID indicated we need to see if the
                 * caller is requesting that we clear the VFTA entry bit.
                 * If the caller has requested that we clear the VFTA
                 * entry bit but there are still pools/VFs using this VLAN
                 * ID entry then ignore the request.  We're not worried
                 * about the case where we're turning the VFTA VLAN ID
                 * entry bit on, only when requested to turn it off as
                 * there may be multiple pools and/or VFs using the
                 * VLAN ID entry.  In that case we cannot clear the
                 * VFTA bit until all pools/VFs using that VLAN ID have also
                 * been cleared.  This will be indicated by "bits" being
                 * zero.
                 */
                if (bits) {
                        IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
                                        (IXGBE_VLVF_VIEN | vlan));
                        if ((!vlan_on) && (vfta_changed != NULL)) {
                                /* someone wants to clear the vfta entry
                                 * but some pools/VFs are still using it.
                                 * Ignore it. */
                                *vfta_changed = FALSE;
                        }
                } else
                        IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vfta_generic - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filer table, and the VMDq index associated with the filter
 **/
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
        u32 offset;

        DEBUGFUNC("ixgbe_clear_vfta_generic");

        for (offset = 0; offset < hw->mac.vft_size; offset++)
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

        /* Clear each VLVF entry along with both of its VLVFB pool halves. */
        for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
                IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: TRUE when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links
register to determine if link is up and the current speed 3873 **/ 3874 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3875 bool *link_up, bool link_up_wait_to_complete) 3876 { 3877 u32 links_reg, links_orig; 3878 u32 i; 3879 3880 DEBUGFUNC("ixgbe_check_mac_link_generic"); 3881 3882 /* clear the old state */ 3883 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 3884 3885 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3886 3887 if (links_orig != links_reg) { 3888 DEBUGOUT2("LINKS changed from %08X to %08X\n", 3889 links_orig, links_reg); 3890 } 3891 3892 if (link_up_wait_to_complete) { 3893 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3894 if (links_reg & IXGBE_LINKS_UP) { 3895 *link_up = TRUE; 3896 break; 3897 } else { 3898 *link_up = FALSE; 3899 } 3900 msec_delay(100); 3901 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3902 } 3903 } else { 3904 if (links_reg & IXGBE_LINKS_UP) 3905 *link_up = TRUE; 3906 else 3907 *link_up = FALSE; 3908 } 3909 3910 if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3911 IXGBE_LINKS_SPEED_10G_82599) 3912 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3913 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3914 IXGBE_LINKS_SPEED_1G_82599) 3915 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3916 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3917 IXGBE_LINKS_SPEED_100_82599) 3918 *speed = IXGBE_LINK_SPEED_100_FULL; 3919 else 3920 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3921 3922 return IXGBE_SUCCESS; 3923 } 3924 3925 /** 3926 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3927 * the EEPROM 3928 * @hw: pointer to hardware structure 3929 * @wwnn_prefix: the alternative WWNN prefix 3930 * @wwpn_prefix: the alternative WWPN prefix 3931 * 3932 * This function will read the EEPROM from the alternative SAN MAC address 3933 * block to check the support for the alternative WWNN/WWPN prefix support. 
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
                                 u16 *wwpn_prefix)
{
        u16 offset, caps;
        u16 alt_san_mac_blk_offset;

        DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

        /* clear output first */
        *wwnn_prefix = 0xFFFF;
        *wwpn_prefix = 0xFFFF;

        /* check if alternative SAN MAC is supported */
        hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
                            &alt_san_mac_blk_offset);

        /* 0 and 0xFFFF both mean "no alternative SAN MAC block". */
        if ((alt_san_mac_blk_offset == 0) ||
            (alt_san_mac_blk_offset == 0xFFFF))
                goto wwn_prefix_out;

        /* check capability in alternative san mac address block */
        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
        hw->eeprom.ops.read(hw, offset, &caps);
        if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
                goto wwn_prefix_out;

        /* get the corresponding prefix for WWNN/WWPN */
        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
        hw->eeprom.ops.read(hw, offset, wwnn_prefix);

        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
        hw->eeprom.ops.read(hw, offset, wwpn_prefix);

wwn_prefix_out:
        return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
 * @hw: pointer to hardware structure
 * @bs: the fcoe boot status
 *
 * This function will read the FCOE boot status from the iSCSI FCOE block
 **/
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
{
        u16 offset, caps, flags;
        s32 status;

        DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");

        /* clear output first */
        *bs = ixgbe_fcoe_bootstatus_unavailable;

        /* check if FCOE IBA block is present */
        offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
        status = hw->eeprom.ops.read(hw, offset, &caps);
        if (status != IXGBE_SUCCESS)
                goto out;

        if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
                goto out;

        /* check if iSCSI FCOE block is populated */
        status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
        if (status != IXGBE_SUCCESS)
                goto out;

        if ((offset == 0) || (offset == 0xFFFF))
                goto out;

        /* read fcoe flags in iSCSI FCOE block */
        offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
        status = hw->eeprom.ops.read(hw, offset, &flags);
        if (status != IXGBE_SUCCESS)
                goto out;

        if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
                *bs = ixgbe_fcoe_bootstatus_enabled;
        else
                *bs = ixgbe_fcoe_bootstatus_disabled;

out:
        return status;
}

/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for anti-spoofing
 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
        int j;
        int pf_target_reg = pf >> 3;
        int pf_target_shift = pf % 8;
        u32 pfvfspoof = 0;

        /* 82598 does not implement the PFVFSPOOF registers. */
        if (hw->mac.type == ixgbe_mac_82598EB)
                return;

        if (enable)
                pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

        /*
         * PFVFSPOOF register array is size 8 with 8 bits assigned to
         * MAC anti-spoof enables in each register array element.
         */
        for (j = 0; j < pf_target_reg; j++)
                IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

        /*
         * The PF should be allowed to spoof so that it can support
         * emulation mode NICs.  Do not set the bits assigned to the PF
         */
        pfvfspoof &= (1 << pf_target_shift) - 1;
        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

        /*
         * Remaining pools belong to the PF so they do not need to have
         * anti-spoofing enabled.
         */
        for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
                IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}

/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
        int vf_target_reg = vf >> 3;
        int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
        u32 pfvfspoof;

        /* 82598 does not implement the PFVFSPOOF registers. */
        if (hw->mac.type == ixgbe_mac_82598EB)
                return;

        pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
        if (enable)
                pfvfspoof |= (1 << vf_target_shift);
        else
                pfvfspoof &= ~(1 << vf_target_shift);
        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
4094 **/ 4095 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 4096 { 4097 DEBUGFUNC("ixgbe_get_device_caps_generic"); 4098 4099 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 4100 4101 return IXGBE_SUCCESS; 4102 } 4103 4104 /** 4105 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering 4106 * @hw: pointer to hardware structure 4107 * 4108 **/ 4109 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw) 4110 { 4111 u32 regval; 4112 u32 i; 4113 4114 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2"); 4115 4116 /* Enable relaxed ordering */ 4117 for (i = 0; i < hw->mac.max_tx_queues; i++) { 4118 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 4119 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; 4120 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 4121 } 4122 4123 for (i = 0; i < hw->mac.max_rx_queues; i++) { 4124 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 4125 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | 4126 IXGBE_DCA_RXCTRL_HEAD_WRO_EN; 4127 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 4128 } 4129 4130 } 4131 4132 /** 4133 * ixgbe_calculate_checksum - Calculate checksum for buffer 4134 * @buffer: pointer to EEPROM 4135 * @length: size of EEPROM to calculate a checksum for 4136 * Calculates the checksum for some buffer on a specified length. The 4137 * checksum calculated is returned. 
 **/
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
        u32 i;
        u8 sum = 0;

        DEBUGFUNC("ixgbe_calculate_checksum");

        if (!buffer)
                return 0;

        /* Two's-complement checksum: the returned value plus the byte sum
         * of the buffer is zero modulo 256. */
        for (i = 0; i < length; i++)
                sum += buffer[i];

        return (u8) (0 - sum);
}

/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 *
 * Communicates with the manageability block.  On success return IXGBE_SUCCESS
 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
                                 u32 length)
{
        u32 hicr, i, bi;
        u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
        u8 buf_len, dword_len;

        s32 ret_val = IXGBE_SUCCESS;

        DEBUGFUNC("ixgbe_host_interface_command");

        /* Length must be non-zero, dword-aligned and within the FW limit. */
        if (length == 0 || length & 0x3 ||
            length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
                DEBUGOUT("Buffer length failure.\n");
                ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
                goto out;
        }

        /* Check that the host interface is enabled. */
        hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
        if ((hicr & IXGBE_HICR_EN) == 0) {
                DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
                ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
                goto out;
        }

        /* Calculate length in DWORDs */
        dword_len = length >> 2;

        /*
         * The device driver writes the relevant command block
         * into the ram area.
         */
        for (i = 0; i < dword_len; i++)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
                                      i, IXGBE_CPU_TO_LE32(buffer[i]));

        /* Setting this bit tells the ARC that a new command is pending. */
        IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

        /* Wait up to IXGBE_HI_COMMAND_TIMEOUT ms for the firmware to clear
         * the command-pending bit. */
        for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
                hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
                if (!(hicr & IXGBE_HICR_C))
                        break;
                msec_delay(1);
        }

        /* Check command successful completion. */
        if (i == IXGBE_HI_COMMAND_TIMEOUT ||
            (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
                DEBUGOUT("Command has failed with no status valid.\n");
                ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
                goto out;
        }

        /* Calculate length in DWORDs */
        dword_len = hdr_size >> 2;

        /* first pull in the header so we know the buffer length */
        for (bi = 0; bi < dword_len; bi++) {
                buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
                IXGBE_LE32_TO_CPUS(&buffer[bi]);
        }

        /* If there is any thing in data position pull it in */
        buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
        if (buf_len == 0)
                goto out;

        if (length < (buf_len + hdr_size)) {
                DEBUGOUT("Buffer not large enough for reply message.\n");
                ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
                goto out;
        }

        /* Calculate length in DWORDs, add 3 for odd lengths */
        dword_len = (buf_len + 3) >> 2;

        /* Pull in the rest of the buffer (bi is where we left off)*/
        for (; bi <= dword_len; bi++) {
                buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
                IXGBE_LE32_TO_CPUS(&buffer[bi]);
        }

out:
        return ret_val;
}

/**
 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @min: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 *
 * Sends driver version number to firmware through the manageability
 * block.
On success return IXGBE_SUCCESS 4263 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring 4264 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 4265 **/ 4266 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, 4267 u8 build, u8 sub) 4268 { 4269 struct ixgbe_hic_drv_info fw_cmd; 4270 int i; 4271 s32 ret_val = IXGBE_SUCCESS; 4272 4273 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); 4274 4275 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) 4276 != IXGBE_SUCCESS) { 4277 ret_val = IXGBE_ERR_SWFW_SYNC; 4278 goto out; 4279 } 4280 4281 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; 4282 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; 4283 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; 4284 fw_cmd.port_num = (u8)hw->bus.func; 4285 fw_cmd.ver_maj = maj; 4286 fw_cmd.ver_min = min; 4287 fw_cmd.ver_build = build; 4288 fw_cmd.ver_sub = sub; 4289 fw_cmd.hdr.checksum = 0; 4290 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, 4291 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); 4292 fw_cmd.pad = 0; 4293 fw_cmd.pad2 = 0; 4294 4295 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { 4296 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, 4297 sizeof(fw_cmd)); 4298 if (ret_val != IXGBE_SUCCESS) 4299 continue; 4300 4301 if (fw_cmd.hdr.cmd_or_resp.ret_status == 4302 FW_CEM_RESP_STATUS_SUCCESS) 4303 ret_val = IXGBE_SUCCESS; 4304 else 4305 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 4306 4307 break; 4308 } 4309 4310 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); 4311 out: 4312 return ret_val; 4313 } 4314 4315 /** 4316 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer 4317 * @hw: pointer to hardware structure 4318 * @num_pb: number of packet buffers to allocate 4319 * @headroom: reserve n KB of headroom 4320 * @strategy: packet buffer allocation strategy 4321 **/ 4322 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, 4323 int strategy) 4324 { 4325 u32 pbsize = 
hw->mac.rx_pb_size; 4326 int i = 0; 4327 u32 rxpktsize, txpktsize, txpbthresh; 4328 4329 /* Reserve headroom */ 4330 pbsize -= headroom; 4331 4332 if (!num_pb) 4333 num_pb = 1; 4334 4335 /* Divide remaining packet buffer space amongst the number of packet 4336 * buffers requested using supplied strategy. 4337 */ 4338 switch (strategy) { 4339 case PBA_STRATEGY_WEIGHTED: 4340 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet 4341 * buffer with 5/8 of the packet buffer space. 4342 */ 4343 rxpktsize = (pbsize * 5) / (num_pb * 4); 4344 pbsize -= rxpktsize * (num_pb / 2); 4345 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; 4346 for (; i < (num_pb / 2); i++) 4347 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 4348 /* Fall through to configure remaining packet buffers */ 4349 case PBA_STRATEGY_EQUAL: 4350 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; 4351 for (; i < num_pb; i++) 4352 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 4353 break; 4354 default: 4355 break; 4356 } 4357 4358 /* Only support an equally distributed Tx packet buffer strategy. */ 4359 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; 4360 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; 4361 for (i = 0; i < num_pb; i++) { 4362 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); 4363 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); 4364 } 4365 4366 /* Clear unused TCs, if any, to zero buffer size*/ 4367 for (; i < IXGBE_MAX_PB; i++) { 4368 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 4369 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); 4370 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); 4371 } 4372 } 4373 4374 /** 4375 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo 4376 * @hw: pointer to the hardware structure 4377 * 4378 * The 82599 and x540 MACs can experience issues if TX work is still pending 4379 * when a reset occurs. This function prevents this by flushing the PCIe 4380 * buffers on the system. 
4381 **/ 4382 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) 4383 { 4384 u32 gcr_ext, hlreg0; 4385 4386 /* 4387 * If double reset is not requested then all transactions should 4388 * already be clear and as such there is no work to do 4389 */ 4390 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) 4391 return; 4392 4393 /* 4394 * Set loopback enable to prevent any transmits from being sent 4395 * should the link come up. This assumes that the RXCTRL.RXEN bit 4396 * has already been cleared. 4397 */ 4398 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 4399 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); 4400 4401 /* initiate cleaning flow for buffers in the PCIe transaction layer */ 4402 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 4403 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 4404 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); 4405 4406 /* Flush all writes and allow 20usec for all transactions to clear */ 4407 IXGBE_WRITE_FLUSH(hw); 4408 usec_delay(20); 4409 4410 /* restore previous register values */ 4411 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 4412 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 4413 } 4414 4415