/******************************************************************************

  Copyright (c) 2001-2010, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#include "ixgbe_api.h"

static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);

static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset);
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);

s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);

/**
 *  ixgbe_init_ops_generic - Inits function ptrs
 *  @hw: pointer to the hardware structure
 *
 *  Initialize the function pointers.
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & (1 << 8))
		eeprom->ops.read = &ixgbe_read_eerd_generic;
	else
		eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
	eeprom->ops.write = &ixgbe_write_eeprom_generic;
	eeprom->ops.validate_checksum =
				&ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = &ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;
	mac->ops.start_hw = &ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;

	/* LEDs */
	mac->ops.led_on = &ixgbe_led_on_generic;
	mac->ops.led_off = &ixgbe_led_off_generic;
	mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = &ixgbe_set_rar_generic;
	mac->ops.clear_rar = &ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;
	mac->ops.set_vmdq = NULL;
	mac->ops.clear_vmdq = NULL;
	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;
	mac->ops.set_vfta = NULL;
	mac->ops.init_uta_tables = NULL;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_generic;

	/* Link */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware by filling the bus info structure and media type, clears
 *  all on chip counters, initializes receive address registers, multicast
 *  table, VLAN filter table, calls routine to set up link and flow control
 *  settings, and leaves transmit and receive units disabled and uninitialized
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	u32 ctrl_ext;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control */
	ixgbe_setup_fc(hw, 0);

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_start_hw_gen2 - Init sequence for common device family
 *  @hw: pointer to hw structure
 *
 *  Performs the init sequence common to the second generation
 *  of 10 GbE devices.
 *  Devices in the second generation:
 *      82599
 *      X540
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	/* Clear the rate limiters */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_init_hw_generic - Generic hardware initialization
 *  @hw: pointer to hardware structure
 *
 *  Initialize the hardware by resetting the hardware, filling the bus info
 *  structure and media type, clears all on chip counters, initializes receive
 *  address registers, multicast table, VLAN filter table, calls routine to set
 *  up link and flow control settings, and leaves transmit and receive units
 *  disabled and uninitialized
 **/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
	s32 status;

	DEBUGFUNC("ixgbe_init_hw_generic");

	/* Reset the hardware */
	status = hw->mac.ops.reset_hw(hw);

	if (status == IXGBE_SUCCESS) {
		/* Start the HW */
		status = hw->mac.ops.start_hw(hw);
	}

	return status;
}

/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: stores the part number string from the EEPROM
 *  @pba_num_size: part number string buffer length
 *
 *  Reads the part number string from the EEPROM.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_pba_length_generic - Reads part number length from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num_size: part number string buffer length
 *
 *  Reads the part number length from the EEPROM.
 *  Returns expected buffer size in pba_num_size
 **/
s32 ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, u32 *pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_length_generic");

	if (pba_num_size == NULL) {
		DEBUGOUT("PBA buffer size was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/* if data is not ptr guard the PBA must be in legacy format */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		*pba_num_size = 11;
		return IXGBE_SUCCESS;
	}

	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/*
	 * Convert from length in u16 values to u8 chars, add 1 for NULL,
	 * and subtract 2 because length field is included in length.
	 */
	*pba_num_size = ((u32)length * 2) - 1;

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: stores the part number from the EEPROM
 *
 *  Reads the part number from the EEPROM.
 **/
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
{
	s32 ret_val;
	u16 data;

	DEBUGFUNC("ixgbe_read_pba_num_generic");

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM Not supported\n");
		return IXGBE_NOT_IMPLEMENTED;
	}
	*pba_num = (u32)(data << 16);

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}
	*pba_num |= data;

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_get_mac_addr_generic - Generic get MAC address
 *  @hw: pointer to hardware structure
 *  @mac_addr: Adapter MAC address
 *
 *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
 *  A reset of the adapter must be performed prior to calling this function
 *  in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	DEBUGFUNC("ixgbe_get_mac_addr_generic");

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	for (i = 0; i < 4; i++)
		mac_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < 2; i++)
		mac_addr[i+4] = (u8)(rar_high >> (i*8));

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_get_bus_info_generic - Generic set PCI bus info
 *  @hw: pointer to hardware structure
 *
 *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u16 link_status;

	DEBUGFUNC("ixgbe_get_bus_info_generic");

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space */
	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);

	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 *  @hw: pointer to the HW structure
 *
 *  Determines the LAN function id by reading memory-mapped registers
 *  and swaps the port value if requested.
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");

	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;
}

/**
 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_val &= ~(IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
		}
	}

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	ixgbe_disable_pcie_master(hw);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
 *  @hw: pointer to hardware structure
 *  @index: led number to turn on
 **/
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_led_on_generic");

	/* To turn on the LED, set mode to ON. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
 *  @hw: pointer to hardware structure
 *  @index: led number to turn off
 **/
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_led_off_generic");

	/* To turn off the LED, set mode to OFF. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 *  @hw: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
 *  ixgbe_hw struct in order to set up EEPROM access.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
					    IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT);
		}

		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @data: 16 bit word to be written to the EEPROM
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		ixgbe_standby_eeprom(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);

		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
					    hw->eeprom.address_bits);

		/* Send the data */
		data = (data >> 8) | (data << 8);
		ixgbe_shift_out_eeprom_bits(hw, data, 16);
		ixgbe_standby_eeprom(hw);

		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}

/**
 *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @data: read 16 bit value from EEPROM
 *
 *  Reads 16 bit value from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;

	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the READ command (opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
					    hw->eeprom.address_bits);

		/* Read the data. */
		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
		*data = (word_in >> 8) | (word_in << 8);

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}

/**
 *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	u32 eerd;
	s32 status;

	DEBUGFUNC("ixgbe_read_eerd_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
	       IXGBE_EEPROM_RW_REG_START;

	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

	if (status == IXGBE_SUCCESS)
		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
			 IXGBE_EEPROM_RW_REG_DATA);
	else
		DEBUGOUT("Eeprom read timed out\n");

out:
	return status;
}

/**
 *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to write
 *  @data: word to write to the EEPROM
 *
 *  Write a 16 bit word to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	u32 eewr;
	s32 status;

	DEBUGFUNC("ixgbe_write_eewr_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
	       (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;

	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
	if (status != IXGBE_SUCCESS) {
		DEBUGOUT("Eeprom write EEWR timed out\n");
		goto out;
	}

	IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
	if (status != IXGBE_SUCCESS) {
		DEBUGOUT("Eeprom write EEWR timed out\n");
		goto out;
	}

out:
	return status;
}

/**
 *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
 *  @hw: pointer to hardware structure
 *  @ee_reg: EEPROM flag for polling
 *
 *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
 *  read or write is done respectively.
 **/
s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 i;
	u32 reg;
	s32 status = IXGBE_ERR_EEPROM;

	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");

	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(5);
	}
	return status;
}

/**
 *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *
 *  Prepares EEPROM for access using bit-bang method. This function should
 *  be called before issuing a command to the EEPROM.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}

/**
 *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			DEBUGOUT("SWESMBI Software EEPROM semaphore "
				 "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		DEBUGOUT("Software semaphore SMBI between device drivers "
			 "not granted.\n");
	}

	return status;
}

/**
 *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  This function clears hardware semaphore bits.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("ixgbe_release_eeprom_semaphore");

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 *  ixgbe_ready_eeprom - Polls for EEPROM ready
 *  @hw: pointer to hardware structure
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 i;
	u8 spi_stat_reg;

	DEBUGFUNC("ixgbe_ready_eeprom");

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		usec_delay(5);
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		DEBUGOUT("SPI EEPROM Status error\n");
		status = IXGBE_ERR_EEPROM;
	}

	return status;
}

/**
 *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 *  @hw: pointer to hardware structure
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_standby_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 *  @hw: pointer to hardware structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
		IXGBE_WRITE_FLUSH(hw);

		usec_delay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to hardware structure
 *  @count: number of bits to shift in
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM.  Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}

/**
 *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 *  @hw: pointer to hardware structure
 *  @eec: EEC register's current value
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_raise_eeprom_clk");

	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 *  @hw: pointer to hardware structure
 *  @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 *  ixgbe_release_eeprom - Release EEPROM, release semaphores
 *  @hw: pointer to hardware structure
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}

/**
 *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 *  @hw: pointer to hardware structure
 **/
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
			DEBUGOUT("EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid */
		if (pointer != 0xFFFF && pointer != 0) {
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}

/**
 *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 *  @hw: pointer to hardware structure
 *  @checksum_val: calculated checksum
 *
 *  Performs checksum calculation and validates the EEPROM checksum.  If the
 *  caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

	/*
	 * Read the first word from the EEPROM.
	 * If this times out or fails, do not continue or we could be in for
	 * a very long wait while every EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == IXGBE_SUCCESS) {
		checksum = hw->eeprom.ops.calc_checksum(hw);

		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);

		/*
		 * Verify read checksum from EEPROM is the same as
		 * calculated checksum
		 */
		if (read_checksum != checksum)
			status = IXGBE_ERR_EEPROM_CHECKSUM;

		/* If the user cares, return the calculated checksum */
		if (checksum_val)
			*checksum_val = checksum;
	} else {
		DEBUGOUT("EEPROM read failed\n");
	}

	return status;
}

/**
 *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 *  @hw: pointer to hardware structure
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");

	/*
	 * Read the first word from the EEPROM.  If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == IXGBE_SUCCESS) {
		checksum = hw->eeprom.ops.calc_checksum(hw);
		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
					      checksum);
	} else {
		DEBUGOUT("EEPROM read failed\n");
	}

	return status;
}

/**
 *  ixgbe_validate_mac_addr - Validate MAC address
 *  @mac_addr: pointer to MAC address.
 *
 *  Tests a MAC address to ensure it is a valid Individual Address
 **/
s32 ixgbe_validate_mac_addr(u8 *mac_addr)
{
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_validate_mac_addr");

	/* Make sure it is not a multicast address */
	if (IXGBE_IS_MULTICAST(mac_addr)) {
		DEBUGOUT("MAC address is multicast\n");
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	/* Not a broadcast address */
	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
		DEBUGOUT("MAC address is broadcast\n");
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	/* Reject the zero address */
	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
		DEBUGOUT("MAC address is all zeros\n");
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	}
	return status;
}

/**
 *  ixgbe_set_rar_generic - Set Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: VMDq "set" or "pool" index
 *  @enable_addr: set flag that address is active
 *
 *  Puts an ethernet address into a receive address register.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_rar_generic - Remove Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *
 *  Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 *  @hw: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_add_uc_addr - Adds a secondary unicast address.
 *  @hw: pointer to hardware structure
 *  @addr: new address
 *  @vmdq: VMDq "set" or "pool" index
 *
 *  Adds it to unused receive address register or goes into promiscuous mode.
 **/
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 rar;

	DEBUGFUNC("ixgbe_add_uc_addr");

	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Place this address in the RAR if there is room,
	 * else put the controller into promiscuous mode
	 */
	if (hw->addr_ctrl.rar_used_count < rar_entries) {
		rar = hw->addr_ctrl.rar_used_count;
		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
		hw->addr_ctrl.rar_used_count++;
	} else {
		hw->addr_ctrl.overflow_promisc++;
	}

	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
}

/**
 *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 *  @hw: pointer to hardware structure
 *  @addr_list: the list of new addresses
 *  @addr_count: number of addresses
 *  @next: iterator function to walk the address list
 *
 *  The given list replaces any existing list. Clears the secondary addrs from
 *  receive address registers. Uses unused receive address registers for the
 *  first secondary addresses, and falls back to promiscuous mode as needed.
 *
 *  Drivers using secondary unicast addresses must set user_set_promisc when
 *  manually putting the device into promiscuous mode.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
 *  @hw: pointer to hardware structure
 *  @mc_addr: the multicast address
 *
 *  Extracts the 12 bits, from a multicast address, to determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 *  incoming rx multicast addresses, to determine the bit-vector to check in
 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 *  to mc_filter_type.
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	DEBUGFUNC("ixgbe_mta_vector");

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		DEBUGOUT("MC filter type param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 *  ixgbe_set_mta - Set bit-vector in multicast table
 *  @hw: pointer to hardware structure
 *  @mc_addr: multicast address for which to set the bit-vector
 *
 *  Sets the bit-vector in the multicast table.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits.  We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register is determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 *  @hw: pointer to hardware structure
 *  @mc_addr_list: the list of new multicast addresses
 *  @mc_addr_count: number of addresses
 *  @next: iterator function to walk the multicast address list
 *
 *  The given list replaces any existing list. Clears the MC addrs from receive
 *  address registers and the multicast table. Uses unused receive address
 *  registers for the first multicast addresses, and hashes the rest into the
 *  multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
1923 */ 1924 hw->addr_ctrl.num_mc_addrs = mc_addr_count; 1925 hw->addr_ctrl.mta_in_use = 0; 1926 1927 /* Clear mta_shadow */ 1928 DEBUGOUT(" Clearing MTA\n"); 1929 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); 1930 1931 /* Update mta_shadow */ 1932 for (i = 0; i < mc_addr_count; i++) { 1933 DEBUGOUT(" Adding the multicast addresses:\n"); 1934 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); 1935 } 1936 1937 /* Enable mta */ 1938 for (i = 0; i < hw->mac.mcft_size; i++) 1939 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, 1940 hw->mac.mta_shadow[i]); 1941 1942 if (hw->addr_ctrl.mta_in_use > 0) 1943 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1944 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1945 1946 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); 1947 return IXGBE_SUCCESS; 1948 } 1949 1950 /** 1951 * ixgbe_enable_mc_generic - Enable multicast address in RAR 1952 * @hw: pointer to hardware structure 1953 * 1954 * Enables multicast address in RAR and the use of the multicast hash table. 1955 **/ 1956 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 1957 { 1958 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1959 1960 DEBUGFUNC("ixgbe_enable_mc_generic"); 1961 1962 if (a->mta_in_use > 0) 1963 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 1964 hw->mac.mc_filter_type); 1965 1966 return IXGBE_SUCCESS; 1967 } 1968 1969 /** 1970 * ixgbe_disable_mc_generic - Disable multicast address in RAR 1971 * @hw: pointer to hardware structure 1972 * 1973 * Disables multicast address in RAR and the use of the multicast hash table. 1974 **/ 1975 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 1976 { 1977 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1978 1979 DEBUGFUNC("ixgbe_disable_mc_generic"); 1980 1981 if (a->mta_in_use > 0) 1982 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1983 1984 return IXGBE_SUCCESS; 1985 } 1986 1987 /** 1988 * ixgbe_fc_enable_generic - Enable flow control 1989 * @hw: pointer to hardware structure 1990 * @packetbuf_num: packet buffer number (0-7) 1991 * 1992 * Enable flow control according to the current settings. 1993 **/ 1994 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) 1995 { 1996 s32 ret_val = IXGBE_SUCCESS; 1997 u32 mflcn_reg, fccfg_reg; 1998 u32 reg; 1999 u32 rx_pba_size; 2000 u32 fcrtl, fcrth; 2001 2002 DEBUGFUNC("ixgbe_fc_enable_generic"); 2003 2004 /* Negotiate the fc mode to use */ 2005 ret_val = ixgbe_fc_autoneg(hw); 2006 if (ret_val == IXGBE_ERR_FLOW_CONTROL) 2007 goto out; 2008 2009 /* Disable any previous flow control settings */ 2010 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2011 mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); 2012 2013 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2014 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2015 2016 /* 2017 * The possible values of fc.current_mode are: 2018 * 0: Flow control is completely disabled 2019 * 1: Rx flow control is enabled (we can receive pause frames, 2020 * but not send pause frames). 2021 * 2: Tx flow control is enabled (we can send pause frames but 2022 * we do not support receiving pause frames). 2023 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2024 * other: Invalid. 2025 */ 2026 switch (hw->fc.current_mode) { 2027 case ixgbe_fc_none: 2028 /* 2029 * Flow control is disabled by software override or autoneg. 2030 * The code below will actually disable it in the HW. 
2031 */ 2032 break; 2033 case ixgbe_fc_rx_pause: 2034 /* 2035 * Rx Flow control is enabled and Tx Flow control is 2036 * disabled by software override. Since there really 2037 * isn't a way to advertise that we are capable of RX 2038 * Pause ONLY, we will advertise that we support both 2039 * symmetric and asymmetric Rx PAUSE. Later, we will 2040 * disable the adapter's ability to send PAUSE frames. 2041 */ 2042 mflcn_reg |= IXGBE_MFLCN_RFCE; 2043 break; 2044 case ixgbe_fc_tx_pause: 2045 /* 2046 * Tx Flow control is enabled, and Rx Flow control is 2047 * disabled by software override. 2048 */ 2049 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2050 break; 2051 case ixgbe_fc_full: 2052 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2053 mflcn_reg |= IXGBE_MFLCN_RFCE; 2054 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2055 break; 2056 default: 2057 DEBUGOUT("Flow control param set incorrectly\n"); 2058 ret_val = IXGBE_ERR_CONFIG; 2059 goto out; 2060 break; 2061 } 2062 2063 /* Set 802.3x based flow control settings. */ 2064 mflcn_reg |= IXGBE_MFLCN_DPF; 2065 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2066 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2067 2068 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); 2069 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; 2070 2071 fcrth = (rx_pba_size - hw->fc.high_water) << 10; 2072 fcrtl = (rx_pba_size - hw->fc.low_water) << 10; 2073 2074 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 2075 fcrth |= IXGBE_FCRTH_FCEN; 2076 if (hw->fc.send_xon) 2077 fcrtl |= IXGBE_FCRTL_XONE; 2078 } 2079 2080 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); 2081 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); 2082 2083 /* Configure pause time (2 TCs per register) */ 2084 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 2085 if ((packetbuf_num & 1) == 0) 2086 reg = (reg & 0xFFFF0000) | hw->fc.pause_time; 2087 else 2088 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); 2089 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); 2090 2091 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 2092 2093 out: 2094 return ret_val; 2095 } 2096 2097 /** 2098 * ixgbe_fc_autoneg - Configure flow control 2099 * @hw: pointer to hardware structure 2100 * 2101 * Compares our advertised flow control capabilities to those advertised by 2102 * our link partner, and determines the proper flow control mode to use. 2103 **/ 2104 s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2105 { 2106 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2107 ixgbe_link_speed speed; 2108 bool link_up; 2109 2110 DEBUGFUNC("ixgbe_fc_autoneg"); 2111 2112 if (hw->fc.disable_fc_autoneg) 2113 goto out; 2114 2115 /* 2116 * AN should have completed when the cable was plugged in. 2117 * Look for reasons to bail out. Bail out if: 2118 * - FC autoneg is disabled, or if 2119 * - link is not up. 2120 * 2121 * Since we're being called from an LSC, link is already known to be up. 2122 * So use link_up_wait_to_complete=FALSE. 
 */
        hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
        if (!link_up) {
                ret_val = IXGBE_ERR_FLOW_CONTROL;
                goto out;
        }

        switch (hw->phy.media_type) {
        /* Autoneg flow control on fiber adapters */
        case ixgbe_media_type_fiber:
                if (speed == IXGBE_LINK_SPEED_1GB_FULL)
                        ret_val = ixgbe_fc_autoneg_fiber(hw);
                break;

        /* Autoneg flow control on backplane adapters */
        case ixgbe_media_type_backplane:
                ret_val = ixgbe_fc_autoneg_backplane(hw);
                break;

        /* Autoneg flow control on copper adapters */
        case ixgbe_media_type_copper:
                if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
                        ret_val = ixgbe_fc_autoneg_copper(hw);
                break;

        default:
                break;
        }

out:
        if (ret_val == IXGBE_SUCCESS) {
                hw->fc.fc_was_autonegged = TRUE;
        } else {
                hw->fc.fc_was_autonegged = FALSE;
                hw->fc.current_mode = hw->fc.requested_mode;
        }
        return ret_val;
}

/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control on 1 gig fiber.
 **/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
        u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
        s32 ret_val;

        /*
         * On multispeed fiber at 1g, bail out if
         * - link is up but AN did not complete, or if
         * - link is up and AN completed but timed out
         */

        linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
        if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
            (linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT)) {
                ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
                goto out;
        }

        pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
        pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

        ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
                                     pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
                                     IXGBE_PCS1GANA_ASM_PAUSE,
                                     IXGBE_PCS1GANA_SYM_PAUSE,
                                     IXGBE_PCS1GANA_ASM_PAUSE);

out:
        return ret_val;
}

/**
 * ixgbe_fc_autoneg_backplane - Enable flow control via IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
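 * (On 82599 parts, negotiation is only attempted if the link partner also
 * has autoneg enabled, as checked via the LINKS2 register below.)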
2206 **/ 2207 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2208 { 2209 u32 links2, anlp1_reg, autoc_reg, links; 2210 s32 ret_val; 2211 2212 /* 2213 * On backplane, bail out if 2214 * - backplane autoneg was not completed, or if 2215 * - we are 82599 and link partner is not AN enabled 2216 */ 2217 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2218 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { 2219 hw->fc.fc_was_autonegged = FALSE; 2220 hw->fc.current_mode = hw->fc.requested_mode; 2221 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2222 goto out; 2223 } 2224 2225 if (hw->mac.type == ixgbe_mac_82599EB) { 2226 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2227 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { 2228 hw->fc.fc_was_autonegged = FALSE; 2229 hw->fc.current_mode = hw->fc.requested_mode; 2230 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2231 goto out; 2232 } 2233 } 2234 /* 2235 * Read the 10g AN autoc and LP ability registers and resolve 2236 * local flow control settings accordingly 2237 */ 2238 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2239 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2240 2241 ret_val = ixgbe_negotiate_fc(hw, autoc_reg, 2242 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, 2243 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); 2244 2245 out: 2246 return ret_val; 2247 } 2248 2249 /** 2250 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 2251 * @hw: pointer to hardware structure 2252 * 2253 * Enable flow control according to IEEE clause 37. 2254 **/ 2255 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) 2256 { 2257 u16 technology_ability_reg = 0; 2258 u16 lp_technology_ability_reg = 0; 2259 2260 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, 2261 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 2262 &technology_ability_reg); 2263 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, 2264 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 2265 &lp_technology_ability_reg); 2266 2267 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, 2268 (u32)lp_technology_ability_reg, 2269 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, 2270 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); 2271 } 2272 2273 /** 2274 * ixgbe_negotiate_fc - Negotiate flow control 2275 * @hw: pointer to hardware structure 2276 * @adv_reg: flow control advertised settings 2277 * @lp_reg: link partner's flow control settings 2278 * @adv_sym: symmetric pause bit in advertisement 2279 * @adv_asm: asymmetric pause bit in advertisement 2280 * @lp_sym: symmetric pause bit in link partner advertisement 2281 * @lp_asm: asymmetric pause bit in link partner advertisement 2282 * 2283 * Find the intersection between advertised settings and link partner's 2284 * advertised settings 2285 **/ 2286 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 2287 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) 2288 { 2289 if ((!(adv_reg)) || (!(lp_reg))) 2290 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2291 2292 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { 2293 /* 2294 * Now we need to check if the user selected Rx ONLY 2295 * of pause frames. In this case, we had to advertise 2296 * FULL flow control because we could not advertise RX 2297 * ONLY. Hence, we must now check to see if we need to 2298 * turn OFF the TRANSMISSION of PAUSE frames. 
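                 *
                 * As a rough summary, the if/else chain below resolves the
                 * negotiated mode like this (local = adv_reg, partner = lp_reg):
                 *   local sym & partner sym               -> full, or rx_pause
                 *                                            if full was not
                 *                                            requested
                 *   local asym only & partner sym + asym  -> tx_pause
                 *   local sym + asym & partner asym only  -> rx_pause
                 *   anything else                         -> none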
2299 */ 2300 if (hw->fc.requested_mode == ixgbe_fc_full) { 2301 hw->fc.current_mode = ixgbe_fc_full; 2302 DEBUGOUT("Flow Control = FULL.\n"); 2303 } else { 2304 hw->fc.current_mode = ixgbe_fc_rx_pause; 2305 DEBUGOUT("Flow Control=RX PAUSE frames only\n"); 2306 } 2307 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && 2308 (lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2309 hw->fc.current_mode = ixgbe_fc_tx_pause; 2310 DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); 2311 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && 2312 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2313 hw->fc.current_mode = ixgbe_fc_rx_pause; 2314 DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); 2315 } else { 2316 hw->fc.current_mode = ixgbe_fc_none; 2317 DEBUGOUT("Flow Control = NONE.\n"); 2318 } 2319 return IXGBE_SUCCESS; 2320 } 2321 2322 /** 2323 * ixgbe_setup_fc - Set up flow control 2324 * @hw: pointer to hardware structure 2325 * 2326 * Called at init time to set up flow control. 2327 **/ 2328 s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 2329 { 2330 s32 ret_val = IXGBE_SUCCESS; 2331 u32 reg = 0, reg_bp = 0; 2332 u16 reg_cu = 0; 2333 2334 DEBUGFUNC("ixgbe_setup_fc"); 2335 2336 /* Validate the packetbuf configuration */ 2337 if (packetbuf_num < 0 || packetbuf_num > 7) { 2338 DEBUGOUT1("Invalid packet buffer number [%d], expected range is" 2339 " 0-7\n", packetbuf_num); 2340 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2341 goto out; 2342 } 2343 2344 /* 2345 * Validate the water mark configuration. Zero water marks are invalid 2346 * because it causes the controller to just blast out fc packets. 2347 */ 2348 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { 2349 DEBUGOUT("Invalid water mark configuration\n"); 2350 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2351 goto out; 2352 } 2353 2354 /* 2355 * Validate the requested mode. Strict IEEE mode does not allow 2356 * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 2357 */ 2358 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { 2359 DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); 2360 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2361 goto out; 2362 } 2363 2364 /* 2365 * 10gig parts do not have a word in the EEPROM to determine the 2366 * default flow control setting, so we explicitly set it to full. 2367 */ 2368 if (hw->fc.requested_mode == ixgbe_fc_default) 2369 hw->fc.requested_mode = ixgbe_fc_full; 2370 2371 /* 2372 * Set up the 1G and 10G flow control advertisement registers so the 2373 * HW will be able to do fc autoneg once the cable is plugged in. If 2374 * we link at 10G, the 1G advertisement is harmless and vice versa. 2375 */ 2376 2377 switch (hw->phy.media_type) { 2378 case ixgbe_media_type_fiber: 2379 case ixgbe_media_type_backplane: 2380 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2381 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2382 break; 2383 2384 case ixgbe_media_type_copper: 2385 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, 2386 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); 2387 break; 2388 2389 default: 2390 ; 2391 } 2392 2393 /* 2394 * The possible values of fc.requested_mode are: 2395 * 0: Flow control is completely disabled 2396 * 1: Rx flow control is enabled (we can receive pause frames, 2397 * but not send pause frames). 2398 * 2: Tx flow control is enabled (we can send pause frames but 2399 * we do not support receiving pause frames). 2400 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2401 * other: Invalid. 
2402 */ 2403 switch (hw->fc.requested_mode) { 2404 case ixgbe_fc_none: 2405 /* Flow control completely disabled by software override. */ 2406 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2407 if (hw->phy.media_type == ixgbe_media_type_backplane) 2408 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | 2409 IXGBE_AUTOC_ASM_PAUSE); 2410 else if (hw->phy.media_type == ixgbe_media_type_copper) 2411 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 2412 break; 2413 case ixgbe_fc_rx_pause: 2414 /* 2415 * Rx Flow control is enabled and Tx Flow control is 2416 * disabled by software override. Since there really 2417 * isn't a way to advertise that we are capable of RX 2418 * Pause ONLY, we will advertise that we support both 2419 * symmetric and asymmetric Rx PAUSE. Later, we will 2420 * disable the adapter's ability to send PAUSE frames. 2421 */ 2422 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2423 if (hw->phy.media_type == ixgbe_media_type_backplane) 2424 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | 2425 IXGBE_AUTOC_ASM_PAUSE); 2426 else if (hw->phy.media_type == ixgbe_media_type_copper) 2427 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 2428 break; 2429 case ixgbe_fc_tx_pause: 2430 /* 2431 * Tx Flow control is enabled, and Rx Flow control is 2432 * disabled by software override. 2433 */ 2434 reg |= (IXGBE_PCS1GANA_ASM_PAUSE); 2435 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); 2436 if (hw->phy.media_type == ixgbe_media_type_backplane) { 2437 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE); 2438 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE); 2439 } else if (hw->phy.media_type == ixgbe_media_type_copper) { 2440 reg_cu |= (IXGBE_TAF_ASM_PAUSE); 2441 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE); 2442 } 2443 break; 2444 case ixgbe_fc_full: 2445 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2446 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2447 if (hw->phy.media_type == ixgbe_media_type_backplane) 2448 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | 2449 IXGBE_AUTOC_ASM_PAUSE); 2450 else if (hw->phy.media_type == ixgbe_media_type_copper) 2451 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 2452 break; 2453 default: 2454 DEBUGOUT("Flow control param set incorrectly\n"); 2455 ret_val = IXGBE_ERR_CONFIG; 2456 goto out; 2457 break; 2458 } 2459 2460 /* 2461 * Enable auto-negotiation between the MAC & PHY; 2462 * the MAC will advertise clause 37 flow control. 2463 */ 2464 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 2465 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 2466 2467 /* Disable AN timeout */ 2468 if (hw->fc.strict_ieee) 2469 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 2470 2471 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 2472 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); 2473 2474 /* 2475 * AUTOC restart handles negotiation of 1G and 10G on backplane 2476 * and copper. There is no need to set the PCS1GCTL register. 
2477 * 2478 */ 2479 if (hw->phy.media_type == ixgbe_media_type_backplane) { 2480 reg_bp |= IXGBE_AUTOC_AN_RESTART; 2481 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); 2482 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 2483 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) { 2484 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, 2485 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); 2486 } 2487 2488 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); 2489 out: 2490 return ret_val; 2491 } 2492 2493 /** 2494 * ixgbe_disable_pcie_master - Disable PCI-express master access 2495 * @hw: pointer to hardware structure 2496 * 2497 * Disables PCI-Express master access and verifies there are no pending 2498 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 2499 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS 2500 * is returned signifying master requests disabled. 2501 **/ 2502 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2503 { 2504 u32 i; 2505 u32 reg_val; 2506 u32 number_of_queues; 2507 s32 status = IXGBE_SUCCESS; 2508 2509 DEBUGFUNC("ixgbe_disable_pcie_master"); 2510 2511 /* Just jump out if bus mastering is already disabled */ 2512 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2513 goto out; 2514 2515 /* Disable the receive unit by stopping each queue */ 2516 number_of_queues = hw->mac.max_rx_queues; 2517 for (i = 0; i < number_of_queues; i++) { 2518 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 2519 if (reg_val & IXGBE_RXDCTL_ENABLE) { 2520 reg_val &= ~IXGBE_RXDCTL_ENABLE; 2521 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); 2522 } 2523 } 2524 2525 reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); 2526 reg_val |= IXGBE_CTRL_GIO_DIS; 2527 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); 2528 2529 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2530 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2531 goto check_device_status; 2532 usec_delay(100); 2533 } 2534 2535 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); 2536 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 2537 2538 /* 2539 * Before proceeding, make sure that the PCIe block does not have 2540 * transactions pending. 2541 */ 2542 check_device_status: 2543 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2544 if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) & 2545 IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 2546 break; 2547 usec_delay(100); 2548 } 2549 2550 if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT) 2551 DEBUGOUT("PCIe transaction pending bit also did not clear.\n"); 2552 else 2553 goto out; 2554 2555 /* 2556 * Two consecutive resets are required via CTRL.RST per datasheet 2557 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine 2558 * of this need. The first reset prevents new master requests from 2559 * being issued by our device. We then must wait 1usec for any 2560 * remaining completions from the PCIe bus to trickle in, and then reset 2561 * again to clear out any effects they may have had on our device. 
 */
        hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

out:
        return status;
}


/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
        u32 gssr;
        u32 swmask = mask;
        u32 fwmask = mask << 5;
        s32 timeout = 200;

        DEBUGFUNC("ixgbe_acquire_swfw_sync");

        while (timeout) {
                /*
                 * SW EEPROM semaphore bit is used for access to all
                 * SW_FW_SYNC/GSSR bits (not just EEPROM)
                 */
                if (ixgbe_get_eeprom_semaphore(hw))
                        return IXGBE_ERR_SWFW_SYNC;

                gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
                if (!(gssr & (fwmask | swmask)))
                        break;

                /*
                 * Firmware currently using resource (fwmask) or other software
                 * thread currently using resource (swmask)
                 */
                ixgbe_release_eeprom_semaphore(hw);
                msec_delay(5);
                timeout--;
        }

        if (!timeout) {
                DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
                return IXGBE_ERR_SWFW_SYNC;
        }

        gssr |= swmask;
        IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

        ixgbe_release_eeprom_semaphore(hw);
        return IXGBE_SUCCESS;
}

/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
        u32 gssr;
        u32 swmask = mask;

        DEBUGFUNC("ixgbe_release_swfw_sync");

        ixgbe_get_eeprom_semaphore(hw);

        gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
        gssr &= ~swmask;
        IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

        ixgbe_release_eeprom_semaphore(hw);
}

/**
 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit
 **/
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
        DEBUGFUNC("ixgbe_enable_rx_dma_generic");

        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
        ixgbe_link_speed speed = 0;
        bool link_up = 0;
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

        DEBUGFUNC("ixgbe_blink_led_start_generic");

        /*
         * Link must be up to auto-blink the LEDs;
         * Force it if link is down.
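         * (The IXGBE_AUTOC_FLU write below forces link up for this purpose.)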
2677 */ 2678 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 2679 2680 if (!link_up) { 2681 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2682 autoc_reg |= IXGBE_AUTOC_FLU; 2683 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2684 msec_delay(10); 2685 } 2686 2687 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2688 led_reg |= IXGBE_LED_BLINK(index); 2689 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2690 IXGBE_WRITE_FLUSH(hw); 2691 2692 return IXGBE_SUCCESS; 2693 } 2694 2695 /** 2696 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 2697 * @hw: pointer to hardware structure 2698 * @index: led number to stop blinking 2699 **/ 2700 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2701 { 2702 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2703 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2704 2705 DEBUGFUNC("ixgbe_blink_led_stop_generic"); 2706 2707 2708 autoc_reg &= ~IXGBE_AUTOC_FLU; 2709 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2710 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2711 2712 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2713 led_reg &= ~IXGBE_LED_BLINK(index); 2714 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2715 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2716 IXGBE_WRITE_FLUSH(hw); 2717 2718 return IXGBE_SUCCESS; 2719 } 2720 2721 /** 2722 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 2723 * @hw: pointer to hardware structure 2724 * @san_mac_offset: SAN MAC address offset 2725 * 2726 * This function will read the EEPROM location for the SAN MAC address 2727 * pointer, and returns the value at that location. This is used in both 2728 * get and set mac_addr routines. 2729 **/ 2730 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2731 u16 *san_mac_offset) 2732 { 2733 DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); 2734 2735 /* 2736 * First read the EEPROM pointer to see if the MAC addresses are 2737 * available. 2738 */ 2739 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); 2740 2741 return IXGBE_SUCCESS; 2742 } 2743 2744 /** 2745 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 2746 * @hw: pointer to hardware structure 2747 * @san_mac_addr: SAN MAC address 2748 * 2749 * Reads the SAN MAC address from the EEPROM, if it's available. This is 2750 * per-port, so set_lan_id() must be called before reading the addresses. 2751 * set_lan_id() is called by identify_sfp(), but this cannot be relied 2752 * upon for non-SFP connections, so we must call it here. 2753 **/ 2754 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2755 { 2756 u16 san_mac_data, san_mac_offset; 2757 u8 i; 2758 2759 DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); 2760 2761 /* 2762 * First read the EEPROM pointer to see if the MAC addresses are 2763 * available. If they're not, no point in calling set_lan_id() here. 2764 */ 2765 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2766 2767 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { 2768 /* 2769 * No addresses available in this EEPROM. It's not an 2770 * error though, so just wipe the local address and return. 2771 */ 2772 for (i = 0; i < 6; i++) 2773 san_mac_addr[i] = 0xFF; 2774 2775 goto san_mac_addr_out; 2776 } 2777 2778 /* make sure we know which port we need to program */ 2779 hw->mac.ops.set_lan_id(hw); 2780 /* apply the port offset to the address offset */ 2781 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2782 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2783 for (i = 0; i < 3; i++) { 2784 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); 2785 san_mac_addr[i * 2] = (u8)(san_mac_data); 2786 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2787 san_mac_offset++; 2788 } 2789 2790 san_mac_addr_out: 2791 return IXGBE_SUCCESS; 2792 } 2793 2794 /** 2795 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM 2796 * @hw: pointer to hardware structure 2797 * @san_mac_addr: SAN MAC address 2798 * 2799 * Write a SAN MAC address to the EEPROM. 2800 **/ 2801 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2802 { 2803 s32 status = IXGBE_SUCCESS; 2804 u16 san_mac_data, san_mac_offset; 2805 u8 i; 2806 2807 DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); 2808 2809 /* Look for SAN mac address pointer. If not defined, return */ 2810 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2811 2812 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { 2813 status = IXGBE_ERR_NO_SAN_ADDR_PTR; 2814 goto san_mac_addr_out; 2815 } 2816 2817 /* Make sure we know which port we need to write */ 2818 hw->mac.ops.set_lan_id(hw); 2819 /* Apply the port offset to the address offset */ 2820 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2821 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2822 2823 for (i = 0; i < 3; i++) { 2824 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); 2825 san_mac_data |= (u16)(san_mac_addr[i * 2]); 2826 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); 2827 san_mac_offset++; 2828 } 2829 2830 san_mac_addr_out: 2831 return status; 2832 } 2833 2834 /** 2835 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 2836 * @hw: pointer to hardware structure 2837 * 2838 * Read PCIe configuration space, and get the MSI-X vector count from 2839 * the capabilities table. 2840 **/ 2841 u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2842 { 2843 u32 msix_count = 64; 2844 2845 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); 2846 if (hw->mac.msix_vectors_from_pcie) { 2847 msix_count = IXGBE_READ_PCIE_WORD(hw, 2848 IXGBE_PCIE_MSIX_82599_CAPS); 2849 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2850 2851 /* MSI-X count is zero-based in HW, so increment to give 2852 * proper value */ 2853 msix_count++; 2854 } 2855 2856 return msix_count; 2857 } 2858 2859 /** 2860 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address 2861 * @hw: pointer to hardware structure 2862 * @addr: Address to put into receive address register 2863 * @vmdq: VMDq pool to assign 2864 * 2865 * Puts an ethernet address into a receive address register, or 2866 * finds the rar that it is aleady in; adds to the pool list 2867 **/ 2868 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 2869 { 2870 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; 2871 u32 first_empty_rar = NO_EMPTY_RAR_FOUND; 2872 u32 rar; 2873 u32 rar_low, rar_high; 2874 u32 addr_low, addr_high; 2875 2876 DEBUGFUNC("ixgbe_insert_mac_addr_generic"); 2877 2878 /* swap bytes for HW little endian */ 2879 addr_low = addr[0] | (addr[1] << 8) 2880 | (addr[2] << 16) 2881 | (addr[3] << 24); 2882 addr_high = addr[4] | (addr[5] << 8); 2883 2884 /* 2885 * Either find the mac_id in rar or find the first empty space. 2886 * rar_highwater points to just after the highest currently used 2887 * rar in order to shorten the search. It grows when we add a new 2888 * rar to the top. 
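         *
         * For example, with rar_highwater == 4 only RAR[0]..RAR[3] are
         * scanned; a new address then lands either in the first disabled
         * RAR found during the scan or in RAR[4], which bumps rar_highwater
         * to 5.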
2889 */ 2890 for (rar = 0; rar < hw->mac.rar_highwater; rar++) { 2891 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 2892 2893 if (((IXGBE_RAH_AV & rar_high) == 0) 2894 && first_empty_rar == NO_EMPTY_RAR_FOUND) { 2895 first_empty_rar = rar; 2896 } else if ((rar_high & 0xFFFF) == addr_high) { 2897 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); 2898 if (rar_low == addr_low) 2899 break; /* found it already in the rars */ 2900 } 2901 } 2902 2903 if (rar < hw->mac.rar_highwater) { 2904 /* already there so just add to the pool bits */ 2905 ixgbe_set_vmdq(hw, rar, vmdq); 2906 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { 2907 /* stick it into first empty RAR slot we found */ 2908 rar = first_empty_rar; 2909 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 2910 } else if (rar == hw->mac.rar_highwater) { 2911 /* add it to the top of the list and inc the highwater mark */ 2912 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 2913 hw->mac.rar_highwater++; 2914 } else if (rar >= hw->mac.num_rar_entries) { 2915 return IXGBE_ERR_INVALID_MAC_ADDR; 2916 } 2917 2918 /* 2919 * If we found rar[0], make sure the default pool bit (we use pool 0) 2920 * remains cleared to be sure default pool packets will get delivered 2921 */ 2922 if (rar == 0) 2923 ixgbe_clear_vmdq(hw, rar, 0); 2924 2925 return rar; 2926 } 2927 2928 /** 2929 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 2930 * @hw: pointer to hardware struct 2931 * @rar: receive address register index to disassociate 2932 * @vmdq: VMDq pool index to remove from the rar 2933 **/ 2934 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2935 { 2936 u32 mpsar_lo, mpsar_hi; 2937 u32 rar_entries = hw->mac.num_rar_entries; 2938 2939 DEBUGFUNC("ixgbe_clear_vmdq_generic"); 2940 2941 /* Make sure we are using a valid rar index range */ 2942 if (rar >= rar_entries) { 2943 DEBUGOUT1("RAR index %d is out of range.\n", rar); 2944 return IXGBE_ERR_INVALID_ARGUMENT; 2945 } 2946 2947 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2948 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2949 2950 if (!mpsar_lo && !mpsar_hi) 2951 goto done; 2952 2953 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2954 if (mpsar_lo) { 2955 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 2956 mpsar_lo = 0; 2957 } 2958 if (mpsar_hi) { 2959 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 2960 mpsar_hi = 0; 2961 } 2962 } else if (vmdq < 32) { 2963 mpsar_lo &= ~(1 << vmdq); 2964 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 2965 } else { 2966 mpsar_hi &= ~(1 << (vmdq - 32)); 2967 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 2968 } 2969 2970 /* was that the last pool using this rar? 
 */
        if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
                hw->mac.ops.clear_rar(hw, rar);
done:
        return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq pool index
 **/
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
        u32 mpsar;
        u32 rar_entries = hw->mac.num_rar_entries;

        DEBUGFUNC("ixgbe_set_vmdq_generic");

        /* Make sure we are using a valid rar index range */
        if (rar >= rar_entries) {
                DEBUGOUT1("RAR index %d is out of range.\n", rar);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        if (vmdq < 32) {
                mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
                mpsar |= 1 << vmdq;
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
        } else {
                mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
                mpsar |= 1 << (vmdq - 32);
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
        }
        return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
        int i;

        DEBUGFUNC("ixgbe_init_uta_tables_generic");
        DEBUGOUT(" Clearing UTA\n");

        for (i = 0; i < 128; i++)
                IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 *
 * return the VLVF index where this VLAN id should be placed
 *
 **/
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
{
        u32 bits = 0;
        u32 first_empty_slot = 0;
        s32 regindex;

        /* short cut the special case */
        if (vlan == 0)
                return 0;

        /*
         * Search for the vlan id in the VLVF entries. Save off the first empty
         * slot found along the way
         */
        for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
                bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
                if (!bits && !(first_empty_slot))
                        first_empty_slot = regindex;
                else if ((bits & 0x0FFF) == vlan)
                        break;
        }

        /*
         * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
         * in the VLVF. Else use the first empty VLVF register for this
         * vlan id.
         */
        if (regindex >= IXGBE_VLVF_ENTRIES) {
                if (first_empty_slot)
                        regindex = first_empty_slot;
                else {
                        DEBUGOUT("No space in VLVF.\n");
                        regindex = IXGBE_ERR_NO_SPACE;
                }
        }

        return regindex;
}

/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
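 *
 * For example, VLAN id 300 (0x12C) maps to VFTA register 9 (300 >> 5),
 * bit 12 (300 & 0x1F).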
3080 **/ 3081 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3082 bool vlan_on) 3083 { 3084 s32 regindex; 3085 u32 bitindex; 3086 u32 vfta; 3087 u32 bits; 3088 u32 vt; 3089 u32 targetbit; 3090 bool vfta_changed = FALSE; 3091 3092 DEBUGFUNC("ixgbe_set_vfta_generic"); 3093 3094 if (vlan > 4095) 3095 return IXGBE_ERR_PARAM; 3096 3097 /* 3098 * this is a 2 part operation - first the VFTA, then the 3099 * VLVF and VLVFB if VT Mode is set 3100 * We don't write the VFTA until we know the VLVF part succeeded. 3101 */ 3102 3103 /* Part 1 3104 * The VFTA is a bitstring made up of 128 32-bit registers 3105 * that enable the particular VLAN id, much like the MTA: 3106 * bits[11-5]: which register 3107 * bits[4-0]: which bit in the register 3108 */ 3109 regindex = (vlan >> 5) & 0x7F; 3110 bitindex = vlan & 0x1F; 3111 targetbit = (1 << bitindex); 3112 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); 3113 3114 if (vlan_on) { 3115 if (!(vfta & targetbit)) { 3116 vfta |= targetbit; 3117 vfta_changed = TRUE; 3118 } 3119 } else { 3120 if ((vfta & targetbit)) { 3121 vfta &= ~targetbit; 3122 vfta_changed = TRUE; 3123 } 3124 } 3125 3126 /* Part 2 3127 * If VT Mode is set 3128 * Either vlan_on 3129 * make sure the vlan is in VLVF 3130 * set the vind bit in the matching VLVFB 3131 * Or !vlan_on 3132 * clear the pool bit and possibly the vind 3133 */ 3134 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 3135 if (vt & IXGBE_VT_CTL_VT_ENABLE) { 3136 s32 vlvf_index; 3137 3138 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); 3139 if (vlvf_index < 0) 3140 return vlvf_index; 3141 3142 if (vlan_on) { 3143 /* set the pool bit */ 3144 if (vind < 32) { 3145 bits = IXGBE_READ_REG(hw, 3146 IXGBE_VLVFB(vlvf_index*2)); 3147 bits |= (1 << vind); 3148 IXGBE_WRITE_REG(hw, 3149 IXGBE_VLVFB(vlvf_index*2), 3150 bits); 3151 } else { 3152 bits = IXGBE_READ_REG(hw, 3153 IXGBE_VLVFB((vlvf_index*2)+1)); 3154 bits |= (1 << (vind-32)); 3155 IXGBE_WRITE_REG(hw, 3156 IXGBE_VLVFB((vlvf_index*2)+1), 3157 bits); 3158 } 3159 } else { 3160 /* clear the pool bit */ 3161 if (vind < 32) { 3162 bits = IXGBE_READ_REG(hw, 3163 IXGBE_VLVFB(vlvf_index*2)); 3164 bits &= ~(1 << vind); 3165 IXGBE_WRITE_REG(hw, 3166 IXGBE_VLVFB(vlvf_index*2), 3167 bits); 3168 bits |= IXGBE_READ_REG(hw, 3169 IXGBE_VLVFB((vlvf_index*2)+1)); 3170 } else { 3171 bits = IXGBE_READ_REG(hw, 3172 IXGBE_VLVFB((vlvf_index*2)+1)); 3173 bits &= ~(1 << (vind-32)); 3174 IXGBE_WRITE_REG(hw, 3175 IXGBE_VLVFB((vlvf_index*2)+1), 3176 bits); 3177 bits |= IXGBE_READ_REG(hw, 3178 IXGBE_VLVFB(vlvf_index*2)); 3179 } 3180 } 3181 3182 /* 3183 * If there are still bits set in the VLVFB registers 3184 * for the VLAN ID indicated we need to see if the 3185 * caller is requesting that we clear the VFTA entry bit. 3186 * If the caller has requested that we clear the VFTA 3187 * entry bit but there are still pools/VFs using this VLAN 3188 * ID entry then ignore the request. We're not worried 3189 * about the case where we're turning the VFTA VLAN ID 3190 * entry bit on, only when requested to turn it off as 3191 * there may be multiple pools and/or VFs using the 3192 * VLAN ID entry. In that case we cannot clear the 3193 * VFTA bit until all pools/VFs using that VLAN ID have also 3194 * been cleared. This will be indicated by "bits" being 3195 * zero. 3196 */ 3197 if (bits) { 3198 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 3199 (IXGBE_VLVF_VIEN | vlan)); 3200 if (!vlan_on) { 3201 /* someone wants to clear the vfta entry 3202 * but some pools/VFs are still using it. 3203 * Ignore it. 
*/ 3204 vfta_changed = FALSE; 3205 } 3206 } 3207 else 3208 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 3209 } 3210 3211 if (vfta_changed) 3212 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); 3213 3214 return IXGBE_SUCCESS; 3215 } 3216 3217 /** 3218 * ixgbe_clear_vfta_generic - Clear VLAN filter table 3219 * @hw: pointer to hardware structure 3220 * 3221 * Clears the VLAN filer table, and the VMDq index associated with the filter 3222 **/ 3223 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 3224 { 3225 u32 offset; 3226 3227 DEBUGFUNC("ixgbe_clear_vfta_generic"); 3228 3229 for (offset = 0; offset < hw->mac.vft_size; offset++) 3230 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 3231 3232 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 3233 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 3234 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); 3235 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); 3236 } 3237 3238 return IXGBE_SUCCESS; 3239 } 3240 3241 /** 3242 * ixgbe_check_mac_link_generic - Determine link and speed status 3243 * @hw: pointer to hardware structure 3244 * @speed: pointer to link speed 3245 * @link_up: TRUE when link is up 3246 * @link_up_wait_to_complete: bool used to wait for link up or not 3247 * 3248 * Reads the links register to determine if link is up and the current speed 3249 **/ 3250 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3251 bool *link_up, bool link_up_wait_to_complete) 3252 { 3253 u32 links_reg, links_orig; 3254 u32 i; 3255 3256 DEBUGFUNC("ixgbe_check_mac_link_generic"); 3257 3258 /* clear the old state */ 3259 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 3260 3261 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3262 3263 if (links_orig != links_reg) { 3264 DEBUGOUT2("LINKS changed from %08X to %08X\n", 3265 links_orig, links_reg); 3266 } 3267 3268 if (link_up_wait_to_complete) { 3269 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3270 if (links_reg & IXGBE_LINKS_UP) { 3271 *link_up = TRUE; 3272 break; 3273 } else { 3274 *link_up = FALSE; 3275 } 3276 msec_delay(100); 3277 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3278 } 3279 } else { 3280 if (links_reg & IXGBE_LINKS_UP) 3281 *link_up = TRUE; 3282 else 3283 *link_up = FALSE; 3284 } 3285 3286 if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3287 IXGBE_LINKS_SPEED_10G_82599) 3288 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3289 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3290 IXGBE_LINKS_SPEED_1G_82599) 3291 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3292 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3293 IXGBE_LINKS_SPEED_100_82599) 3294 *speed = IXGBE_LINK_SPEED_100_FULL; 3295 else 3296 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3297 3298 /* if link is down, zero out the current_mode */ 3299 if (*link_up == FALSE) { 3300 hw->fc.current_mode = ixgbe_fc_none; 3301 hw->fc.fc_was_autonegged = FALSE; 3302 } 3303 3304 return IXGBE_SUCCESS; 3305 } 3306 3307 /** 3308 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3309 * the EEPROM 3310 * @hw: pointer to hardware structure 3311 * @wwnn_prefix: the alternative WWNN prefix 3312 * @wwpn_prefix: the alternative WWPN prefix 3313 * 3314 * This function will read the EEPROM from the alternative SAN MAC address 3315 * block to check the support for the alternative WWNN/WWPN prefix support. 
3316 **/ 3317 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 3318 u16 *wwpn_prefix) 3319 { 3320 u16 offset, caps; 3321 u16 alt_san_mac_blk_offset; 3322 3323 DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); 3324 3325 /* clear output first */ 3326 *wwnn_prefix = 0xFFFF; 3327 *wwpn_prefix = 0xFFFF; 3328 3329 /* check if alternative SAN MAC is supported */ 3330 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, 3331 &alt_san_mac_blk_offset); 3332 3333 if ((alt_san_mac_blk_offset == 0) || 3334 (alt_san_mac_blk_offset == 0xFFFF)) 3335 goto wwn_prefix_out; 3336 3337 /* check capability in alternative san mac address block */ 3338 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 3339 hw->eeprom.ops.read(hw, offset, &caps); 3340 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 3341 goto wwn_prefix_out; 3342 3343 /* get the corresponding prefix for WWNN/WWPN */ 3344 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 3345 hw->eeprom.ops.read(hw, offset, wwnn_prefix); 3346 3347 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 3348 hw->eeprom.ops.read(hw, offset, wwpn_prefix); 3349 3350 wwn_prefix_out: 3351 return IXGBE_SUCCESS; 3352 } 3353 3354 /** 3355 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM 3356 * @hw: pointer to hardware structure 3357 * @bs: the fcoe boot status 3358 * 3359 * This function will read the FCOE boot status from the iSCSI FCOE block 3360 **/ 3361 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) 3362 { 3363 u16 offset, caps, flags; 3364 s32 status; 3365 3366 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); 3367 3368 /* clear output first */ 3369 *bs = ixgbe_fcoe_bootstatus_unavailable; 3370 3371 /* check if FCOE IBA block is present */ 3372 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; 3373 status = hw->eeprom.ops.read(hw, offset, &caps); 3374 if (status != IXGBE_SUCCESS) 3375 goto out; 3376 3377 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) 3378 goto out; 3379 3380 /* check if iSCSI FCOE block is populated */ 3381 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); 3382 if (status != IXGBE_SUCCESS) 3383 goto out; 3384 3385 if ((offset == 0) || (offset == 0xFFFF)) 3386 goto out; 3387 3388 /* read fcoe flags in iSCSI FCOE block */ 3389 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; 3390 status = hw->eeprom.ops.read(hw, offset, &flags); 3391 if (status != IXGBE_SUCCESS) 3392 goto out; 3393 3394 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) 3395 *bs = ixgbe_fcoe_bootstatus_enabled; 3396 else 3397 *bs = ixgbe_fcoe_bootstatus_disabled; 3398 3399 out: 3400 return status; 3401 } 3402 3403 /** 3404 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow 3405 * control 3406 * @hw: pointer to hardware structure 3407 * 3408 * There are several phys that do not support autoneg flow control. This 3409 * function check the device id to see if the associated phy supports 3410 * autoneg flow control. 
 **/
static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{

        DEBUGFUNC("ixgbe_device_supports_autoneg_fc");

        switch (hw->device_id) {
        case IXGBE_DEV_ID_82599_T3_LOM:
                return IXGBE_SUCCESS;
        default:
                return IXGBE_ERR_FC_NOT_SUPPORTED;
        }
}

/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for anti-spoofing
 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
        int j;
        int pf_target_reg = pf >> 3;
        int pf_target_shift = pf % 8;
        u32 pfvfspoof = 0;

        if (hw->mac.type == ixgbe_mac_82598EB)
                return;

        if (enable)
                pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

        /*
         * PFVFSPOOF register array is size 8 with 8 bits assigned to
         * MAC anti-spoof enables in each register array element.
         */
        for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
                IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

        /* If not enabling anti-spoofing then done */
        if (!enable)
                return;

        /*
         * The PF should be allowed to spoof so that it can support
         * emulation mode NICs. Reset the bit assigned to the PF
         */
        pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
        pfvfspoof ^= (1 << pf_target_shift);
        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
}

/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
        int vf_target_reg = vf >> 3;
        int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
        u32 pfvfspoof;

        if (hw->mac.type == ixgbe_mac_82598EB)
                return;

        pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
        if (enable)
                pfvfspoof |= (1 << vf_target_shift);
        else
                pfvfspoof &= ~(1 << vf_target_shift);
        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
3496 **/ 3497 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 3498 { 3499 DEBUGFUNC("ixgbe_get_device_caps_generic"); 3500 3501 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 3502 3503 return IXGBE_SUCCESS; 3504 } 3505 3506 /** 3507 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering 3508 * @hw: pointer to hardware structure 3509 * 3510 **/ 3511 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw) 3512 { 3513 u32 regval; 3514 u32 i; 3515 3516 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2"); 3517 3518 /* Enable relaxed ordering */ 3519 for (i = 0; i < hw->mac.max_tx_queues; i++) { 3520 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 3521 regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 3522 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 3523 } 3524 3525 for (i = 0; i < hw->mac.max_rx_queues; i++) { 3526 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 3527 regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN | 3528 IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 3529 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 3530 } 3531 3532 } 3533