// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2024 Intel Corporation. */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/netdevice.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

/* Forward declarations for the static EEPROM bit-bang and PCIe helpers
 * defined later in this file.
 */
static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static int ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);

static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data);
static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data);
static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset);
static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);

/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(8259X)
};

/**
 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
 * control
 * @hw: pointer to hardware structure
 *
 * There are several phys that do not support autoneg flow control. This
 * function check the device id to see if the associated phy supports
 * autoneg flow control.
48 **/ 49 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 50 { 51 bool supported = false; 52 ixgbe_link_speed speed; 53 bool link_up; 54 55 switch (hw->phy.media_type) { 56 case ixgbe_media_type_fiber: 57 /* flow control autoneg black list */ 58 switch (hw->device_id) { 59 case IXGBE_DEV_ID_X550EM_A_SFP: 60 case IXGBE_DEV_ID_X550EM_A_SFP_N: 61 case IXGBE_DEV_ID_E610_SFP: 62 supported = false; 63 break; 64 default: 65 hw->mac.ops.check_link(hw, &speed, &link_up, false); 66 /* if link is down, assume supported */ 67 if (link_up) 68 supported = speed == IXGBE_LINK_SPEED_1GB_FULL; 69 else 70 supported = true; 71 } 72 73 break; 74 case ixgbe_media_type_backplane: 75 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) 76 supported = false; 77 else 78 supported = true; 79 break; 80 case ixgbe_media_type_copper: 81 /* only some copper devices support flow control autoneg */ 82 switch (hw->device_id) { 83 case IXGBE_DEV_ID_82599_T3_LOM: 84 case IXGBE_DEV_ID_X540T: 85 case IXGBE_DEV_ID_X540T1: 86 case IXGBE_DEV_ID_X550T: 87 case IXGBE_DEV_ID_X550T1: 88 case IXGBE_DEV_ID_X550EM_X_10G_T: 89 case IXGBE_DEV_ID_X550EM_A_10G_T: 90 case IXGBE_DEV_ID_X550EM_A_1G_T: 91 case IXGBE_DEV_ID_X550EM_A_1G_T_L: 92 case IXGBE_DEV_ID_E610_10G_T: 93 case IXGBE_DEV_ID_E610_2_5G_T: 94 supported = true; 95 break; 96 default: 97 break; 98 } 99 break; 100 default: 101 break; 102 } 103 104 if (!supported) 105 hw_dbg(hw, "Device %x does not support flow control autoneg\n", 106 hw->device_id); 107 108 return supported; 109 } 110 111 /** 112 * ixgbe_setup_fc_generic - Set up flow control 113 * @hw: pointer to hardware structure 114 * 115 * Called at init time to set up flow control. 116 **/ 117 int ixgbe_setup_fc_generic(struct ixgbe_hw *hw) 118 { 119 u32 reg = 0, reg_bp = 0; 120 bool locked = false; 121 int ret_val = 0; 122 u16 reg_cu = 0; 123 124 /* 125 * Validate the requested mode. Strict IEEE mode does not allow 126 * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 
127 */ 128 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { 129 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); 130 return -EINVAL; 131 } 132 133 /* 134 * 10gig parts do not have a word in the EEPROM to determine the 135 * default flow control setting, so we explicitly set it to full. 136 */ 137 if (hw->fc.requested_mode == ixgbe_fc_default) 138 hw->fc.requested_mode = ixgbe_fc_full; 139 140 /* 141 * Set up the 1G and 10G flow control advertisement registers so the 142 * HW will be able to do fc autoneg once the cable is plugged in. If 143 * we link at 10G, the 1G advertisement is harmless and vice versa. 144 */ 145 switch (hw->phy.media_type) { 146 case ixgbe_media_type_backplane: 147 /* some MAC's need RMW protection on AUTOC */ 148 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp); 149 if (ret_val) 150 return ret_val; 151 152 fallthrough; /* only backplane uses autoc */ 153 case ixgbe_media_type_fiber: 154 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 155 156 break; 157 case ixgbe_media_type_copper: 158 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 159 MDIO_MMD_AN, ®_cu); 160 break; 161 default: 162 break; 163 } 164 165 /* 166 * The possible values of fc.requested_mode are: 167 * 0: Flow control is completely disabled 168 * 1: Rx flow control is enabled (we can receive pause frames, 169 * but not send pause frames). 170 * 2: Tx flow control is enabled (we can send pause frames but 171 * we do not support receiving pause frames). 172 * 3: Both Rx and Tx flow control (symmetric) are enabled. 173 * other: Invalid. 174 */ 175 switch (hw->fc.requested_mode) { 176 case ixgbe_fc_none: 177 /* Flow control completely disabled by software override. 
*/ 178 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 179 if (hw->phy.media_type == ixgbe_media_type_backplane) 180 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | 181 IXGBE_AUTOC_ASM_PAUSE); 182 else if (hw->phy.media_type == ixgbe_media_type_copper) 183 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 184 break; 185 case ixgbe_fc_tx_pause: 186 /* 187 * Tx Flow control is enabled, and Rx Flow control is 188 * disabled by software override. 189 */ 190 reg |= IXGBE_PCS1GANA_ASM_PAUSE; 191 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; 192 if (hw->phy.media_type == ixgbe_media_type_backplane) { 193 reg_bp |= IXGBE_AUTOC_ASM_PAUSE; 194 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; 195 } else if (hw->phy.media_type == ixgbe_media_type_copper) { 196 reg_cu |= IXGBE_TAF_ASM_PAUSE; 197 reg_cu &= ~IXGBE_TAF_SYM_PAUSE; 198 } 199 break; 200 case ixgbe_fc_rx_pause: 201 /* 202 * Rx Flow control is enabled and Tx Flow control is 203 * disabled by software override. Since there really 204 * isn't a way to advertise that we are capable of RX 205 * Pause ONLY, we will advertise that we support both 206 * symmetric and asymmetric Rx PAUSE, as such we fall 207 * through to the fc_full statement. Later, we will 208 * disable the adapter's ability to send PAUSE frames. 209 */ 210 case ixgbe_fc_full: 211 /* Flow control (both Rx and Tx) is enabled by SW override. */ 212 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; 213 if (hw->phy.media_type == ixgbe_media_type_backplane) 214 reg_bp |= IXGBE_AUTOC_SYM_PAUSE | 215 IXGBE_AUTOC_ASM_PAUSE; 216 else if (hw->phy.media_type == ixgbe_media_type_copper) 217 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; 218 break; 219 default: 220 hw_dbg(hw, "Flow control param set incorrectly\n"); 221 return -EIO; 222 } 223 224 if (hw->mac.type != ixgbe_mac_X540) { 225 /* 226 * Enable auto-negotiation between the MAC & PHY; 227 * the MAC will advertise clause 37 flow control. 
228 */ 229 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 230 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 231 232 /* Disable AN timeout */ 233 if (hw->fc.strict_ieee) 234 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 235 236 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 237 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 238 } 239 240 /* 241 * AUTOC restart handles negotiation of 1G and 10G on backplane 242 * and copper. There is no need to set the PCS1GCTL register. 243 * 244 */ 245 if (hw->phy.media_type == ixgbe_media_type_backplane) { 246 /* Need the SW/FW semaphore around AUTOC writes if 82599 and 247 * LESM is on, likewise reset_pipeline requries the lock as 248 * it also writes AUTOC. 249 */ 250 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); 251 if (ret_val) 252 return ret_val; 253 254 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 255 ixgbe_device_supports_autoneg_fc(hw)) { 256 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, 257 MDIO_MMD_AN, reg_cu); 258 } 259 260 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); 261 return ret_val; 262 } 263 264 /** 265 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 266 * @hw: pointer to hardware structure 267 * 268 * Starts the hardware by filling the bus info structure and media type, clears 269 * all on chip counters, initializes receive address registers, multicast 270 * table, VLAN filter table, calls routine to set up link and flow control 271 * settings, and leaves transmit and receive units disabled and uninitialized 272 **/ 273 int ixgbe_start_hw_generic(struct ixgbe_hw *hw) 274 { 275 u16 device_caps; 276 u32 ctrl_ext; 277 int ret_val; 278 279 /* Set the media type */ 280 hw->phy.media_type = hw->mac.ops.get_media_type(hw); 281 282 /* Identify the PHY */ 283 hw->phy.ops.identify(hw); 284 285 /* Clear the VLAN filter table */ 286 hw->mac.ops.clear_vfta(hw); 287 288 /* Clear statistics registers */ 289 hw->mac.ops.clear_hw_cntrs(hw); 290 291 /* Set No Snoop Disable */ 292 ctrl_ext = 
IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 293 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; 294 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 295 IXGBE_WRITE_FLUSH(hw); 296 297 /* Setup flow control if method for doing so */ 298 if (hw->mac.ops.setup_fc) { 299 ret_val = hw->mac.ops.setup_fc(hw); 300 if (ret_val) 301 return ret_val; 302 } 303 304 /* Cashe bit indicating need for crosstalk fix */ 305 switch (hw->mac.type) { 306 case ixgbe_mac_82599EB: 307 case ixgbe_mac_X550EM_x: 308 case ixgbe_mac_x550em_a: 309 hw->mac.ops.get_device_caps(hw, &device_caps); 310 if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) 311 hw->need_crosstalk_fix = false; 312 else 313 hw->need_crosstalk_fix = true; 314 break; 315 default: 316 hw->need_crosstalk_fix = false; 317 break; 318 } 319 320 /* Clear adapter stopped flag */ 321 hw->adapter_stopped = false; 322 323 return 0; 324 } 325 326 /** 327 * ixgbe_start_hw_gen2 - Init sequence for common device family 328 * @hw: pointer to hw structure 329 * 330 * Performs the init sequence common to the second generation 331 * of 10 GbE devices. 
 * Devices in the second generation:
 *     82599
 *     X540
 **/
int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;

	/* Clear the rate limiters: select each Tx queue via RTTDQSEL, then
	 * zero its rate-control register.
	 */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ixgbe_init_hw_generic - Generic hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware, filling the bus info
 * structure and media type, clears all on chip counters, initializes receive
 * address registers, multicast table, VLAN filter table, calls routine to set
 * up link and flow control settings, and leaves transmit and receive units
 * disabled and uninitialized
 **/
int ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
	int status;

	/* Reset the hardware */
	status = hw->mac.ops.reset_hw(hw);

	if (status == 0) {
		/* Start the HW */
		status = hw->mac.ops.start_hw(hw);
	}

	/* Initialize the LED link active for LED blink support */
	if (hw->mac.ops.init_led_link_act)
		hw->mac.ops.init_led_link_act(hw);

	return status;
}

/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.
 **/
int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	/* All counters below are clear-on-read; the return values are
	 * intentionally discarded.
	 */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* 82599 and newer moved the link XON/XOFF Rx counters */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* per-priority pause counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* per-queue counters */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* Make sure the PHY is identified before touching PHY counters */
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_e610) {
		if (hw->phy.id == 0)
			hw->phy.ops.identify(hw);
	}

	/* PHY-side clear-on-read error counters; i is reused as scratch */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
	}

	return 0;
}

/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	int ret_val;
	u16 pba_ptr;
	u16 offset;
	u16 length;
	u16 data;

	if (pba_num == NULL) {
		hw_dbg(hw, "PBA string buffer was null\n");
		return -EINVAL;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		hw_dbg(hw, "NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			hw_dbg(hw, "PBA string buffer too small\n");
			return -ENOSPC;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return 0;
	}

	/* pointer format: first word at pba_ptr is the section length */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		hw_dbg(hw, "NVM PBA number section invalid length\n");
		return -EIO;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		hw_dbg(hw, "PBA string buffer too small\n");
		return -ENOSPC;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two ASCII characters, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return 0;
}

/**
 * ixgbe_get_mac_addr_generic - Generic get MAC address
 * @hw: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
 * A reset of the adapter must be performed prior to calling this function
 * in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	/* RAL holds the low 4 bytes, RAH the high 2 bytes */
	for (i = 0; i < 4; i++)
		mac_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < 2; i++)
		mac_addr[i+4] = (u8)(rar_high >> (i*8));

	return 0;
}

/* Map the PCIe negotiated link width field to the driver's bus width enum. */
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
{
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		return ixgbe_bus_width_pcie_x1;
	case IXGBE_PCI_LINK_WIDTH_2:
		return ixgbe_bus_width_pcie_x2;
	case IXGBE_PCI_LINK_WIDTH_4:
		return ixgbe_bus_width_pcie_x4;
	case IXGBE_PCI_LINK_WIDTH_8:
		return ixgbe_bus_width_pcie_x8;
	default:
		return ixgbe_bus_width_unknown;
	}
}

/* Map the PCIe negotiated link speed field to the driver's bus speed enum. */
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
{
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		return ixgbe_bus_speed_2500;
	case IXGBE_PCI_LINK_SPEED_5000:
		return ixgbe_bus_speed_5000;
	case IXGBE_PCI_LINK_SPEED_8000:
		return ixgbe_bus_speed_8000;
	default:
		return ixgbe_bus_speed_unknown;
	}
}

/**
 * ixgbe_get_bus_info_generic - Generic set PCI bus info
 * @hw: pointer to hardware structure
 *
 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	u16 link_status;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space */
	if (hw->mac.type == ixgbe_mac_e610)
		link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610);
	else
		link_status = ixgbe_read_pci_cfg_word(hw,
						      IXGBE_PCI_LINK_STATUS);

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	hw->mac.ops.set_lan_id(hw);

	return 0;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 *
 * Determines the LAN function id by reading memory-mapped registers
 * and swaps the port value if requested.
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 ee_ctrl_4;
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = FIELD_GET(IXGBE_STATUS_LAN_ID, reg);
	bus->lan_id = bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;

	/* Get MAC instance from EEPROM for configuring CS4227 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
		bus->instance_id = FIELD_GET(IXGBE_EE_CTRL_4_INST_ID,
					     ee_ctrl_4);
	}
}

/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	hw->mac.ops.disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 2000);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E primary
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_primary(hw);
}

/**
 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
 * @hw: pointer to hardware structure
 *
 * Store the index for the link active LED. This will be used to support
 * blinking the LED.
 **/
int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 led_reg, led_mode;
	u16 i;

	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* Get LED link active from the LEDCTL register */
	for (i = 0; i < 4; i++) {
		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);

		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
		    IXGBE_LED_LINK_ACTIVE) {
			mac->led_link_act = i;
			return 0;
		}
	}

	/* If LEDCTL register does not have the LED link active set, then use
	 * known MAC defaults.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_x550em_a:
		mac->led_link_act = 0;
		break;
	case ixgbe_mac_X550EM_x:
		mac->led_link_act = 1;
		break;
	default:
		mac->led_link_act = 2;
	}

	return 0;
}

/**
 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
812 * @hw: pointer to hardware structure 813 * @index: led number to turn on 814 **/ 815 int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) 816 { 817 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 818 819 if (index > 3) 820 return -EINVAL; 821 822 /* To turn on the LED, set mode to ON. */ 823 led_reg &= ~IXGBE_LED_MODE_MASK(index); 824 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); 825 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 826 IXGBE_WRITE_FLUSH(hw); 827 828 return 0; 829 } 830 831 /** 832 * ixgbe_led_off_generic - Turns off the software controllable LEDs. 833 * @hw: pointer to hardware structure 834 * @index: led number to turn off 835 **/ 836 int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) 837 { 838 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 839 840 if (index > 3) 841 return -EINVAL; 842 843 /* To turn off the LED, set mode to OFF. */ 844 led_reg &= ~IXGBE_LED_MODE_MASK(index); 845 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); 846 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 847 IXGBE_WRITE_FLUSH(hw); 848 849 return 0; 850 } 851 852 /** 853 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params 854 * @hw: pointer to hardware structure 855 * 856 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 857 * ixgbe_hw struct in order to set up EEPROM access. 858 **/ 859 int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) 860 { 861 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 862 u32 eec; 863 u16 eeprom_size; 864 865 if (eeprom->type == ixgbe_eeprom_uninitialized) { 866 eeprom->type = ixgbe_eeprom_none; 867 /* Set default semaphore delay to 10ms which is a well 868 * tested value */ 869 eeprom->semaphore_delay = 10; 870 /* Clear EEPROM page size, it will be initialized as needed */ 871 eeprom->word_page_size = 0; 872 873 /* 874 * Check for EEPROM present first. 
875 * If not present leave as none 876 */ 877 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 878 if (eec & IXGBE_EEC_PRES) { 879 eeprom->type = ixgbe_eeprom_spi; 880 881 /* 882 * SPI EEPROM is assumed here. This code would need to 883 * change if a future EEPROM is not SPI. 884 */ 885 eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec); 886 eeprom->word_size = BIT(eeprom_size + 887 IXGBE_EEPROM_WORD_SIZE_SHIFT); 888 } 889 890 if (eec & IXGBE_EEC_ADDR_SIZE) 891 eeprom->address_bits = 16; 892 else 893 eeprom->address_bits = 8; 894 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n", 895 eeprom->type, eeprom->word_size, eeprom->address_bits); 896 } 897 898 return 0; 899 } 900 901 /** 902 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang 903 * @hw: pointer to hardware structure 904 * @offset: offset within the EEPROM to write 905 * @words: number of words 906 * @data: 16 bit word(s) to write to EEPROM 907 * 908 * Reads 16 bit word(s) from EEPROM through bit-bang method 909 **/ 910 int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 911 u16 words, u16 *data) 912 { 913 u16 i, count; 914 int status; 915 916 hw->eeprom.ops.init_params(hw); 917 918 if (words == 0 || (offset + words > hw->eeprom.word_size)) 919 return -EINVAL; 920 921 /* 922 * The EEPROM page size cannot be queried from the chip. We do lazy 923 * initialization. It is worth to do that when we write large buffer. 924 */ 925 if ((hw->eeprom.word_page_size == 0) && 926 (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) 927 ixgbe_detect_eeprom_page_size_generic(hw, offset); 928 929 /* 930 * We cannot hold synchronization semaphores for too long 931 * to avoid other entity starvation. However it is more efficient 932 * to read in bursts than synchronizing access for each word. 933 */ 934 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { 935 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
936 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); 937 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, 938 count, &data[i]); 939 940 if (status != 0) 941 break; 942 } 943 944 return status; 945 } 946 947 /** 948 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM 949 * @hw: pointer to hardware structure 950 * @offset: offset within the EEPROM to be written to 951 * @words: number of word(s) 952 * @data: 16 bit word(s) to be written to the EEPROM 953 * 954 * If ixgbe_eeprom_update_checksum is not called after this function, the 955 * EEPROM will most likely contain an invalid checksum. 956 **/ 957 static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 958 u16 words, u16 *data) 959 { 960 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; 961 u16 page_size; 962 int status; 963 u16 word; 964 u16 i; 965 966 /* Prepare the EEPROM for writing */ 967 status = ixgbe_acquire_eeprom(hw); 968 if (status) 969 return status; 970 971 if (ixgbe_ready_eeprom(hw) != 0) { 972 ixgbe_release_eeprom(hw); 973 return -EIO; 974 } 975 976 for (i = 0; i < words; i++) { 977 ixgbe_standby_eeprom(hw); 978 979 /* Send the WRITE ENABLE command (8 bit opcode) */ 980 ixgbe_shift_out_eeprom_bits(hw, 981 IXGBE_EEPROM_WREN_OPCODE_SPI, 982 IXGBE_EEPROM_OPCODE_BITS); 983 984 ixgbe_standby_eeprom(hw); 985 986 /* Some SPI eeproms use the 8th address bit embedded 987 * in the opcode 988 */ 989 if ((hw->eeprom.address_bits == 8) && 990 ((offset + i) >= 128)) 991 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 992 993 /* Send the Write command (8-bit opcode + addr) */ 994 ixgbe_shift_out_eeprom_bits(hw, write_opcode, 995 IXGBE_EEPROM_OPCODE_BITS); 996 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), 997 hw->eeprom.address_bits); 998 999 page_size = hw->eeprom.word_page_size; 1000 1001 /* Send the data in burst via SPI */ 1002 do { 1003 word = data[i]; 1004 word = (word >> 8) | (word << 8); 1005 ixgbe_shift_out_eeprom_bits(hw, word, 16); 1006 1007 if 
(page_size == 0) 1008 break; 1009 1010 /* do not wrap around page */ 1011 if (((offset + i) & (page_size - 1)) == 1012 (page_size - 1)) 1013 break; 1014 } while (++i < words); 1015 1016 ixgbe_standby_eeprom(hw); 1017 usleep_range(10000, 20000); 1018 } 1019 /* Done with writing - release the EEPROM */ 1020 ixgbe_release_eeprom(hw); 1021 1022 return 0; 1023 } 1024 1025 /** 1026 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM 1027 * @hw: pointer to hardware structure 1028 * @offset: offset within the EEPROM to be written to 1029 * @data: 16 bit word to be written to the EEPROM 1030 * 1031 * If ixgbe_eeprom_update_checksum is not called after this function, the 1032 * EEPROM will most likely contain an invalid checksum. 1033 **/ 1034 int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1035 { 1036 hw->eeprom.ops.init_params(hw); 1037 1038 if (offset >= hw->eeprom.word_size) 1039 return -EINVAL; 1040 1041 return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); 1042 } 1043 1044 /** 1045 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang 1046 * @hw: pointer to hardware structure 1047 * @offset: offset within the EEPROM to be read 1048 * @words: number of word(s) 1049 * @data: read 16 bit words(s) from EEPROM 1050 * 1051 * Reads 16 bit word(s) from EEPROM through bit-bang method 1052 **/ 1053 int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1054 u16 words, u16 *data) 1055 { 1056 u16 i, count; 1057 int status; 1058 1059 hw->eeprom.ops.init_params(hw); 1060 1061 if (words == 0 || (offset + words > hw->eeprom.word_size)) 1062 return -EINVAL; 1063 1064 /* 1065 * We cannot hold synchronization semaphores for too long 1066 * to avoid other entity starvation. However it is more efficient 1067 * to read in bursts than synchronizing access for each word. 
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		/* count = min(remaining words, max burst size) */
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status)
			return status;
	}

	return 0;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method.  Holds the
 * EEPROM access (acquire/release) for the whole burst.
 *
 * Return: 0 on success, -EIO if the EEPROM was not ready, or the error
 * from ixgbe_acquire_eeprom().
 **/
static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 word_in;
	int status;
	u16 i;

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);
	if (status)
		return status;

	if (ixgbe_ready_eeprom(hw) != 0) {
		ixgbe_release_eeprom(hw);
		return -EIO;
	}

	for (i = 0; i < words; i++) {
		ixgbe_standby_eeprom(hw);
		/* Some SPI eeproms use the 8th address bit embedded
		 * in the opcode
		 */
		if ((hw->eeprom.address_bits == 8) &&
		    ((offset + i) >= 128))
			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the READ command (opcode + addr);
		 * byte address = word offset * 2
		 */
		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
					    hw->eeprom.address_bits);

		/* Read the data; the device shifts out MSB first, so
		 * swap back to host byte order.
		 */
		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
		data[i] = (word_in >> 8) | (word_in << 8);
	}

	/* End this read operation */
	ixgbe_release_eeprom(hw);

	return 0;
}

/**
 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit value from EEPROM
 *
 * Reads 16 bit value from EEPROM through bit-bang method
 *
 * Return: 0 on success, -EINVAL if @offset is out of range, otherwise the
 * bit-bang read status.
 **/
int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size)
		return -EINVAL;

	return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
}

/**
 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of word(s)
 * @data: 16 bit word(s) from the EEPROM
 *
 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
 *
 * Return: 0 on success, -EINVAL on bad parameters, -EIO on EERD timeout.
 **/
int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	int status;
	u32 eerd;
	u32 i;

	hw->eeprom.ops.init_params(hw);

	/* NOTE(review): unlike the bit-bang buffer read, only @offset is
	 * range-checked here, not @offset + @words - confirm callers bound
	 * @words against the EEPROM size.
	 */
	if (words == 0 || offset >= hw->eeprom.word_size)
		return -EINVAL;

	for (i = 0; i < words; i++) {
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == 0) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			hw_dbg(hw, "Eeprom read timed out\n");
			return status;
		}
	}

	return 0;
}

/**
 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be used as a scratch pad
 *
 * Discover EEPROM page size by writing marching data at given offset.
 * This function is called only when we are writing a new large buffer
 * at given offset so the data would be overwritten anyway.
 *
 * Return: 0 on success with hw->eeprom.word_page_size updated, or the
 * first failing read/write status.
 **/
static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	int status;
	u16 i;

	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Assume the maximum page size so the write below goes out as one
	 * maximal burst, then clear the setting until detection completes.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status)
		return status;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status)
		return status;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
	       hw->eeprom.word_page_size);
	return 0;
}

/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 *
 * Return: 0 on success, negative errno otherwise.
 **/
int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}

/**
 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of words
 * @data: word(s) write to the EEPROM
 *
 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
 *
 * Return: 0 on success, -EINVAL on bad parameters, -EIO on EEWR timeout.
 **/
int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	int status;
	u32 eewr;
	u16 i;

	hw->eeprom.ops.init_params(hw);

	/* NOTE(review): only @offset is range-checked, not @offset + @words
	 * - same caveat as ixgbe_read_eerd_buffer_generic().
	 */
	if (words == 0 || offset >= hw->eeprom.word_size)
		return -EINVAL;

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		/* wait for any previous command to drain before issuing */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			return status;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		/* wait for this write to complete */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			return status;
		}
	}

	return 0;
}

/**
 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the EEWR register.
 *
 * Return: 0 on success, negative errno otherwise.
 **/
int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}

/**
 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
 * @hw: pointer to hardware structure
 * @ee_reg: EEPROM flag for polling
 *
 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
 * read or write is done respectively.
 *
 * Return: 0 when the DONE bit is set, -EIO after
 * IXGBE_EERD_EEWR_ATTEMPTS polls without it.
 **/
static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 i;
	u32 reg;

	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
			return 0;
		}
		udelay(5);
	}
	return -EIO;
}

/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 *
 * Return: 0 on success, -EBUSY if the SW/FW sync cannot be taken, -EIO if
 * the hardware never grants EEPROM access.
 **/
static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;
	u32 i;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		return -EBUSY;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Request EEPROM Access */
	eec |= IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

	for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		if (eec & IXGBE_EEC_GNT)
			break;
		udelay(5);
	}

	/* Release if grant not acquired */
	if (!(eec & IXGBE_EEC_GNT)) {
		eec &= ~IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		hw_dbg(hw, "Could not acquire EEPROM grant\n");

		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
		return -EIO;
	}

	/* Setup EEPROM for Read/Write */
	/* Clear CS and SK */
	eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	return 0;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 *
 * Return: 0 when both the SMBI and SWESMBI semaphores are held, -EIO on
 * timeout of either.
 **/
static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			break;
		usleep_range(50, 100);
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
		/* this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usleep_range(50, 100);
		/* one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SMBI) {
			hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
			return -EIO;
		}
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	for (i = 0; i < timeout; i++) {
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

		/* Set the SW EEPROM semaphore bit to request access */
		swsm |= IXGBE_SWSM_SWESMBI;
		IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);

		/* If we set the bit successfully then we got the
		 * semaphore.
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SWESMBI)
			break;

		usleep_range(50, 100);
	}

	/* Release semaphores and return error if SW EEPROM semaphore
	 * was not granted because we don't have access to the EEPROM
	 */
	if (i >= timeout) {
		hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
		ixgbe_release_eeprom_semaphore(hw);
		return -EIO;
	}

	return 0;
}

/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ready_eeprom - Polls for EEPROM ready
 * @hw: pointer to hardware structure
 *
 * Return: 0 once the SPI status register reports ready, -EIO on timeout.
 **/
static int ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	u16 i;
	u8 spi_stat_reg;

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared. The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register. If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		udelay(5);
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		hw_dbg(hw, "SPI EEPROM Status error\n");
		return -EIO;
	}

	return 0;
}

/**
 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 * @hw: pointer to hardware structure
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 *
 * Bit-bangs @count bits of @data, MSB first, on the EEC DI line, toggling
 * the SK clock for each bit.
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time. Determine the starting bit based on count
	 */
	mask = BIT(count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM). A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		udelay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift
 *
 * Return: up to 16 bits read from the EEPROM DO line, MSB first.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit. During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}

/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deasserts the bit-bang interface (CS high, SK low), drops the EEC
 * request bit and releases the SW/FW sync taken by ixgbe_acquire_eeprom().
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	udelay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/*
	 * Delay before attempt to obtain semaphore again to allow FW
	 * access. semaphore_delay is in ms we need us for usleep_range
	 */
	usleep_range(hw->eeprom.semaphore_delay * 1000,
		     hw->eeprom.semaphore_delay * 2000);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums words 0x0-0x3E plus all pointed-to sections except the FW area,
 * then returns the complement relative to IXGBE_EEPROM_SUM.
 *
 * Return: non-negative checksum value, or -EIO on a pointer/length/word
 * read failure.
 **/
int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			hw_dbg(hw, "EEPROM read failed\n");
			/* NOTE(review): a failure here only breaks out and
			 * yields a checksum over partial data, while the
			 * loops below return -EIO - confirm this asymmetry
			 * is intentional.
			 */
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return -EIO;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return -EIO;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				hw_dbg(hw, "EEPROM read failed\n");
				return -EIO;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (int)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 *
 * Return: 0 if the stored and calculated checksums match, -EIO on
 * mismatch, or the underlying read/calc error.
 **/
int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	u16 read_checksum = 0;
	u16 checksum;
	int status;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	/* calc_checksum returns the checksum in the low 16 bits */
	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = -EIO;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Return: 0 on success, or the underlying read/calc/write error.
 **/
int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 checksum;
	int status;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}

/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 *
 * Return: 0 on success, -EINVAL if @index is out of range.
 **/
int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	/* Record lower 32 bits of MAC address and then make
	 * sure that write is flushed to hardware before writing
	 * the upper 16 bits and setting the valid bit.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return 0;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 *
 * Return: 0 on success, -EINVAL if @index is out of range.
 **/
int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	/* Clear the address valid bit and upper 16 bits of the address
	 * before clearing the lower bits. This way we aren't updating
	 * a live filter.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 *
 * Return: always 0.
 **/
int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(hw->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
	} else {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR 0 holds the station address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	if (hw->mac.ops.init_uta_tables)
		hw->mac.ops.init_uta_tables(hw);

	return 0;
}

/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 *
 * Return: the 12-bit MTA vector (0 if mc_filter_type is invalid).
 **/
static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		hw_dbg(hw, "MC filter type param set incorrectly\n");
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: Multicast address
 *
 * Sets the bit-vector in the multicast table shadow; the shadow is written
 * to hardware by the caller (ixgbe_update_mc_addr_list_generic).
 **/
static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
}

/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @netdev: pointer to net device structure
 *
 * The given list replaces any existing list. Clears the MC addrs from receive
 * address registers and the multicast table. Uses unused receive address
 * registers for the first multicast addresses, and hashes the rest into the
 * multicast table.
 *
 * Return: always 0.
 **/
int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
				      struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i;

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	hw_dbg(hw, " Clearing MTA\n");
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* Update mta shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		hw_dbg(hw, " Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, ha->addr);
	}

	/* Enable mta */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
	return 0;
}

/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 *
 * Return: always 0.
 **/
int ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	/* only enable the filter if multicast entries are in use */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
				hw->mac.mc_filter_type);

	return 0;
}

/**
 * ixgbe_disable_mc_generic - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
 *
 * Return: always 0.
 **/
int ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	/* clearing MFE leaves only the filter-type (MO) bits set */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return 0;
}

/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
int ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	/* Validate the water mark configuration.
	 */
	/* A zero pause time would make any transmitted pause frame a no-op */
	if (!hw->fc.pause_time)
		return -EINVAL;

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* low water must be non-zero and strictly below high */
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				hw_dbg(hw, "Invalid water mark configuration\n");
				return -EINVAL;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return -EIO;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* NOTE(review): the << 10 presumably converts the
			 * configured marks from KB to bytes - confirm against
			 * the datasheet's FCRTL/FCRTH field definitions.
			 */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB. This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register): the multiply by
	 * 0x00010001 replicates the 16-bit pause time into both halves
	 * of each 32-bit FCTTV register.
	 */
	reg = hw->fc.pause_time * 0x00010001U;
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Refresh value is programmed to half the pause time */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return 0;
}

/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings
 **/
int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	/* A zero advertisement word on either side means nothing was
	 * negotiated - treat as invalid input.
	 */
	if ((!(adv_reg)) || (!(lp_reg)))
		return -EINVAL;

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames. In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			hw_dbg(hw, "Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		/* local: asym only; partner: sym+asym -> we may send pause */
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		/* local: sym+asym; partner: asym only -> we may receive pause */
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
	} else {
		/* No common capability - disable flow control */
		hw->fc.current_mode = ixgbe_fc_none;
		hw_dbg(hw, "Flow Control = NONE.\n");
	}
	return 0;
}

/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control autonegotiation on 1 gig fiber.
 **/
static int ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
	int ret_val;

	/*
	 * On multispeed fiber at 1g, bail out if
	 * - link is up but AN did not complete, or if
	 * - link is up and AN completed but timed out
	 */

	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
		return -EIO;

	/* Local advertisement and link partner ability from the 1G PCS */
	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

	/* Both words use the same SYM/ASM pause bit positions */
	ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
				     pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE,
				     IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE);

	return ret_val;
}

/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static int ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	int ret_val;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
		return -EIO;

	/* 82599 exposes partner AN support in a second LINKS register */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
			return -EIO;
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

	return ret_val;
}

/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static int ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	/* Local advertisement and link partner ability via MDIO AN MMD.
	 * NOTE(review): read_reg return values are ignored here; on failure
	 * the zero-initialized words make ixgbe_negotiate_fc() return
	 * -EINVAL, which callers treat as "autoneg failed".
	 */
	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
			     MDIO_MMD_AN,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
			     MDIO_MMD_AN,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
				  (u32)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}

/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	ixgbe_link_speed speed;
	int ret_val = -EIO;
	bool link_up;

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out. Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 *
	 * Since we're being called from an LSC, link is already known to be up.
	 * So use link_up_wait_to_complete=false.
	 */
	if (hw->fc.disable_fc_autoneg)
		goto out;

	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (!link_up)
		goto out;

	/* Dispatch on media type; ret_val stays -EIO if no case runs */
	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw))
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == 0) {
		hw->fc.fc_was_autonegged = true;
	} else {
		/* Negotiation failed or was skipped: fall back to whatever
		 * mode the user requested.
		 */
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}

/**
 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
 * @hw: pointer to hardware structure
 *
 * System-wide timeout range is encoded in PCIe Device Control2 register.
 *
 * Add 10% to specified maximum and return the number of times to poll for
 * completion timeout, in units of 100 microsec. Never return less than
 * 800 = 80 millisec.
2469 **/ 2470 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) 2471 { 2472 s16 devctl2; 2473 u32 pollcnt; 2474 2475 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); 2476 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; 2477 2478 switch (devctl2) { 2479 case IXGBE_PCIDEVCTRL2_65_130ms: 2480 pollcnt = 1300; /* 130 millisec */ 2481 break; 2482 case IXGBE_PCIDEVCTRL2_260_520ms: 2483 pollcnt = 5200; /* 520 millisec */ 2484 break; 2485 case IXGBE_PCIDEVCTRL2_1_2s: 2486 pollcnt = 20000; /* 2 sec */ 2487 break; 2488 case IXGBE_PCIDEVCTRL2_4_8s: 2489 pollcnt = 80000; /* 8 sec */ 2490 break; 2491 case IXGBE_PCIDEVCTRL2_17_34s: 2492 pollcnt = 34000; /* 34 sec */ 2493 break; 2494 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ 2495 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ 2496 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ 2497 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ 2498 default: 2499 pollcnt = 800; /* 80 millisec minimum */ 2500 break; 2501 } 2502 2503 /* add 10% to spec maximum */ 2504 return (pollcnt * 11) / 10; 2505 } 2506 2507 /** 2508 * ixgbe_disable_pcie_primary - Disable PCI-express primary access 2509 * @hw: pointer to hardware structure 2510 * 2511 * Disables PCI-Express primary access and verifies there are no pending 2512 * requests. -EALREADY is returned if primary disable 2513 * bit hasn't caused the primary requests to be disabled, else 0 2514 * is returned signifying primary requests disabled. 
 **/
static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
	u32 i, poll;
	u16 value;

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Poll for bit to read as set */
	for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
			break;
		usleep_range(100, 120);
	}
	if (i >= IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT) {
		hw_dbg(hw, "GIO disable did not set - requesting resets\n");
		goto gio_disable_fail;
	}

	/* Exit if primary requests are blocked (or the device is gone) */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    ixgbe_removed(hw->hw_addr))
		return 0;

	/* Poll for primary request bit to clear */
	for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
		udelay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			return 0;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new primary requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	hw_dbg(hw, "GIO Primary Disable bit didn't clear - requesting resets\n");
gio_disable_fail:
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer do not need the PCIe transaction-pending check */
	if (hw->mac.type >= ixgbe_mac_X550)
		return 0;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	/* Poll PCI config space, budget derived from the device's
	 * completion-timeout setting (units of 100 usec per iteration).
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		udelay(100);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		if (ixgbe_removed(hw->hw_addr))
			return 0;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			return 0;
	}

	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
	return -EALREADY;
}

/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	/* FW's copy of each resource bit sits 5 bits above SW's copy */
	u32 fwmask = mask << 5;
	u32 timeout = 200;
	u32 i;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return -EBUSY;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Free: claim the SW bit while holding the NVM
			 * semaphore, then drop the semaphore.
			 */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return 0;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			usleep_range(5000, 10000);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	usleep_range(5000, 10000);
	return -EBUSY;
}

/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0,
PHY1, EEPROM, Flash) 2632 **/ 2633 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) 2634 { 2635 u32 gssr; 2636 u32 swmask = mask; 2637 2638 ixgbe_get_eeprom_semaphore(hw); 2639 2640 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2641 gssr &= ~swmask; 2642 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2643 2644 ixgbe_release_eeprom_semaphore(hw); 2645 } 2646 2647 /** 2648 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read 2649 * @hw: pointer to hardware structure 2650 * @reg_val: Value we read from AUTOC 2651 * @locked: bool to indicate whether the SW/FW lock should be taken. Never 2652 * true in this the generic case. 2653 * 2654 * The default case requires no protection so just to the register read. 2655 **/ 2656 int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) 2657 { 2658 *locked = false; 2659 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2660 return 0; 2661 } 2662 2663 /** 2664 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write 2665 * @hw: pointer to hardware structure 2666 * @reg_val: value to write to AUTOC 2667 * @locked: bool to indicate whether the SW/FW lock was already taken by 2668 * previous read. 2669 **/ 2670 int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) 2671 { 2672 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); 2673 return 0; 2674 } 2675 2676 /** 2677 * ixgbe_disable_rx_buff_generic - Stops the receive data path 2678 * @hw: pointer to hardware structure 2679 * 2680 * Stops the receive data path and waits for the HW to internally 2681 * empty the Rx security block. 
2682 **/ 2683 int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) 2684 { 2685 #define IXGBE_MAX_SECRX_POLL 40 2686 int i; 2687 int secrxreg; 2688 2689 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2690 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2691 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2692 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 2693 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 2694 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 2695 break; 2696 else 2697 /* Use interrupt-safe sleep just in case */ 2698 udelay(1000); 2699 } 2700 2701 /* For informational purposes only */ 2702 if (i >= IXGBE_MAX_SECRX_POLL) 2703 hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); 2704 2705 return 0; 2706 2707 } 2708 2709 /** 2710 * ixgbe_enable_rx_buff_generic - Enables the receive data path 2711 * @hw: pointer to hardware structure 2712 * 2713 * Enables the receive data path 2714 **/ 2715 int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) 2716 { 2717 u32 secrxreg; 2718 2719 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2720 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2721 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2722 IXGBE_WRITE_FLUSH(hw); 2723 2724 return 0; 2725 } 2726 2727 /** 2728 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 2729 * @hw: pointer to hardware structure 2730 * @regval: register value to write to RXCTRL 2731 * 2732 * Enables the Rx DMA unit 2733 **/ 2734 int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 2735 { 2736 if (regval & IXGBE_RXCTRL_RXEN) 2737 hw->mac.ops.enable_rx(hw); 2738 else 2739 hw->mac.ops.disable_rx(hw); 2740 2741 return 0; 2742 } 2743 2744 /** 2745 * ixgbe_blink_led_start_generic - Blink LED based on index. 
 * @hw: pointer to hardware structure
 * @index: led number to blink
 **/
int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ixgbe_link_speed speed = 0;
	bool link_up = false;
	bool locked = false;
	int ret_val;

	/* Only LED indices 0-3 exist */
	if (index > 3)
		return -EINVAL;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (!link_up) {
		/* Use the protected AUTOC accessors; some MACs require a
		 * SW/FW lock around AUTOC (see prot_autoc_* ops).
		 */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
		if (ret_val)
			return ret_val;

		/* NOTE(review): FLU presumably forces link-up and AN_RESTART
		 * restarts autoneg - confirm against the AUTOC register spec.
		 */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;

		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
		if (ret_val)
			return ret_val;

		IXGBE_WRITE_FLUSH(hw);

		usleep_range(10000, 20000);
	}

	/* Switch the selected LED to blink mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 **/
int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	bool locked = false;
	u32 autoc_reg = 0;
	int ret_val;

	/* Only LED indices 0-3 exist */
	if (index > 3)
		return -EINVAL;

	/* Undo the forced-link setup done by ixgbe_blink_led_start_generic() */
	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
	if (ret_val)
		return ret_val;

	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;

	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
	if (ret_val)
		return ret_val;

	/* Restore the LED to link/activity mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_offset: SAN MAC address offset
 *
 * This function will read the EEPROM location for the SAN MAC address
 * pointer, and returns the value at that location. This is used in both
 * get and set mac_addr routines.
 **/
static int ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset)
{
	int ret_val;

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.
	 */
	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
				      san_mac_offset);
	if (ret_val)
		hw_err(hw, "eeprom read at offset %d failed\n",
		       IXGBE_SAN_MAC_ADDR_PTR);

	return ret_val;
}

/**
 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address
 *
 * Reads the SAN MAC address from the EEPROM, if it's available.
 * This is
 * per-port, so set_lan_id() must be called before reading the addresses.
 * set_lan_id() is called by identify_sfp(), but this cannot be relied
 * upon for non-SFP connections, so we must call it here.
 **/
int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
	u16 san_mac_data, san_mac_offset;
	int ret_val;
	u8 i;

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available. If they're not, no point in calling set_lan_id() here.
	 * 0 and 0xFFFF are "unprogrammed" sentinels.
	 */
	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
		goto san_mac_addr_clr;

	/* make sure we know which port we need to program */
	hw->mac.ops.set_lan_id(hw);
	/* apply the port offset to the address offset */
	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
	/* The 6-byte address is stored as three 16-bit EEPROM words,
	 * low byte first within each word.
	 */
	for (i = 0; i < 3; i++) {
		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
					      &san_mac_data);
		if (ret_val) {
			hw_err(hw, "eeprom read at offset %d failed\n",
			       san_mac_offset);
			goto san_mac_addr_clr;
		}
		san_mac_addr[i * 2] = (u8)(san_mac_data);
		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
		san_mac_offset++;
	}
	return 0;

san_mac_addr_clr:
	/* No addresses available in this EEPROM. It's not necessarily an
	 * error though, so just wipe the local address and return.
	 */
	for (i = 0; i < 6; i++)
		san_mac_addr[i] = 0xFF;
	return ret_val;
}

/**
 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
 * @hw: pointer to hardware structure
 *
 * Read PCIe configuration space, and get the MSI-X vector count from
 * the capabilities table.
 **/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
	u16 msix_count;
	u16 max_msix_count;
	u16 pcie_offset;

	/* Capability offset and vector ceiling differ per MAC generation */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
		break;
	case ixgbe_mac_e610:
		pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
		break;
	default:
		/* Unknown MAC: report a single vector */
		return 1;
	}

	msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
	if (ixgbe_removed(hw->hw_addr))
		msix_count = 0;
	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

	/* MSI-X count is zero-based in HW */
	msix_count++;

	/* Clamp to the MAC's supported maximum */
	if (msix_count > max_msix_count)
		msix_count = max_msix_count;

	return msix_count;
}

/**
 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar
 **/
int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return -EINVAL;
	}

	/* Each RAR's 64-bit pool bitmap is split over two 32-bit registers */
	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	if (ixgbe_removed(hw->hw_addr))
		return 0;

	/* Nothing associated with this RAR - nothing to clear */
	if (!mpsar_lo && !mpsar_hi)
		return 0;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		/* Clear just this pool's bit in the low half */
		mpsar_lo &= ~BIT(vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		/* Pools 32-63 live in the high half */
		mpsar_hi &= ~BIT(vmdq - 32);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 &&
	    rar != 0 && rar != hw->mac.san_mac_rar_index)
		hw->mac.ops.clear_rar(hw, rar);

	return 0;
}

/**
 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq pool index
 **/
int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return -EINVAL;
	}

	/* Set this pool's bit in the appropriate half of the 64-bit bitmap */
	if (vmdq < 32) {
		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
		mpsar |= BIT(vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
	} else {
		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
		mpsar |= BIT(vmdq - 32);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
	}
	return 0;
}

/**
 * ixgbe_set_vmdq_san_mac_generic - Associate VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @vmdq: VMDq pool index
 *
 * This function should only be involved in the IOV mode.
 * In IOV mode, Default pool is next pool after the number of
 * VFs advertized and not 0.
 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
 **/
int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
	u32 rar = hw->mac.san_mac_rar_index;

	/* Overwrite both halves of the pool bitmap so that exactly one
	 * pool bit (the given vmdq) remains set for the SAN MAC RAR.
	 */
	if (vmdq < 32) {
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
	}

	return 0;
}

/**
 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
 * @hw: pointer to hardware structure
 **/
int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
	int i;

	/* Zero all 128 UTA registers */
	for (i = 0; i < 128; i++)
		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);

	return 0;
}

/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
 *		 vlanid not found
 *
 * return the VLVF index where this VLAN id should be placed
 *
 **/
static int ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
	int regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case: VLAN 0 always maps to slot 0 */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? -ENOSPC : 0;

	/* add VLAN enable bit for comparison */
	vlan |= IXGBE_VLVF_VIEN;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) ..
	 * 1
	 */
	/* Slot 0 is intentionally excluded (reserved for VLAN 0) */
	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN. Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		hw_dbg(hw, "No space in VLVF.\n");

	return first_empty_slot ? : -ENOSPC;
}

/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 * @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool vlvf_bypass)
{
	u32 regidx, vfta_delta, vfta, bits;
	int vlvf_index;

	/* VLAN ids are 12-bit, pool indices 6-bit */
	if ((vlan > 4095) || (vind > 63))
		return -EINVAL;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));

	/* vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register. Since the diff
	 * is an XOR mask we can just update vfta using an XOR.
	 */
	vfta_delta &= vlan_on ?
~vfta : vfta; 3164 vfta ^= vfta_delta; 3165 3166 /* Part 2 3167 * If VT Mode is set 3168 * Either vlan_on 3169 * make sure the vlan is in VLVF 3170 * set the vind bit in the matching VLVFB 3171 * Or !vlan_on 3172 * clear the pool bit and possibly the vind 3173 */ 3174 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) 3175 goto vfta_update; 3176 3177 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); 3178 if (vlvf_index < 0) { 3179 if (vlvf_bypass) 3180 goto vfta_update; 3181 return vlvf_index; 3182 } 3183 3184 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); 3185 3186 /* set the pool bit */ 3187 bits |= BIT(vind % 32); 3188 if (vlan_on) 3189 goto vlvf_update; 3190 3191 /* clear the pool bit */ 3192 bits ^= BIT(vind % 32); 3193 3194 if (!bits && 3195 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { 3196 /* Clear VFTA first, then disable VLVF. Otherwise 3197 * we run the risk of stray packets leaking into 3198 * the PF via the default pool 3199 */ 3200 if (vfta_delta) 3201 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); 3202 3203 /* disable VLVF and clear remaining bit from pool */ 3204 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 3205 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); 3206 3207 return 0; 3208 } 3209 3210 /* If there are still bits set in the VLVFB registers 3211 * for the VLAN ID indicated we need to see if the 3212 * caller is requesting that we clear the VFTA entry bit. 3213 * If the caller has requested that we clear the VFTA 3214 * entry bit but there are still pools/VFs using this VLAN 3215 * ID entry then ignore the request. We're not worried 3216 * about the case where we're turning the VFTA VLAN ID 3217 * entry bit on, only when requested to turn it off as 3218 * there may be multiple pools and/or VFs using the 3219 * VLAN ID entry. In that case we cannot clear the 3220 * VFTA bit until all pools/VFs using that VLAN ID have also 3221 * been cleared. 
This will be indicated by "bits" being 3222 * zero. 3223 */ 3224 vfta_delta = 0; 3225 3226 vlvf_update: 3227 /* record pool change and enable VLAN ID if not already enabled */ 3228 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); 3229 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); 3230 3231 vfta_update: 3232 /* Update VFTA now that we are ready for traffic */ 3233 if (vfta_delta) 3234 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); 3235 3236 return 0; 3237 } 3238 3239 /** 3240 * ixgbe_clear_vfta_generic - Clear VLAN filter table 3241 * @hw: pointer to hardware structure 3242 * 3243 * Clears the VLAN filter table, and the VMDq index associated with the filter 3244 **/ 3245 int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 3246 { 3247 u32 offset; 3248 3249 for (offset = 0; offset < hw->mac.vft_size; offset++) 3250 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 3251 3252 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 3253 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 3254 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); 3255 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); 3256 } 3257 3258 return 0; 3259 } 3260 3261 /** 3262 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix 3263 * @hw: pointer to hardware structure 3264 * 3265 * Contains the logic to identify if we need to verify link for the 3266 * crosstalk fix 3267 **/ 3268 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) 3269 { 3270 /* Does FW say we need the fix */ 3271 if (!hw->need_crosstalk_fix) 3272 return false; 3273 3274 /* Only consider SFP+ PHYs i.e. 
media type fiber */ 3275 switch (hw->mac.ops.get_media_type(hw)) { 3276 case ixgbe_media_type_fiber: 3277 case ixgbe_media_type_fiber_qsfp: 3278 break; 3279 default: 3280 return false; 3281 } 3282 3283 return true; 3284 } 3285 3286 /** 3287 * ixgbe_check_mac_link_generic - Determine link and speed status 3288 * @hw: pointer to hardware structure 3289 * @speed: pointer to link speed 3290 * @link_up: true when link is up 3291 * @link_up_wait_to_complete: bool used to wait for link up or not 3292 * 3293 * Reads the links register to determine if link is up and the current speed 3294 **/ 3295 int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3296 bool *link_up, bool link_up_wait_to_complete) 3297 { 3298 bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw); 3299 u32 links_reg, links_orig; 3300 u32 i; 3301 3302 /* If Crosstalk fix enabled do the sanity check of making sure 3303 * the SFP+ cage is full. 3304 */ 3305 if (crosstalk_fix_active) { 3306 u32 sfp_cage_full; 3307 3308 switch (hw->mac.type) { 3309 case ixgbe_mac_82599EB: 3310 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3311 IXGBE_ESDP_SDP2; 3312 break; 3313 case ixgbe_mac_X550EM_x: 3314 case ixgbe_mac_x550em_a: 3315 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 3316 IXGBE_ESDP_SDP0; 3317 break; 3318 default: 3319 /* sanity check - No SFP+ devices here */ 3320 sfp_cage_full = false; 3321 break; 3322 } 3323 3324 if (!sfp_cage_full) { 3325 *link_up = false; 3326 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3327 return 0; 3328 } 3329 } 3330 3331 /* clear the old state */ 3332 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 3333 3334 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3335 3336 if (links_orig != links_reg) { 3337 hw_dbg(hw, "LINKS changed from %08X to %08X\n", 3338 links_orig, links_reg); 3339 } 3340 3341 if (link_up_wait_to_complete) { 3342 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3343 if (links_reg & IXGBE_LINKS_UP) { 3344 *link_up = true; 3345 break; 3346 } else { 3347 
*link_up = false; 3348 } 3349 msleep(100); 3350 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3351 } 3352 } else { 3353 if (links_reg & IXGBE_LINKS_UP) { 3354 if (crosstalk_fix_active) { 3355 /* Check the link state again after a delay 3356 * to filter out spurious link up 3357 * notifications. 3358 */ 3359 mdelay(5); 3360 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3361 if (!(links_reg & IXGBE_LINKS_UP)) { 3362 *link_up = false; 3363 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3364 return 0; 3365 } 3366 } 3367 *link_up = true; 3368 } else { 3369 *link_up = false; 3370 } 3371 } 3372 3373 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 3374 case IXGBE_LINKS_SPEED_10G_82599: 3375 if ((hw->mac.type >= ixgbe_mac_X550) && 3376 (links_reg & IXGBE_LINKS_SPEED_NON_STD)) 3377 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 3378 else 3379 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3380 break; 3381 case IXGBE_LINKS_SPEED_1G_82599: 3382 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3383 break; 3384 case IXGBE_LINKS_SPEED_100_82599: 3385 if ((hw->mac.type >= ixgbe_mac_X550 || 3386 hw->mac.type == ixgbe_mac_e610) && 3387 (links_reg & IXGBE_LINKS_SPEED_NON_STD)) 3388 *speed = IXGBE_LINK_SPEED_5GB_FULL; 3389 else 3390 *speed = IXGBE_LINK_SPEED_100_FULL; 3391 break; 3392 case IXGBE_LINKS_SPEED_10_X550EM_A: 3393 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3394 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3395 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { 3396 *speed = IXGBE_LINK_SPEED_10_FULL; 3397 } 3398 break; 3399 default: 3400 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3401 } 3402 3403 return 0; 3404 } 3405 3406 /** 3407 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3408 * the EEPROM 3409 * @hw: pointer to hardware structure 3410 * @wwnn_prefix: the alternative WWNN prefix 3411 * @wwpn_prefix: the alternative WWPN prefix 3412 * 3413 * This function will read the EEPROM from the alternative SAN MAC address 3414 * block to check the support for the alternative WWNN/WWPN prefix support. 
 **/
int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	/* clear output first; 0xFFFF signals "no prefix available" */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		return 0;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		return 0;

	/* get the corresponding prefix for WWNN/WWPN; a failed WWNN read
	 * is only logged so the WWPN read below is still attempted
	 */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
		hw_err(hw, "eeprom read at offset %d failed\n", offset);

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

	return 0;

wwn_prefix_err:
	/* read errors are reported but not returned; outputs stay 0xFFFF */
	hw_err(hw, "eeprom read at offset %d failed\n", offset);
	return 0;
}

/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for MAC anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* 8 VFs per PFVFSPOOF register; one MAC anti-spoof bit per VF */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8;
	u32 pfvfspoof;

	/* 82598 has no PFVFSPOOF registers */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= BIT(vf_target_shift);
	else
		pfvfspoof &= ~BIT(vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* VLAN anti-spoof bits live in the upper half of PFVFSPOOF */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
	u32 pfvfspoof;

	/* 82598 has no PFVFSPOOF registers */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= BIT(vf_target_shift);
	else
		pfvfspoof &= ~BIT(vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
 **/
int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	/* NOTE(review): read status is ignored; on failure *device_caps is
	 * whatever the eeprom op left there — matches existing callers' use
	 */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return 0;
}

/**
 * ixgbe_set_rxpba_generic - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
			     int num_pb,
			     u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number
	 * of packet buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case (PBA_STRATEGY_WEIGHTED):
		/* pba_80_48 strategy weight first half of packet buffer with
		 * 5/8 of the packet buffer space.
		 */
		rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		fallthrough; /* configure remaining packet buffers */
	case (PBA_STRATEGY_EQUAL):
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/*
	 * Setup Tx packet buffer and threshold equally for all TCs
	 * TXPBTHRESH register is set in K so divide by 1024 and subtract
	 * 10 since the largest packet we support is just over 9K.
	 */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}

/**
 * ixgbe_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to EEPROM
 * @length: size of EEPROM to calculate a checksum for
 *
 * Calculates the checksum for some buffer on a specified length. The
 * checksum calculated is returned.
 **/
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
	u32 i;
	u8 sum = 0;

	if (!buffer)
		return 0;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	/* two's-complement checksum: byte sum of buffer + result == 0 */
	return (u8) (0 - sum);
}

/**
 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
 * @hw: pointer to the HW structure
 * @buffer: command to write and where the return status will be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 *
 * Communicates with the manageability block. On success return 0
 * else returns semaphore error when encountering an error acquiring
 * semaphore, -EINVAL when incorrect parameters passed or -EIO when
 * command fails.
 *
 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 * by the caller.
 **/
int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
		return -EINVAL;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
		return -EIO;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(u32)) {
		hw_dbg(hw, "Buffer length failure, not aligned to dword");
		return -EINVAL;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, (__force u32)cpu_to_le32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* poll until FW clears the command bit, or timeout (ms) expires */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		usleep_range(1000, 2000);
	}

	/* Check command successful completion.  A zero timeout means
	 * "don't wait", so only the SV (status valid) bit is checked then.
	 */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
		return -EIO;

	return 0;
}

/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *          be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (true) or not (false)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return 0
 * else return -EIO or -EINVAL.
 **/
int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
				 u32 length, u32 timeout,
				 bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	struct ixgbe_hic_hdr *hdr = buffer;
	u16 buf_len, dword_len;
	u32 *u32arr = buffer;
	int status;
	u32 bi;

	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
		return -EINVAL;
	}
	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&u32arr[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = hdr->buf_len;
	if (!buf_len)
		goto rel_out;

	if (length < round_up(buf_len, 4) + hdr_size) {
		hw_dbg(hw, "Buffer not large enough for reply message.\n");
		status = -EIO;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off).
	 * NOTE(review): '<=' reads indices bi..dword_len inclusive, i.e.
	 * header dwords plus payload dwords; the size check above already
	 * guarantees the buffer can hold round_up(buf_len, 4) + hdr_size.
	 */
	for (; bi <= dword_len; bi++) {
		u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&u32arr[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}

/**
 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @min: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 * @len: length of driver_ver string
 * @driver_ver: driver string
 *
 * Sends driver version number to firmware through the manageability
 * block.  On success return 0
 * else returns -EBUSY when encountering an error acquiring
 * semaphore or -EIO when command fails.
 **/
int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
				 u8 build, u8 sub, __always_unused u16 len,
				 __always_unused const char *driver_ver)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int ret_val;
	int i;

	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = min;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	/* checksum field must be zero while the checksum is computed */
	fw_cmd.hdr.checksum = 0;
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));

	/* retry the mailbox command; break out on first definitive reply */
	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
						       sizeof(fw_cmd),
						       IXGBE_HI_COMMAND_TIMEOUT,
						       true);
		if (ret_val != 0)
			continue;

		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = 0;
		else
			ret_val = -EIO;

		break;
	}

	return ret_val;
}

/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(3000, 6000);

	/* Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usleep_range(100, 200);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		/* bail out early if the device was surprise-removed */
		if (ixgbe_removed(hw->hw_addr))
			break;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			break;
	}

	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	udelay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}

/* I2C register addresses for the EMC thermal sensor, indexed by the
 * EEPROM ETS sensor index (internal diode plus three external diodes).
 */
static const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
static const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};

/**
 * ixgbe_get_ets_data - Extracts the ETS bit data
 * @hw: pointer to hardware structure
 * @ets_cfg: extected ETS data
 * @ets_offset: offset of ETS data
 *
 * Returns error code.
 **/
static int ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
			      u16 *ets_offset)
{
	int status;

	/* read the pointer word, then validate it before following it */
	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
	if (status)
		return status;

	if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
		return -EOPNOTSUPP;

	status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
	if (status)
		return status;

	/* only the EMC sensor type is supported */
	if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Returns the thermal sensor data structure
 **/
int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
	u16 ets_offset;
	u16 ets_sensor;
	u8  num_sensors;
	u16 ets_cfg;
	int status;
	u8  i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return -EOPNOTSUPP;

	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		return status;

	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8  sensor_index;
		u8  sensor_location;

		/* sensor records follow the config word at ets_offset */
		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
					     &ets_sensor);
		if (status)
			return status;

		sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
					 ets_sensor);
		sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
					    ets_sensor);

		/* location 0 means "not populated" — skip the I2C read */
		if (sensor_location != 0) {
			status = hw->phy.ops.read_i2c_byte(hw,
					ixgbe_emc_temp_data[sensor_index],
					IXGBE_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
			if (status)
				return status;
		}
	}

	return 0;
}

/**
 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Inits the thermal sensor thresholds according to the NVM map
 * and save off the threshold and location values into mac.thermal_sensor_data
 **/
int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
	u8  low_thresh_delta;
	u8  num_sensors;
	u8  therm_limit;
	u16 ets_sensor;
	u16 ets_offset;
	u16 ets_cfg;
	int status;
	u8  i;

	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return -EOPNOTSUPP;

	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		return status;

	low_thresh_delta = FIELD_GET(IXGBE_ETS_LTHRES_DELTA_MASK, ets_cfg);
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8  sensor_index;
		u8  sensor_location;

		/* a bad record is logged and skipped, not fatal */
		if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
			hw_err(hw, "eeprom read at offset %d failed\n",
			       ets_offset + 1 + i);
			continue;
		}
		sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
					 ets_sensor);
		sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
					    ets_sensor);
		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

		/* program the hardware limit regardless of location */
		hw->phy.ops.write_i2c_byte(hw,
			ixgbe_emc_therm_limit[sensor_index],
			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);

		/* location 0 means "not populated" — nothing to record */
		if (sensor_location == 0)
			continue;

		data->sensor[i].location = sensor_location;
		data->sensor[i].caution_thresh = therm_limit;
		data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
	}

	return 0;
}

/**
 * ixgbe_get_orom_version - Return option ROM from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * if valid option ROM version, nvm_ver->or_valid set to true
 * else nvm_ver->or_valid is false.
 **/
void ixgbe_get_orom_version(struct ixgbe_hw *hw,
			    struct ixgbe_nvm_version *nvm_ver)
{
	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;

	nvm_ver->or_valid = false;
	/* Option Rom may or may not be present.  Start with pointer */
	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);

	/* make sure offset is valid */
	if (offset == 0x0 || offset == NVM_INVALID_PTR)
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);

	/* option rom exists and is valid */
	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
	    eeprom_cfg_blkl == NVM_VER_INVALID ||
	    eeprom_cfg_blkh == NVM_VER_INVALID)
		return;

	/* major/build/patch are packed across the two block words */
	nvm_ver->or_valid = true;
	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
}

/**
 * ixgbe_get_oem_prod_version - Etrack ID from EEPROM
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * if valid OEM product version, nvm_ver->oem_valid set to true
 * else nvm_ver->oem_valid is false.
 **/
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
				struct ixgbe_nvm_version *nvm_ver)
{
	u16 rel_num, prod_ver, mod_len, cap, offset;

	nvm_ver->oem_valid = false;
	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);

	/* Return is offset to OEM Product Version block is invalid */
	if (offset == 0x0 || offset == NVM_INVALID_PTR)
		return;

	/* Read product version block */
	hw->eeprom.ops.read(hw, offset, &mod_len);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);

	/* Return if OEM product version block is invalid */
	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);

	/* Return if version is invalid */
	if ((rel_num | prod_ver) == 0x0 ||
	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
		return;

	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
	nvm_ver->oem_release = rel_num;
	nvm_ver->oem_valid = true;
}

/**
 * ixgbe_get_etk_id - Return Etrack ID from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * word read errors will return 0xFFFF
 **/
void ixgbe_get_etk_id(struct ixgbe_hw *hw,
		      struct ixgbe_nvm_version *nvm_ver)
{
	u16 etk_id_l, etk_id_h;

	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
		etk_id_l = NVM_VER_INVALID;
	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
		etk_id_h = NVM_VER_INVALID;

	/* The word order for the version format is determined by high order
	 * word bit 15.
	 */
	if ((etk_id_h & NVM_ETK_VALID) == 0) {
		nvm_ver->etk_id = etk_id_h;
		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
	} else {
		nvm_ver->etk_id = etk_id_l;
		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
	}
}

/* Disable Rx in the MAC, saving the VT loopback (LBEN) state so that
 * ixgbe_enable_rx_generic() can restore it later.
 */
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		/* 82598 has no PFDTXGSWC register */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			u32 pfdtxgswc;

			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
				hw->mac.set_lben = true;
			} else {
				hw->mac.set_lben = false;
			}
		}
		rxctrl &= ~IXGBE_RXCTRL_RXEN;
		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	}
}

/* Re-enable Rx in the MAC and restore VT loopback if
 * ixgbe_disable_rx_generic() had turned it off.
 */
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));

	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (hw->mac.set_lben) {
			u32 pfdtxgswc;

			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = false;
		}
	}
}

/** ixgbe_mng_present - returns true when management capability is present
 * @hw: pointer to hardware structure
 **/
bool ixgbe_mng_present(struct ixgbe_hw *hw)
{
	u32 fwsm;

	/* 82598 has no manageability FW of this kind */
	if (hw->mac.type < ixgbe_mac_82599EB)
		return false;

	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));

	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
}

/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for
completion is needed 4209 * 4210 * Set the link speed in the MAC and/or PHY register and restarts link. 4211 */ 4212 int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 4213 ixgbe_link_speed speed, 4214 bool autoneg_wait_to_complete) 4215 { 4216 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4217 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4218 bool autoneg, link_up = false; 4219 u32 speedcnt = 0; 4220 int status = 0; 4221 u32 i = 0; 4222 4223 /* Mask off requested but non-supported speeds */ 4224 status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); 4225 if (status) 4226 return status; 4227 4228 speed &= link_speed; 4229 4230 /* Try each speed one by one, highest priority first. We do this in 4231 * software because 10Gb fiber doesn't support speed autonegotiation. 4232 */ 4233 if (speed & IXGBE_LINK_SPEED_10GB_FULL) { 4234 speedcnt++; 4235 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 4236 4237 /* Set the module link speed */ 4238 switch (hw->phy.media_type) { 4239 case ixgbe_media_type_fiber: 4240 hw->mac.ops.set_rate_select_speed(hw, 4241 IXGBE_LINK_SPEED_10GB_FULL); 4242 break; 4243 case ixgbe_media_type_fiber_qsfp: 4244 /* QSFP module automatically detects MAC link speed */ 4245 break; 4246 default: 4247 hw_dbg(hw, "Unexpected media type\n"); 4248 break; 4249 } 4250 4251 /* Allow module to change analog characteristics (1G->10G) */ 4252 msleep(40); 4253 4254 status = hw->mac.ops.setup_mac_link(hw, 4255 IXGBE_LINK_SPEED_10GB_FULL, 4256 autoneg_wait_to_complete); 4257 if (status) 4258 return status; 4259 4260 /* Flap the Tx laser if it has not already been done */ 4261 if (hw->mac.ops.flap_tx_laser) 4262 hw->mac.ops.flap_tx_laser(hw); 4263 4264 /* Wait for the controller to acquire link. Per IEEE 802.3ap, 4265 * Section 73.10.2, we may have to wait up to 500ms if KR is 4266 * attempted. 82599 uses the same timing for 10g SFI. 
4267 */ 4268 for (i = 0; i < 5; i++) { 4269 /* Wait for the link partner to also set speed */ 4270 msleep(100); 4271 4272 /* If we have link, just jump out */ 4273 status = hw->mac.ops.check_link(hw, &link_speed, 4274 &link_up, false); 4275 if (status) 4276 return status; 4277 4278 if (link_up) 4279 goto out; 4280 } 4281 } 4282 4283 if (speed & IXGBE_LINK_SPEED_1GB_FULL) { 4284 speedcnt++; 4285 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) 4286 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 4287 4288 /* Set the module link speed */ 4289 switch (hw->phy.media_type) { 4290 case ixgbe_media_type_fiber: 4291 hw->mac.ops.set_rate_select_speed(hw, 4292 IXGBE_LINK_SPEED_1GB_FULL); 4293 break; 4294 case ixgbe_media_type_fiber_qsfp: 4295 /* QSFP module automatically detects link speed */ 4296 break; 4297 default: 4298 hw_dbg(hw, "Unexpected media type\n"); 4299 break; 4300 } 4301 4302 /* Allow module to change analog characteristics (10G->1G) */ 4303 msleep(40); 4304 4305 status = hw->mac.ops.setup_mac_link(hw, 4306 IXGBE_LINK_SPEED_1GB_FULL, 4307 autoneg_wait_to_complete); 4308 if (status) 4309 return status; 4310 4311 /* Flap the Tx laser if it has not already been done */ 4312 if (hw->mac.ops.flap_tx_laser) 4313 hw->mac.ops.flap_tx_laser(hw); 4314 4315 /* Wait for the link partner to also set speed */ 4316 msleep(100); 4317 4318 /* If we have link, just jump out */ 4319 status = hw->mac.ops.check_link(hw, &link_speed, &link_up, 4320 false); 4321 if (status) 4322 return status; 4323 4324 if (link_up) 4325 goto out; 4326 } 4327 4328 /* We didn't get link. Configure back to the highest speed we tried, 4329 * (if there was more than one). We call ourselves back with just the 4330 * single highest speed that the user requested. 
4331 */ 4332 if (speedcnt > 1) 4333 status = ixgbe_setup_mac_link_multispeed_fiber(hw, 4334 highest_link_speed, 4335 autoneg_wait_to_complete); 4336 4337 out: 4338 /* Set autoneg_advertised value based on input link speed */ 4339 hw->phy.autoneg_advertised = 0; 4340 4341 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 4342 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 4343 4344 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 4345 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 4346 4347 return status; 4348 } 4349 4350 /** 4351 * ixgbe_set_soft_rate_select_speed - Set module link speed 4352 * @hw: pointer to hardware structure 4353 * @speed: link speed to set 4354 * 4355 * Set module link speed via the soft rate select. 4356 */ 4357 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, 4358 ixgbe_link_speed speed) 4359 { 4360 u8 rs, eeprom_data; 4361 int status; 4362 4363 switch (speed) { 4364 case IXGBE_LINK_SPEED_10GB_FULL: 4365 /* one bit mask same as setting on */ 4366 rs = IXGBE_SFF_SOFT_RS_SELECT_10G; 4367 break; 4368 case IXGBE_LINK_SPEED_1GB_FULL: 4369 rs = IXGBE_SFF_SOFT_RS_SELECT_1G; 4370 break; 4371 default: 4372 hw_dbg(hw, "Invalid fixed module speed\n"); 4373 return; 4374 } 4375 4376 /* Set RS0 */ 4377 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, 4378 IXGBE_I2C_EEPROM_DEV_ADDR2, 4379 &eeprom_data); 4380 if (status) { 4381 hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); 4382 return; 4383 } 4384 4385 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; 4386 4387 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, 4388 IXGBE_I2C_EEPROM_DEV_ADDR2, 4389 eeprom_data); 4390 if (status) { 4391 hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); 4392 return; 4393 } 4394 4395 /* Set RS1 */ 4396 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, 4397 IXGBE_I2C_EEPROM_DEV_ADDR2, 4398 &eeprom_data); 4399 if (status) { 4400 hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); 4401 return; 4402 } 
4403 4404 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; 4405 4406 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, 4407 IXGBE_I2C_EEPROM_DEV_ADDR2, 4408 eeprom_data); 4409 if (status) { 4410 hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); 4411 return; 4412 } 4413 } 4414