1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 1999 - 2024 Intel Corporation. */ 3 4 #include <linux/pci.h> 5 #include <linux/delay.h> 6 #include <linux/sched.h> 7 #include <linux/netdevice.h> 8 9 #include "ixgbe.h" 10 #include "ixgbe_common.h" 11 #include "ixgbe_phy.h" 12 13 static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 14 static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 15 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 16 static int ixgbe_ready_eeprom(struct ixgbe_hw *hw); 17 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 18 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 19 u16 count); 20 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 21 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 22 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 23 static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 24 25 static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 26 static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); 27 static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 28 u16 words, u16 *data); 29 static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 30 u16 words, u16 *data); 31 static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 32 u16 offset); 33 static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw); 34 35 /* Base table for registers values that change by MAC */ 36 const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { 37 IXGBE_MVALS_INIT(8259X) 38 }; 39 40 /** 41 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow 42 * control 43 * @hw: pointer to hardware structure 44 * 45 * There are several phys that do not support autoneg flow control. This 46 * function check the device id to see if the associated phy supports 47 * autoneg flow control. 
 **/
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
	bool supported = false;
	ixgbe_link_speed speed;
	bool link_up;

	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber:
		/* flow control autoneg black list */
		switch (hw->device_id) {
		case IXGBE_DEV_ID_X550EM_A_SFP:
		case IXGBE_DEV_ID_X550EM_A_SFP_N:
		case IXGBE_DEV_ID_E610_SFP:
			supported = false;
			break;
		default:
			/* For other fiber parts, FC autoneg is only valid at
			 * 1G; query the current link to decide.
			 */
			hw->mac.ops.check_link(hw, &speed, &link_up, false);
			/* if link is down, assume supported */
			if (link_up)
				supported = speed == IXGBE_LINK_SPEED_1GB_FULL;
			else
				supported = true;
		}

		break;
	case ixgbe_media_type_backplane:
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
			supported = false;
		else
			supported = true;
		break;
	case ixgbe_media_type_copper:
		/* only some copper devices support flow control autoneg */
		switch (hw->device_id) {
		case IXGBE_DEV_ID_82599_T3_LOM:
		case IXGBE_DEV_ID_X540T:
		case IXGBE_DEV_ID_X540T1:
		case IXGBE_DEV_ID_X550T:
		case IXGBE_DEV_ID_X550T1:
		case IXGBE_DEV_ID_X550EM_X_10G_T:
		case IXGBE_DEV_ID_X550EM_A_10G_T:
		case IXGBE_DEV_ID_X550EM_A_1G_T:
		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
		case IXGBE_DEV_ID_E610_10G_T:
		case IXGBE_DEV_ID_E610_2_5G_T:
			supported = true;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!supported)
		hw_dbg(hw, "Device %x does not support flow control autoneg\n",
		       hw->device_id);

	return supported;
}

/**
 * ixgbe_setup_fc_generic - Set up flow control
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.
 **/
int ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	/* reg = 1G PCS advertisement, reg_bp = backplane AUTOC,
	 * reg_cu = copper MDIO AN advertisement.
	 */
	u32 reg = 0, reg_bp = 0;
	bool locked = false;
	int ret_val = 0;
	u16 reg_cu = 0;

	/*
	 * Validate the requested mode.  Strict IEEE mode does not allow
	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
	 */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		return -EINVAL;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val)
			return ret_val;

		fallthrough; /* only backplane uses autoc */
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
				     MDIO_MMD_AN, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return -EIO;
	}

	if (hw->mac.type != ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
		 * LESM is on, likewise reset_pipeline requires the lock as
		 * it also writes AUTOC.
		 */
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			return ret_val;

	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		   ixgbe_device_supports_autoneg_fc(hw)) {
		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
				      MDIO_MMD_AN, reg_cu);
	}

	/* NOTE(review): this prints 'reg' (PCS1GLCTL/PCS1GANA value above),
	 * not the AUTOC value in reg_bp, despite the message text.
	 */
	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
	return ret_val;
}

/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 **/
int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	u16 device_caps;
	u32 ctrl_ext;
	int ret_val;

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* Identify the PHY */
	hw->phy.ops.identify(hw);

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control if method for doing so */
	if (hw->mac.ops.setup_fc) {
		ret_val = hw->mac.ops.setup_fc(hw);
		if (ret_val)
			return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = false;
		else
			hw->need_crosstalk_fix = true;
		break;
	default:
		hw->need_crosstalk_fix = false;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}

/**
 * ixgbe_start_hw_gen2 - Init sequence for common device family
 * @hw: pointer to hw structure
 *
 * Performs the init sequence common to the second generation
 * of 10 GbE devices.
332 * Devices in the second generation: 333 * 82599 334 * X540 335 * E610 336 **/ 337 int ixgbe_start_hw_gen2(struct ixgbe_hw *hw) 338 { 339 u32 i; 340 341 /* Clear the rate limiters */ 342 for (i = 0; i < hw->mac.max_tx_queues; i++) { 343 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); 344 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); 345 } 346 IXGBE_WRITE_FLUSH(hw); 347 348 return 0; 349 } 350 351 /** 352 * ixgbe_init_hw_generic - Generic hardware initialization 353 * @hw: pointer to hardware structure 354 * 355 * Initialize the hardware by resetting the hardware, filling the bus info 356 * structure and media type, clears all on chip counters, initializes receive 357 * address registers, multicast table, VLAN filter table, calls routine to set 358 * up link and flow control settings, and leaves transmit and receive units 359 * disabled and uninitialized 360 **/ 361 int ixgbe_init_hw_generic(struct ixgbe_hw *hw) 362 { 363 int status; 364 365 /* Reset the hardware */ 366 status = hw->mac.ops.reset_hw(hw); 367 368 if (status == 0) { 369 /* Start the HW */ 370 status = hw->mac.ops.start_hw(hw); 371 } 372 373 /* Initialize the LED link active for LED blink support */ 374 if (hw->mac.ops.init_led_link_act) 375 hw->mac.ops.init_led_link_act(hw); 376 377 return status; 378 } 379 380 /** 381 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters 382 * @hw: pointer to hardware structure 383 * 384 * Clears all hardware statistics counters by reading them from the hardware 385 * Statistics counters are clear on read. 
 **/
int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	/* Return values are intentionally discarded: the statistics
	 * counters are clear-on-read, so reading is the clearing.
	 */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* Link XON/XOFF Rx counters live at different addresses on
	 * 82599 and later MACs.
	 */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (8 TCs) pause counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx size-bucket and general Rx/Tx counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue (16 queues) counters */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_e610) {
		if (hw->phy.id == 0)
			hw->phy.ops.identify(hw);
	}

	/* PHY-side PCS error counters; 'i' is reused here only as a
	 * throwaway destination for the clear-on-read values.
	 */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
	}

	return 0;
}

/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	int ret_val;
	u16 pba_ptr;
	u16 offset;
	u16 length;
	u16 data;

	if (pba_num == NULL) {
		hw_dbg(hw, "PBA string buffer was null\n");
		return -EINVAL;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		hw_dbg(hw, "NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			hw_dbg(hw, "PBA string buffer too small\n");
			return -ENOSPC;
		}

		/* extract hex string from data and pba_ptr
		 * (one nibble per output byte, converted to ASCII below)
		 */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char
		 * ('-' is 0x2D, above 0x10, so it is left untouched)
		 */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return 0;
	}

	/* String format: first word at pba_ptr is the section length in
	 * words (including itself), followed by the packed ASCII string.
	 */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		hw_dbg(hw, "NVM PBA number section invalid length\n");
		return -EIO;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		hw_dbg(hw, "PBA string buffer too small\n");
		return -ENOSPC;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		/* each EEPROM word holds two ASCII characters, high byte
		 * first
		 */
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return 0;
}

/**
 * ixgbe_get_mac_addr_generic - Generic get MAC address
 * @hw: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
 * A reset of the adapter must be performed prior to calling this function
 * in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	/* low register holds bytes 0-3, high register bytes 4-5 */
	for (i = 0; i < 4; i++)
		mac_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < 2; i++)
		mac_addr[i+4] = (u8)(rar_high >> (i*8));

	return 0;
}

/**
 * ixgbe_convert_bus_width - Convert PCI link status to bus width enum
 * @link_status: value read from the PCI Express Link Status register
 *
 * Returns the negotiated PCIe lane width as an ixgbe_bus_width value,
 * or ixgbe_bus_width_unknown for unrecognized encodings.
 **/
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
{
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		return ixgbe_bus_width_pcie_x1;
	case IXGBE_PCI_LINK_WIDTH_2:
		return ixgbe_bus_width_pcie_x2;
	case IXGBE_PCI_LINK_WIDTH_4:
		return ixgbe_bus_width_pcie_x4;
	case IXGBE_PCI_LINK_WIDTH_8:
		return ixgbe_bus_width_pcie_x8;
	default:
		return ixgbe_bus_width_unknown;
	}
}

/**
 * ixgbe_convert_bus_speed - Convert PCI link status to bus speed enum
 * @link_status: value read from the PCI Express Link Status register
 *
 * Returns the negotiated PCIe link speed as an ixgbe_bus_speed value,
 * or ixgbe_bus_speed_unknown for unrecognized encodings.
 **/
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
{
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		return ixgbe_bus_speed_2500;
	case IXGBE_PCI_LINK_SPEED_5000:
		return ixgbe_bus_speed_5000;
	case IXGBE_PCI_LINK_SPEED_8000:
		return ixgbe_bus_speed_8000;
	default:
		return ixgbe_bus_speed_unknown;
	}
}

/**
 * ixgbe_get_bus_info_generic - Generic set PCI bus info
 * @hw: pointer to hardware structure
 *
 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	u16 link_status;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space;
	 * E610 exposes the link status at a different config offset.
	 */
	if (hw->mac.type == ixgbe_mac_e610)
		link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610);
	else
		link_status = ixgbe_read_pci_cfg_word(hw,
						      IXGBE_PCI_LINK_STATUS);

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	hw->mac.ops.set_lan_id(hw);

	return 0;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 *
 * Determines the LAN function id by reading memory-mapped registers
 * and swaps the port value if requested.
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 ee_ctrl_4;
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = FIELD_GET(IXGBE_STATUS_LAN_ID, reg);
	bus->lan_id = bus->func;

	/* check for a port swap: when the LAN Function Select bit is set,
	 * the two functions are swapped, so flip the low bit of func
	 * (lan_id keeps the unswapped value).
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;

	/* Get MAC instance from EEPROM for configuring CS4227 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
		bus->instance_id = FIELD_GET(IXGBE_EE_CTRL_4_INST_ID,
					     ee_ctrl_4);
	}
}

/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	hw->mac.ops.disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 2000);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E primary
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_primary(hw);
}

/**
 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
 * @hw: pointer to hardware structure
 *
 * Store the index for the link active LED. This will be used to support
 * blinking the LED.
 **/
int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 led_reg, led_mode;
	u16 i;

	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* Get LED link active from the LEDCTL register; the first of the
	 * four LEDs configured as link/active wins.
	 */
	for (i = 0; i < 4; i++) {
		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);

		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
		    IXGBE_LED_LINK_ACTIVE) {
			mac->led_link_act = i;
			return 0;
		}
	}

	/* If LEDCTL register does not have the LED link active set, then use
	 * known MAC defaults.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_x550em_a:
		mac->led_link_act = 0;
		break;
	case ixgbe_mac_X550EM_x:
		mac->led_link_act = 1;
		break;
	default:
		mac->led_link_act = 2;
	}

	return 0;
}

/**
 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
813 * @hw: pointer to hardware structure 814 * @index: led number to turn on 815 **/ 816 int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) 817 { 818 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 819 820 if (index > 3) 821 return -EINVAL; 822 823 /* To turn on the LED, set mode to ON. */ 824 led_reg &= ~IXGBE_LED_MODE_MASK(index); 825 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); 826 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 827 IXGBE_WRITE_FLUSH(hw); 828 829 return 0; 830 } 831 832 /** 833 * ixgbe_led_off_generic - Turns off the software controllable LEDs. 834 * @hw: pointer to hardware structure 835 * @index: led number to turn off 836 **/ 837 int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) 838 { 839 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 840 841 if (index > 3) 842 return -EINVAL; 843 844 /* To turn off the LED, set mode to OFF. */ 845 led_reg &= ~IXGBE_LED_MODE_MASK(index); 846 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); 847 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 848 IXGBE_WRITE_FLUSH(hw); 849 850 return 0; 851 } 852 853 /** 854 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params 855 * @hw: pointer to hardware structure 856 * 857 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 858 * ixgbe_hw struct in order to set up EEPROM access. 859 **/ 860 int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) 861 { 862 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 863 u32 eec; 864 u16 eeprom_size; 865 866 if (eeprom->type == ixgbe_eeprom_uninitialized) { 867 eeprom->type = ixgbe_eeprom_none; 868 /* Set default semaphore delay to 10ms which is a well 869 * tested value */ 870 eeprom->semaphore_delay = 10; 871 /* Clear EEPROM page size, it will be initialized as needed */ 872 eeprom->word_page_size = 0; 873 874 /* 875 * Check for EEPROM present first. 
876 * If not present leave as none 877 */ 878 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 879 if (eec & IXGBE_EEC_PRES) { 880 eeprom->type = ixgbe_eeprom_spi; 881 882 /* 883 * SPI EEPROM is assumed here. This code would need to 884 * change if a future EEPROM is not SPI. 885 */ 886 eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec); 887 eeprom->word_size = BIT(eeprom_size + 888 IXGBE_EEPROM_WORD_SIZE_SHIFT); 889 } 890 891 if (eec & IXGBE_EEC_ADDR_SIZE) 892 eeprom->address_bits = 16; 893 else 894 eeprom->address_bits = 8; 895 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n", 896 eeprom->type, eeprom->word_size, eeprom->address_bits); 897 } 898 899 return 0; 900 } 901 902 /** 903 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang 904 * @hw: pointer to hardware structure 905 * @offset: offset within the EEPROM to write 906 * @words: number of words 907 * @data: 16 bit word(s) to write to EEPROM 908 * 909 * Reads 16 bit word(s) from EEPROM through bit-bang method 910 **/ 911 int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 912 u16 words, u16 *data) 913 { 914 u16 i, count; 915 int status; 916 917 hw->eeprom.ops.init_params(hw); 918 919 if (words == 0 || (offset + words > hw->eeprom.word_size)) 920 return -EINVAL; 921 922 /* 923 * The EEPROM page size cannot be queried from the chip. We do lazy 924 * initialization. It is worth to do that when we write large buffer. 925 */ 926 if ((hw->eeprom.word_page_size == 0) && 927 (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) 928 ixgbe_detect_eeprom_page_size_generic(hw, offset); 929 930 /* 931 * We cannot hold synchronization semaphores for too long 932 * to avoid other entity starvation. However it is more efficient 933 * to read in bursts than synchronizing access for each word. 934 */ 935 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { 936 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
937 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); 938 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, 939 count, &data[i]); 940 941 if (status != 0) 942 break; 943 } 944 945 return status; 946 } 947 948 /** 949 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM 950 * @hw: pointer to hardware structure 951 * @offset: offset within the EEPROM to be written to 952 * @words: number of word(s) 953 * @data: 16 bit word(s) to be written to the EEPROM 954 * 955 * If ixgbe_eeprom_update_checksum is not called after this function, the 956 * EEPROM will most likely contain an invalid checksum. 957 **/ 958 static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 959 u16 words, u16 *data) 960 { 961 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; 962 u16 page_size; 963 int status; 964 u16 word; 965 u16 i; 966 967 /* Prepare the EEPROM for writing */ 968 status = ixgbe_acquire_eeprom(hw); 969 if (status) 970 return status; 971 972 if (ixgbe_ready_eeprom(hw) != 0) { 973 ixgbe_release_eeprom(hw); 974 return -EIO; 975 } 976 977 for (i = 0; i < words; i++) { 978 ixgbe_standby_eeprom(hw); 979 980 /* Send the WRITE ENABLE command (8 bit opcode) */ 981 ixgbe_shift_out_eeprom_bits(hw, 982 IXGBE_EEPROM_WREN_OPCODE_SPI, 983 IXGBE_EEPROM_OPCODE_BITS); 984 985 ixgbe_standby_eeprom(hw); 986 987 /* Some SPI eeproms use the 8th address bit embedded 988 * in the opcode 989 */ 990 if ((hw->eeprom.address_bits == 8) && 991 ((offset + i) >= 128)) 992 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 993 994 /* Send the Write command (8-bit opcode + addr) */ 995 ixgbe_shift_out_eeprom_bits(hw, write_opcode, 996 IXGBE_EEPROM_OPCODE_BITS); 997 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), 998 hw->eeprom.address_bits); 999 1000 page_size = hw->eeprom.word_page_size; 1001 1002 /* Send the data in burst via SPI */ 1003 do { 1004 word = data[i]; 1005 word = (word >> 8) | (word << 8); 1006 ixgbe_shift_out_eeprom_bits(hw, word, 16); 1007 1008 if 
(page_size == 0) 1009 break; 1010 1011 /* do not wrap around page */ 1012 if (((offset + i) & (page_size - 1)) == 1013 (page_size - 1)) 1014 break; 1015 } while (++i < words); 1016 1017 ixgbe_standby_eeprom(hw); 1018 usleep_range(10000, 20000); 1019 } 1020 /* Done with writing - release the EEPROM */ 1021 ixgbe_release_eeprom(hw); 1022 1023 return 0; 1024 } 1025 1026 /** 1027 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM 1028 * @hw: pointer to hardware structure 1029 * @offset: offset within the EEPROM to be written to 1030 * @data: 16 bit word to be written to the EEPROM 1031 * 1032 * If ixgbe_eeprom_update_checksum is not called after this function, the 1033 * EEPROM will most likely contain an invalid checksum. 1034 **/ 1035 int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1036 { 1037 hw->eeprom.ops.init_params(hw); 1038 1039 if (offset >= hw->eeprom.word_size) 1040 return -EINVAL; 1041 1042 return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); 1043 } 1044 1045 /** 1046 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang 1047 * @hw: pointer to hardware structure 1048 * @offset: offset within the EEPROM to be read 1049 * @words: number of word(s) 1050 * @data: read 16 bit words(s) from EEPROM 1051 * 1052 * Reads 16 bit word(s) from EEPROM through bit-bang method 1053 **/ 1054 int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1055 u16 words, u16 *data) 1056 { 1057 u16 i, count; 1058 int status; 1059 1060 hw->eeprom.ops.init_params(hw); 1061 1062 if (words == 0 || (offset + words > hw->eeprom.word_size)) 1063 return -EINVAL; 1064 1065 /* 1066 * We cannot hold synchronization semaphores for too long 1067 * to avoid other entity starvation. However it is more efficient 1068 * to read in bursts than synchronizing access for each word. 
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status)
			return status;
	}

	return 0;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 word_in;
	int status;
	u16 i;

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);
	if (status)
		return status;

	if (ixgbe_ready_eeprom(hw) != 0) {
		ixgbe_release_eeprom(hw);
		return -EIO;
	}

	for (i = 0; i < words; i++) {
		ixgbe_standby_eeprom(hw);
		/* Some SPI eeproms use the 8th address bit embedded
		 * in the opcode
		 */
		if ((hw->eeprom.address_bits == 8) &&
		    ((offset + i) >= 128))
			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the READ command (opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
					    hw->eeprom.address_bits);

		/* Read the data, swapping the two bytes of each word into
		 * host order.
		 */
		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
		data[i] = (word_in >> 8) | (word_in << 8);
	}

	/* End this read operation */
	ixgbe_release_eeprom(hw);

	return 0;
}

/**
 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit value from EEPROM
 *
 * Reads 16 bit value from EEPROM through bit-bang method
 **/
int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size)
		return -EINVAL;

	return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
}

/**
 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of word(s)
 * @data: 16 bit word(s) from the EEPROM
 *
 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
 **/
int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	int status;
	u32 eerd;
	u32 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0 || offset >= hw->eeprom.word_size)
		return -EINVAL;

	for (i = 0; i < words; i++) {
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == 0) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			hw_dbg(hw, "Eeprom read timed out\n");
			return status;
		}
	}

	return 0;
}

/**
 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be used as a scratch pad
 *
 * Discover EEPROM page size by writing marching data at given offset.
 * This function is called only when we are writing a new large buffer
 * at given offset so the data would be overwritten anyway.
 **/
static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	int status;
	u16 i;

	/* Marching pattern 0, 1, 2, ... lets us infer where the page
	 * wrapped from the value read back at 'offset'
	 */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Temporarily claim the maximum page size so the burst below is
	 * not broken up; clear it again until detection completes
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status)
		return status;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status)
		return status;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
	       hw->eeprom.word_page_size);
	return 0;
}

/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}

/**
 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of words
 * @data: word(s) write to the EEPROM
 *
 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
 **/
int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	int status;
	u32 eewr;
	u16 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0 || offset >= hw->eeprom.word_size)
		return -EINVAL;

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		/* Wait for any in-flight command to finish before issuing
		 * the next write
		 */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			return status;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			return status;
		}
	}

	return 0;
}

/**
 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset:
offset of word in the EEPROM to write 1298 * @data: word write to the EEPROM 1299 * 1300 * Write a 16 bit word to the EEPROM using the EEWR register. 1301 **/ 1302 int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1303 { 1304 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); 1305 } 1306 1307 /** 1308 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status 1309 * @hw: pointer to hardware structure 1310 * @ee_reg: EEPROM flag for polling 1311 * 1312 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 1313 * read or write is done respectively. 1314 **/ 1315 static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 1316 { 1317 u32 i; 1318 u32 reg; 1319 1320 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { 1321 if (ee_reg == IXGBE_NVM_POLL_READ) 1322 reg = IXGBE_READ_REG(hw, IXGBE_EERD); 1323 else 1324 reg = IXGBE_READ_REG(hw, IXGBE_EEWR); 1325 1326 if (reg & IXGBE_EEPROM_RW_REG_DONE) { 1327 return 0; 1328 } 1329 udelay(5); 1330 } 1331 return -EIO; 1332 } 1333 1334 /** 1335 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang 1336 * @hw: pointer to hardware structure 1337 * 1338 * Prepares EEPROM for access using bit-bang method. This function should 1339 * be called before issuing a command to the EEPROM. 
 **/
static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;
	u32 i;

	/* Take the SW/FW synchronization semaphore for the EEPROM first */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		return -EBUSY;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Request EEPROM Access */
	eec |= IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

	/* Poll for the hardware grant bit */
	for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		if (eec & IXGBE_EEC_GNT)
			break;
		udelay(5);
	}

	/* Release if grant not acquired */
	if (!(eec & IXGBE_EEC_GNT)) {
		eec &= ~IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		hw_dbg(hw, "Could not acquire EEPROM grant\n");

		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
		return -EIO;
	}

	/* Setup EEPROM for Read/Write */
	/* Clear CS and SK */
	eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	return 0;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			break;
		usleep_range(50, 100);
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
		/* this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usleep_range(50, 100);
		/* one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SMBI) {
			hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
			return -EIO;
		}
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	for (i = 0; i < timeout; i++) {
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

		/* Set the SW EEPROM semaphore bit to request access */
		swsm |= IXGBE_SWSM_SWESMBI;
		IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);

		/* If we set the bit successfully then we got the
		 * semaphore.
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (swsm & IXGBE_SWSM_SWESMBI)
			break;

		usleep_range(50, 100);
	}

	/* Release semaphores and return error if SW EEPROM semaphore
	 * was not granted because we don't have access to the EEPROM
	 */
	if (i >= timeout) {
		hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
		ixgbe_release_eeprom_semaphore(hw);
		return -EIO;
	}

	return 0;
}

/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ready_eeprom - Polls for EEPROM ready
 * @hw: pointer to hardware structure
 **/
static int ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	u16 i;
	u8 spi_stat_reg;

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared. The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register. If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		udelay(5);
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		hw_dbg(hw, "SPI EEPROM Status error\n");
		return -EIO;
	}

	return 0;
}

/**
 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 * @hw: pointer to hardware structure
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time. Determine the starting bit based on count
	 */
	mask = BIT(count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM). A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		udelay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit. During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}

/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	udelay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/*
	 * Delay before attempt to obtain semaphore again to allow FW
	 * access. semaphore_delay is in ms we need us for usleep_range
	 */
	usleep_range(hw->eeprom.semaphore_delay * 1000,
		     hw->eeprom.semaphore_delay * 2000);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 **/
int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return -EIO;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return -EIO;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		/* Sum the section body; word 0 at 'pointer' is the length */
		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				hw_dbg(hw, "EEPROM read failed\n");
				return -EIO;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (int)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 **/
int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	u16 read_checksum = 0;
	u16 checksum;
	int status;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns the checksum (>= 0) or a negative errno */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = -EIO;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 **/
int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 checksum;
	int status;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}

/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 **/
int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	/* Record lower 32 bits of MAC address and then make
	 * sure that write is flushed to hardware before writing
	 * the upper 16 bits and setting the valid bit.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return 0;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	/* Clear the address valid bit and upper 16 bits of the address
	 * before clearing the lower bits. This way we aren't updating
	 * a live filter.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(hw->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
	} else {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR 0 now holds the station address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* init_uta_tables is optional on some MAC types */
	if (hw->mac.ops.init_uta_tables)
		hw->mac.ops.init_uta_tables(hw);

	return 0;
}

/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		hw_dbg(hw, "MC filter type param set incorrectly\n");
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: Multicast address
 *
 * Sets the bit-vector in the multicast table.
 **/
static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
}

/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @netdev: pointer to net device structure
 *
 * The given list replaces any existing list. Clears the MC addrs from receive
 * address registers and the multicast table. Uses unused receive address
 * registers for the first multicast addresses, and hashes the rest into the
 * multicast table.
 **/
int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
				      struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i;

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	hw_dbg(hw, " Clearing MTA\n");
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* Update mta shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		hw_dbg(hw, " Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, ha->addr);
	}

	/* Enable mta by writing the shadow copy to hardware */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
	return 0;
}

/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
2106 **/ 2107 int ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 2108 { 2109 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2110 2111 if (a->mta_in_use > 0) 2112 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 2113 hw->mac.mc_filter_type); 2114 2115 return 0; 2116 } 2117 2118 /** 2119 * ixgbe_disable_mc_generic - Disable multicast address in RAR 2120 * @hw: pointer to hardware structure 2121 * 2122 * Disables multicast address in RAR and the use of the multicast hash table. 2123 **/ 2124 int ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 2125 { 2126 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 2127 2128 if (a->mta_in_use > 0) 2129 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 2130 2131 return 0; 2132 } 2133 2134 /** 2135 * ixgbe_fc_enable_generic - Enable flow control 2136 * @hw: pointer to hardware structure 2137 * 2138 * Enable flow control according to the current settings. 2139 **/ 2140 int ixgbe_fc_enable_generic(struct ixgbe_hw *hw) 2141 { 2142 u32 mflcn_reg, fccfg_reg; 2143 u32 reg; 2144 u32 fcrtl, fcrth; 2145 int i; 2146 2147 /* Validate the water mark configuration. 
*/ 2148 if (!hw->fc.pause_time) 2149 return -EINVAL; 2150 2151 /* Low water mark of zero causes XOFF floods */ 2152 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2153 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2154 hw->fc.high_water[i]) { 2155 if (!hw->fc.low_water[i] || 2156 hw->fc.low_water[i] >= hw->fc.high_water[i]) { 2157 hw_dbg(hw, "Invalid water mark configuration\n"); 2158 return -EINVAL; 2159 } 2160 } 2161 } 2162 2163 /* Negotiate the fc mode to use */ 2164 hw->mac.ops.fc_autoneg(hw); 2165 2166 /* Disable any previous flow control settings */ 2167 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2168 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); 2169 2170 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2171 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2172 2173 /* 2174 * The possible values of fc.current_mode are: 2175 * 0: Flow control is completely disabled 2176 * 1: Rx flow control is enabled (we can receive pause frames, 2177 * but not send pause frames). 2178 * 2: Tx flow control is enabled (we can send pause frames but 2179 * we do not support receiving pause frames). 2180 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2181 * other: Invalid. 2182 */ 2183 switch (hw->fc.current_mode) { 2184 case ixgbe_fc_none: 2185 /* 2186 * Flow control is disabled by software override or autoneg. 2187 * The code below will actually disable it in the HW. 2188 */ 2189 break; 2190 case ixgbe_fc_rx_pause: 2191 /* 2192 * Rx Flow control is enabled and Tx Flow control is 2193 * disabled by software override. Since there really 2194 * isn't a way to advertise that we are capable of RX 2195 * Pause ONLY, we will advertise that we support both 2196 * symmetric and asymmetric Rx PAUSE. Later, we will 2197 * disable the adapter's ability to send PAUSE frames. 
2198 */ 2199 mflcn_reg |= IXGBE_MFLCN_RFCE; 2200 break; 2201 case ixgbe_fc_tx_pause: 2202 /* 2203 * Tx Flow control is enabled, and Rx Flow control is 2204 * disabled by software override. 2205 */ 2206 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2207 break; 2208 case ixgbe_fc_full: 2209 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2210 mflcn_reg |= IXGBE_MFLCN_RFCE; 2211 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2212 break; 2213 default: 2214 hw_dbg(hw, "Flow control param set incorrectly\n"); 2215 return -EIO; 2216 } 2217 2218 /* Set 802.3x based flow control settings. */ 2219 mflcn_reg |= IXGBE_MFLCN_DPF; 2220 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2221 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2222 2223 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 2224 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 2225 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 2226 hw->fc.high_water[i]) { 2227 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; 2228 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); 2229 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; 2230 } else { 2231 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); 2232 /* 2233 * In order to prevent Tx hangs when the internal Tx 2234 * switch is enabled we must set the high water mark 2235 * to the Rx packet buffer size - 24KB. This allows 2236 * the Tx switch to function even under heavy Rx 2237 * workloads. 
2238 */ 2239 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; 2240 } 2241 2242 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); 2243 } 2244 2245 /* Configure pause time (2 TCs per register) */ 2246 reg = hw->fc.pause_time * 0x00010001U; 2247 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 2248 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 2249 2250 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 2251 2252 return 0; 2253 } 2254 2255 /** 2256 * ixgbe_negotiate_fc - Negotiate flow control 2257 * @hw: pointer to hardware structure 2258 * @adv_reg: flow control advertised settings 2259 * @lp_reg: link partner's flow control settings 2260 * @adv_sym: symmetric pause bit in advertisement 2261 * @adv_asm: asymmetric pause bit in advertisement 2262 * @lp_sym: symmetric pause bit in link partner advertisement 2263 * @lp_asm: asymmetric pause bit in link partner advertisement 2264 * 2265 * Find the intersection between advertised settings and link partner's 2266 * advertised settings 2267 **/ 2268 int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 2269 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) 2270 { 2271 if ((!(adv_reg)) || (!(lp_reg))) 2272 return -EINVAL; 2273 2274 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { 2275 /* 2276 * Now we need to check if the user selected Rx ONLY 2277 * of pause frames. In this case, we had to advertise 2278 * FULL flow control because we could not advertise RX 2279 * ONLY. Hence, we must now check to see if we need to 2280 * turn OFF the TRANSMISSION of PAUSE frames. 
2281 */ 2282 if (hw->fc.requested_mode == ixgbe_fc_full) { 2283 hw->fc.current_mode = ixgbe_fc_full; 2284 hw_dbg(hw, "Flow Control = FULL.\n"); 2285 } else { 2286 hw->fc.current_mode = ixgbe_fc_rx_pause; 2287 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); 2288 } 2289 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && 2290 (lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2291 hw->fc.current_mode = ixgbe_fc_tx_pause; 2292 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); 2293 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && 2294 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2295 hw->fc.current_mode = ixgbe_fc_rx_pause; 2296 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); 2297 } else { 2298 hw->fc.current_mode = ixgbe_fc_none; 2299 hw_dbg(hw, "Flow Control = NONE.\n"); 2300 } 2301 return 0; 2302 } 2303 2304 /** 2305 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber 2306 * @hw: pointer to hardware structure 2307 * 2308 * Enable flow control according on 1 gig fiber. 
2309 **/ 2310 static int ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2311 { 2312 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2313 int ret_val; 2314 2315 /* 2316 * On multispeed fiber at 1g, bail out if 2317 * - link is up but AN did not complete, or if 2318 * - link is up and AN completed but timed out 2319 */ 2320 2321 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2322 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2323 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) 2324 return -EIO; 2325 2326 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2327 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2328 2329 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, 2330 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, 2331 IXGBE_PCS1GANA_ASM_PAUSE, 2332 IXGBE_PCS1GANA_SYM_PAUSE, 2333 IXGBE_PCS1GANA_ASM_PAUSE); 2334 2335 return ret_val; 2336 } 2337 2338 /** 2339 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 2340 * @hw: pointer to hardware structure 2341 * 2342 * Enable flow control according to IEEE clause 37. 
2343 **/ 2344 static int ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2345 { 2346 u32 links2, anlp1_reg, autoc_reg, links; 2347 int ret_val; 2348 2349 /* 2350 * On backplane, bail out if 2351 * - backplane autoneg was not completed, or if 2352 * - we are 82599 and link partner is not AN enabled 2353 */ 2354 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2355 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) 2356 return -EIO; 2357 2358 if (hw->mac.type == ixgbe_mac_82599EB) { 2359 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2360 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) 2361 return -EIO; 2362 } 2363 /* 2364 * Read the 10g AN autoc and LP ability registers and resolve 2365 * local flow control settings accordingly 2366 */ 2367 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2368 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2369 2370 ret_val = ixgbe_negotiate_fc(hw, autoc_reg, 2371 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, 2372 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); 2373 2374 return ret_val; 2375 } 2376 2377 /** 2378 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 2379 * @hw: pointer to hardware structure 2380 * 2381 * Enable flow control according to IEEE clause 37. 
2382 **/ 2383 static int ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) 2384 { 2385 u16 technology_ability_reg = 0; 2386 u16 lp_technology_ability_reg = 0; 2387 2388 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 2389 MDIO_MMD_AN, 2390 &technology_ability_reg); 2391 hw->phy.ops.read_reg(hw, MDIO_AN_LPA, 2392 MDIO_MMD_AN, 2393 &lp_technology_ability_reg); 2394 2395 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, 2396 (u32)lp_technology_ability_reg, 2397 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, 2398 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); 2399 } 2400 2401 /** 2402 * ixgbe_fc_autoneg - Configure flow control 2403 * @hw: pointer to hardware structure 2404 * 2405 * Compares our advertised flow control capabilities to those advertised by 2406 * our link partner, and determines the proper flow control mode to use. 2407 **/ 2408 void ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2409 { 2410 ixgbe_link_speed speed; 2411 int ret_val = -EIO; 2412 bool link_up; 2413 2414 /* 2415 * AN should have completed when the cable was plugged in. 2416 * Look for reasons to bail out. Bail out if: 2417 * - FC autoneg is disabled, or if 2418 * - link is not up. 2419 * 2420 * Since we're being called from an LSC, link is already known to be up. 2421 * So use link_up_wait_to_complete=false. 
2422 */ 2423 if (hw->fc.disable_fc_autoneg) 2424 goto out; 2425 2426 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2427 if (!link_up) 2428 goto out; 2429 2430 switch (hw->phy.media_type) { 2431 /* Autoneg flow control on fiber adapters */ 2432 case ixgbe_media_type_fiber: 2433 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 2434 ret_val = ixgbe_fc_autoneg_fiber(hw); 2435 break; 2436 2437 /* Autoneg flow control on backplane adapters */ 2438 case ixgbe_media_type_backplane: 2439 ret_val = ixgbe_fc_autoneg_backplane(hw); 2440 break; 2441 2442 /* Autoneg flow control on copper adapters */ 2443 case ixgbe_media_type_copper: 2444 if (ixgbe_device_supports_autoneg_fc(hw)) 2445 ret_val = ixgbe_fc_autoneg_copper(hw); 2446 break; 2447 2448 default: 2449 break; 2450 } 2451 2452 out: 2453 if (ret_val == 0) { 2454 hw->fc.fc_was_autonegged = true; 2455 } else { 2456 hw->fc.fc_was_autonegged = false; 2457 hw->fc.current_mode = hw->fc.requested_mode; 2458 } 2459 } 2460 2461 /** 2462 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion 2463 * @hw: pointer to hardware structure 2464 * 2465 * System-wide timeout range is encoded in PCIe Device Control2 register. 2466 * 2467 * Add 10% to specified maximum and return the number of times to poll for 2468 * completion timeout, in units of 100 microsec. Never return less than 2469 * 800 = 80 millisec. 
2470 **/ 2471 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) 2472 { 2473 s16 devctl2; 2474 u32 pollcnt; 2475 2476 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); 2477 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; 2478 2479 switch (devctl2) { 2480 case IXGBE_PCIDEVCTRL2_65_130ms: 2481 pollcnt = 1300; /* 130 millisec */ 2482 break; 2483 case IXGBE_PCIDEVCTRL2_260_520ms: 2484 pollcnt = 5200; /* 520 millisec */ 2485 break; 2486 case IXGBE_PCIDEVCTRL2_1_2s: 2487 pollcnt = 20000; /* 2 sec */ 2488 break; 2489 case IXGBE_PCIDEVCTRL2_4_8s: 2490 pollcnt = 80000; /* 8 sec */ 2491 break; 2492 case IXGBE_PCIDEVCTRL2_17_34s: 2493 pollcnt = 34000; /* 34 sec */ 2494 break; 2495 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ 2496 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ 2497 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ 2498 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ 2499 default: 2500 pollcnt = 800; /* 80 millisec minimum */ 2501 break; 2502 } 2503 2504 /* add 10% to spec maximum */ 2505 return (pollcnt * 11) / 10; 2506 } 2507 2508 /** 2509 * ixgbe_disable_pcie_primary - Disable PCI-express primary access 2510 * @hw: pointer to hardware structure 2511 * 2512 * Disables PCI-Express primary access and verifies there are no pending 2513 * requests. -EALREADY is returned if primary disable 2514 * bit hasn't caused the primary requests to be disabled, else 0 2515 * is returned signifying primary requests disabled. 
 **/
static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
	u32 i, poll;
	u16 value;

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Poll for bit to read as set */
	for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
			break;
		usleep_range(100, 120);
	}
	if (i >= IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT) {
		/* The disable request itself did not stick; request the
		 * double reset without waiting on GIO status.
		 */
		hw_dbg(hw, "GIO disable did not set - requesting resets\n");
		goto gio_disable_fail;
	}

	/* Exit if primary requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    ixgbe_removed(hw->hw_addr))
		return 0;

	/* Poll for primary request bit to clear */
	for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
		udelay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			return 0;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new primary requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	hw_dbg(hw, "GIO Primary Disable bit didn't clear - requesting resets\n");
gio_disable_fail:
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer do not need the PCIe-transaction drain below. */
	if (hw->mac.type >= ixgbe_mac_X550)
		return 0;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		udelay(100);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		/* All-ones config reads mean the device is gone; bail out. */
		if (ixgbe_removed(hw->hw_addr))
			return 0;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			return 0;
	}

	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
	return -EALREADY;
}

/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	/* FW ownership bits mirror the SW bits, shifted up by 5. */
	u32 fwmask = mask << 5;
	u32 timeout = 200;
	u32 i;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return -EBUSY;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Resource is free: claim it and drop the NVM lock. */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return 0;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			usleep_range(5000, 10000);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	usleep_range(5000, 10000);
	return -EBUSY;
}

/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0,
PHY1, EEPROM, Flash) 2633 **/ 2634 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) 2635 { 2636 u32 gssr; 2637 u32 swmask = mask; 2638 2639 ixgbe_get_eeprom_semaphore(hw); 2640 2641 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2642 gssr &= ~swmask; 2643 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2644 2645 ixgbe_release_eeprom_semaphore(hw); 2646 } 2647 2648 /** 2649 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read 2650 * @hw: pointer to hardware structure 2651 * @reg_val: Value we read from AUTOC 2652 * @locked: bool to indicate whether the SW/FW lock should be taken. Never 2653 * true in this the generic case. 2654 * 2655 * The default case requires no protection so just to the register read. 2656 **/ 2657 int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) 2658 { 2659 *locked = false; 2660 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2661 return 0; 2662 } 2663 2664 /** 2665 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write 2666 * @hw: pointer to hardware structure 2667 * @reg_val: value to write to AUTOC 2668 * @locked: bool to indicate whether the SW/FW lock was already taken by 2669 * previous read. 2670 **/ 2671 int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) 2672 { 2673 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); 2674 return 0; 2675 } 2676 2677 /** 2678 * ixgbe_disable_rx_buff_generic - Stops the receive data path 2679 * @hw: pointer to hardware structure 2680 * 2681 * Stops the receive data path and waits for the HW to internally 2682 * empty the Rx security block. 
2683 **/ 2684 int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) 2685 { 2686 #define IXGBE_MAX_SECRX_POLL 40 2687 int i; 2688 int secrxreg; 2689 2690 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2691 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2692 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2693 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 2694 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 2695 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 2696 break; 2697 else 2698 /* Use interrupt-safe sleep just in case */ 2699 udelay(1000); 2700 } 2701 2702 /* For informational purposes only */ 2703 if (i >= IXGBE_MAX_SECRX_POLL) 2704 hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); 2705 2706 return 0; 2707 2708 } 2709 2710 /** 2711 * ixgbe_enable_rx_buff_generic - Enables the receive data path 2712 * @hw: pointer to hardware structure 2713 * 2714 * Enables the receive data path 2715 **/ 2716 int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) 2717 { 2718 u32 secrxreg; 2719 2720 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2721 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2722 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2723 IXGBE_WRITE_FLUSH(hw); 2724 2725 return 0; 2726 } 2727 2728 /** 2729 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 2730 * @hw: pointer to hardware structure 2731 * @regval: register value to write to RXCTRL 2732 * 2733 * Enables the Rx DMA unit 2734 **/ 2735 int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 2736 { 2737 if (regval & IXGBE_RXCTRL_RXEN) 2738 hw->mac.ops.enable_rx(hw); 2739 else 2740 hw->mac.ops.disable_rx(hw); 2741 2742 return 0; 2743 } 2744 2745 /** 2746 * ixgbe_blink_led_start_generic - Blink LED based on index. 
2747 * @hw: pointer to hardware structure 2748 * @index: led number to blink 2749 **/ 2750 int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) 2751 { 2752 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2753 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2754 ixgbe_link_speed speed = 0; 2755 bool link_up = false; 2756 bool locked = false; 2757 int ret_val; 2758 2759 if (index > 3) 2760 return -EINVAL; 2761 2762 /* 2763 * Link must be up to auto-blink the LEDs; 2764 * Force it if link is down. 2765 */ 2766 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2767 2768 if (!link_up) { 2769 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2770 if (ret_val) 2771 return ret_val; 2772 2773 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2774 autoc_reg |= IXGBE_AUTOC_FLU; 2775 2776 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2777 if (ret_val) 2778 return ret_val; 2779 2780 IXGBE_WRITE_FLUSH(hw); 2781 2782 usleep_range(10000, 20000); 2783 } 2784 2785 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2786 led_reg |= IXGBE_LED_BLINK(index); 2787 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2788 IXGBE_WRITE_FLUSH(hw); 2789 2790 return 0; 2791 } 2792 2793 /** 2794 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
2795 * @hw: pointer to hardware structure 2796 * @index: led number to stop blinking 2797 **/ 2798 int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2799 { 2800 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2801 bool locked = false; 2802 u32 autoc_reg = 0; 2803 int ret_val; 2804 2805 if (index > 3) 2806 return -EINVAL; 2807 2808 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 2809 if (ret_val) 2810 return ret_val; 2811 2812 autoc_reg &= ~IXGBE_AUTOC_FLU; 2813 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2814 2815 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 2816 if (ret_val) 2817 return ret_val; 2818 2819 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2820 led_reg &= ~IXGBE_LED_BLINK(index); 2821 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2822 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2823 IXGBE_WRITE_FLUSH(hw); 2824 2825 return 0; 2826 } 2827 2828 /** 2829 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 2830 * @hw: pointer to hardware structure 2831 * @san_mac_offset: SAN MAC address offset 2832 * 2833 * This function will read the EEPROM location for the SAN MAC address 2834 * pointer, and returns the value at that location. This is used in both 2835 * get and set mac_addr routines. 2836 **/ 2837 static int ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2838 u16 *san_mac_offset) 2839 { 2840 int ret_val; 2841 2842 /* 2843 * First read the EEPROM pointer to see if the MAC addresses are 2844 * available. 2845 */ 2846 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, 2847 san_mac_offset); 2848 if (ret_val) 2849 hw_err(hw, "eeprom read at offset %d failed\n", 2850 IXGBE_SAN_MAC_ADDR_PTR); 2851 2852 return ret_val; 2853 } 2854 2855 /** 2856 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 2857 * @hw: pointer to hardware structure 2858 * @san_mac_addr: SAN MAC address 2859 * 2860 * Reads the SAN MAC address from the EEPROM, if it's available. 
This is 2861 * per-port, so set_lan_id() must be called before reading the addresses. 2862 * set_lan_id() is called by identify_sfp(), but this cannot be relied 2863 * upon for non-SFP connections, so we must call it here. 2864 **/ 2865 int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2866 { 2867 u16 san_mac_data, san_mac_offset; 2868 int ret_val; 2869 u8 i; 2870 2871 /* 2872 * First read the EEPROM pointer to see if the MAC addresses are 2873 * available. If they're not, no point in calling set_lan_id() here. 2874 */ 2875 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2876 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 2877 2878 goto san_mac_addr_clr; 2879 2880 /* make sure we know which port we need to program */ 2881 hw->mac.ops.set_lan_id(hw); 2882 /* apply the port offset to the address offset */ 2883 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2884 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2885 for (i = 0; i < 3; i++) { 2886 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 2887 &san_mac_data); 2888 if (ret_val) { 2889 hw_err(hw, "eeprom read at offset %d failed\n", 2890 san_mac_offset); 2891 goto san_mac_addr_clr; 2892 } 2893 san_mac_addr[i * 2] = (u8)(san_mac_data); 2894 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2895 san_mac_offset++; 2896 } 2897 return 0; 2898 2899 san_mac_addr_clr: 2900 /* No addresses available in this EEPROM. It's not necessarily an 2901 * error though, so just wipe the local address and return. 2902 */ 2903 for (i = 0; i < 6; i++) 2904 san_mac_addr[i] = 0xFF; 2905 return ret_val; 2906 } 2907 2908 /** 2909 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 2910 * @hw: pointer to hardware structure 2911 * 2912 * Read PCIe configuration space, and get the MSI-X vector count from 2913 * the capabilities table. 
2914 **/ 2915 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2916 { 2917 u16 msix_count; 2918 u16 max_msix_count; 2919 u16 pcie_offset; 2920 2921 switch (hw->mac.type) { 2922 case ixgbe_mac_82598EB: 2923 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; 2924 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; 2925 break; 2926 case ixgbe_mac_82599EB: 2927 case ixgbe_mac_X540: 2928 case ixgbe_mac_X550: 2929 case ixgbe_mac_X550EM_x: 2930 case ixgbe_mac_x550em_a: 2931 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; 2932 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 2933 break; 2934 case ixgbe_mac_e610: 2935 pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS; 2936 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 2937 break; 2938 default: 2939 return 1; 2940 } 2941 2942 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); 2943 if (ixgbe_removed(hw->hw_addr)) 2944 msix_count = 0; 2945 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2946 2947 /* MSI-X count is zero-based in HW */ 2948 msix_count++; 2949 2950 if (msix_count > max_msix_count) 2951 msix_count = max_msix_count; 2952 2953 return msix_count; 2954 } 2955 2956 /** 2957 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 2958 * @hw: pointer to hardware struct 2959 * @rar: receive address register index to disassociate 2960 * @vmdq: VMDq pool index to remove from the rar 2961 **/ 2962 int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2963 { 2964 u32 mpsar_lo, mpsar_hi; 2965 u32 rar_entries = hw->mac.num_rar_entries; 2966 2967 /* Make sure we are using a valid rar index range */ 2968 if (rar >= rar_entries) { 2969 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2970 return -EINVAL; 2971 } 2972 2973 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2974 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2975 2976 if (ixgbe_removed(hw->hw_addr)) 2977 return 0; 2978 2979 if (!mpsar_lo && !mpsar_hi) 2980 return 0; 2981 2982 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2983 if (mpsar_lo) { 2984 
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 2985 mpsar_lo = 0; 2986 } 2987 if (mpsar_hi) { 2988 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 2989 mpsar_hi = 0; 2990 } 2991 } else if (vmdq < 32) { 2992 mpsar_lo &= ~BIT(vmdq); 2993 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 2994 } else { 2995 mpsar_hi &= ~BIT(vmdq - 32); 2996 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 2997 } 2998 2999 /* was that the last pool using this rar? */ 3000 if (mpsar_lo == 0 && mpsar_hi == 0 && 3001 rar != 0 && rar != hw->mac.san_mac_rar_index) 3002 hw->mac.ops.clear_rar(hw, rar); 3003 3004 return 0; 3005 } 3006 3007 /** 3008 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address 3009 * @hw: pointer to hardware struct 3010 * @rar: receive address register index to associate with a VMDq index 3011 * @vmdq: VMDq pool index 3012 **/ 3013 int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 3014 { 3015 u32 mpsar; 3016 u32 rar_entries = hw->mac.num_rar_entries; 3017 3018 /* Make sure we are using a valid rar index range */ 3019 if (rar >= rar_entries) { 3020 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 3021 return -EINVAL; 3022 } 3023 3024 if (vmdq < 32) { 3025 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3026 mpsar |= BIT(vmdq); 3027 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); 3028 } else { 3029 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3030 mpsar |= BIT(vmdq - 32); 3031 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); 3032 } 3033 return 0; 3034 } 3035 3036 /** 3037 * ixgbe_set_vmdq_san_mac_generic - Associate VMDq pool index with a rx address 3038 * @hw: pointer to hardware struct 3039 * @vmdq: VMDq pool index 3040 * 3041 * This function should only be involved in the IOV mode. 3042 * In IOV mode, Default pool is next pool after the number of 3043 * VFs advertized and not 0. 
 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
 **/
int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
	u32 rar = hw->mac.san_mac_rar_index;

	/* The pool bitmap for a RAR entry spans two 32-bit registers;
	 * set exactly one bit and zero the other half so the SAN MAC is
	 * associated with only the requested pool.
	 */
	if (vmdq < 32) {
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
	}

	return 0;
}

/**
 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
 * @hw: pointer to hardware structure
 *
 * Zeroes all 128 Unicast Table Array registers.
 **/
int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
	int i;

	for (i = 0; i < 128; i++)
		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);

	return 0;
}

/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
 *		 vlanid not found
 *
 * return the VLVF index where this VLAN id should be placed, or -ENOSPC
 * when no matching entry (and, unless @vlvf_bypass, no free slot) exists
 *
 **/
static int ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
	int regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? -ENOSPC : 0;

	/* add VLAN enable bit for comparison */
	vlan |= IXGBE_VLVF_VIEN;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
	 * (entry 0 is never scanned; it is reserved for VLAN 0 above)
	 */
	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN.  Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		hw_dbg(hw, "No space in VLVF.\n");

	return first_empty_slot ? : -ENOSPC;
}

/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 * @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool vlvf_bypass)
{
	u32 regidx, vfta_delta, vfta, bits;
	int vlvf_index;

	/* VLAN id is 12 bits, pool index is 6 bits */
	if ((vlan > 4095) || (vind > 63))
		return -EINVAL;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));

	/* vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update vfta using an XOR.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
		goto vfta_update;

	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	/* each VLVF entry owns a pair of VLVFB registers (64 pool bits) */
	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));

	/* set the pool bit */
	bits |= BIT(vind % 32);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit */
	bits ^= BIT(vind % 32);

	if (!bits &&
	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (vfta_delta)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);

		/* disable VLVF and clear remaining bit from pool */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);

		return 0;
	}

	/* If there are still bits set in the VLVFB registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the VFTA entry bit.
	 * If the caller has requested that we clear the VFTA
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the VFTA VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * VFTA bit until all pools/VFs using that VLAN ID have also
	 * been cleared.  This will be indicated by "bits" being
	 * zero.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);

vfta_update:
	/* Update VFTA now that we are ready for traffic */
	if (vfta_delta)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);

	return 0;
}

/**
 * ixgbe_clear_vfta_generic - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
	}

	return 0;
}

/**
 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
 * @hw: pointer to hardware structure
 *
 * Contains the logic to identify if we need to verify link for the
 * crosstalk fix
 **/
static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
{
	/* Does FW say we need the fix */
	if (!hw->need_crosstalk_fix)
		return false;

	/* Only consider SFP+ PHYs i.e. media type fiber */
	switch (hw->mac.ops.get_media_type(hw)) {
	case ixgbe_media_type_fiber:
	case ixgbe_media_type_fiber_qsfp:
		break;
	default:
		return false;
	}

	return true;
}

/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
	u32 links_reg, links_orig;
	u32 i;

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (crosstalk_fix_active) {
		u32 sfp_cage_full;

		/* the module-present pin differs by MAC generation */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP0;
			break;
		default:
			/* sanity check - No SFP+ devices here */
			sfp_cage_full = false;
			break;
		}

		/* no module present - report link down */
		if (!sfp_cage_full) {
			*link_up = false;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return 0;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		hw_dbg(hw, "LINKS changed from %08X to %08X\n",
		       links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* poll up to IXGBE_LINK_UP_TIME iterations, 100 ms apart */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP) {
			if (crosstalk_fix_active) {
				/* Check the link state again after a delay
				 * to filter out spurious link up
				 * notifications.
				 */
				mdelay(5);
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (!(links_reg & IXGBE_LINKS_UP)) {
					*link_up = false;
					*speed = IXGBE_LINK_SPEED_UNKNOWN;
					return 0;
				}
			}
			*link_up = true;
		} else {
			*link_up = false;
		}
	}

	/* decode the speed bits from the last LINKS value read above */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (links_reg & IXGBE_LINKS_SPEED_NON_STD))
			*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		else
			*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		if ((hw->mac.type >= ixgbe_mac_X550 ||
		     hw->mac.type == ixgbe_mac_e610) &&
		    (links_reg & IXGBE_LINKS_SPEED_NON_STD))
			*speed = IXGBE_LINK_SPEED_5GB_FULL;
		else
			*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
			*speed = IXGBE_LINK_SPEED_10_FULL;
		}
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return 0;
}

/**
 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 * the EEPROM
 * @hw: pointer to hardware structure
 * @wwnn_prefix: the alternative WWNN prefix
 * @wwpn_prefix: the alternative WWPN prefix
 *
 * This function will read the EEPROM from the alternative SAN MAC address
 * block to check the support for the alternative WWNN/WWPN prefix support.
 **/
int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	/* a zero or all-ones pointer means the block is not present */
	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		return 0;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		return 0;

	/* get the corresponding prefix for WWNN/WWPN.  A WWNN read
	 * failure is logged but deliberately non-fatal so the WWPN
	 * read below is still attempted.
	 */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
		hw_err(hw, "eeprom read at offset %d failed\n", offset);

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

	return 0;

wwn_prefix_err:
	hw_err(hw, "eeprom read at offset %d failed\n", offset);
	return 0;
}

/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for MAC anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* eight VF pools are packed into each PFVFSPOOF register */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8;
	u32 pfvfspoof;

	/* anti-spoofing is not configured on 82598 parts */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= BIT(vf_target_shift);
	else
		pfvfspoof &= ~BIT(vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* VLAN anti-spoof bits sit above the MAC bits in the same register */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
	u32 pfvfspoof;

	/* anti-spoofing is not configured on 82598 parts */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= BIT(vf_target_shift);
	else
		pfvfspoof &= ~BIT(vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
3514 **/ 3515 int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 3516 { 3517 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 3518 3519 return 0; 3520 } 3521 3522 /** 3523 * ixgbe_set_rxpba_generic - Initialize RX packet buffer 3524 * @hw: pointer to hardware structure 3525 * @num_pb: number of packet buffers to allocate 3526 * @headroom: reserve n KB of headroom 3527 * @strategy: packet buffer allocation strategy 3528 **/ 3529 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, 3530 int num_pb, 3531 u32 headroom, 3532 int strategy) 3533 { 3534 u32 pbsize = hw->mac.rx_pb_size; 3535 int i = 0; 3536 u32 rxpktsize, txpktsize, txpbthresh; 3537 3538 /* Reserve headroom */ 3539 pbsize -= headroom; 3540 3541 if (!num_pb) 3542 num_pb = 1; 3543 3544 /* Divide remaining packet buffer space amongst the number 3545 * of packet buffers requested using supplied strategy. 3546 */ 3547 switch (strategy) { 3548 case (PBA_STRATEGY_WEIGHTED): 3549 /* pba_80_48 strategy weight first half of packet buffer with 3550 * 5/8 of the packet buffer space. 3551 */ 3552 rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); 3553 pbsize -= rxpktsize * (num_pb / 2); 3554 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; 3555 for (; i < (num_pb / 2); i++) 3556 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3557 fallthrough; /* configure remaining packet buffers */ 3558 case (PBA_STRATEGY_EQUAL): 3559 /* Divide the remaining Rx packet buffer evenly among the TCs */ 3560 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; 3561 for (; i < num_pb; i++) 3562 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); 3563 break; 3564 default: 3565 break; 3566 } 3567 3568 /* 3569 * Setup Tx packet buffer and threshold equally for all TCs 3570 * TXPBTHRESH register is set in K so divide by 1024 and subtract 3571 * 10 since the largest packet we support is just over 9K. 
3572 */ 3573 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; 3574 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; 3575 for (i = 0; i < num_pb; i++) { 3576 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); 3577 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); 3578 } 3579 3580 /* Clear unused TCs, if any, to zero buffer size*/ 3581 for (; i < IXGBE_MAX_PB; i++) { 3582 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 3583 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); 3584 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); 3585 } 3586 } 3587 3588 /** 3589 * ixgbe_calculate_checksum - Calculate checksum for buffer 3590 * @buffer: pointer to EEPROM 3591 * @length: size of EEPROM to calculate a checksum for 3592 * 3593 * Calculates the checksum for some buffer on a specified length. The 3594 * checksum calculated is returned. 3595 **/ 3596 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) 3597 { 3598 u32 i; 3599 u8 sum = 0; 3600 3601 if (!buffer) 3602 return 0; 3603 3604 for (i = 0; i < length; i++) 3605 sum += buffer[i]; 3606 3607 return (u8) (0 - sum); 3608 } 3609 3610 /** 3611 * ixgbe_hic_unlocked - Issue command to manageability block unlocked 3612 * @hw: pointer to the HW structure 3613 * @buffer: command to write and where the return status will be placed 3614 * @length: length of buffer, must be multiple of 4 bytes 3615 * @timeout: time in ms to wait for command completion 3616 * 3617 * Communicates with the manageability block. On success return 0 3618 * else returns semaphore error when encountering an error acquiring 3619 * semaphore, -EINVAL when incorrect parameters passed or -EIO when 3620 * command fails. 3621 * 3622 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held 3623 * by the caller. 
 **/
int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
		return -EINVAL;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
		return -EIO;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(u32)) {
		hw_dbg(hw, "Buffer length failure, not aligned to dword");
		return -EINVAL;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.  The interface expects little-endian
	 * dwords, hence the cpu_to_le32 conversion.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, (__force u32)cpu_to_le32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* poll until firmware clears the command bit or we time out;
	 * a timeout of 0 means wait indefinitely (loop never runs,
	 * only the status-valid check below applies)
	 */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		usleep_range(1000, 2000);
	}

	/* Check command successful completion. */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
		return -EIO;

	return 0;
}

/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *	    be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (true) or not (false)
 *		 Needed because FW structures are big endian and decoding of
 *		 these fields can be 8 bit or 16 bit based on command. Decoding
 *		 is not easily understood without making a table of commands.
 *		 So we will leave this up to the caller to read back the data
 *		 in these cases.
 *
 * Communicates with the manageability block. On success return 0
 * else return -EIO or -EINVAL.
 **/
int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
				 u32 length, u32 timeout,
				 bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	struct ixgbe_hic_hdr *hdr = buffer;
	u16 buf_len, dword_len;
	u32 *u32arr = buffer;
	int status;
	u32 bi;

	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
		return -EINVAL;
	}
	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&u32arr[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = hdr->buf_len;
	if (!buf_len)
		goto rel_out;

	/* firmware reports the reply length; make sure the caller's
	 * buffer can hold the header plus the dword-padded payload
	 */
	if (length < round_up(buf_len, 4) + hdr_size) {
		hw_dbg(hw, "Buffer not large enough for reply message.\n");
		status = -EIO;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		le32_to_cpus(&u32arr[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}

/**
 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @min: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 * @len: length of driver_ver string
 * @driver_ver: driver string
 *
 * Sends driver version number to firmware through the manageability
 * block.  On success return 0
 * else returns -EBUSY when encountering an error acquiring
 * semaphore or -EIO when command fails.
 **/
int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
				 u8 build, u8 sub, __always_unused u16 len,
				 __always_unused const char *driver_ver)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int ret_val;
	int i;

	/* Build the driver-info command.  The checksum is computed last,
	 * over header + payload, with the checksum field zeroed first.
	 */
	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = min;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	fw_cmd.hdr.checksum = 0;
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));

	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
						       sizeof(fw_cmd),
						       IXGBE_HI_COMMAND_TIMEOUT,
						       true);
		/* transport error - retry the whole command */
		if (ret_val != 0)
			continue;

		/* command reached firmware - map its response status */
		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = 0;
		else
			ret_val = -EIO;

		break;
	}

	return ret_val;
}

/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(3000, 6000);

	/* Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usleep_range(100, 200);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		/* stop polling if the adapter was surprise-removed */
		if (ixgbe_removed(hw->hw_addr))
			break;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			break;
	}

	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	udelay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}

/* EMC register addresses, indexed by the sensor index decoded from the
 * ETS words in the EEPROM (0 = internal sensor, 1-3 = external diodes).
 */
static const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
static const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};

/**
 * ixgbe_get_ets_data - Extracts the ETS bit data
 * @hw: pointer to hardware structure
 * @ets_cfg: extracted ETS data
 * @ets_offset: offset of ETS data
 *
 * Returns error code.
 **/
static int ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
			      u16 *ets_offset)
{
	int status;

	/* the ETS pointer word locates the ETS configuration block */
	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
	if (status)
		return status;

	/* zero or all-ones pointer means no ETS block is present */
	if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
		return -EOPNOTSUPP;

	status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
	if (status)
		return status;

	/* only the EMC sensor type is supported here */
	if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Returns the thermal sensor data structure
 **/
int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
	u16 ets_offset;
	u16 ets_sensor;
	u8  num_sensors;
	u16 ets_cfg;
	int status;
	u8  i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return -EOPNOTSUPP;

	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		return status;

	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8  sensor_index;
		u8  sensor_location;

		/* one ETS word per sensor follows the config word */
		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
					     &ets_sensor);
		if (status)
			return status;

		sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
					 ets_sensor);
		sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
					    ets_sensor);

		/* location 0 marks an unused sensor entry - skip it */
		if (sensor_location != 0) {
			status = hw->phy.ops.read_i2c_byte(hw,
					ixgbe_emc_temp_data[sensor_index],
					IXGBE_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
			if (status)
				return status;
		}
	}

	return 0;
}

/**
 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Inits the thermal sensor thresholds according to the NVM map
 * and save off the threshold and location values into mac.thermal_sensor_data
 **/
int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
	u8  low_thresh_delta;
	u8  num_sensors;
	u8  therm_limit;
	u16 ets_sensor;
	u16 ets_offset;
	u16 ets_cfg;
	int status;
	u8  i;

	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));

	/* Only support thermal sensors attached to physical port 0 */
	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return -EOPNOTSUPP;

	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
	if (status)
		return status;

	low_thresh_delta = FIELD_GET(IXGBE_ETS_LTHRES_DELTA_MASK, ets_cfg);
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		u8  sensor_index;
		u8  sensor_location;

		/* a failed word read skips this sensor but keeps going */
		if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
			hw_err(hw, "eeprom read at offset %d failed\n",
			       ets_offset + 1 + i);
			continue;
		}
		sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
					 ets_sensor);
		sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
					    ets_sensor);
		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

		/* program the high threshold into the EMC device */
		hw->phy.ops.write_i2c_byte(hw,
			ixgbe_emc_therm_limit[sensor_index],
			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);

		/* location 0 marks an unused sensor entry - skip it */
		if (sensor_location == 0)
			continue;

		data->sensor[i].location = sensor_location;
		data->sensor[i].caution_thresh = therm_limit;
		data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
	}

	return 0;
}

/**
 * ixgbe_get_orom_version - Return option ROM version from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * if valid option ROM version, nvm_ver->or_valid set to true
 * else nvm_ver->or_valid is false.
 **/
void ixgbe_get_orom_version(struct ixgbe_hw *hw,
			    struct ixgbe_nvm_version *nvm_ver)
{
	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;

	nvm_ver->or_valid = false;
	/* Option Rom may or may not be present.  Start with pointer */
	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);

	/* make sure offset is valid */
	if (offset == 0x0 || offset == NVM_INVALID_PTR)
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);

	/* bail out unless the option rom exists and is valid */
	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
	    eeprom_cfg_blkl == NVM_VER_INVALID ||
	    eeprom_cfg_blkh == NVM_VER_INVALID)
		return;

	nvm_ver->or_valid = true;
	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
}

/**
 * ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * if valid OEM product version, nvm_ver->oem_valid set to true
 * else nvm_ver->oem_valid is false.
4082 **/ 4083 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, 4084 struct ixgbe_nvm_version *nvm_ver) 4085 { 4086 u16 rel_num, prod_ver, mod_len, cap, offset; 4087 4088 nvm_ver->oem_valid = false; 4089 hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); 4090 4091 /* Return is offset to OEM Product Version block is invalid */ 4092 if (offset == 0x0 || offset == NVM_INVALID_PTR) 4093 return; 4094 4095 /* Read product version block */ 4096 hw->eeprom.ops.read(hw, offset, &mod_len); 4097 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); 4098 4099 /* Return if OEM product version block is invalid */ 4100 if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || 4101 (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) 4102 return; 4103 4104 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); 4105 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); 4106 4107 /* Return if version is invalid */ 4108 if ((rel_num | prod_ver) == 0x0 || 4109 rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) 4110 return; 4111 4112 nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; 4113 nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; 4114 nvm_ver->oem_release = rel_num; 4115 nvm_ver->oem_valid = true; 4116 } 4117 4118 /** 4119 * ixgbe_get_etk_id - Return Etrack ID from EEPROM 4120 * 4121 * @hw: pointer to hardware structure 4122 * @nvm_ver: pointer to output structure 4123 * 4124 * word read errors will return 0xFFFF 4125 **/ 4126 void ixgbe_get_etk_id(struct ixgbe_hw *hw, 4127 struct ixgbe_nvm_version *nvm_ver) 4128 { 4129 u16 etk_id_l, etk_id_h; 4130 4131 if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) 4132 etk_id_l = NVM_VER_INVALID; 4133 if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) 4134 etk_id_h = NVM_VER_INVALID; 4135 4136 /* The word order for the version format is determined by high order 4137 * word bit 15. 
4138 */ 4139 if ((etk_id_h & NVM_ETK_VALID) == 0) { 4140 nvm_ver->etk_id = etk_id_h; 4141 nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); 4142 } else { 4143 nvm_ver->etk_id = etk_id_l; 4144 nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); 4145 } 4146 } 4147 4148 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) 4149 { 4150 u32 rxctrl; 4151 4152 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4153 if (rxctrl & IXGBE_RXCTRL_RXEN) { 4154 if (hw->mac.type != ixgbe_mac_82598EB) { 4155 u32 pfdtxgswc; 4156 4157 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); 4158 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { 4159 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; 4160 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); 4161 hw->mac.set_lben = true; 4162 } else { 4163 hw->mac.set_lben = false; 4164 } 4165 } 4166 rxctrl &= ~IXGBE_RXCTRL_RXEN; 4167 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); 4168 } 4169 } 4170 4171 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) 4172 { 4173 u32 rxctrl; 4174 4175 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4176 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); 4177 4178 if (hw->mac.type != ixgbe_mac_82598EB) { 4179 if (hw->mac.set_lben) { 4180 u32 pfdtxgswc; 4181 4182 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); 4183 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; 4184 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); 4185 hw->mac.set_lben = false; 4186 } 4187 } 4188 } 4189 4190 /** ixgbe_mng_present - returns true when management capability is present 4191 * @hw: pointer to hardware structure 4192 **/ 4193 bool ixgbe_mng_present(struct ixgbe_hw *hw) 4194 { 4195 u32 fwsm; 4196 4197 if (hw->mac.type < ixgbe_mac_82599EB) 4198 return false; 4199 4200 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); 4201 4202 return !!(fwsm & IXGBE_FWSM_FW_MODE_PT); 4203 } 4204 4205 /** 4206 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 4207 * @hw: pointer to hardware structure 4208 * @speed: new link speed 4209 * @autoneg_wait_to_complete: true when waiting for 
 completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restarts link.
 */
int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	bool autoneg, link_up = false;
	u32 speedcnt = 0;
	int status = 0;
	u32 i = 0;

	/* Mask off requested but non-supported speeds */
	status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
	if (status)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first. We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			hw->mac.ops.set_rate_select_speed(hw,
					IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			hw_dbg(hw, "Unexpected media type\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msleep(40);

		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg_wait_to_complete);
		if (status)
			return status;

		/* Flap the Tx laser if it has not already been done */
		if (hw->mac.ops.flap_tx_laser)
			hw->mac.ops.flap_tx_laser(hw);

		/* Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted. 82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msleep(100);

			/* If we have link, just jump out */
			status = hw->mac.ops.check_link(hw, &link_speed,
							&link_up, false);
			if (status)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* 1G is only "highest" if 10G was not attempted above */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			hw->mac.ops.set_rate_select_speed(hw,
					IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			hw_dbg(hw, "Unexpected media type\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msleep(40);

		status = hw->mac.ops.setup_mac_link(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg_wait_to_complete);
		if (status)
			return status;

		/* Flap the Tx laser if it has not already been done */
		if (hw->mac.ops.flap_tx_laser)
			hw->mac.ops.flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msleep(100);

		/* If we have link, just jump out */
		status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
						false);
		if (status)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link. Configure back to the highest speed we tried,
	 * (if there was more than one). We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						       highest_link_speed,
						       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}

/**
 * ixgbe_set_soft_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via the soft rate select.
 */
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	u8 rs, eeprom_data;
	int status;

	/* Translate the requested speed into the rate-select bit pattern */
	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		/* one bit mask same as setting on */
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		hw_dbg(hw, "Invalid fixed module speed\n");
		return;
	}

	/* Set RS0: read-modify-write the rate-select bits over I2C */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
		return;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
		return;
	}

	/* Set RS1: same read-modify-write on the extended status byte */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
		return;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
		return;
	}
}