/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
27 */ 28 29 /* IntelVersion: 1.155 v2-7-8_2009-4-7 */ 30 31 #include "ixgbe_type.h" 32 #include "ixgbe_api.h" 33 #include "ixgbe_common.h" 34 #include "ixgbe_phy.h" 35 36 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); 37 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 38 ixgbe_link_speed *speed, bool *autoneg); 39 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); 40 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw); 41 s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw, 42 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); 43 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw); 44 s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, 45 ixgbe_link_speed *speed, 46 bool *link_up, bool link_up_wait_to_complete); 47 s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, 48 ixgbe_link_speed speed, bool autoneg, 49 bool autoneg_wait_to_complete); 50 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw); 51 static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw, 52 ixgbe_link_speed speed, bool autoneg, 53 bool autoneg_wait_to_complete); 54 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); 55 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); 56 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); 57 s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq); 58 s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq); 59 s32 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 60 s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, 61 u32 vind, bool vlan_on); 62 s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw); 63 s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw); 64 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); 65 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); 66 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw); 67 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); 68 s32 
ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); 69 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); 70 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); 71 s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw, 72 u16 *san_mac_offset); 73 s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr); 74 s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr); 75 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps); 76 77 void 78 ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 79 { 80 struct ixgbe_mac_info *mac = &hw->mac; 81 82 DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); 83 84 if (hw->phy.multispeed_fiber) { 85 /* Set up dual speed SFP+ support */ 86 mac->ops.setup_link = 87 &ixgbe_setup_mac_link_multispeed_fiber; 88 mac->ops.setup_link_speed = 89 &ixgbe_setup_mac_link_speed_multispeed_fiber; 90 } else { 91 mac->ops.setup_link = 92 &ixgbe_setup_mac_link_82599; 93 mac->ops.setup_link_speed = 94 &ixgbe_setup_mac_link_speed_82599; 95 } 96 } 97 98 /* 99 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init 100 * @hw: pointer to hardware structure 101 * 102 * Initialize any function pointers that were not able to be 103 * set during init_shared_code because the PHY/SFP type was 104 * not known. Perform the SFP init if necessary. 
105 * 106 */ 107 s32 108 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) 109 { 110 struct ixgbe_mac_info *mac = &hw->mac; 111 struct ixgbe_phy_info *phy = &hw->phy; 112 s32 ret_val = IXGBE_SUCCESS; 113 114 DEBUGFUNC("ixgbe_init_phy_ops_82599"); 115 116 /* Identify the PHY or SFP module */ 117 ret_val = phy->ops.identify(hw); 118 119 /* Setup function pointers based on detected SFP module and speeds */ 120 ixgbe_init_mac_link_ops_82599(hw); 121 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) 122 hw->phy.ops.reset = NULL; 123 124 /* If copper media, overwrite with copper function pointers */ 125 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 126 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 127 mac->ops.setup_link_speed = 128 &ixgbe_setup_copper_link_speed_82599; 129 mac->ops.get_link_capabilities = 130 &ixgbe_get_copper_link_capabilities_generic; 131 } 132 133 /* Set necessary function pointers based on phy type */ 134 switch (hw->phy.type) { 135 case ixgbe_phy_tn: 136 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 137 phy->ops.get_firmware_version = 138 &ixgbe_get_phy_firmware_version_tnx; 139 break; 140 default: 141 break; 142 } 143 144 return (ret_val); 145 } 146 147 s32 148 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 149 { 150 s32 ret_val = IXGBE_SUCCESS; 151 u16 list_offset, data_offset, data_value; 152 153 DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); 154 155 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 156 ixgbe_init_mac_link_ops_82599(hw); 157 158 hw->phy.ops.reset = NULL; 159 160 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 161 &data_offset); 162 163 if (ret_val != IXGBE_SUCCESS) 164 goto setup_sfp_out; 165 166 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 167 while (data_value != 0xffff) { 168 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); 169 IXGBE_WRITE_FLUSH(hw); 170 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 171 } 172 /* Now restart DSP */ 173 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 
0x00000102); 174 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000b1d); 175 IXGBE_WRITE_FLUSH(hw); 176 } 177 178 setup_sfp_out: 179 return (ret_val); 180 } 181 182 /* 183 * ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count 184 * @hw: pointer to hardware structure 185 * 186 * Read PCIe configuration space, and get the MSI-X vector count from 187 * the capabilities table. 188 */ 189 u32 190 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw) 191 { 192 u32 msix_count = 64; 193 194 if (hw->mac.msix_vectors_from_pcie) { 195 msix_count = IXGBE_READ_PCIE_WORD(hw, 196 IXGBE_PCIE_MSIX_82599_CAPS); 197 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 198 199 /* 200 * MSI-X count is zero-based in HW, so increment to give 201 * proper value 202 */ 203 msix_count++; 204 } 205 206 return (msix_count); 207 } 208 209 /* 210 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type 211 * @hw: pointer to hardware structure 212 * 213 * Initialize the function pointers and assign the MAC type for 82599. 214 * Does not touch the hardware. 
215 */ 216 217 s32 218 ixgbe_init_ops_82599(struct ixgbe_hw *hw) 219 { 220 struct ixgbe_mac_info *mac = &hw->mac; 221 struct ixgbe_phy_info *phy = &hw->phy; 222 s32 ret_val; 223 224 ret_val = ixgbe_init_phy_ops_generic(hw); 225 ret_val = ixgbe_init_ops_generic(hw); 226 227 /* PHY */ 228 phy->ops.identify = &ixgbe_identify_phy_82599; 229 phy->ops.init = &ixgbe_init_phy_ops_82599; 230 231 /* MAC */ 232 mac->ops.reset_hw = &ixgbe_reset_hw_82599; 233 mac->ops.get_media_type = &ixgbe_get_media_type_82599; 234 mac->ops.get_supported_physical_layer = 235 &ixgbe_get_supported_physical_layer_82599; 236 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599; 237 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599; 238 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599; 239 mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599; 240 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_82599; 241 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_82599; 242 mac->ops.get_device_caps = &ixgbe_get_device_caps_82599; 243 244 /* RAR, Multicast, VLAN */ 245 mac->ops.set_vmdq = &ixgbe_set_vmdq_82599; 246 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82599; 247 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_82599; 248 mac->rar_highwater = 1; 249 mac->ops.set_vfta = &ixgbe_set_vfta_82599; 250 mac->ops.clear_vfta = &ixgbe_clear_vfta_82599; 251 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_82599; 252 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599; 253 254 /* Link */ 255 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599; 256 mac->ops.check_link = &ixgbe_check_mac_link_82599; 257 ixgbe_init_mac_link_ops_82599(hw); 258 259 mac->mcft_size = 128; 260 mac->vft_size = 128; 261 mac->num_rar_entries = 128; 262 mac->max_tx_queues = 128; 263 mac->max_rx_queues = 128; 264 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw); 265 266 return (ret_val); 267 } 268 269 /* 270 * ixgbe_get_link_capabilities_82599 - Determines link capabilities 271 * @hw: pointer to 
hardware structure 272 * @speed: pointer to link speed 273 * @negotiation: true when autoneg or autotry is enabled 274 * 275 * Determines the link capabilities by reading the AUTOC register. 276 */ 277 s32 278 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 279 ixgbe_link_speed *speed, bool *negotiation) 280 { 281 s32 status = IXGBE_SUCCESS; 282 u32 autoc = 0; 283 284 /* 285 * Determine link capabilities based on the stored value of AUTOC, 286 * which represents EEPROM defaults. If AUTOC value has not 287 * been stored, use the current register values. 288 */ 289 if (hw->mac.orig_link_settings_stored) 290 autoc = hw->mac.orig_autoc; 291 else 292 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 293 294 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 295 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 296 *speed = IXGBE_LINK_SPEED_1GB_FULL; 297 *negotiation = false; 298 break; 299 300 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 301 *speed = IXGBE_LINK_SPEED_10GB_FULL; 302 *negotiation = false; 303 break; 304 305 case IXGBE_AUTOC_LMS_1G_AN: 306 *speed = IXGBE_LINK_SPEED_1GB_FULL; 307 *negotiation = true; 308 break; 309 310 case IXGBE_AUTOC_LMS_10G_SERIAL: 311 *speed = IXGBE_LINK_SPEED_10GB_FULL; 312 *negotiation = false; 313 break; 314 315 case IXGBE_AUTOC_LMS_KX4_KX_KR: 316 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 317 *speed = IXGBE_LINK_SPEED_UNKNOWN; 318 if (autoc & IXGBE_AUTOC_KR_SUPP) 319 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 320 if (autoc & IXGBE_AUTOC_KX4_SUPP) 321 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 322 if (autoc & IXGBE_AUTOC_KX_SUPP) 323 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 324 *negotiation = true; 325 break; 326 327 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: 328 *speed = IXGBE_LINK_SPEED_100_FULL; 329 if (autoc & IXGBE_AUTOC_KR_SUPP) 330 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 331 if (autoc & IXGBE_AUTOC_KX4_SUPP) 332 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 333 if (autoc & IXGBE_AUTOC_KX_SUPP) 334 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 335 *negotiation = true; 336 break; 337 338 case 
IXGBE_AUTOC_LMS_SGMII_1G_100M: 339 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; 340 *negotiation = false; 341 break; 342 343 default: 344 status = IXGBE_ERR_LINK_SETUP; 345 goto out; 346 } 347 348 if (hw->phy.multispeed_fiber) { 349 *speed |= IXGBE_LINK_SPEED_10GB_FULL | 350 IXGBE_LINK_SPEED_1GB_FULL; 351 *negotiation = true; 352 } 353 354 out: 355 return (status); 356 } 357 358 /* 359 * ixgbe_get_media_type_82599 - Get media type 360 * @hw: pointer to hardware structure 361 * 362 * Returns the media type (fiber, copper, backplane) 363 */ 364 enum ixgbe_media_type 365 ixgbe_get_media_type_82599(struct ixgbe_hw *hw) 366 { 367 enum ixgbe_media_type media_type; 368 369 /* Detect if there is a copper PHY attached. */ 370 if (hw->phy.type == ixgbe_phy_cu_unknown || 371 hw->phy.type == ixgbe_phy_tn) { 372 media_type = ixgbe_media_type_copper; 373 goto out; 374 } 375 376 switch (hw->device_id) { 377 case IXGBE_DEV_ID_82599_KX4: 378 case IXGBE_DEV_ID_82599_KX4_SIK: 379 /* Default device ID is mezzanine card KX/KX4 */ 380 media_type = ixgbe_media_type_backplane; 381 break; 382 case IXGBE_DEV_ID_82599_SFP: 383 case IXGBE_DEV_ID_82599_SPW: 384 media_type = ixgbe_media_type_fiber; 385 break; 386 case IXGBE_DEV_ID_82599_CX4: 387 media_type = ixgbe_media_type_fiber; 388 break; 389 default: 390 media_type = ixgbe_media_type_unknown; 391 break; 392 } 393 out: 394 return (media_type); 395 } 396 397 /* 398 * ixgbe_setup_mac_link_82599 - Setup MAC link settings 399 * @hw: pointer to hardware structure 400 * 401 * Configures link settings based on values in the ixgbe_hw struct. 402 * Restarts the link. Performs autonegotiation if needed. 
403 */ 404 s32 405 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw) 406 { 407 u32 autoc_reg; 408 u32 links_reg; 409 u32 i; 410 s32 status = IXGBE_SUCCESS; 411 412 /* Restart link */ 413 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 414 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 415 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 416 417 /* Only poll for autoneg to complete if specified to do so */ 418 if (hw->phy.autoneg_wait_to_complete) { 419 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 420 IXGBE_AUTOC_LMS_KX4_KX_KR || 421 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 422 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 423 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 424 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 425 links_reg = 0; /* Just in case Autoneg time = 0 */ 426 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 427 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 428 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 429 break; 430 msec_delay(100); 431 } 432 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 433 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 434 DEBUGOUT("Autoneg did not complete.\n"); 435 } 436 } 437 } 438 439 /* Add delay to filter out noises during initial link setup */ 440 msec_delay(50); 441 442 return (status); 443 } 444 445 /* 446 * ixgbe_setup_mac_link_multispeed_fiber - Setup MAC link settings 447 * @hw: pointer to hardware structure 448 * 449 * Configures link settings based on values in the ixgbe_hw struct. 450 * Restarts the link for multi-speed fiber at 1G speed, if link 451 * fails at 10G. 452 * Performs autonegotiation if needed. 
453 */ 454 s32 455 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw) 456 { 457 s32 status = IXGBE_SUCCESS; 458 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_82599_AUTONEG; 459 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); 460 461 status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw, 462 link_speed, true, true); 463 return (status); 464 } 465 466 /* 467 * ixgbe_setup_mac_link_speed_multispeed_fiber - Set MAC link speed 468 * @hw: pointer to hardware structure 469 * @speed: new link speed 470 * @autoneg: true if autonegotiation enabled 471 * @autoneg_wait_to_complete: true when waiting for completion is needed 472 * 473 * Set the link speed in the AUTOC register and restarts link. 474 */ 475 s32 476 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw, 477 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) 478 { 479 s32 status = IXGBE_SUCCESS; 480 ixgbe_link_speed link_speed; 481 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 482 u32 speedcnt = 0; 483 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 484 bool link_up = false; 485 bool negotiation; 486 487 /* Mask off requested but non-supported speeds */ 488 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation); 489 if (status != IXGBE_SUCCESS) 490 goto out; 491 492 speed &= link_speed; 493 494 /* 495 * Try each speed one by one, highest priority first. We do this in 496 * software because 10gb fiber doesn't support speed autonegotiation. 
497 */ 498 if (speed & IXGBE_LINK_SPEED_10GB_FULL) { 499 speedcnt++; 500 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 501 502 /* If we already have link at this speed, just jump out */ 503 status = ixgbe_check_link(hw, &link_speed, &link_up, false); 504 if (status != IXGBE_SUCCESS) 505 goto out; 506 507 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) 508 goto out; 509 510 /* Set hardware SDP's */ 511 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); 512 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 513 514 /* Allow module to change analog characteristics (1G->10G) */ 515 msec_delay(40); 516 517 status = ixgbe_setup_mac_link_speed_82599( 518 hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg, 519 autoneg_wait_to_complete); 520 if (status != IXGBE_SUCCESS) 521 goto out; 522 523 msec_delay(100); 524 525 /* If we have link, just jump out */ 526 status = ixgbe_check_link(hw, &link_speed, &link_up, false); 527 if (status != IXGBE_SUCCESS) 528 goto out; 529 530 if (link_up) 531 goto out; 532 } 533 534 if (speed & IXGBE_LINK_SPEED_1GB_FULL) { 535 speedcnt++; 536 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) 537 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 538 539 /* If we already have link at this speed, just jump out */ 540 status = ixgbe_check_link(hw, &link_speed, &link_up, false); 541 if (status != IXGBE_SUCCESS) 542 goto out; 543 544 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) 545 goto out; 546 547 /* Set hardware SDP's */ 548 esdp_reg &= ~IXGBE_ESDP_SDP5; 549 esdp_reg |= IXGBE_ESDP_SDP5_DIR; 550 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 551 552 /* Allow module to change analog characteristics (10G->1G) */ 553 msec_delay(40); 554 555 status = ixgbe_setup_mac_link_speed_82599( 556 hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg, 557 autoneg_wait_to_complete); 558 if (status != IXGBE_SUCCESS) 559 goto out; 560 561 msec_delay(100); 562 563 /* If we have link, just jump out */ 564 status = ixgbe_check_link(hw, &link_speed, &link_up, false); 565 if 
(status != IXGBE_SUCCESS) 566 goto out; 567 568 if (link_up) 569 goto out; 570 } 571 572 /* 573 * We didn't get link. Configure back to the highest speed we tried, 574 * (if there was more than one). We call ourselves back with just the 575 * single highest speed that the user requested. 576 */ 577 if (speedcnt > 1) 578 status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw, 579 highest_link_speed, autoneg, autoneg_wait_to_complete); 580 581 out: 582 return (status); 583 } 584 585 /* 586 * ixgbe_check_mac_link_82599 - Determine link and speed status 587 * @hw: pointer to hardware structure 588 * @speed: pointer to link speed 589 * @link_up: true when link is up 590 * @link_up_wait_to_complete: bool used to wait for link up or not 591 * 592 * Reads the links register to determine if link is up and the current speed 593 */ 594 s32 595 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 596 bool *link_up, bool link_up_wait_to_complete) 597 { 598 u32 links_reg; 599 u32 i; 600 601 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 602 if (link_up_wait_to_complete) { 603 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 604 if (links_reg & IXGBE_LINKS_UP) { 605 *link_up = true; 606 break; 607 } else { 608 *link_up = false; 609 } 610 msec_delay(100); 611 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 612 } 613 } else { 614 if (links_reg & IXGBE_LINKS_UP) 615 *link_up = true; 616 else 617 *link_up = false; 618 } 619 620 if ((links_reg & IXGBE_LINKS_SPEED_82599) == 621 IXGBE_LINKS_SPEED_10G_82599) 622 *speed = IXGBE_LINK_SPEED_10GB_FULL; 623 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 624 IXGBE_LINKS_SPEED_1G_82599) 625 *speed = IXGBE_LINK_SPEED_1GB_FULL; 626 else 627 *speed = IXGBE_LINK_SPEED_100_FULL; 628 629 /* if link is down, zero out the current_mode */ 630 if (*link_up == false) { 631 hw->fc.current_mode = ixgbe_fc_none; 632 hw->fc.fc_was_autonegged = false; 633 } 634 635 return (IXGBE_SUCCESS); 636 } 637 638 /* 639 * ixgbe_setup_mac_link_speed_82599 - 
Set MAC link speed 640 * @hw: pointer to hardware structure 641 * @speed: new link speed 642 * @autoneg: true if autonegotiation enabled 643 * @autoneg_wait_to_complete: true when waiting for completion is needed 644 * 645 * Set the link speed in the AUTOC register and restarts link. 646 */ 647 s32 648 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, 649 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) 650 { 651 s32 status = IXGBE_SUCCESS; 652 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 653 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 654 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 655 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 656 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 657 u32 links_reg; 658 u32 i; 659 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 660 661 /* Check to see if speed passed in is supported. */ 662 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); 663 if (status != IXGBE_SUCCESS) 664 goto out; 665 666 speed &= link_capabilities; 667 668 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 669 status = IXGBE_ERR_LINK_SETUP; 670 } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 671 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 672 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 673 /* Set KX4/KX/KR support according to speed requested */ 674 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); 675 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 676 if (autoc & IXGBE_AUTOC_KX4_SUPP) 677 autoc |= IXGBE_AUTOC_KX4_SUPP; 678 if (autoc & IXGBE_AUTOC_KR_SUPP) 679 autoc |= IXGBE_AUTOC_KR_SUPP; 680 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 681 autoc |= IXGBE_AUTOC_KX_SUPP; 682 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && 683 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || 684 link_mode == IXGBE_AUTOC_LMS_1G_AN)) { 685 /* Switch from 1G SFI to 10G SFI if requested */ 686 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 687 (pma_pmd_10g_serial == 
IXGBE_AUTOC2_10G_SFI)) { 688 autoc &= ~IXGBE_AUTOC_LMS_MASK; 689 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; 690 } 691 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && 692 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { 693 /* Switch from 10G SFI to 1G SFI if requested */ 694 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 695 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { 696 autoc &= ~IXGBE_AUTOC_LMS_MASK; 697 if (autoneg) 698 autoc |= IXGBE_AUTOC_LMS_1G_AN; 699 else 700 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; 701 } 702 } 703 704 if (status == IXGBE_SUCCESS) { 705 /* Restart link */ 706 autoc |= IXGBE_AUTOC_AN_RESTART; 707 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 708 709 /* Only poll for autoneg to complete if specified to do so */ 710 if (autoneg_wait_to_complete) { 711 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 712 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 713 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 714 links_reg = 0; /* Just in case Autoneg time=0 */ 715 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 716 links_reg = 717 IXGBE_READ_REG(hw, IXGBE_LINKS); 718 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 719 break; 720 msec_delay(100); 721 } 722 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 723 status = 724 IXGBE_ERR_AUTONEG_NOT_COMPLETE; 725 DEBUGOUT("Autoneg did not complete.\n"); 726 } 727 } 728 } 729 730 /* Add delay to filter out noises during initial link setup */ 731 msec_delay(50); 732 } 733 734 out: 735 return (status); 736 } 737 738 /* 739 * ixgbe_setup_copper_link_82599 - Setup copper link settings 740 * @hw: pointer to hardware structure 741 * 742 * Restarts the link on PHY and then MAC. Performs autonegotiation if needed. 
743 */ 744 static s32 745 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw) 746 { 747 s32 status; 748 749 /* Restart autonegotiation on PHY */ 750 status = hw->phy.ops.setup_link(hw); 751 752 /* Set up MAC */ 753 (void) ixgbe_setup_mac_link_82599(hw); 754 755 return (status); 756 } 757 758 /* 759 * ixgbe_setup_copper_link_speed_82599 - Set the PHY autoneg advertised field 760 * @hw: pointer to hardware structure 761 * @speed: new link speed 762 * @autoneg: true if autonegotiation enabled 763 * @autoneg_wait_to_complete: true if waiting is needed to complete 764 * 765 * Restarts link on PHY and MAC based on settings passed in. 766 */ 767 static s32 768 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw, 769 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) 770 { 771 s32 status; 772 773 /* Setup the PHY according to input speed */ 774 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 775 autoneg_wait_to_complete); 776 /* Set up MAC */ 777 (void) ixgbe_setup_mac_link_82599(hw); 778 779 return (status); 780 } 781 /* 782 * ixgbe_reset_hw_82599 - Perform hardware reset 783 * @hw: pointer to hardware structure 784 * 785 * Resets the hardware by resetting the transmit and receive units, masks 786 * and clears all interrupts, perform a PHY reset, and perform a link (MAC) 787 * reset. 788 */ 789 s32 790 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) 791 { 792 s32 status = IXGBE_SUCCESS; 793 u32 ctrl, ctrl_ext; 794 u32 i; 795 u32 autoc; 796 u32 autoc2; 797 798 /* Call adapter stop to disable tx/rx and clear interrupts */ 799 hw->mac.ops.stop_adapter(hw); 800 801 /* PHY ops must be identified and initialized prior to reset */ 802 803 /* Identify PHY and related function pointers */ 804 status = hw->phy.ops.init(hw); 805 806 /* Setup SFP module if there is one present. 
*/ 807 if (hw->phy.sfp_setup_needed) { 808 status = hw->mac.ops.setup_sfp(hw); 809 hw->phy.sfp_setup_needed = false; 810 } 811 812 /* Reset PHY */ 813 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) 814 hw->phy.ops.reset(hw); 815 816 /* 817 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 818 * access and verify no pending requests before reset 819 */ 820 status = ixgbe_disable_pcie_master(hw); 821 if (status != IXGBE_SUCCESS) { 822 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 823 DEBUGOUT("PCI-E Master disable polling has failed.\n"); 824 } 825 826 /* 827 * Issue global reset to the MAC. This needs to be a SW reset. 828 * If link reset is used, it might reset the MAC when mng is using it 829 */ 830 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 831 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); 832 IXGBE_WRITE_FLUSH(hw); 833 834 /* Poll for reset bit to self-clear indicating reset is complete */ 835 for (i = 0; i < 10; i++) { 836 usec_delay(1); 837 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 838 if (!(ctrl & IXGBE_CTRL_RST)) { 839 break; 840 } 841 } 842 if (ctrl & IXGBE_CTRL_RST) { 843 status = IXGBE_ERR_RESET_FAILED; 844 DEBUGOUT("Reset polling failed to complete.\n"); 845 } 846 847 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ 848 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 849 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 850 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 851 852 msec_delay(50); 853 854 /* 855 * Store the original AUTOC/AUTOC2 values if they have not been 856 * stored off yet. Otherwise restore the stored original 857 * values since the reset operation sets back to defaults. 
858 */ 859 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 860 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 861 if (hw->mac.orig_link_settings_stored == false) { 862 hw->mac.orig_autoc = autoc; 863 hw->mac.orig_autoc2 = autoc2; 864 hw->mac.orig_link_settings_stored = true; 865 } else { 866 if (autoc != hw->mac.orig_autoc) { 867 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | 868 IXGBE_AUTOC_AN_RESTART)); 869 } 870 871 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != 872 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { 873 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; 874 autoc2 |= (hw->mac.orig_autoc2 & 875 IXGBE_AUTOC2_UPPER_MASK); 876 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); 877 } 878 } 879 880 /* 881 * Store MAC address from RAR0, clear receive address registers, and 882 * clear the multicast table. Also reset num_rar_entries to 128, 883 * since we modify this value when programming the SAN MAC address. 884 */ 885 hw->mac.num_rar_entries = 128; 886 hw->mac.ops.init_rx_addrs(hw); 887 888 /* Store the permanent mac address */ 889 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 890 891 /* Add the SAN MAC address to the RAR only if it's a valid address */ 892 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { 893 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 894 hw->mac.san_addr, 0, IXGBE_RAH_AV); 895 896 /* Reserve the last RAR for the SAN MAC address */ 897 hw->mac.num_rar_entries--; 898 } 899 900 return (status); 901 } 902 903 /* 904 * ixgbe_insert_mac_addr_82599 - Find a RAR for this mac address 905 * @hw: pointer to hardware structure 906 * @addr: Address to put into receive address register 907 * @vmdq: VMDq pool to assign 908 * 909 * Puts an ethernet address into a receive address register, or 910 * finds the rar that it is aleady in; adds to the pool list 911 */ 912 s32 913 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 914 { 915 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; 916 u32 first_empty_rar = NO_EMPTY_RAR_FOUND; 917 u32 rar; 918 
u32 rar_low, rar_high; 919 u32 addr_low, addr_high; 920 921 /* swap bytes for HW little endian */ 922 addr_low = addr[0] | (addr[1] << 8) 923 | (addr[2] << 16) 924 | (addr[3] << 24); 925 addr_high = addr[4] | (addr[5] << 8); 926 927 /* 928 * Either find the mac_id in rar or find the first empty space. 929 * rar_highwater points to just after the highest currently used 930 * rar in order to shorten the search. It grows when we add a new 931 * rar to the top. 932 */ 933 for (rar = 0; rar < hw->mac.rar_highwater; rar++) { 934 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 935 936 if (((IXGBE_RAH_AV & rar_high) == 0) && 937 first_empty_rar == NO_EMPTY_RAR_FOUND) { 938 first_empty_rar = rar; 939 } else if ((rar_high & 0xFFFF) == addr_high) { 940 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); 941 if (rar_low == addr_low) 942 break; /* found it already in the rars */ 943 } 944 } 945 946 if (rar < hw->mac.rar_highwater) { 947 /* already there so just add to the pool bits */ 948 (void) ixgbe_set_vmdq(hw, rar, vmdq); 949 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { 950 /* stick it into first empty RAR slot we found */ 951 rar = first_empty_rar; 952 (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 953 } else if (rar == hw->mac.rar_highwater) { 954 /* add it to the top of the list and inc the highwater mark */ 955 (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 956 hw->mac.rar_highwater++; 957 } else if (rar >= hw->mac.num_rar_entries) { 958 return (IXGBE_ERR_INVALID_MAC_ADDR); 959 } 960 961 /* 962 * If we found rar[0], make sure the default pool bit (we use pool 0) 963 * remains cleared to be sure default pool packets will get delivered 964 */ 965 if (rar == 0) 966 (void) ixgbe_clear_vmdq(hw, rar, 0); 967 968 return (rar); 969 } 970 971 /* 972 * ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address 973 * @hw: pointer to hardware struct 974 * @rar: receive address register index to disassociate 975 * @vmdq: VMDq pool index to remove 
from the rar 976 */ 977 s32 978 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 979 { 980 u32 mpsar_lo, mpsar_hi; 981 u32 rar_entries = hw->mac.num_rar_entries; 982 983 if (rar < rar_entries) { 984 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 985 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 986 987 if (!mpsar_lo && !mpsar_hi) { 988 goto done; 989 } 990 991 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 992 if (mpsar_lo) { 993 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 994 mpsar_lo = 0; 995 } 996 if (mpsar_hi) { 997 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 998 mpsar_hi = 0; 999 } 1000 } else if (vmdq < 32) { 1001 mpsar_lo &= ~(1 << vmdq); 1002 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 1003 } else { 1004 mpsar_hi &= ~(1 << (vmdq - 32)); 1005 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 1006 } 1007 1008 /* was that the last pool using this rar? */ 1009 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) { 1010 hw->mac.ops.clear_rar(hw, rar); 1011 } 1012 } else { 1013 DEBUGOUT1("RAR index %d is out of range.\n", rar); 1014 } 1015 done: 1016 return (IXGBE_SUCCESS); 1017 } 1018 1019 /* 1020 * ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address 1021 * @hw: pointer to hardware struct 1022 * @rar: receive address register index to associate with a VMDq index 1023 * @vmdq: VMDq pool index 1024 */ 1025 s32 1026 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 1027 { 1028 u32 mpsar; 1029 u32 rar_entries = hw->mac.num_rar_entries; 1030 1031 if (rar < rar_entries) { 1032 if (vmdq < 32) { 1033 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 1034 mpsar |= 1 << vmdq; 1035 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); 1036 } else { 1037 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 1038 mpsar |= 1 << (vmdq - 32); 1039 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); 1040 } 1041 } else { 1042 DEBUGOUT1("RAR index %d is out of range.\n", rar); 1043 } 1044 1045 return (IXGBE_SUCCESS); 1046 } 1047 1048 /* 1049 * 
ixgbe_set_vfta_82599 - Set VLAN filter table 1050 * @hw: pointer to hardware structure 1051 * @vlan: VLAN id to write to VLAN filter 1052 * @vind: VMDq output index that maps queue to VLAN id in VFVFB 1053 * @vlan_on: boolean flag to turn on/off VLAN in VFVF 1054 * 1055 * Turn on/off specified VLAN in the VLAN filter table. 1056 */ 1057 s32 1058 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) 1059 { 1060 u32 regindex; 1061 u32 bitindex; 1062 u32 bits; 1063 u32 first_empty_slot; 1064 1065 if (vlan > 4095) { 1066 return (IXGBE_ERR_PARAM); 1067 } 1068 1069 /* 1070 * this is a 2 part operation - first the VFTA, then the 1071 * VLVF and VLVFB if vind is set 1072 */ 1073 1074 /* 1075 * Part 1 1076 * The VFTA is a bitstring made up of 128 32-bit registers 1077 * that enable the particular VLAN id, much like the MTA: 1078 * bits[11-5]: which register 1079 * bits[4-0]: which bit in the register 1080 */ 1081 regindex = (vlan >> 5) & 0x7F; 1082 bitindex = vlan & 0x1F; 1083 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); 1084 if (vlan_on) { 1085 bits |= (1 << bitindex); 1086 } else { 1087 bits &= ~(1 << bitindex); 1088 } 1089 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); 1090 1091 1092 /* 1093 * Part 2 1094 * If the vind is set 1095 * Either vlan_on 1096 * make sure the vlan is in VLVF 1097 * set the vind bit in the matching VLVFB 1098 * Or !vlan_on 1099 * clear the pool bit and possibly the vind 1100 */ 1101 if (vind) { 1102 /* find the vlanid or the first empty slot */ 1103 first_empty_slot = 0; 1104 1105 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { 1106 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); 1107 if (!bits && !first_empty_slot) 1108 first_empty_slot = regindex; 1109 else if ((bits & 0x0FFF) == vlan) 1110 break; 1111 } 1112 1113 if (regindex >= IXGBE_VLVF_ENTRIES) { 1114 if (first_empty_slot) 1115 regindex = first_empty_slot; 1116 else { 1117 DEBUGOUT("No space in VLVF.\n"); 1118 } 1119 } 1120 1121 1122 if 
(vlan_on) { 1123 /* set the pool bit */ 1124 if (vind < 32) { 1125 bits = 1126 IXGBE_READ_REG(hw, IXGBE_VLVFB(regindex*2)); 1127 bits |= (1 << vind); 1128 IXGBE_WRITE_REG(hw, 1129 IXGBE_VLVFB(regindex*2), bits); 1130 } else { 1131 bits = IXGBE_READ_REG(hw, 1132 IXGBE_VLVFB((regindex*2)+1)); 1133 bits |= (1 << vind); 1134 IXGBE_WRITE_REG(hw, 1135 IXGBE_VLVFB((regindex*2)+1), bits); 1136 } 1137 } else { 1138 /* clear the pool bit */ 1139 if (vind < 32) { 1140 bits = IXGBE_READ_REG(hw, 1141 IXGBE_VLVFB(regindex*2)); 1142 bits &= ~(1 << vind); 1143 IXGBE_WRITE_REG(hw, 1144 IXGBE_VLVFB(regindex*2), bits); 1145 bits |= IXGBE_READ_REG(hw, 1146 IXGBE_VLVFB((regindex*2)+1)); 1147 } else { 1148 bits = IXGBE_READ_REG(hw, 1149 IXGBE_VLVFB((regindex*2)+1)); 1150 bits &= ~(1 << vind); 1151 IXGBE_WRITE_REG(hw, 1152 IXGBE_VLVFB((regindex*2)+1), bits); 1153 bits |= IXGBE_READ_REG(hw, 1154 IXGBE_VLVFB(regindex*2)); 1155 } 1156 } 1157 1158 if (bits) 1159 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 1160 (IXGBE_VLVF_VIEN | vlan)); 1161 else 1162 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0); 1163 } 1164 1165 return (IXGBE_SUCCESS); 1166 } 1167 1168 /* 1169 * ixgbe_clear_vfta_82599 - Clear VLAN filter table 1170 * @hw: pointer to hardware structure 1171 * 1172 * Clears the VLAN filer table, and the VMDq index associated with the filter 1173 */ 1174 s32 1175 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw) 1176 { 1177 u32 offset; 1178 1179 for (offset = 0; offset < hw->mac.vft_size; offset++) 1180 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 1181 1182 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 1183 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 1184 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); 1185 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); 1186 } 1187 1188 return (IXGBE_SUCCESS); 1189 } 1190 1191 /* 1192 * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array 1193 * @hw: pointer to hardware structure 1194 */ 1195 s32 1196 ixgbe_init_uta_tables_82599(struct 
ixgbe_hw *hw)
{
	int i;
	DEBUGOUT(" Clearing UTA\n");

	/* zero all 128 Unicast Table Array registers */
	for (i = 0; i < 128; i++)
		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 *
 * Clears FDIRFREE and rewrites FDIRCTRL with INIT_DONE cleared so the
 * hardware re-runs its filter-table initialization.
 */
s32
ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	/* clearing INIT_DONE and rewriting FDIRCTRL restarts table init */
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	/* flush before rewriting FDIRCTRL so the ordering is guaranteed */
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);

	return (IXGBE_SUCCESS);
}

#define	IXGBE_FDIR_INIT_DONE_POLL	10
/*
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @pballoc: which mode to allocate filters with
 */
s32
ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
{
	u32 fdirctrl = 0;
	u32 pbsize;
	int i;

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced. The new value is the current size minus
	 * flow director memory usage size.
	 */
	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize);

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	/* Send interrupt when 64 filters are left */
	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;

	/* Set the maximum length per hash bucket to 0xA filters */
	fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;

	switch (pballoc) {
	case IXGBE_FDIR_PBALLOC_64K:
		/* 8k - 1 signature filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
		break;
	case IXGBE_FDIR_PBALLOC_128K:
		/* 16k - 1 signature filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
		break;
	case IXGBE_FDIR_PBALLOC_256K:
		/* 32k - 1 signature filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
		break;
	default:
		/* bad value */
		return (IXGBE_ERR_CONFIG);
	};

	/* Move the flexible bytes to use the ethertype - shift 6 words */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
	    IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
	    IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));

	/*
	 * Poll init-done after we write the register. Estimated times:
	 * 10G: PBALLOC = 11b, timing is 60us
	 * 1G: PBALLOC = 11b, timing is 600us
	 * 100M: PBALLOC = 11b, timing is 6ms
	 *
	 * Multiple these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time. If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;

		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		/* best effort: continue even if INIT_DONE never latched */
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @pballoc: which mode to allocate filters with
 */
s32
ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
{
	u32 fdirctrl = 0;
	u32 pbsize;
	int i;

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced. The new value is the current size minus
	 * flow director memory usage size.
	 */

	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize);

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	/* Send interrupt when 64 filters are left */
	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;

	switch (pballoc) {
	case IXGBE_FDIR_PBALLOC_64K:
		/* 2k - 1 perfect filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
		break;
	case IXGBE_FDIR_PBALLOC_128K:
		/* 4k - 1 perfect filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
		break;
	case IXGBE_FDIR_PBALLOC_256K:
		/* 8k - 1 perfect filters */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
		break;
	default:
		/* bad value */
		return (IXGBE_ERR_CONFIG);
	};

	/* Turn perfect match filtering on */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;

	/* Move the flexible bytes to use the ethertype - shift 6 words */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
	    IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
	    IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));

	/*
	 * Poll init-done after we write the register. Estimated times:
	 * 10G: PBALLOC = 11b, timing is 60us
	 * 1G: PBALLOC = 11b, timing is 600us
	 * 100M: PBALLOC = 11b, timing is 6ms
	 *
	 * Multiple these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time. If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */

	/* Set the maximum length per hash bucket to 0xA filters */
	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;

		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		/* best effort: continue even if INIT_DONE never latched */
		DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
 * @atr_input: input bitstream to compute the hash on
 * @key: 32-bit hash key
 *
 * Returns the 16-bit hash of the input stream under the given key.
 */
u16
ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
{
	/*
	 * The algorithm is as follows:
	 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
	 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
	 * and A[n] x B[n] is bitwise AND between same length strings
	 *
	 * K[n] is 16 bits, defined as:
	 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
	 * for n modulo 32 < 15, K[n] =
	 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
	 *
	 * S[n] is 16 bits, defined as:
	 * for n >= 15, S[n] = S[n:n - 15]
	 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
	 *
	 * To simplify for programming, the algorithm is implemented
	 * in software this way:
	 *
	 * Key[31:0], Stream[335:0]
	 *
	 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
	 * int_key[350:0] = tmp_key[351:1]
	 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
	 *
	 * hash[15:0] = 0;
	 * for (i = 0; i < 351; i++) {
	 * if (int_key[i])
	 * hash ^= int_stream[(i + 15):i];
	 * }
	 */

	union {
		u32 key[11];
		u8 key_stream[44];
	} tmp_key;

	/* the input struct is treated as a raw 42-byte stream */
	u8 *stream = (u8 *)atr_input;
	u8 int_key[44]; /* upper-most bit unused */
	u8 hash_str[46]; /* upper-most 2 bits unused */
	u16 hash_result = 0;
	u16 tmp = 0;
	int i, j, k, h;

	(void) memset(&tmp_key, 0, sizeof (tmp_key));
	/* First load the temporary key stream */
	for (i = 0; i < 11; i++)
		tmp_key.key[i] = key;

	/*
	 * Set the interim key for the hashing. Bit 352 is unused, so we must
	 * shift and compensate when building the key.
	 */
	int_key[0] = tmp_key.key_stream[0] >> 1;
	for (i = 1, j = 0; i < 44; i++) {
		int_key[i] = (tmp_key.key_stream[j] & 0x1) << 7;
		j++;
		int_key[i] |= tmp_key.key_stream[j] >> 1;
	}

	/*
	 * Set the interim bit string for the hashing. Bits 368 and 367 are
	 * unused, so shift and compensate when building the string.
	 */
	hash_str[0] = (stream[40] & 0x7f) >> 1;
	for (i = 1, j = 40; i < 46; i++) {
		hash_str[i] = (stream[j] & 0x1) << 7;
		j++;
		/* the stream wraps: byte 41 is followed by byte 0 */
		if (j > 41)
			j = 0;
		hash_str[i] |= stream[j] >> 1;
	}

	/*
	 * Now compute the hash. i is the index into hash_str, j is into our
	 * key stream, k is counting the number of bits, and h iterates within
	 * each byte.
	 */
	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
		for (h = 0; h < 8 && k < 351; h++, k++) {
			if ((int_key[j] >> h) & 0x1) {
				/*
				 * Key bit is set, XOR in the current 16-bit
				 * string. Example of processing:
				 * h = 0,
				 * tmp = (hash_str[i - 2] & 0 << 16) |
				 * (hash_str[i - 1] & 0xff << 8) |
				 * (hash_str[i] & 0xff >> 0)
				 * So tmp = hash_str[15 + k:k], since the
				 * i + 2 clause rolls off the 16-bit value
				 * h = 7,
				 * tmp = (hash_str[i - 2] & 0x7f << 9) |
				 * (hash_str[i - 1] & 0xff << 1) |
				 * (hash_str[i] & 0x80 >> 7)
				 */
				tmp = ((hash_str[i] & (0xff << h)) >> h);
				tmp |= ((hash_str[i - 1] & 0xff) << (8 - h));
				tmp |= (hash_str[i - 2] & (0xff >> (8 - h)))
				    << (16 - h);
				hash_result ^= tmp;
			}
		}
	}

	return (hash_result);
}

/*
 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
 * @input: input stream to modify
 * @vlan: the VLAN id to load
 */
s32
ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
{
	/* stored little-endian: low byte at the lower offset */
	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
 * @input: input stream to modify
 * @src_addr: the IP address to load
 */
s32
ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
{
	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
	    (src_addr >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
	    (src_addr >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
 * @input: input stream to modify
 * @dst_addr: the IP address to load
 */
s32
ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
{
	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET
+ 3] = dst_addr >> 24; 1569 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] = 1570 (dst_addr >> 16) & 0xff; 1571 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] = 1572 (dst_addr >> 8) & 0xff; 1573 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff; 1574 1575 return (IXGBE_SUCCESS); 1576 } 1577 1578 /* 1579 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address 1580 * @input: input stream to modify 1581 * @src_addr_1: the first 4 bytes of the IP address to load 1582 * @src_addr_2: the second 4 bytes of the IP address to load 1583 * @src_addr_3: the third 4 bytes of the IP address to load 1584 * @src_addr_4: the fourth 4 bytes of the IP address to load 1585 */ 1586 s32 1587 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, 1588 u32 src_addr_1, u32 src_addr_2, u32 src_addr_3, u32 src_addr_4) 1589 { 1590 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff; 1591 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] = 1592 (src_addr_4 >> 8) & 0xff; 1593 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] = 1594 (src_addr_4 >> 16) & 0xff; 1595 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24; 1596 1597 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff; 1598 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] = 1599 (src_addr_3 >> 8) & 0xff; 1600 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] = 1601 (src_addr_3 >> 16) & 0xff; 1602 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24; 1603 1604 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff; 1605 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] = 1606 (src_addr_2 >> 8) & 0xff; 1607 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] = 1608 (src_addr_2 >> 16) & 0xff; 1609 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24; 1610 1611 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff; 1612 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] = 1613 (src_addr_1 >> 8) & 
0xff; 1614 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] = 1615 (src_addr_1 >> 16) & 0xff; 1616 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24; 1617 1618 return (IXGBE_SUCCESS); 1619 } 1620 1621 /* 1622 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address 1623 * @input: input stream to modify 1624 * @dst_addr_1: the first 4 bytes of the IP address to load 1625 * @dst_addr_2: the second 4 bytes of the IP address to load 1626 * @dst_addr_3: the third 4 bytes of the IP address to load 1627 * @dst_addr_4: the fourth 4 bytes of the IP address to load 1628 */ 1629 s32 1630 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, 1631 u32 dst_addr_1, u32 dst_addr_2, u32 dst_addr_3, u32 dst_addr_4) 1632 { 1633 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff; 1634 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] = 1635 (dst_addr_4 >> 8) & 0xff; 1636 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] = 1637 (dst_addr_4 >> 16) & 0xff; 1638 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24; 1639 1640 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff; 1641 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] = 1642 (dst_addr_3 >> 8) & 0xff; 1643 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] = 1644 (dst_addr_3 >> 16) & 0xff; 1645 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24; 1646 1647 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff; 1648 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] = 1649 (dst_addr_2 >> 8) & 0xff; 1650 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] = 1651 (dst_addr_2 >> 16) & 0xff; 1652 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24; 1653 1654 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff; 1655 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] = 1656 (dst_addr_1 >> 8) & 0xff; 1657 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] = 1658 (dst_addr_1 >> 16) & 
0xff; 1659 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24; 1660 1661 return (IXGBE_SUCCESS); 1662 } 1663 1664 /* 1665 * ixgbe_atr_set_src_port_82599 - Sets the source port 1666 * @input: input stream to modify 1667 * @src_port: the source port to load 1668 */ 1669 s32 1670 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port) 1671 { 1672 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8; 1673 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff; 1674 1675 return (IXGBE_SUCCESS); 1676 } 1677 1678 /* 1679 * ixgbe_atr_set_dst_port_82599 - Sets the destination port 1680 * @input: input stream to modify 1681 * @dst_port: the destination port to load 1682 */ 1683 s32 1684 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port) 1685 { 1686 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8; 1687 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff; 1688 1689 return (IXGBE_SUCCESS); 1690 } 1691 1692 /* 1693 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes 1694 * @input: input stream to modify 1695 * @flex_bytes: the flexible bytes to load 1696 */ 1697 s32 1698 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte) 1699 { 1700 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8; 1701 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff; 1702 1703 return (IXGBE_SUCCESS); 1704 } 1705 1706 /* 1707 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool 1708 * @input: input stream to modify 1709 * @vm_pool: the Virtual Machine pool to load 1710 */ 1711 s32 1712 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool) 1713 { 1714 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool; 1715 1716 return (IXGBE_SUCCESS); 1717 } 1718 1719 /* 1720 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type 1721 * @input: input stream to modify 1722 * @l4type: the layer 4 type value to load 1723 
*/ 1724 s32 1725 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type) 1726 { 1727 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type; 1728 1729 return (IXGBE_SUCCESS); 1730 } 1731 1732 /* 1733 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream 1734 * @input: input stream to search 1735 * @vlan: the VLAN id to load 1736 */ 1737 s32 1738 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan) 1739 { 1740 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET]; 1741 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8; 1742 1743 return (IXGBE_SUCCESS); 1744 } 1745 1746 /* 1747 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address 1748 * @input: input stream to search 1749 * @src_addr: the IP address to load 1750 */ 1751 s32 1752 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr) 1753 { 1754 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET]; 1755 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8; 1756 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16; 1757 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24; 1758 1759 return (IXGBE_SUCCESS); 1760 } 1761 1762 /* 1763 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address 1764 * @input: input stream to search 1765 * @dst_addr: the IP address to load 1766 */ 1767 s32 1768 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr) 1769 { 1770 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; 1771 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; 1772 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; 1773 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; 1774 1775 return (IXGBE_SUCCESS); 1776 } 1777 1778 /* 1779 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address 1780 * @input: input stream to search 1781 * @src_addr_1: the first 4 bytes of the IP address to load 1782 * 
@src_addr_2: the second 4 bytes of the IP address to load 1783 * @src_addr_3: the third 4 bytes of the IP address to load 1784 * @src_addr_4: the fourth 4 bytes of the IP address to load 1785 */ 1786 s32 1787 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, 1788 u32 *src_addr_1, u32 *src_addr_2, u32 *src_addr_3, u32 *src_addr_4) 1789 { 1790 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; 1791 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; 1792 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; 1793 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; 1794 1795 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; 1796 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; 1797 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; 1798 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; 1799 1800 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; 1801 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8; 1802 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16; 1803 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24; 1804 1805 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET]; 1806 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8; 1807 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16; 1808 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24; 1809 1810 return (IXGBE_SUCCESS); 1811 } 1812 1813 /* 1814 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address 1815 * @input: input stream to search 1816 * @dst_addr_1: the first 4 bytes of the IP address to load 1817 * @dst_addr_2: the second 4 bytes of the IP address to load 1818 * @dst_addr_3: the third 4 bytes of the IP address to load 1819 * @dst_addr_4: the fourth 4 bytes of the IP address to load 1820 */ 1821 s32 1822 
ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, 1823 u32 *dst_addr_1, u32 *dst_addr_2, u32 *dst_addr_3, u32 *dst_addr_4) 1824 { 1825 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12]; 1826 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8; 1827 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16; 1828 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24; 1829 1830 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8]; 1831 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8; 1832 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16; 1833 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24; 1834 1835 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4]; 1836 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8; 1837 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16; 1838 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24; 1839 1840 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET]; 1841 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8; 1842 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16; 1843 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24; 1844 1845 return (IXGBE_SUCCESS); 1846 } 1847 1848 /* 1849 * ixgbe_atr_get_src_port_82599 - Gets the source port 1850 * @input: input stream to modify 1851 * @src_port: the source port to load 1852 * 1853 * Even though the input is given in big-endian, the FDIRPORT registers 1854 * expect the ports to be programmed in little-endian. Hence the need to swap 1855 * endianness when retrieving the data. This can be confusing since the 1856 * internal hash engine expects it to be big-endian. 
1857 */ 1858 s32 1859 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port) 1860 { 1861 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; 1862 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; 1863 1864 return (IXGBE_SUCCESS); 1865 } 1866 1867 /* 1868 * ixgbe_atr_get_dst_port_82599 - Gets the destination port 1869 * @input: input stream to modify 1870 * @dst_port: the destination port to load 1871 * 1872 * Even though the input is given in big-endian, the FDIRPORT registers 1873 * expect the ports to be programmed in little-endian. Hence the need to swap 1874 * endianness when retrieving the data. This can be confusing since the 1875 * internal hash engine expects it to be big-endian. 1876 */ 1877 s32 1878 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port) 1879 { 1880 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8; 1881 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1]; 1882 1883 return (IXGBE_SUCCESS); 1884 } 1885 1886 /* 1887 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes 1888 * @input: input stream to modify 1889 * @flex_bytes: the flexible bytes to load 1890 */ 1891 s32 1892 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte) 1893 { 1894 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET]; 1895 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8; 1896 1897 return (IXGBE_SUCCESS); 1898 } 1899 1900 /* 1901 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool 1902 * @input: input stream to modify 1903 * @vm_pool: the Virtual Machine pool to load 1904 */ 1905 s32 1906 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool) 1907 { 1908 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET]; 1909 1910 return (IXGBE_SUCCESS); 1911 } 1912 1913 /* 1914 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type 1915 * @input: input stream to modify 1916 * @l4type: the layer 4 type 
value to load 1917 */ 1918 s32 1919 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type) 1920 { 1921 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; 1922 1923 return (IXGBE_SUCCESS); 1924 } 1925 1926 /* 1927 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1928 * @hw: pointer to hardware structure 1929 * @stream: input bitstream 1930 * @queue: queue index to direct traffic to 1931 */ 1932 s32 1933 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1934 struct ixgbe_atr_input *input, u8 queue) 1935 { 1936 u64 fdirhashcmd; 1937 u64 fdircmd; 1938 u32 fdirhash; 1939 u16 bucket_hash, sig_hash; 1940 u8 l4type; 1941 1942 bucket_hash = ixgbe_atr_compute_hash_82599(input, 1943 IXGBE_ATR_BUCKET_HASH_KEY); 1944 1945 /* bucket_hash is only 15 bits */ 1946 bucket_hash &= IXGBE_ATR_HASH_MASK; 1947 1948 sig_hash = ixgbe_atr_compute_hash_82599(input, 1949 IXGBE_ATR_SIGNATURE_HASH_KEY); 1950 1951 /* Get the l4type in order to program FDIRCMD properly */ 1952 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ 1953 (void) ixgbe_atr_get_l4type_82599(input, &l4type); 1954 1955 /* 1956 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1957 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
1958 */ 1959 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; 1960 1961 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1962 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); 1963 1964 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1965 case IXGBE_ATR_L4TYPE_TCP: 1966 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; 1967 break; 1968 case IXGBE_ATR_L4TYPE_UDP: 1969 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; 1970 break; 1971 case IXGBE_ATR_L4TYPE_SCTP: 1972 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; 1973 break; 1974 default: 1975 DEBUGOUT(" Error on l4type input\n"); 1976 return (IXGBE_ERR_CONFIG); 1977 } 1978 1979 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) 1980 fdircmd |= IXGBE_FDIRCMD_IPV6; 1981 1982 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); 1983 fdirhashcmd = ((fdircmd << 32) | fdirhash); 1984 1985 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF); 1986 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1987 1988 return (IXGBE_SUCCESS); 1989 } 1990 1991 /* 1992 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter 1993 * @hw: pointer to hardware structure 1994 * @input: input bitstream 1995 * @queue: queue index to direct traffic to 1996 * 1997 * Note that the caller to this function must lock before calling, since the 1998 * hardware writes must be protected from one another. 
 */
s32
ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
    struct ixgbe_atr_input *input, u16 soft_id, u8 queue)
{
	u32 fdircmd = 0;
	u32 fdirhash;
	u32 src_ipv4, dst_ipv4;
	/* Only loaded and used on the IPv6 path below */
	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
	u16 src_port, dst_port, vlan_id, flex_bytes;
	u16 bucket_hash;
	u8 l4type;

	/* Get our input values */
	(void) ixgbe_atr_get_l4type_82599(input, &l4type);

	/*
	 * Check l4type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
	case IXGBE_ATR_L4TYPE_TCP:
		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
		break;
	case IXGBE_ATR_L4TYPE_UDP:
		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
		break;
	case IXGBE_ATR_L4TYPE_SCTP:
		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
		break;
	default:
		DEBUGOUT(" Error on l4type input\n");
		return (IXGBE_ERR_CONFIG);
	}

	bucket_hash = ixgbe_atr_compute_hash_82599(input,
	    IXGBE_ATR_BUCKET_HASH_KEY);

	/* bucket_hash is only 15 bits */
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	/* Pull the remaining fields out of the input stream */
	(void) ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
	(void) ixgbe_atr_get_src_port_82599(input, &src_port);
	(void) ixgbe_atr_get_dst_port_82599(input, &dst_port);
	(void) ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);

	/* soft_id goes in the SW index field, bucket hash in the low bits */
	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;

	/* Now figure out if we're IPv4 or IPv6 */
	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
		/* IPv6 */
		(void) ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1,
		    &src_ipv6_2, &src_ipv6_3, &src_ipv6_4);

		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
		/* The last 4 bytes is the same register as IPv4 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);

		fdircmd |= IXGBE_FDIRCMD_IPV6;
		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
	} else {
		/* IPv4 */
		(void) ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);

	}

	/* Destination address register is shared between IPv4 and IPv6 */
	(void) ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
	    (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
	    (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));

	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
	fdircmd |= IXGBE_FDIRCMD_LAST;
	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/*
	 * The FDIRCMD write commits the filter, so it must come after
	 * every other filter register has been loaded.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 */
s32
ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	/*
	 * Writing the register address (bits 15:8) with the command bit
	 * set triggers the analog access; the result is then read back
	 * from CORECTL. NOTE(review): WRITE_CMD on a read matches the
	 * Intel shared code for this part — confirm against datasheet.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
	    (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	/* Give the analog access time to complete before reading back */
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	*val = (u8)core_ctl;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
2118 */ 2119 s32 2120 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) 2121 { 2122 u32 core_ctl; 2123 2124 core_ctl = (reg << 8) | val; 2125 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); 2126 IXGBE_WRITE_FLUSH(hw); 2127 usec_delay(10); 2128 2129 return (IXGBE_SUCCESS); 2130 } 2131 2132 /* 2133 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx 2134 * @hw: pointer to hardware structure 2135 * 2136 * Starts the hardware using the generic start_hw function. 2137 * Then performs revision-specific operations: 2138 * Clears the rate limiter registers. 2139 */ 2140 s32 2141 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw) 2142 { 2143 u32 q_num; 2144 s32 ret_val = IXGBE_SUCCESS; 2145 2146 ret_val = ixgbe_start_hw_generic(hw); 2147 2148 /* Clear the rate limiters */ 2149 for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) { 2150 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num); 2151 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); 2152 } 2153 IXGBE_WRITE_FLUSH(hw); 2154 2155 return (ret_val); 2156 } 2157 2158 /* 2159 * ixgbe_identify_phy_82599 - Get physical layer module 2160 * @hw: pointer to hardware structure 2161 * 2162 * Determines the physical layer module found on the current adapter. 2163 * If PHY already detected, maintains current PHY type in hw struct, 2164 * otherwise executes the PHY detection routine. 2165 */ 2166 s32 2167 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 2168 { 2169 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 2170 2171 /* Detect PHY if not unknown - returns success if already detected. 
*/ 2172 status = ixgbe_identify_phy_generic(hw); 2173 if (status != IXGBE_SUCCESS) 2174 status = ixgbe_identify_sfp_module_generic(hw); 2175 /* Set PHY type none if no PHY detected */ 2176 if (hw->phy.type == ixgbe_phy_unknown) { 2177 hw->phy.type = ixgbe_phy_none; 2178 status = IXGBE_SUCCESS; 2179 } 2180 2181 /* Return error if SFP module has been detected but is not supported */ 2182 if (hw->phy.type == ixgbe_phy_sfp_unsupported) 2183 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 2184 2185 return (status); 2186 } 2187 2188 /* 2189 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type 2190 * @hw: pointer to hardware structure 2191 * 2192 * Determines physical layer capabilities of the current configuration. 2193 */ 2194 u32 2195 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) 2196 { 2197 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 2198 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2199 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 2200 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 2201 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; 2202 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 2203 u16 ext_ability = 0; 2204 u8 comp_codes_10g = 0; 2205 2206 hw->phy.ops.identify(hw); 2207 2208 if (hw->phy.type == ixgbe_phy_tn || 2209 hw->phy.type == ixgbe_phy_cu_unknown) { 2210 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, 2211 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); 2212 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) 2213 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 2214 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) 2215 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; 2216 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) 2217 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 2218 goto out; 2219 } 2220 2221 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 2222 case IXGBE_AUTOC_LMS_1G_AN: 2223 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 2224 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) 
{ 2225 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | 2226 IXGBE_PHYSICAL_LAYER_1000BASE_BX; 2227 goto out; 2228 } else 2229 /* SFI mode so read SFP module */ 2230 goto sfp_check; 2231 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 2232 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) 2233 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; 2234 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) 2235 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 2236 goto out; 2237 case IXGBE_AUTOC_LMS_10G_SERIAL: 2238 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { 2239 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; 2240 goto out; 2241 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) 2242 goto sfp_check; 2243 break; 2244 case IXGBE_AUTOC_LMS_KX4_KX_KR: 2245 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 2246 if (autoc & IXGBE_AUTOC_KX_SUPP) 2247 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; 2248 if (autoc & IXGBE_AUTOC_KX4_SUPP) 2249 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 2250 if (autoc & IXGBE_AUTOC_KR_SUPP) 2251 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; 2252 goto out; 2253 default: 2254 goto out; 2255 } 2256 2257 sfp_check: 2258 /* 2259 * SFP check must be done last since DA modules are sometimes used to 2260 * test KR mode - we need to id KR mode correctly before SFP module. 
2261 * Call identify_sfp because the pluggable module may have changed 2262 */ 2263 hw->phy.ops.identify_sfp(hw); 2264 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) 2265 goto out; 2266 2267 switch (hw->phy.type) { 2268 case ixgbe_phy_tw_tyco: 2269 case ixgbe_phy_tw_unknown: 2270 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; 2271 break; 2272 case ixgbe_phy_sfp_avago: 2273 case ixgbe_phy_sfp_ftl: 2274 case ixgbe_phy_sfp_intel: 2275 case ixgbe_phy_sfp_unknown: 2276 hw->phy.ops.read_i2c_eeprom(hw, 2277 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); 2278 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 2279 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 2280 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 2281 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 2282 break; 2283 default: 2284 break; 2285 } 2286 2287 out: 2288 return (physical_layer); 2289 } 2290 2291 /* 2292 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 2293 * @hw: pointer to hardware structure 2294 * @regval: register value to write to RXCTRL 2295 * 2296 * Enables the Rx DMA unit for 82599 2297 */ 2298 s32 2299 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) 2300 { 2301 #define IXGBE_MAX_SECRX_POLL 30 2302 int i; 2303 int secrxreg; 2304 2305 /* 2306 * Workaround for 82599 silicon errata when enabling the Rx datapath. 2307 * If traffic is incoming before we enable the Rx unit, it could hang 2308 * the Rx DMA unit. Therefore, make sure the security engine is 2309 * completely disabled prior to enabling the Rx unit. 
2310 */ 2311 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2312 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2313 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2314 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 2315 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 2316 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 2317 break; 2318 else 2319 /* Use interrupt-safe sleep just in case */ 2320 usec_delay(10); 2321 } 2322 2323 /* For informational purposes only */ 2324 if (i >= IXGBE_MAX_SECRX_POLL) 2325 DEBUGOUT("Rx unit being enabled before security " 2326 "path fully disabled. Continuing with init.\n"); 2327 2328 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2329 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2330 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2331 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2332 IXGBE_WRITE_FLUSH(hw); 2333 2334 return (IXGBE_SUCCESS); 2335 } 2336 2337 /* 2338 * ixgbe_get_device_caps_82599 - Get additional device capabilities 2339 * @hw: pointer to hardware structure 2340 * @device_caps: the EEPROM word with the extra device capabilities 2341 * 2342 * This function will read the EEPROM location for the device capabilities, 2343 * and return the word through device_caps. 2344 */ 2345 s32 2346 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps) 2347 { 2348 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 2349 2350 return (IXGBE_SUCCESS); 2351 } 2352 2353 /* 2354 * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599 2355 * @hw: pointer to hardware structure 2356 * @san_mac_offset: SAN MAC address offset 2357 * 2358 * This function will read the EEPROM location for the SAN MAC address 2359 * pointer, and returns the value at that location. This is used in both 2360 * get and set mac_addr routines. 2361 */ 2362 s32 2363 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw, u16 *san_mac_offset) 2364 { 2365 /* 2366 * First read the EEPROM pointer to see if the MAC addresses are 2367 * available. 
2368 */ 2369 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); 2370 2371 return (IXGBE_SUCCESS); 2372 } 2373 2374 /* 2375 * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599 2376 * @hw: pointer to hardware structure 2377 * @san_mac_addr: SAN MAC address 2378 * 2379 * Reads the SAN MAC address from the EEPROM, if it's available. This is 2380 * per-port, so set_lan_id() must be called before reading the addresses. 2381 * set_lan_id() is called by identify_sfp(), but this cannot be relied 2382 * upon for non-SFP connections, so we must call it here. 2383 */ 2384 s32 2385 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr) 2386 { 2387 u16 san_mac_data, san_mac_offset; 2388 u8 i; 2389 2390 /* 2391 * First read the EEPROM pointer to see if the MAC addresses are 2392 * available. If they're not, no point in calling set_lan_id() here. 2393 */ 2394 (void) ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset); 2395 2396 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { 2397 /* 2398 * No addresses available in this EEPROM. It's not an 2399 * error though, so just wipe the local address and return. 2400 */ 2401 for (i = 0; i < 6; i++) 2402 san_mac_addr[i] = 0xFF; 2403 2404 goto san_mac_addr_out; 2405 } 2406 2407 /* make sure we know which port we need to program */ 2408 hw->mac.ops.set_lan_id(hw); 2409 /* apply the port offset to the address offset */ 2410 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2411 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2412 for (i = 0; i < 3; i++) { 2413 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); 2414 san_mac_addr[i * 2] = (u8)(san_mac_data); 2415 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2416 san_mac_offset++; 2417 } 2418 2419 san_mac_addr_out: 2420 return (IXGBE_SUCCESS); 2421 } 2422 2423 /* 2424 * ixgbe_set_san_mac_addr_82599 - Write the SAN MAC address to the EEPROM 2425 * @hw: pointer to hardware structure 2426 * @san_mac_addr: SAN MAC address 2427 * 2428 * Write a SAN MAC address to the EEPROM. 2429 */ 2430 s32 2431 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr) 2432 { 2433 s32 status = IXGBE_SUCCESS; 2434 u16 san_mac_data, san_mac_offset; 2435 u8 i; 2436 2437 /* Look for SAN mac address pointer. If not defined, return */ 2438 (void) ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset); 2439 2440 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { 2441 status = IXGBE_ERR_NO_SAN_ADDR_PTR; 2442 goto san_mac_addr_out; 2443 } 2444 2445 /* Make sure we know which port we need to write */ 2446 hw->mac.ops.set_lan_id(hw); 2447 /* Apply the port offset to the address offset */ 2448 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2449 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2450 2451 for (i = 0; i < 3; i++) { 2452 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); 2453 san_mac_data |= (u16)(san_mac_addr[i * 2]); 2454 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); 2455 san_mac_offset++; 2456 } 2457 2458 san_mac_addr_out: 2459 return (status); 2460 } 2461