// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/phylink.h>
#include <net/ip.h>
#include <linux/if_vlan.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"

char txgbe_driver_name[] = "txgbe";

/* txgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id txgbe_pci_tbl[] = {
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0},
	/* required last entry */
	{ .device = 0 }
};

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

/**
 * txgbe_check_minimum_link - report the device's PCIe link status
 * @wx: wx structure
 *
 * Prints the negotiated PCIe link speed/width so users can spot a
 * bandwidth-limited slot.
 */
static void txgbe_check_minimum_link(struct wx *wx)
{
	struct pci_dev *pdev;

	pdev = wx->pdev;
	pcie_print_link_status(pdev);
}

/**
 * txgbe_enumerate_functions - Get the number of ports this device has
 * @wx: wx structure
 *
 * This function enumerates the physical functions co-located on a single
 * slot, in order to determine how many ports a device has. This is most
 * useful in determining the required GT/s of PCIe bandwidth necessary for
 * optimal performance.
 *
 * Return: number of matching functions on the bus, or -EINVAL if any
 * device on the bus has a different vendor/device ID (e.g. a function was
 * detached for VT-d passthrough), which makes the count unreliable.
 **/
static int txgbe_enumerate_functions(struct wx *wx)
{
	struct pci_dev *entry, *pdev = wx->pdev;
	int physfns = 0;

	list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
		/* When the devices on the bus don't all match our device ID,
		 * we can't reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d.
		 */
		if (entry->vendor != pdev->vendor ||
		    entry->device != pdev->device)
			return -EINVAL;

		physfns++;
	}

	return physfns;
}

/**
 * txgbe_up_complete - finish bringing the interface up
 * @wx: wx structure
 *
 * Takes over hardware control, enables NAPI/interrupts/transmit queues,
 * brings the link up (directly for AML MACs, via phylink otherwise) and
 * signals VFs that the PF reset has completed.
 */
static void txgbe_up_complete(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;

	wx_control_hw(wx, true);
	wx_configure_vectors(wx);

	/* make sure to complete pre-operations */
	smp_mb__before_atomic();
	wx_napi_enable_all(wx);

	if (wx->mac.type == wx_mac_aml) {
		u32 reg;

		/* Force the AML MAC Tx speed field to 25G.
		 * NOTE(review): the read uses TXGBE_AML_MAC_TX_CFG but the
		 * write-back targets WX_MAC_TX_CFG — confirm these offsets
		 * refer to the same register on AML parts.
		 */
		reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
		reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
		reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
		wr32(wx, WX_MAC_TX_CFG, reg);
		txgbe_enable_sec_tx_path(wx);
		netif_carrier_on(wx->netdev);
	} else {
		phylink_start(wx->phylink);
	}

	/* clear any pending interrupts, may auto mask */
	rd32(wx, WX_PX_IC(0));
	rd32(wx, WX_PX_IC(1));
	rd32(wx, WX_PX_MISC_IC);
	txgbe_irq_enable(wx, true);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD,
	      WX_CFG_PORT_CTL_PFRSTD);
	/* update setting rx tx for all active vfs */
	wx_set_all_vfs(wx);
}

/**
 * txgbe_reset - reset the hardware, preserving the default MAC filter
 * @wx: wx structure
 *
 * Performs a hardware reset, restarts the MAC, and re-installs the
 * default MAC address filter so the user-set address survives the reset.
 * Also resets PTP state if PTP was running.
 */
static void txgbe_reset(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u8 old_addr[ETH_ALEN];
	int err;

	err = txgbe_reset_hw(wx);
	if (err != 0)
		wx_err(wx, "Hardware Error: %d\n", err);

	wx_start_hw(wx);
	/* do not flush user set addresses */
	memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
	wx_flush_sw_mac_table(wx);
	wx_mac_set_default_filter(wx, old_addr);

	if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
		wx_ptp_reset(wx);
}

/**
 * txgbe_disable_device - quiesce Rx/Tx and interrupts
 * @wx: wx structure
 *
 * Stops all receive and transmit activity, disables interrupts and NAPI,
 * marks VFs inactive, and shuts down the Tx DMA engine. The MAC
 * transmitter is left enabled when NCSI/WoL management traffic must keep
 * flowing. Statistics are updated one last time before return.
 */
static void txgbe_disable_device(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 i;

	wx_disable_pcie_master(wx);
	/* disable receives */
	wx_disable_rx(wx);

	/* disable all enabled rx queues */
	for (i = 0; i < wx->num_rx_queues; i++)
		/* this call also flushes the previous write */
		wx_disable_rx_queue(wx, wx->rx_ring[i]);

	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	wx_irq_disable(wx);
	wx_napi_disable_all(wx);

	if (wx->bus.func < 2)
		wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
	else
		wx_err(wx, "%s: invalid bus lan id %d\n",
		       __func__, wx->bus.func);

	if (wx->num_vfs) {
		/* Clear EITR Select mapping */
		wr32(wx, WX_PX_ITRSEL, 0);
		/* Mark all the VFs as inactive */
		for (i = 0; i < wx->num_vfs; i++)
			wx->vfinfo[i].clear_to_send = 0;
		/* update setting rx tx for all active vfs */
		wx_set_all_vfs(wx);
	}

	if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
	      ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
		/* disable mac transmitter */
		wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
	}

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < wx->num_tx_queues; i++) {
		u8 reg_idx = wx->tx_ring[i]->reg_idx;

		wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	}

	/* Disable the Tx DMA engine */
	wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0);

	wx_update_stats(wx);
}

/**
 * txgbe_down - take the interface fully down
 * @wx: wx structure
 *
 * Quiesces the device, resets the hardware, drops the link (carrier off
 * for AML MACs, phylink_stop otherwise) and cleans all Tx/Rx rings.
 */
void txgbe_down(struct wx *wx)
{
	txgbe_disable_device(wx);
	txgbe_reset(wx);
	if (wx->mac.type == wx_mac_aml)
		netif_carrier_off(wx->netdev);
	else
		phylink_stop(wx->phylink);

	wx_clean_all_tx_rings(wx);
	wx_clean_all_rx_rings(wx);
}

/**
 * txgbe_up - bring the interface back up
 * @wx: wx structure
 *
 * Counterpart of txgbe_down(): reconfigures the hardware, reinitializes
 * PTP and completes the bring-up sequence.
 */
void txgbe_up(struct wx *wx)
{
	wx_configure(wx);
	wx_ptp_init(wx);
	txgbe_up_complete(wx);
}

/**
 * txgbe_init_type_code - Initialize the shared code
 * @wx: pointer to hardware structure
 *
 * Derives the MAC family from the PCI device ID and the media type from
 * the low nibble-masked subsystem device ID.
 **/
static void txgbe_init_type_code(struct wx *wx)
{
	u8 device_type = wx->subsystem_device_id & 0xF0;

	switch (wx->device_id) {
	case TXGBE_DEV_ID_SP1000:
	case TXGBE_DEV_ID_WX1820:
		wx->mac.type = wx_mac_sp;
		break;
	case TXGBE_DEV_ID_AML5010:
	case TXGBE_DEV_ID_AML5110:
	case TXGBE_DEV_ID_AML5025:
	case TXGBE_DEV_ID_AML5125:
	case TXGBE_DEV_ID_AML5040:
	case TXGBE_DEV_ID_AML5140:
		wx->mac.type = wx_mac_aml;
		break;
	default:
		wx->mac.type = wx_mac_unknown;
		break;
	}

	switch (device_type) {
	case TXGBE_ID_SFP:
		wx->media_type = sp_media_fiber;
		break;
	case TXGBE_ID_XAUI:
	case TXGBE_ID_SGMII:
		wx->media_type = sp_media_copper;
		break;
	case TXGBE_ID_KR_KX_KX4:
	case TXGBE_ID_MAC_XAUI:
	case TXGBE_ID_MAC_SGMII:
		wx->media_type = sp_media_backplane;
		break;
	case TXGBE_ID_SFI_XAUI:
		/* dual-media part: function 0 is fiber, others copper */
		if (wx->bus.func == 0)
			wx->media_type = sp_media_fiber;
		else
			wx->media_type = sp_media_copper;
		break;
	default:
		wx->media_type = sp_media_unknown;
		break;
	}
}

/**
 * txgbe_sw_init - Initialize general software structures (struct wx)
 * @wx: board private structure to initialize
 *
 * Fills in MAC limits, queries PCI config space, detects MAC/media type,
 * sizes RSS/FDIR ring features from the online CPU count, and sets the
 * default ring sizes, ITR mode and work limits.
 *
 * Return: 0 on success, negative errno from wx_sw_init() on failure.
 **/
static int txgbe_sw_init(struct wx *wx)
{
	u16 msix_count = 0;
	int err;

	wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
	wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
	wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
	wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
	wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE;
	wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
	wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;

	/* PCI config space info */
	err = wx_sw_init(wx);
	if (err < 0)
		return err;

	txgbe_init_type_code(wx);

	/* Set common capability flags and settings */
	wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS;
	err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS);
	if (err)
		wx_err(wx, "Do not support MSI-X\n");
	wx->mac.max_msix_vectors = msix_count;

	wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES,
						   num_online_cpus());
	wx->rss_enabled = true;

	wx->ring_feature[RING_F_FDIR].limit = min_t(int, TXGBE_MAX_FDIR_INDICES,
						    num_online_cpus());
	set_bit(WX_FLAG_FDIR_CAPABLE, wx->flags);
	set_bit(WX_FLAG_FDIR_HASH, wx->flags);
	wx->atr_sample_rate = TXGBE_DEFAULT_ATR_SAMPLE_RATE;
	wx->atr = txgbe_atr;
	wx->configure_fdir = txgbe_configure_fdir;

	set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);

	/* enable itr by default in dynamic mode */
	wx->rx_itr_setting = 1;
	wx->tx_itr_setting = 1;

	/* set default ring sizes */
	wx->tx_ring_count = TXGBE_DEFAULT_TXD;
	wx->rx_ring_count = TXGBE_DEFAULT_RXD;
	wx->mbx.size = WX_VXMAILBOX_SIZE;

	/* set default work limits */
	wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
	wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;

	wx->setup_tc = txgbe_setup_tc;
	wx->do_reset = txgbe_do_reset;
	set_bit(0, &wx->fwd_bitmask);

	switch (wx->mac.type) {
	case wx_mac_sp:
		break;
	case wx_mac_aml:
		/* AML parts use a ring-based software/firmware semaphore */
		set_bit(WX_FLAG_SWFW_RING, wx->flags);
		wx->swfw_index = 0;
		break;
	default:
		break;
	}

	return 0;
}

/* Initialize Flow Director filter state (count and lock). */
static void txgbe_init_fdir(struct txgbe *txgbe)
{
	txgbe->fdir_filter_count = 0;
	spin_lock_init(&txgbe->fdir_perfect_lock);
}

/**
 * txgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).
 **/
static int txgbe_open(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int err;

	err = wx_setup_resources(wx);
	if (err)
		goto err_reset;

	wx_configure(wx);

	err = txgbe_request_queue_irqs(wx);
	if (err)
		goto err_free_resources;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
	if (err)
		goto err_free_irq;

	err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
	if (err)
		goto err_free_irq;

	wx_ptp_init(wx);

	txgbe_up_complete(wx);

	return 0;

err_free_irq:
	wx_free_irq(wx);
err_free_resources:
	wx_free_resources(wx);
err_reset:
	txgbe_reset(wx);

	return err;
}

/**
 * txgbe_close_suspend - actions necessary to both suspend and close flows
 * @wx: the private wx struct
 *
 * This function should contain the necessary work common to both suspending
 * and closing of the device.
 */
static void txgbe_close_suspend(struct wx *wx)
{
	wx_ptp_suspend(wx);
	txgbe_disable_device(wx);
	wx_free_resources(wx);
}

/**
 * txgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int txgbe_close(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	wx_ptp_stop(wx);
	txgbe_down(wx);
	wx_free_irq(wx);
	wx_free_resources(wx);
	txgbe_fdir_filter_exit(wx);
	wx_control_hw(wx, false);

	return 0;
}

/**
 * txgbe_dev_shutdown - common device shutdown work
 * @pdev: PCI device information struct
 *
 * Detaches the netdev, runs the suspend/close path if the interface was
 * running, releases hardware control and disables the PCI device.
 */
static void txgbe_dev_shutdown(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct net_device *netdev;

	netdev = wx->netdev;
	netif_device_detach(netdev);

	rtnl_lock();
	if (netif_running(netdev))
		txgbe_close_suspend(wx);
	rtnl_unlock();

	wx_control_hw(wx, false);

	pci_disable_device(pdev);
}

/* PCI shutdown callback: quiesce the device and, on power-off, put it
 * into D3hot without wake capability.
 */
static void txgbe_shutdown(struct pci_dev *pdev)
{
	txgbe_dev_shutdown(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * txgbe_setup_tc - routine to configure net_device for multiple traffic
 * classes.
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable (0 resets traffic classes)
 *
 * Return: 0 (always succeeds)
 */
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct wx *wx = netdev_priv(dev);
	struct txgbe *txgbe = wx->priv;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		txgbe_close(dev);
	else
		txgbe_reset(wx);

	txgbe_free_misc_irq(txgbe);
	wx_clear_interrupt_scheme(wx);

	if (tc)
		netdev_set_num_tc(dev, tc);
	else
		netdev_reset_tc(dev);

	wx_init_interrupt_scheme(wx);
	txgbe_setup_misc_irq(txgbe);

	if (netif_running(dev))
		txgbe_open(dev);

	return 0;
}

/**
 * txgbe_reinit_locked - down/up cycle under the reset state bit
 * @wx: wx structure
 *
 * Waits for exclusive ownership of the reset state, then performs a full
 * down/up cycle. Bails out with an error message if the device stays busy.
 */
static void txgbe_reinit_locked(struct wx *wx)
{
	int err = 0;

	netif_trans_update(wx->netdev);

	err = wx_set_state_reset(wx);
	if (err) {
		wx_err(wx, "wait device reset timeout\n");
		return;
	}

	txgbe_down(wx);
	txgbe_up(wx);

	clear_bit(WX_STATE_RESETTING, wx->state);
}

/**
 * txgbe_do_reset - reset the device, with or without a full reinit
 * @netdev: network interface device structure
 *
 * Runs a full down/up reinit when the interface is up; otherwise only a
 * hardware reset is needed.
 */
void txgbe_do_reset(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (netif_running(netdev))
		txgbe_reinit_locked(wx);
	else
		txgbe_reset(wx);
}

static const struct net_device_ops txgbe_netdev_ops = {
	.ndo_open		= txgbe_open,
	.ndo_stop		= txgbe_close,
	.ndo_change_mtu		= wx_change_mtu,
	.ndo_start_xmit		= wx_xmit_frame,
	.ndo_set_rx_mode	= wx_set_rx_mode,
	.ndo_set_features	= wx_set_features,
	.ndo_fix_features	= wx_fix_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= wx_set_mac,
	.ndo_get_stats64	= wx_get_stats64,
	.ndo_vlan_rx_add_vid	= wx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= wx_vlan_rx_kill_vid,
	.ndo_hwtstamp_set	= wx_hwtstamp_set,
	.ndo_hwtstamp_get	= wx_hwtstamp_get,
};

/**
 * txgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in txgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * txgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the wx private structure,
 * and a hardware reset occur.
 **/
static int txgbe_probe(struct pci_dev *pdev,
		       const struct pci_device_id __always_unused *ent)
{
	struct net_device *netdev;
	int err, expected_gts;
	struct wx *wx = NULL;
	struct txgbe *txgbe;

	u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
	u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
	u16 build = 0, major = 0, patch = 0;
	u32 etrack_id = 0;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_pci_disable_dev;
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   txgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_disable_dev;
	}

	pci_set_master(pdev);

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct wx),
					 TXGBE_MAX_TX_QUEUES,
					 TXGBE_MAX_RX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_pci_release_regions;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	wx = netdev_priv(netdev);
	wx->netdev = netdev;
	wx->pdev = pdev;

	wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/* map BAR 0 registers */
	wx->hw_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!wx->hw_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	/* The sapphire supports up to 63 VFs per pf, but physical
	 * function also need one pool for basic networking.
	 */
	pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT);
	wx->driver_name = txgbe_driver_name;
	txgbe_set_ethtool_ops(netdev);
	netdev->netdev_ops = &txgbe_netdev_ops;

	/* setup the private structure */
	err = txgbe_sw_init(wx);
	if (err)
		goto err_free_mac_table;

	/* check if flash load is done after hw power up */
	err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST);
	if (err)
		goto err_free_mac_table;
	err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST);
	if (err)
		goto err_free_mac_table;

	err = wx_mng_present(wx);
	if (err) {
		dev_err(&pdev->dev, "Management capability is not present\n");
		goto err_free_mac_table;
	}

	err = txgbe_reset_hw(wx);
	if (err) {
		dev_err(&pdev->dev, "HW Init failed: %d\n", err);
		goto err_free_mac_table;
	}

	netdev->features = NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM |
			   NETIF_F_HW_CSUM;

	netdev->gso_partial_features = NETIF_F_GSO_ENCAP_ALL;
	netdev->features |= netdev->gso_partial_features;
	netdev->features |= NETIF_F_SCTP_CRC;
	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->hw_enc_features |= netdev->vlan_features;
	netdev->features |= NETIF_F_VLAN_FEATURES;
	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features | NETIF_F_RXALL;
	netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
	netdev->features |= NETIF_F_HIGHDMA;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->features |= NETIF_F_GRO;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* make sure the EEPROM is good */
	err = txgbe_validate_eeprom_checksum(wx, NULL);
	if (err != 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		/* issue a software reset before bailing out */
		wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST);
		err = -EIO;
		goto err_free_mac_table;
	}

	eth_hw_addr_set(netdev, wx->mac.perm_addr);
	wx_mac_set_default_filter(wx, wx->mac.perm_addr);

	err = wx_init_interrupt_scheme(wx);
	if (err)
		goto err_free_mac_table;

	/* Save off EEPROM version number and Option Rom version which
	 * together make a unique identifier for the eeprom
	 */
	wx_read_ee_hostif(wx,
			  wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
			  &eeprom_verh);
	wx_read_ee_hostif(wx,
			  wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
			  &eeprom_verl);
	etrack_id = (eeprom_verh << 16) | eeprom_verl;

	wx_read_ee_hostif(wx,
			  wx->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG,
			  &offset);

	/* Make sure offset to iSCSI boot config block is valid */
	if (!(offset == 0x0) && !(offset == 0xffff)) {
		wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh);
		wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl);

		/* Only display Option Rom if exist */
		if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
			major = eeprom_cfg_blkl >> 8;
			build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
			patch = eeprom_cfg_blkh & 0x00ff;

			snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
				 "0x%08x, %d.%d.%d", etrack_id, major, build,
				 patch);
		} else {
			snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
				 "0x%08x", etrack_id);
		}
	} else {
		snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
			 "0x%08x", etrack_id);
	}

	if (etrack_id < 0x20010)
		dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");

	txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL);
	if (!txgbe) {
		err = -ENOMEM;
		goto err_release_hw;
	}

	txgbe->wx = wx;
	wx->priv = txgbe;

	txgbe_init_fdir(txgbe);

	err = txgbe_setup_misc_irq(txgbe);
	if (err)
		goto err_release_hw;

	err = txgbe_init_phy(txgbe);
	if (err)
		goto err_free_misc_irq;

	err = register_netdev(netdev);
	if (err)
		goto err_remove_phy;

	pci_set_drvdata(pdev, wx);

	netif_tx_stop_all_queues(netdev);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure that no warning is displayed, as this could confuse
	 * users otherwise.
	 */
	expected_gts = txgbe_enumerate_functions(wx) * 10;

	/* don't check link if we failed to enumerate functions */
	if (expected_gts > 0)
		txgbe_check_minimum_link(wx);
	else
		dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n");

	return 0;

err_remove_phy:
	txgbe_remove_phy(txgbe);
err_free_misc_irq:
	txgbe_free_misc_irq(txgbe);
err_release_hw:
	wx_clear_interrupt_scheme(wx);
	wx_control_hw(wx, false);
err_free_mac_table:
	kfree(wx->mac_table);
err_pci_release_regions:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
	pci_disable_device(pdev);
	return err;
}

/**
 * txgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * txgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void txgbe_remove(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct txgbe *txgbe = wx->priv;
	struct net_device *netdev;

	netdev = wx->netdev;
	wx_disable_sriov(wx);
	unregister_netdev(netdev);

	txgbe_remove_phy(txgbe);
	txgbe_free_misc_irq(txgbe);
	wx_free_isb_resources(wx);

	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(wx->rss_key);
	kfree(wx->mac_table);
	wx_clear_interrupt_scheme(wx);

	pci_disable_device(pdev);
}

static struct pci_driver txgbe_driver = {
	.name     = txgbe_driver_name,
	.id_table = txgbe_pci_tbl,
	.probe    = txgbe_probe,
	.remove   = txgbe_remove,
	.shutdown = txgbe_shutdown,
	.sriov_configure = wx_pci_sriov_configure,
};

module_pci_driver(txgbe_driver);

MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");