// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/phylink.h>
#include <net/udp_tunnel.h>
#include <net/ip.h>
#include <linux/if_vlan.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
#include "txgbe_aml.h"
#include "txgbe_irq.h"
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"

char txgbe_driver_name[] = "txgbe";

/* txgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id txgbe_pci_tbl[] = {
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0},
	{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0},
	/* required last entry */
	{ .device = 0 }
};

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

/* Print the negotiated PCIe link status so users can spot a slot that
 * limits the NIC's achievable bandwidth.
 */
static void txgbe_check_minimum_link(struct wx *wx)
{
	struct pci_dev *pdev;

	pdev = wx->pdev;
	pcie_print_link_status(pdev);
}

/**
 * txgbe_enumerate_functions - Get the number of ports this device has
 * @wx: wx structure
 *
 * This function enumerates the physical functions co-located on a single slot,
 * in order to determine how many ports a device has. This is most useful in
 * determining the required GT/s of PCIe bandwidth necessary for optimal
 * performance.
 *
 * Returns the number of matching functions, or -EINVAL when a foreign
 * device shares the bus (e.g. a function direct-attached via VT-d).
 **/
static int txgbe_enumerate_functions(struct wx *wx)
{
	struct pci_dev *entry, *pdev = wx->pdev;
	int physfns = 0;

	list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
		/* When the devices on the bus don't all match our device ID,
		 * we can't reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d.
		 */
		if (entry->vendor != pdev->vendor ||
		    entry->device != pdev->device)
			return -EINVAL;

		physfns++;
	}

	return physfns;
}

/* Service subtask: re-identify a freshly inserted/reset SFF module.
 * Runs only while WX_FLAG_NEED_MODULE_RESET is set; the flag is cleared
 * once txgbe_identify_module() succeeds, otherwise the subtask retries on
 * the next service-task pass.
 */
static void txgbe_module_detection_subtask(struct wx *wx)
{
	int err;

	if (!test_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags))
		return;

	/* wait for SFF module ready */
	msleep(200);

	err = txgbe_identify_module(wx);
	if (err)
		return;

	clear_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
}

/* Service subtask: (re)apply the PHY link configuration when requested.
 * Clears WX_FLAG_NEED_LINK_CONFIG only on success so a failed attempt is
 * retried on the next service-task pass.
 */
static void txgbe_link_config_subtask(struct wx *wx)
{
	int err;

	if (!test_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags))
		return;

	err = txgbe_set_phy_link(wx);
	if (err)
		return;

	clear_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
}

/**
 * txgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void txgbe_service_task(struct work_struct *work)
{
	struct wx *wx = container_of(work, struct wx, service_task);

	txgbe_module_detection_subtask(wx);
	txgbe_link_config_subtask(wx);

	wx_service_event_complete(wx);
}

/* Set up the periodic service timer and its work item; the timer is armed
 * later in txgbe_up_complete().
 */
static void txgbe_init_service(struct wx *wx)
{
	timer_setup(&wx->service_timer, wx_service_timer, 0);
	INIT_WORK(&wx->service_task, txgbe_service_task);
	clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
}

/* Final bring-up steps shared by open and up paths: take hardware control,
 * program interrupt vectors, start NAPI/phylink, enable IRQs and Tx queues,
 * then release the VF mailbox by setting PFRSTD. The ordering here is
 * deliberate - do not reorder.
 */
static void txgbe_up_complete(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;

	wx_control_hw(wx, true);
	wx_configure_vectors(wx);

	/* make sure to complete pre-operations */
	smp_mb__before_atomic();
	wx_napi_enable_all(wx);

	switch (wx->mac.type) {
	case wx_mac_aml40:
		txgbe_setup_link(wx);
		phylink_start(wx->phylink);
		break;
	case wx_mac_aml:
		/* Enable TX laser */
		wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, 0);
		txgbe_setup_link(wx);
		phylink_start(wx->phylink);
		break;
	case wx_mac_sp:
		phylink_start(wx->phylink);
		break;
	default:
		break;
	}

	/* clear any pending interrupts, may auto mask */
	rd32(wx, WX_PX_IC(0));
	rd32(wx, WX_PX_IC(1));
	rd32(wx, WX_PX_MISC_IC);
	txgbe_irq_enable(wx, true);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);
	mod_timer(&wx->service_timer, jiffies);

	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD,
	      WX_CFG_PORT_CTL_PFRSTD);
	/* update setting rx tx for all active vfs */
	wx_set_all_vfs(wx);
}

/* Reset the MAC, restart its base configuration, and restore the default
 * MAC filter and (when running) the PTP state.
 */
static void txgbe_reset(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u8 old_addr[ETH_ALEN];
	int err;

	err = txgbe_reset_hw(wx);
	if (err != 0)
		wx_err(wx, "Hardware Error: %d\n", err);

	wx_start_hw(wx);
	/* do not flush user set addresses */
	memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
	wx_flush_sw_mac_table(wx);
	wx_mac_set_default_filter(wx, old_addr);

	if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
		wx_ptp_reset(wx);
}

/* Quiesce the device: stop Rx, Tx, IRQs, NAPI and the service timer,
 * deactivate any VFs, then flush/disable the Tx DMA engine. Teardown order
 * mirrors (and reverses) txgbe_up_complete().
 */
static void txgbe_disable_device(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 i;

	wx_disable_pcie_master(wx);
	/* disable receives */
	wx_disable_rx(wx);

	/* disable all enabled rx queues */
	for (i = 0; i < wx->num_rx_queues; i++)
		/* this call also flushes the previous write */
		wx_disable_rx_queue(wx, wx->rx_ring[i]);

	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	wx_irq_disable(wx);
	wx_napi_disable_all(wx);

	timer_delete_sync(&wx->service_timer);

	if (wx->bus.func < 2)
		wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
	else
		wx_err(wx, "%s: invalid bus lan id %d\n",
		       __func__, wx->bus.func);

	if (wx->num_vfs) {
		/* Clear EITR Select mapping */
		wr32(wx, WX_PX_ITRSEL, 0);
		/* Mark all the VFs as inactive */
		for (i = 0; i < wx->num_vfs; i++)
			wx->vfinfo[i].clear_to_send = 0;
		/* update setting rx tx for all active vfs */
		wx_set_all_vfs(wx);
	}

	/* Keep the MAC transmitter alive when NCSI or WoL needs it */
	if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
	      ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
		/* disable mac transmitter */
		wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
	}

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < wx->num_tx_queues; i++) {
		u8 reg_idx = wx->tx_ring[i]->reg_idx;

		wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	}

	/* Disable the Tx DMA engine */
	wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0);

	wx_update_stats(wx);
}

/* Take the interface fully down: quiesce hardware, reset the MAC, stop
 * phylink (dropping the Tx laser on aml parts) and drain all rings.
 */
void txgbe_down(struct wx *wx)
{
	txgbe_disable_device(wx);
	txgbe_reset(wx);

	switch (wx->mac.type) {
	case wx_mac_aml40:
		phylink_stop(wx->phylink);
		break;
	case wx_mac_aml:
		phylink_stop(wx->phylink);
		/* Disable TX laser */
		wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, TXGBE_GPIOBIT_1);
		break;
	case wx_mac_sp:
		phylink_stop(wx->phylink);
		break;
	default:
		break;
	}

	wx_clean_all_tx_rings(wx);
	wx_clean_all_rx_rings(wx);
}

/* Bring the interface back up after txgbe_down(): reprogram the hardware,
 * restart PTP and complete the bring-up sequence.
 */
void txgbe_up(struct wx *wx)
{
	wx_configure(wx);
	wx_ptp_init(wx);
	txgbe_up_complete(wx);
}

/**
 * txgbe_init_type_code - Initialize the shared code
 * @wx: pointer to hardware structure
 *
 * Derives the MAC type from the PCI device ID and the media type from the
 * low nibble group of the subsystem device ID.
 **/
static void txgbe_init_type_code(struct wx *wx)
{
	u8 device_type = wx->subsystem_device_id & 0xF0;

	switch (wx->device_id) {
	case TXGBE_DEV_ID_SP1000:
	case TXGBE_DEV_ID_WX1820:
		wx->mac.type = wx_mac_sp;
		break;
	case TXGBE_DEV_ID_AML5010:
	case TXGBE_DEV_ID_AML5110:
	case TXGBE_DEV_ID_AML5025:
	case TXGBE_DEV_ID_AML5125:
		wx->mac.type = wx_mac_aml;
		break;
	case TXGBE_DEV_ID_AML5040:
	case TXGBE_DEV_ID_AML5140:
		wx->mac.type = wx_mac_aml40;
		break;
	default:
		wx->mac.type = wx_mac_unknown;
		break;
	}

	switch (device_type) {
	case TXGBE_ID_SFP:
		wx->media_type = wx_media_fiber;
		break;
	case TXGBE_ID_XAUI:
	case TXGBE_ID_SGMII:
		wx->media_type = wx_media_copper;
		break;
	case TXGBE_ID_KR_KX_KX4:
	case TXGBE_ID_MAC_XAUI:
	case TXGBE_ID_MAC_SGMII:
		wx->media_type = wx_media_backplane;
		break;
	case TXGBE_ID_SFI_XAUI:
		/* dual-media part: function 0 is fiber, function 1 copper */
		if (wx->bus.func == 0)
			wx->media_type = wx_media_fiber;
		else
			wx->media_type = wx_media_copper;
		break;
	default:
		wx->media_type = wx_media_unknown;
		break;
	}
}

/**
 * txgbe_sw_init - Initialize general software structures (struct wx)
 * @wx: board private structure to initialize
 *
 * Fills in device limits, capability flags, default ring/work/ITR settings
 * and the per-MAC-type feature bits. Returns 0 or a negative errno from
 * wx_sw_init().
 **/
static int txgbe_sw_init(struct wx *wx)
{
	u16 msix_count = 0;
	int err;

	wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES;
	wx->mac.max_tx_queues = TXGBE_MAX_TXQ;
	wx->mac.max_rx_queues = TXGBE_MAX_RXQ;
	wx->mac.mcft_size = TXGBE_MC_TBL_SIZE;
	wx->mac.vft_size = TXGBE_VFT_TBL_SIZE;
	wx->mac.rx_pb_size = TXGBE_RX_PB_SIZE;
	wx->mac.tx_pb_size = TXGBE_TDB_PB_SZ;

	/* PCI config space info */
	err = wx_sw_init(wx);
	if (err < 0)
		return err;

	txgbe_init_type_code(wx);

	/* Set common capability flags and settings */
	wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS;
	err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS);
	if (err)
		wx_err(wx, "Do not support MSI-X\n");
	wx->mac.max_msix_vectors = msix_count;

	wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES,
						   num_online_cpus());
	wx->rss_enabled = true;

	/* Flow Director (ATR) defaults */
	wx->ring_feature[RING_F_FDIR].limit = min_t(int, TXGBE_MAX_FDIR_INDICES,
						    num_online_cpus());
	set_bit(WX_FLAG_FDIR_CAPABLE, wx->flags);
	set_bit(WX_FLAG_FDIR_HASH, wx->flags);
	wx->atr_sample_rate = TXGBE_DEFAULT_ATR_SAMPLE_RATE;
	wx->atr = txgbe_atr;
	wx->configure_fdir = txgbe_configure_fdir;

	set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
	set_bit(WX_FLAG_RSC_ENABLED, wx->flags);
	set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);

	/* enable itr by default in dynamic mode */
	wx->adaptive_itr = true;
	wx->rx_itr_setting = 1;
	wx->tx_itr_setting = 1;

	/* set default ring sizes */
	wx->tx_ring_count = TXGBE_DEFAULT_TXD;
	wx->rx_ring_count = TXGBE_DEFAULT_RXD;
	wx->mbx.size = WX_VXMAILBOX_SIZE;

	/* set default work limits */
	wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
	wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;

	wx->setup_tc = txgbe_setup_tc;
	wx->do_reset = txgbe_do_reset;
	set_bit(0, &wx->fwd_bitmask);

	switch (wx->mac.type) {
	case wx_mac_sp:
		break;
	case wx_mac_aml:
	case wx_mac_aml40:
		set_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags);
		set_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags);
		set_bit(WX_FLAG_SWFW_RING, wx->flags);
		wx->swfw_index = 0;
		break;
	default:
		break;
	}

	return 0;
}

/* Initialize Flow Director bookkeeping (filter count and perfect-filter
 * lock) on the txgbe private structure.
 */
static void txgbe_init_fdir(struct txgbe *txgbe)
{
	txgbe->fdir_filter_count = 0;
	spin_lock_init(&txgbe->fdir_perfect_lock);
}

/**
 * txgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).
 **/
static int txgbe_open(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	int err;

	err = wx_setup_resources(wx);
	if (err)
		goto err_reset;

	wx_configure(wx);

	err = txgbe_setup_misc_irq(wx->priv);
	if (err)
		goto err_free_resources;

	err = txgbe_request_queue_irqs(wx);
	if (err)
		goto err_free_misc_irq;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
	if (err)
		goto err_free_irq;

	err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
	if (err)
		goto err_free_irq;

	wx_ptp_init(wx);

	txgbe_up_complete(wx);

	return 0;

err_free_irq:
	wx_free_irq(wx);
err_free_misc_irq:
	txgbe_free_misc_irq(wx->priv);
	wx_reset_interrupt_capability(wx);
err_free_resources:
	wx_free_resources(wx);
err_reset:
	txgbe_reset(wx);

	return err;
}

/**
 * txgbe_close_suspend - actions necessary to both suspend and close flows
 * @wx: the private wx struct
 *
 * This function should contain the necessary work common to both suspending
 * and closing of the device.
 */
static void txgbe_close_suspend(struct wx *wx)
{
	wx_ptp_suspend(wx);
	txgbe_disable_device(wx);
	wx_free_resources(wx);
}

/**
 * txgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int txgbe_close(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	wx_ptp_stop(wx);
	txgbe_down(wx);
	wx_free_irq(wx);
	txgbe_free_misc_irq(wx->priv);
	wx_free_resources(wx);
	txgbe_fdir_filter_exit(wx);
	wx_control_hw(wx, false);

	return 0;
}

/* Common shutdown path: detach the netdev, close it if running, hand the
 * hardware back to firmware and disable the PCI device.
 */
static void txgbe_dev_shutdown(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct net_device *netdev;

	netdev = wx->netdev;
	netif_device_detach(netdev);

	/* rtnl protects the close path against concurrent ndo calls */
	rtnl_lock();
	if (netif_running(netdev))
		txgbe_close_suspend(wx);
	rtnl_unlock();

	wx_control_hw(wx, false);

	pci_disable_device(pdev);
}

/* PCI .shutdown hook: quiesce the device and, on power-off, put it in
 * D3hot with wake disabled.
 */
static void txgbe_shutdown(struct pci_dev *pdev)
{
	txgbe_dev_shutdown(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * txgbe_setup_tc - routine to configure net_device for multiple traffic
 * classes.
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct wx *wx = netdev_priv(dev);

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		txgbe_close(dev);
	else
		txgbe_reset(wx);

	wx_clear_interrupt_scheme(wx);

	if (tc)
		netdev_set_num_tc(dev, tc);
	else
		netdev_reset_tc(dev);

	wx_init_interrupt_scheme(wx);

	if (netif_running(dev))
		txgbe_open(dev);

	return 0;
}

/* Full down/up cycle guarded by the RESETTING state bit so concurrent
 * resets are serialized; bails out if the reset-wait times out.
 */
static void txgbe_reinit_locked(struct wx *wx)
{
	int err = 0;

	netif_trans_update(wx->netdev);

	err = wx_set_state_reset(wx);
	if (err) {
		wx_err(wx, "wait device reset timeout\n");
		return;
	}

	txgbe_down(wx);
	txgbe_up(wx);

	clear_bit(WX_STATE_RESETTING, wx->state);
}

/* libwx do_reset callback: full reinit when the interface is up,
 * bare MAC reset otherwise.
 */
void txgbe_do_reset(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (netif_running(netdev))
		txgbe_reinit_locked(wx);
	else
		txgbe_reset(wx);
}

/* udp_tunnel_nic sync callback: program the single offloaded port for the
 * table's tunnel type (VXLAN / VXLAN-GPE / GENEVE) into hardware.
 */
static int txgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct wx *wx = netdev_priv(dev);
	struct udp_tunnel_info ti;

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	switch (ti.type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		wr32(wx, TXGBE_CFG_VXLAN, ntohs(ti.port));
		break;
	case UDP_TUNNEL_TYPE_VXLAN_GPE:
		wr32(wx, TXGBE_CFG_VXLAN_GPE, ntohs(ti.port));
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		wr32(wx, TXGBE_CFG_GENEVE, ntohs(ti.port));
		break;
	default:
		break;
	}

	return 0;
}

static const struct udp_tunnel_nic_info txgbe_udp_tunnels = {
	.sync_table	= txgbe_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

static const struct net_device_ops txgbe_netdev_ops = {
	.ndo_open		= txgbe_open,
	.ndo_stop		= txgbe_close,
	.ndo_change_mtu		= wx_change_mtu,
	.ndo_start_xmit		= wx_xmit_frame,
	.ndo_set_rx_mode	= wx_set_rx_mode,
	.ndo_set_features	= wx_set_features,
	.ndo_fix_features	= wx_fix_features,
	.ndo_features_check	= wx_features_check,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= wx_set_mac,
	.ndo_get_stats64	= wx_get_stats64,
	.ndo_vlan_rx_add_vid	= wx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= wx_vlan_rx_kill_vid,
	.ndo_hwtstamp_set	= wx_hwtstamp_set,
	.ndo_hwtstamp_get	= wx_hwtstamp_get,
};

/**
 * txgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in txgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * txgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the wx private structure,
 * and a hardware reset occur.
 **/
static int txgbe_probe(struct pci_dev *pdev,
		       const struct pci_device_id __always_unused *ent)
{
	struct net_device *netdev;
	int err, expected_gts;
	struct wx *wx = NULL;
	struct txgbe *txgbe;

	u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
	u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
	u16 build = 0, major = 0, patch = 0;
	u32 etrack_id = 0;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_pci_disable_dev;
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   txgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_disable_dev;
	}

	pci_set_master(pdev);

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct wx),
					 TXGBE_MAX_TX_QUEUES,
					 TXGBE_MAX_RX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_pci_release_regions;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	wx = netdev_priv(netdev);
	wx->netdev = netdev;
	wx->pdev = pdev;

	wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	wx->hw_addr = devm_ioremap(&pdev->dev,
				   pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!wx->hw_addr) {
		err = -EIO;
		goto err_pci_release_regions;
	}

	/* The sapphire supports up to 63 VFs per pf, but physical
	 * function also need one pool for basic networking.
	 */
	pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT);
	wx->driver_name = txgbe_driver_name;
	txgbe_set_ethtool_ops(netdev);
	netdev->netdev_ops = &txgbe_netdev_ops;
	netdev->udp_tunnel_nic_info = &txgbe_udp_tunnels;

	/* setup the private structure */
	err = txgbe_sw_init(wx);
	if (err)
		goto err_pci_release_regions;

	/* check if flash load is done after hw power up */
	err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST);
	if (err)
		goto err_free_mac_table;
	err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST);
	if (err)
		goto err_free_mac_table;

	err = wx_mng_present(wx);
	if (err) {
		dev_err(&pdev->dev, "Management capability is not present\n");
		goto err_free_mac_table;
	}

	err = txgbe_reset_hw(wx);
	if (err) {
		dev_err(&pdev->dev, "HW Init failed: %d\n", err);
		goto err_free_mac_table;
	}

	netdev->features = NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM |
			   NETIF_F_HW_CSUM;

	netdev->gso_partial_features =  NETIF_F_GSO_ENCAP_ALL;
	netdev->features |= netdev->gso_partial_features;
	netdev->features |= NETIF_F_SCTP_CRC;
	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->hw_enc_features |= netdev->vlan_features;
	netdev->features |= NETIF_F_VLAN_FEATURES;
	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features | NETIF_F_RXALL;
	netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
	netdev->features |= NETIF_F_HIGHDMA;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->features |= NETIF_F_GRO;
	netdev->hw_features |= NETIF_F_LRO;
	netdev->features |= NETIF_F_LRO;
	netdev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* make sure the EEPROM is good */
	err = txgbe_validate_eeprom_checksum(wx, NULL);
	if (err != 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST);
		err = -EIO;
		goto err_free_mac_table;
	}

	eth_hw_addr_set(netdev, wx->mac.perm_addr);
	wx_mac_set_default_filter(wx, wx->mac.perm_addr);

	txgbe_init_service(wx);

	err = wx_init_interrupt_scheme(wx);
	if (err)
		goto err_cancel_service;

	/* Save off EEPROM version number and Option Rom version which
	 * together make a unique identify for the eeprom
	 */
	wx_read_ee_hostif(wx,
			  wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
			  &eeprom_verh);
	wx_read_ee_hostif(wx,
			  wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
			  &eeprom_verl);
	etrack_id = (eeprom_verh << 16) | eeprom_verl;

	wx_read_ee_hostif(wx,
			  wx->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG,
			  &offset);

	/* Make sure offset to iSCSI block is valid */
	if (!(offset == 0x0) && !(offset == 0xffff)) {
		wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh);
		wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl);

		/* Only display Option Rom if exist */
		if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
			major = eeprom_cfg_blkl >> 8;
			build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
			patch = eeprom_cfg_blkh & 0x00ff;

			snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
				 "0x%08x, %d.%d.%d", etrack_id, major, build,
				 patch);
		} else {
			snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
				 "0x%08x", etrack_id);
		}
	} else {
		snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
			 "0x%08x", etrack_id);
	}

	if (etrack_id < 0x20010)
		dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");

	err = txgbe_test_hostif(wx);
	if (err != 0) {
		dev_err(&pdev->dev, "Mismatched Firmware version\n");
		err = -EIO;
		goto err_release_hw;
	}

	txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL);
	if (!txgbe) {
		err = -ENOMEM;
		goto err_release_hw;
	}

	txgbe->wx = wx;
	wx->priv = txgbe;

	txgbe_init_fdir(txgbe);

	err = txgbe_init_phy(txgbe);
	if (err)
		goto err_release_hw;

	err = register_netdev(netdev);
	if (err)
		goto err_remove_phy;

	pci_set_drvdata(pdev, wx);

	netif_tx_stop_all_queues(netdev);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure that no warning is displayed, as this could confuse
	 * users otherwise.
	 */
	expected_gts = txgbe_enumerate_functions(wx) * 10;

	/* don't check link if we failed to enumerate functions */
	if (expected_gts > 0)
		txgbe_check_minimum_link(wx);
	else
		dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n");

	return 0;

err_remove_phy:
	txgbe_remove_phy(txgbe);
err_release_hw:
	wx_clear_interrupt_scheme(wx);
	wx_control_hw(wx, false);
err_cancel_service:
	timer_delete_sync(&wx->service_timer);
	cancel_work_sync(&wx->service_task);
err_free_mac_table:
	kfree(wx->rss_key);
	kfree(wx->mac_table);
err_pci_release_regions:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
	pci_disable_device(pdev);
	return err;
}

/**
 * txgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * txgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void txgbe_remove(struct pci_dev *pdev)
{
	struct wx *wx = pci_get_drvdata(pdev);
	struct txgbe *txgbe = wx->priv;
	struct net_device *netdev;

	/* stop the service task before tearing anything down */
	cancel_work_sync(&wx->service_task);

	netdev = wx->netdev;
	wx_disable_sriov(wx);
	unregister_netdev(netdev);

	txgbe_remove_phy(txgbe);
	wx_free_isb_resources(wx);

	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(wx->rss_key);
	kfree(wx->mac_table);
	wx_clear_interrupt_scheme(wx);

	pci_disable_device(pdev);
}

static struct pci_driver txgbe_driver = {
	.name     = txgbe_driver_name,
	.id_table = txgbe_pci_tbl,
	.probe    = txgbe_probe,
	.remove   = txgbe_remove,
	.shutdown = txgbe_shutdown,
	.sriov_configure = wx_pci_sriov_configure,
};

module_pci_driver(txgbe_driver);

MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");