// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/filter.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/netdev_queues.h>
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "ef100.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"
#include "efx_devlink.h"

#include "mcdi_port_common.h"
#include "mcdi_pcol.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
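
/* Example usage (assuming the usual module name, sfc): the parameters
 * above are set at load time, e.g.
 *	modprobe sfc interrupt_mode=1 rss_cpus=4
 * while the moderation defaults above can typically be retuned at
 * runtime through the ethtool coalescing interface, e.g.
 *	ethtool -C <ifname> rx-usecs 60 tx-usecs 150 adaptive-rx on
 */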

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_remove_port(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
			u32 flags);

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

static void efx_fini_port(struct efx_nic *efx);

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	efx->port_initialized = true;

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx_mcdi_port_reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);

static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
	return left->type == right->type &&
		left->vpd_sn && right->vpd_sn &&
		!strcmp(left->vpd_sn, right->vpd_sn);
}
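
/* Two functions are considered to be on the same controller when they
 * share a NIC type and a VPD serial number; efx_associate() below uses
 * this to group a primary function with its secondaries.
 */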

static void efx_associate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &efx_primary_list);

		list_for_each_entry_safe(other, next, &efx_unassociated_list,
					 node) {
			if (efx_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &efx_primary_list, node) {
			if (efx_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &efx_unassociated_list);
	}
}

static void efx_dissociate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &efx_unassociated_list);
	}
}
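
/* Channel dimensioning in efx_probe_nic() below is iterative:
 * efx_probe_interrupts() and efx_set_channels() propose a layout, then
 * the NIC type's dimension_resources() checks it against hardware
 * limits. If the hardware cannot support the layout it may reduce
 * max_channels and return -EAGAIN, in which case the interrupts are
 * torn down and the loop retries with the smaller value.
 */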

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	do {
		if (!efx->max_channels || !efx->max_tx_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "Insufficient resources to allocate"
				  " any channels\n");
			rc = -ENOSPC;
			goto fail1;
		}

		/* Determine the number of channels and queues by trying
		 * to hook in MSI-X interrupts.
		 */
		rc = efx_probe_interrupts(efx);
		if (rc)
			goto fail1;

		rc = efx_set_channels(efx);
		if (rc)
			goto fail1;

		/* dimension_resources can fail with EAGAIN */
		rc = efx->type->dimension_resources(efx);
		if (rc != 0 && rc != -EAGAIN)
			goto fail2;

		if (rc == -EAGAIN)
			/* try again with new max_channels */
			efx_remove_interrupts(efx);

	} while (rc == -EAGAIN);

	if (efx->n_channels > 1)
		netdev_rss_key_fill(efx->rss_context.rx_hash_key,
				    sizeof(efx->rss_context.rx_hash_key));
	efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);

	/* Initialise the interrupt moderation settings */
	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail2:
	efx_remove_interrupts(efx);
fail1:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}

#ifdef CONFIG_SFC_SRIOV
	rc = efx->type->vswitching_probe(efx);
	if (rc) /* not fatal; the PF will still work fine */
		netif_warn(efx, probe, efx->net_dev,
			   "failed to setup vswitching rc=%d;"
			   " VFs may not function\n", rc);
#endif

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail5;

	efx->state = STATE_NET_DOWN;

	return 0;

fail5:
	efx_remove_filters(efx);
fail4:
#ifdef CONFIG_SFC_SRIOV
	efx->type->vswitching_remove(efx);
#endif
fail3:
	efx_remove_port(efx);
fail2:
	efx_remove_nic(efx);
fail1:
	return rc;
}

static void efx_remove_all(struct efx_nic *efx)
{
	rtnl_lock();
	efx_xdp_setup_prog(efx, NULL);
	rtnl_unlock();

	efx_remove_channels(efx);
	efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
	efx->type->vswitching_remove(efx);
#endif
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < efx->timer_quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / efx->timer_quantum_ns;
}
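
/* Worked example (hypothetical quantum): with timer_quantum_ns = 6000,
 * efx_usecs_to_ticks(efx, 60) = 60000 / 6000 = 10 ticks, while a
 * request of 3 usec (3000 ns) is below one quantum and is rounded up
 * to 1 tick rather than truncated to 0, so any non-zero request still
 * moderates.
 */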

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int timer_max_us;

	EFX_ASSERT_RESET_SERIALISED(efx);

	timer_max_us = efx->timer_max_ns / 1000;

	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
		return -EINVAL;

	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation_us = rx_usecs;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation_us = rx_usecs;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation_us = tx_usecs;
		else if (efx_channel_is_xdp_tx(channel))
			channel->irq_moderation_us = tx_usecs;
	}

	return 0;
}
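
/* Example of the shared-channel rule above: with combined RX/TX
 * channels (tx_channel_offset == 0), a request of rx_usecs = 60 with
 * tx_usecs = 150 is rejected with -EINVAL unless the caller sets
 * rx_may_override_tx, because a single interrupt serves both
 * directions and can carry only one moderation value.
 */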

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = efx->irq_rx_moderation_us;

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation. Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0) {
		*tx_usecs = *rx_usecs;
	} else {
		struct efx_channel *tx_channel;

		tx_channel = efx->channel[efx->tx_channel_offset];
		*tx_usecs = tx_channel->irq_moderation_us;
	}
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	if (efx->state == STATE_DISABLED || efx->reset_pending)
		netif_device_detach(efx->net_dev);
	else
		efx->state = STATE_NET_UP;

	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);

	return 0;
}

static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	if (efx->type->vlan_rx_add_vid)
		return efx->type->vlan_rx_add_vid(efx, proto, vid);
	else
		return -EOPNOTSUPP;
}

static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	if (efx->type->vlan_rx_kill_vid)
		return efx->type->vlan_rx_kill_vid(efx, proto, vid);
	else
		return -EOPNOTSUPP;
}

static int efx_hwtstamp_set(struct net_device *net_dev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	return efx_ptp_set_ts_config(efx, config, extack);
}

static int efx_hwtstamp_get(struct net_device *net_dev,
			    struct kernel_hwtstamp_config *config)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	return efx_ptp_get_ts_config(efx, config);
}

static const struct net_device_ops efx_netdev_ops = {
	.ndo_open = efx_net_open,
	.ndo_stop = efx_net_stop,
	.ndo_get_stats64 = efx_net_stats,
	.ndo_tx_timeout = efx_watchdog,
	.ndo_start_xmit = efx_hard_start_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = efx_ioctl,
	.ndo_change_mtu = efx_change_mtu,
	.ndo_set_mac_address = efx_set_mac_address,
	.ndo_set_rx_mode = efx_set_rx_mode,
	.ndo_set_features = efx_set_features,
	.ndo_features_check = efx_features_check,
	.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
	.ndo_hwtstamp_set = efx_hwtstamp_set,
	.ndo_hwtstamp_get = efx_hwtstamp_get,
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac = efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config = efx_sriov_get_vf_config,
	.ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
#endif
	.ndo_get_phys_port_id = efx_get_phys_port_id,
	.ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = efx_filter_rfs,
#endif
	.ndo_xdp_xmit = efx_xdp_xmit,
	.ndo_bpf = efx_xdp
};
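
/* The per-queue statistics below are reported as deltas: when the
 * datapath (re)starts, each counter is snapshotted into its old_*
 * twin, queue stats return counter - old_*, and efx_get_base_stats()
 * folds the old_* snapshots (plus any queues outside the
 * real_num_*_queues ranges) back in, so that base + sum(per-queue)
 * remains consistent across datapath restarts.
 */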

static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx,
				   struct netdev_queue_stats_rx *stats)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	channel = efx_get_channel(efx, idx);
	rx_queue = efx_channel_get_rx_queue(channel);
	/* Count only packets since last time datapath was started */
	stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets;
	stats->bytes = rx_queue->rx_bytes - rx_queue->old_rx_bytes;
	stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) -
			  channel->old_n_rx_hw_drops;
	stats->hw_drop_overruns = channel->n_rx_nodesc_trunc -
				  channel->old_n_rx_hw_drop_overruns;
}

static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx,
				   struct netdev_queue_stats_tx *stats)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;

	channel = efx_get_tx_channel(efx, idx);
	stats->packets = 0;
	stats->bytes = 0;
	stats->hw_gso_packets = 0;
	stats->hw_gso_wire_packets = 0;
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		stats->packets += tx_queue->complete_packets -
				  tx_queue->old_complete_packets;
		stats->bytes += tx_queue->complete_bytes -
				tx_queue->old_complete_bytes;
		/* Note that, unlike stats->packets and stats->bytes,
		 * these count TXes enqueued, rather than completed,
		 * which may not be what users expect.
		 */
		stats->hw_gso_packets += tx_queue->tso_bursts -
					 tx_queue->old_tso_bursts;
		stats->hw_gso_wire_packets += tx_queue->tso_packets -
					      tx_queue->old_tso_packets;
	}
}

static void efx_get_base_stats(struct net_device *net_dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	rx->packets = 0;
	rx->bytes = 0;
	rx->hw_drops = 0;
	rx->hw_drop_overruns = 0;
	tx->packets = 0;
	tx->bytes = 0;
	tx->hw_gso_packets = 0;
	tx->hw_gso_wire_packets = 0;

	/* Count all packets on non-core queues, and packets before last
	 * datapath start on core queues.
	 */
	efx_for_each_channel(channel, efx) {
		rx_queue = efx_channel_get_rx_queue(channel);
		if (channel->channel >= net_dev->real_num_rx_queues) {
			rx->packets += rx_queue->rx_packets;
			rx->bytes += rx_queue->rx_bytes;
			rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel);
			rx->hw_drop_overruns += channel->n_rx_nodesc_trunc;
		} else {
			rx->packets += rx_queue->old_rx_packets;
			rx->bytes += rx_queue->old_rx_bytes;
			rx->hw_drops += channel->old_n_rx_hw_drops;
			rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns;
		}
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			if (channel->channel < efx->tx_channel_offset ||
			    channel->channel >= efx->tx_channel_offset +
						net_dev->real_num_tx_queues) {
				tx->packets += tx_queue->complete_packets;
				tx->bytes += tx_queue->complete_bytes;
				tx->hw_gso_packets += tx_queue->tso_bursts;
				tx->hw_gso_wire_packets += tx_queue->tso_packets;
			} else {
				tx->packets += tx_queue->old_complete_packets;
				tx->bytes += tx_queue->old_complete_bytes;
				tx->hw_gso_packets += tx_queue->old_tso_bursts;
				tx->hw_gso_wire_packets += tx_queue->old_tso_packets;
			}
			/* Include XDP TX in device-wide stats */
			tx->packets += tx_queue->complete_xdp_packets;
			tx->bytes += tx_queue->complete_xdp_bytes;
		}
	}
}

static const struct netdev_stat_ops efx_stat_ops = {
	.get_queue_stats_rx = efx_get_queue_stats_rx,
	.get_queue_stats_tx = efx_get_queue_stats_tx,
	.get_base_stats = efx_get_base_stats,
};
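
/* efx_xdp_setup_prog() below publishes the new program with
 * rcu_assign_pointer(), so concurrent RX processing observes either
 * the old or the new program, never a torn pointer; the old program's
 * reference is then dropped with bpf_prog_put().
 */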

static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;

	if (efx->xdp_rxq_info_failed) {
		netif_err(efx, drv, efx->net_dev,
			  "Unable to bind XDP program due to previous failure of rxq_info\n");
		return -EINVAL;
	}

	if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
		netif_err(efx, drv, efx->net_dev,
			  "Unable to configure XDP with MTU of %d (max: %d)\n",
			  efx->net_dev->mtu, efx_xdp_max_mtu(efx));
		return -EINVAL;
	}

	old_prog = rtnl_dereference(efx->xdp_prog);
	rcu_assign_pointer(efx->xdp_prog, prog);
	/* Release the reference that was originally passed by the caller. */
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

/* Context: process, rtnl_lock() held. */
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct efx_nic *efx = efx_netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return efx_xdp_setup_prog(efx, xdp->prog);
	default:
		return -EINVAL;
	}
}

static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
			u32 flags)
{
	struct efx_nic *efx = efx_netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
}

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if ((net_dev->netdev_ops == &efx_netdev_ops) &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(efx_netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t phy_type_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR_RO(phy_type);
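
/* The phy_type attribute is created on the PCI device by
 * efx_register_netdev() below, so it can be read as, e.g.,
 *	cat /sys/bus/pci/devices/<domain:bus:dev.fn>/phy_type
 * which prints efx->phy_type as a decimal number.
 */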

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	net_dev->stat_ops = &efx_stat_ops;
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		net_dev->priv_flags |= IFF_UNICAST_FLT;
	net_dev->ethtool_ops = &efx_ethtool_ops;
	netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;

	rtnl_lock();

	/* Enable resets to be scheduled and check whether any were
	 * already requested. If so, the NIC is probably hosed so we
	 * abort.
	 */
	if (efx->reset_pending) {
		pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail_locked;
	}

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	efx_associate(efx);

	efx->state = STATE_NET_DOWN;

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	efx_init_mcdi_logging(efx);

	return 0;

fail_registered:
	rtnl_lock();
	efx_dissociate(efx);
	unregister_netdevice(net_dev);
fail_locked:
	efx->state = STATE_UNINIT;
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	if (!efx->net_dev)
		return;

	if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
		return;

	if (efx_dev_registered(efx)) {
		strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		efx_fini_mcdi_logging(efx);
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),	/* SFC9120 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),	/* SFC9120 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),	/* SFC9140 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),	/* SFC9140 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),	/* SFC9220 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),	/* SFC9220 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),	/* SFC9250 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),	/* SFC9250 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03),	/* X4 PF (FF/LL) */
	 .driver_data = (unsigned long)&efx_x4_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03),	/* X4 PF (FF only) */
	 .driver_data = (unsigned long)&efx_x4_nic_type},
	{0}			/* end of list */
};
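
/* This table is exported for module autoloading via
 * MODULE_DEVICE_TABLE(pci, efx_pci_table) at the bottom of this file;
 * each entry's driver_data carries the struct efx_nic_type that
 * efx_pci_probe() binds to the matched device.
 */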

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
	u64 n_rx_nodesc_trunc = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	/* Flush reset_work. It can no longer be scheduled since we
	 * are not READY.
	 */
	WARN_ON(efx_net_active(efx->state));
	efx_flush_reset_workqueue(efx);

	efx_disable_interrupts(efx);
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal). A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_probe_data *probe_data;
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx_dissociate(efx);
	dev_close(efx->net_dev);
	efx_disable_interrupts(efx);
	efx->state = STATE_UNINIT;
	rtnl_unlock();

	if (efx->type->sriov_fini)
		efx->type->sriov_fini(efx);

	efx_fini_devlink_lock(efx);
	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	pci_dbg(efx->pci_dev, "shutdown successful\n");

	efx_fini_devlink_and_unlock(efx);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
	probe_data = container_of(efx, struct efx_probe_data, efx);
	kfree(probe_data);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.
 */
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	unsigned int vpd_size, kw_len;
	u8 *vpd_data;
	int start;

	vpd_data = pci_vpd_alloc(dev, &vpd_size);
	if (IS_ERR(vpd_data)) {
		pci_warn(dev, "Unable to read VPD\n");
		return;
	}

	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					     PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
	if (start < 0)
		pci_err(dev, "Part number not found or incomplete\n");
	else
		pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);

	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					     PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
	if (start < 0)
		pci_err(dev, "Serial number not found or incomplete\n");
	else
		efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);

	kfree(vpd_data);
}
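
/* Example probe output (hypothetical part number): a successful VPD
 * read logs something like "Part Number : SFN8522". The serial number
 * is not logged but is retained in efx->vpd_sn for
 * efx_same_controller() matching.
 */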

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	down_write(&efx->filter_sem);
	rc = efx->type->init(efx);
	up_write(&efx->filter_sem);
	if (rc) {
		pci_err(efx->pci_dev, "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;

	efx_set_interrupt_affinity(efx);
	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail6;

	return 0;

fail6:
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
fail5:
	efx_fini_port(efx);
fail4:
	efx->type->fini(efx);
fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
fail1:
	return rc;
}

static int efx_pci_probe_post_io(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc = efx_pci_probe_main(efx);

	if (rc)
		return rc;

	if (efx->type->sriov_init) {
		rc = efx->type->sriov_init(efx);
		if (rc)
			pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n",
				rc);
	}

	/* Determine netdevice features */
	net_dev->features |= efx->type->offload_features;

	/* Add TSO features */
	if (efx->type->tso_versions && efx->type->tso_versions(efx))
		net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;

	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);

	/* Determine user configurable features */
	net_dev->hw_features |= net_dev->features & ~efx->fixed_features;

	/* Disable receiving frames with bad FCS, by default. */
	net_dev->features &= ~NETIF_F_RXALL;

	/* Disable VLAN filtering by default. It may be enforced if
	 * the feature is fixed (i.e. VLAN filters are required to
	 * receive VLAN tagged packets due to vPort restrictions).
	 */
	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	net_dev->features |= efx->fixed_features;

	net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
				NETDEV_XDP_ACT_REDIRECT |
				NETDEV_XDP_ACT_NDO_XMIT;

	/* devlink creation, registration and lock */
	rc = efx_probe_devlink_and_lock(efx);
	if (rc)
		pci_err(efx->pci_dev, "devlink registration failed");

	rc = efx_register_netdev(efx);
	efx_probe_devlink_unlock(efx);
	if (!rc)
		return 0;

	efx_pci_remove_main(efx);
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *entry)
{
	struct efx_probe_data *probe_data, **probe_ptr;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int rc;

	/* Allocate probe data and struct efx_nic */
	probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
	if (!probe_data)
		return -ENOMEM;
	probe_data->pci_dev = pci_dev;
	efx = &probe_data->efx;

	/* Allocate and initialise a struct net_device */
	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
	if (!net_dev) {
		rc = -ENOMEM;
		goto fail0;
	}
	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;
	efx->net_dev = net_dev;
	efx->type = (const struct efx_nic_type *) entry->driver_data;
	efx->fixed_features |= NETIF_F_HIGHDMA;

	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, pci_dev);
	if (rc)
		goto fail1;
	efx->mdio.dev = net_dev;

	pci_info(pci_dev, "Solarflare NIC detected\n");

	if (!efx->type->is_vf)
		efx_probe_vpd_strings(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
			 efx->type->mem_map_size(efx));
	if (rc)
		goto fail2;

	rc = efx_pci_probe_post_io(efx);
	if (rc) {
		/* On failure, retry once immediately.
		 * If we aborted probe due to a scheduled reset, dismiss it.
		 */
		efx->reset_pending = 0;
		rc = efx_pci_probe_post_io(efx);
		if (rc) {
			/* On another failure, retry once more
			 * after a 50-305ms delay.
			 */
			unsigned char r;

			get_random_bytes(&r, 1);
			msleep((unsigned int)r + 50);
			efx->reset_pending = 0;
			rc = efx_pci_probe_post_io(efx);
		}
	}
	if (rc)
		goto fail3;

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = efx_mtd_probe(efx);
	rtnl_unlock();
	if (rc && rc != -EPERM)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	if (efx->type->udp_tnl_push_ports)
		efx->type->udp_tnl_push_ports(efx);

	return 0;

fail3:
	efx_fini_io(efx);
fail2:
	efx_fini_struct(efx);
fail1:
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
fail0:
	kfree(probe_data);
	return rc;
}
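
/* The PCI core invokes the .sriov_configure hook below when userspace
 * writes to the standard sysfs knob, e.g.
 *	echo 4 > /sys/bus/pci/devices/<domain:bus:dev.fn>/sriov_numvfs
 * and writing 0 disables the VFs again.
 */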

/* efx_pci_sriov_configure returns the actual number of Virtual Functions
 * enabled on success
 */
#ifdef CONFIG_SFC_SRIOV
static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	int rc;
	struct efx_nic *efx = pci_get_drvdata(dev);

	if (efx->type->sriov_configure) {
		rc = efx->type->sriov_configure(efx, num_vfs);
		if (rc)
			return rc;
		else
			return num_vfs;
	} else
		return -EOPNOTSUPP;
}
#endif

static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = dev_get_drvdata(dev);

	rtnl_lock();

	if (efx_net_active(efx->state)) {
		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_disable_interrupts(efx);

		efx->state = efx_freeze(efx->state);
	}

	rtnl_unlock();

	return 0;
}

static void efx_pci_shutdown(struct pci_dev *pci_dev)
{
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	if (!efx)
		return;

	efx_pm_freeze(&pci_dev->dev);
	pci_disable_device(pci_dev);
}

static int efx_pm_thaw(struct device *dev)
{
	int rc;
	struct efx_nic *efx = dev_get_drvdata(dev);

	rtnl_lock();

	if (efx_frozen(efx->state)) {
		rc = efx_enable_interrupts(efx);
		if (rc)
			goto fail;

		mutex_lock(&efx->mac_lock);
		efx_mcdi_port_reconfigure(efx);
		mutex_unlock(&efx->mac_lock);

		efx_start_all(efx);

		efx_device_attach_if_not_resetting(efx);

		efx->state = efx_thaw(efx->state);

		efx->type->resume_wol(efx);
	}

	rtnl_unlock();

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	efx_queue_reset_work(efx);

	return 0;

fail:
	rtnl_unlock();

	return rc;
}

static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	down_write(&efx->filter_sem);
	rc = efx->type->init(efx);
	up_write(&efx->filter_sem);
	if (rc)
		return rc;
	rc = efx_pm_thaw(dev);
	return rc;
}

static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);
	return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
	.suspend = efx_pm_suspend,
	.resume = efx_pm_resume,
	.freeze = efx_pm_freeze,
	.thaw = efx_pm_thaw,
	.poweroff = efx_pm_poweroff,
	.restore = efx_pm_resume,
};
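
/* Mapping of the PM callbacks above: "suspend" is freeze (detach and
 * quiesce the datapath) followed by poweroff (drop to D3hot), while
 * both "resume" and "restore" share efx_pm_resume(), which re-powers
 * the device, performs a full reset and re-init, and then thaws the
 * datapath.
 */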

static struct pci_driver efx_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = efx_pci_table,
	.probe = efx_pci_probe,
	.remove = efx_pci_remove,
	.driver.pm = &efx_pm_ops,
	.shutdown = efx_pci_shutdown,
	.err_handler = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_pci_sriov_configure,
#endif
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	rc = efx_create_reset_workqueue();
	if (rc)
		goto err_reset;

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	rc = pci_register_driver(&ef100_pci_driver);
	if (rc < 0)
		goto err_pci_ef100;

	return 0;

err_pci_ef100:
	pci_unregister_driver(&efx_pci_driver);
err_pci:
	efx_destroy_reset_workqueue();
err_reset:
	unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&ef100_pci_driver);
	pci_unregister_driver(&efx_pci_driver);
	efx_destroy_reset_workqueue();
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);