// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/filter.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/netdev_queues.h>
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "ef100.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"
#include "efx_devlink.h"

#include "mcdi_port_common.h"
#include "mcdi_pcol.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
                 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
                 "Use separate channels for TX and RX");

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
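
/* To spell out the arithmetic above: a queue restarts once half of its
 * 1024 descriptors have drained, i.e. 512 descriptors, or 512 / 3
 * packets at the worst case of 3 descriptors per packet. A full-size
 * frame occupies a 10G link for roughly 1.2 usec (the one figure
 * assumed here rather than stated above), so draining takes
 * 512 / 3 * 1.2 ~= 205 usec; the 150 usec default sits below that.
 */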

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                         NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
                         NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
                         NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_remove_port(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
                        u32 flags);

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

static void efx_fini_port(struct efx_nic *efx);

static int efx_probe_port(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, probe, efx->net_dev, "create port\n");

        if (phy_flash_cfg)
                efx->phy_mode = PHY_MODE_SPECIAL;

        /* Connect up MAC/PHY operations table */
        rc = efx->type->probe_port(efx);
        if (rc)
                return rc;

        /* Initialise MAC address to permanent address */
        eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);

        return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, drv, efx->net_dev, "init port\n");

        mutex_lock(&efx->mac_lock);

        efx->port_initialized = true;

        /* Ensure the PHY advertises the correct flow control settings */
        rc = efx_mcdi_port_reconfigure(efx);
        if (rc && rc != -EPERM)
                goto fail;

        mutex_unlock(&efx->mac_lock);
        return 0;

fail:
        mutex_unlock(&efx->mac_lock);
        return rc;
}

static void efx_fini_port(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

        if (!efx->port_initialized)
                return;

        efx->port_initialized = false;

        efx->link_state.up = false;
        efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

        efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);

static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
        return left->type == right->type &&
               left->vpd_sn && right->vpd_sn &&
               !strcmp(left->vpd_sn, right->vpd_sn);
}
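
/* Functions of the same physical controller share a VPD serial number
 * (matched by efx_same_controller() above). The first function probed
 * becomes the "primary"; functions probed later join its
 * secondary_list, and any function whose primary has not appeared yet
 * waits on efx_unassociated_list until it does.
 */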
static void efx_associate(struct efx_nic *efx)
{
        struct efx_nic *other, *next;

        if (efx->primary == efx) {
                /* Adding primary function; look for secondaries */

                netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
                list_add_tail(&efx->node, &efx_primary_list);

                list_for_each_entry_safe(other, next, &efx_unassociated_list,
                                         node) {
                        if (efx_same_controller(efx, other)) {
                                list_del(&other->node);
                                netif_dbg(other, probe, other->net_dev,
                                          "moving to secondary list of %s %s\n",
                                          pci_name(efx->pci_dev),
                                          efx->net_dev->name);
                                list_add_tail(&other->node,
                                              &efx->secondary_list);
                                other->primary = efx;
                        }
                }
        } else {
                /* Adding secondary function; look for primary */

                list_for_each_entry(other, &efx_primary_list, node) {
                        if (efx_same_controller(efx, other)) {
                                netif_dbg(efx, probe, efx->net_dev,
                                          "adding to secondary list of %s %s\n",
                                          pci_name(other->pci_dev),
                                          other->net_dev->name);
                                list_add_tail(&efx->node,
                                              &other->secondary_list);
                                efx->primary = other;
                                return;
                        }
                }

                netif_dbg(efx, probe, efx->net_dev,
                          "adding to unassociated list\n");
                list_add_tail(&efx->node, &efx_unassociated_list);
        }
}

static void efx_dissociate(struct efx_nic *efx)
{
        struct efx_nic *other, *next;

        list_del(&efx->node);
        efx->primary = NULL;

        list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
                list_del(&other->node);
                netif_dbg(other, probe, other->net_dev,
                          "moving to unassociated list\n");
                list_add_tail(&other->node, &efx_unassociated_list);
        }
}
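
/* Hardware-type probe followed by interrupt and channel setup.
 * dimension_resources() may return -EAGAIN after lowering
 * efx->max_channels, in which case the interrupt allocation is torn
 * down and the loop below retries against the new limit.
 */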
static int efx_probe_nic(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

        /* Carry out hardware-type specific initialisation */
        rc = efx->type->probe(efx);
        if (rc)
                return rc;

        do {
                if (!efx->max_channels || !efx->max_tx_channels) {
                        netif_err(efx, drv, efx->net_dev,
                                  "Insufficient resources to allocate"
                                  " any channels\n");
                        rc = -ENOSPC;
                        goto fail1;
                }

                /* Determine the number of channels and queues by trying
                 * to hook in MSI-X interrupts.
                 */
                rc = efx_probe_interrupts(efx);
                if (rc)
                        goto fail1;

                rc = efx_set_channels(efx);
                if (rc)
                        goto fail1;

                /* dimension_resources can fail with EAGAIN */
                rc = efx->type->dimension_resources(efx);
                if (rc != 0 && rc != -EAGAIN)
                        goto fail2;

                if (rc == -EAGAIN)
                        /* try again with new max_channels */
                        efx_remove_interrupts(efx);

        } while (rc == -EAGAIN);

        if (efx->n_channels > 1)
                netdev_rss_key_fill(efx->rss_context.rx_hash_key,
                                    sizeof(efx->rss_context.rx_hash_key));
        efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);

        /* Initialise the interrupt moderation settings */
        efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
                                true);

        return 0;

fail2:
        efx_remove_interrupts(efx);
fail1:
        efx->type->remove(efx);
        return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

        efx_remove_interrupts(efx);
        efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
        int rc;

        rc = efx_probe_nic(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
                goto fail1;
        }

        rc = efx_probe_port(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev, "failed to create port\n");
                goto fail2;
        }

        BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
        if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
                rc = -EINVAL;
                goto fail3;
        }

#ifdef CONFIG_SFC_SRIOV
        rc = efx->type->vswitching_probe(efx);
        if (rc) /* not fatal; the PF will still work fine */
                netif_warn(efx, probe, efx->net_dev,
                           "failed to setup vswitching rc=%d;"
                           " VFs may not function\n", rc);
#endif

        rc = efx_probe_filters(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to create filter tables\n");
                goto fail4;
        }

        rc = efx_probe_channels(efx);
        if (rc)
                goto fail5;

        efx->state = STATE_NET_DOWN;

        return 0;

fail5:
        efx_remove_filters(efx);
fail4:
#ifdef CONFIG_SFC_SRIOV
        efx->type->vswitching_remove(efx);
#endif
fail3:
        efx_remove_port(efx);
fail2:
        efx_remove_nic(efx);
fail1:
        return rc;
}

static void efx_remove_all(struct efx_nic *efx)
{
        rtnl_lock();
        efx_xdp_setup_prog(efx, NULL);
        rtnl_unlock();

        efx_remove_channels(efx);
        efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
        efx->type->vswitching_remove(efx);
#endif
        efx_remove_port(efx);
        efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{
        if (usecs == 0)
                return 0;
        if (usecs * 1000 < efx->timer_quantum_ns)
                return 1; /* never round down to 0 */
        return usecs * 1000 / efx->timer_quantum_ns;
}

unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
{
        /* We must round up when converting ticks to microseconds
         * because we round down when converting the other way.
         */
        return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
}
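
/* Worked example of the pair above (timer_quantum_ns == 6144 is
 * assumed purely for illustration; the real value is hardware
 * dependent): efx_ticks_to_usecs(efx, 9) = DIV_ROUND_UP(9 * 6144, 1000)
 * = 56, and efx_usecs_to_ticks(efx, 56) = 56000 / 6144 = 9, so the
 * round trip is lossless. Rounding down instead would give 55 usec,
 * which converts back to only 8 ticks.
 */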

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                            unsigned int rx_usecs, bool rx_adaptive,
                            bool rx_may_override_tx)
{
        struct efx_channel *channel;
        unsigned int timer_max_us;

        EFX_ASSERT_RESET_SERIALISED(efx);

        timer_max_us = efx->timer_max_ns / 1000;

        if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
                return -EINVAL;

        if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
            !rx_may_override_tx) {
                netif_err(efx, drv, efx->net_dev, "Channels are shared. "
                          "RX and TX IRQ moderation must be equal\n");
                return -EINVAL;
        }

        efx->irq_rx_adaptive = rx_adaptive;
        efx->irq_rx_moderation_us = rx_usecs;
        efx_for_each_channel(channel, efx) {
                if (efx_channel_has_rx_queue(channel))
                        channel->irq_moderation_us = rx_usecs;
                else if (efx_channel_has_tx_queues(channel))
                        channel->irq_moderation_us = tx_usecs;
                else if (efx_channel_is_xdp_tx(channel))
                        channel->irq_moderation_us = tx_usecs;
        }

        return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive)
{
        *rx_adaptive = efx->irq_rx_adaptive;
        *rx_usecs = efx->irq_rx_moderation_us;

        /* If channels are shared between RX and TX, so is IRQ
         * moderation. Otherwise, IRQ moderation is the same for all
         * TX channels and is not adaptive.
         */
        if (efx->tx_channel_offset == 0) {
                *tx_usecs = *rx_usecs;
        } else {
                struct efx_channel *tx_channel;

                tx_channel = efx->channel[efx->tx_channel_offset];
                *tx_usecs = tx_channel->irq_moderation_us;
        }
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        struct mii_ioctl_data *data = if_mii(ifr);

        /* Convert phy_id from older PRTAD/DEVAD format */
        if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
            (data->phy_id & 0xfc00) == 0x0400)
                data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

        return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
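
/* Illustration of the phy_id conversion above (value hypothetical):
 * an old-format phy_id of 0x04a5 satisfies (phy_id & 0xfc00) == 0x0400,
 * so it becomes 0x04a5 ^ (MDIO_PHY_ID_C45 | 0x0400) == 0x80a5; bit 10
 * is cleared, the clause-45 flag is set, and the PRTAD/DEVAD bits are
 * preserved (this assumes MDIO_PHY_ID_C45 == 0x8000, per linux/mdio.h).
 */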

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        int rc;

        netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
                  raw_smp_processor_id());

        rc = efx_check_disabled(efx);
        if (rc)
                return rc;
        if (efx->phy_mode & PHY_MODE_SPECIAL)
                return -EBUSY;
        if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
                return -EIO;

        /* Notify the kernel of the link state polled during driver load,
         * before the monitor starts running
         */
        efx_link_status_changed(efx);

        efx_start_all(efx);
        if (efx->state == STATE_DISABLED || efx->reset_pending)
                netif_device_detach(efx->net_dev);
        else
                efx->state = STATE_NET_UP;

        return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be void.
 */
int efx_net_stop(struct net_device *net_dev)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
                  raw_smp_processor_id());

        /* Stop the device and flush all the channels */
        efx_stop_all(efx);

        return 0;
}

static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        if (efx->type->vlan_rx_add_vid)
                return efx->type->vlan_rx_add_vid(efx, proto, vid);
        else
                return -EOPNOTSUPP;
}

static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        if (efx->type->vlan_rx_kill_vid)
                return efx->type->vlan_rx_kill_vid(efx, proto, vid);
        else
                return -EOPNOTSUPP;
}

static int efx_hwtstamp_set(struct net_device *net_dev,
                            struct kernel_hwtstamp_config *config,
                            struct netlink_ext_ack *extack)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        return efx_ptp_set_ts_config(efx, config, extack);
}

static int efx_hwtstamp_get(struct net_device *net_dev,
                            struct kernel_hwtstamp_config *config)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        return efx_ptp_get_ts_config(efx, config);
}

static const struct net_device_ops efx_netdev_ops = {
        .ndo_open               = efx_net_open,
        .ndo_stop               = efx_net_stop,
        .ndo_get_stats64        = efx_net_stats,
        .ndo_tx_timeout         = efx_watchdog,
        .ndo_start_xmit         = efx_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_eth_ioctl          = efx_ioctl,
        .ndo_change_mtu         = efx_change_mtu,
        .ndo_set_mac_address    = efx_set_mac_address,
        .ndo_set_rx_mode        = efx_set_rx_mode,
        .ndo_set_features       = efx_set_features,
        .ndo_features_check     = efx_features_check,
        .ndo_vlan_rx_add_vid    = efx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = efx_vlan_rx_kill_vid,
        .ndo_hwtstamp_set       = efx_hwtstamp_set,
        .ndo_hwtstamp_get       = efx_hwtstamp_get,
#ifdef CONFIG_SFC_SRIOV
        .ndo_set_vf_mac         = efx_sriov_set_vf_mac,
        .ndo_set_vf_vlan        = efx_sriov_set_vf_vlan,
        .ndo_set_vf_spoofchk    = efx_sriov_set_vf_spoofchk,
        .ndo_get_vf_config      = efx_sriov_get_vf_config,
        .ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
#endif
        .ndo_get_phys_port_id   = efx_get_phys_port_id,
        .ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
#endif
        .ndo_xdp_xmit           = efx_xdp_xmit,
        .ndo_bpf                = efx_xdp
};
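
/* Per-queue statistics for the netdev qstat API. The counters used
 * below are monotonic; the old_* fields are snapshots taken the last
 * time the datapath was started, so each callback reports only the
 * traffic seen since then, and efx_get_base_stats() below accounts
 * for the remainder.
 */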
static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx,
                                   struct netdev_queue_stats_rx *stats)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        struct efx_rx_queue *rx_queue;
        struct efx_channel *channel;

        channel = efx_get_channel(efx, idx);
        rx_queue = efx_channel_get_rx_queue(channel);
        /* Count only packets since last time datapath was started */
        stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets;
        stats->bytes = rx_queue->rx_bytes - rx_queue->old_rx_bytes;
        stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) -
                          channel->old_n_rx_hw_drops;
        stats->hw_drop_overruns = channel->n_rx_nodesc_trunc -
                                  channel->old_n_rx_hw_drop_overruns;
}

static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx,
                                   struct netdev_queue_stats_tx *stats)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        struct efx_channel *channel;

        channel = efx_get_tx_channel(efx, idx);
        stats->packets = 0;
        stats->bytes = 0;
        stats->hw_gso_packets = 0;
        stats->hw_gso_wire_packets = 0;
        efx_for_each_channel_tx_queue(tx_queue, channel) {
                stats->packets += tx_queue->complete_packets -
                                  tx_queue->old_complete_packets;
                stats->bytes += tx_queue->complete_bytes -
                                tx_queue->old_complete_bytes;
                /* Note that, unlike stats->packets and stats->bytes,
                 * these count TXes enqueued, rather than completed,
                 * which may not be what users expect.
                 */
                stats->hw_gso_packets += tx_queue->tso_bursts -
                                         tx_queue->old_tso_bursts;
                stats->hw_gso_wire_packets += tx_queue->tso_packets -
                                              tx_queue->old_tso_packets;
        }
}

static void efx_get_base_stats(struct net_device *net_dev,
                               struct netdev_queue_stats_rx *rx,
                               struct netdev_queue_stats_tx *tx)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        struct efx_channel *channel;

        rx->packets = 0;
        rx->bytes = 0;
        rx->hw_drops = 0;
        rx->hw_drop_overruns = 0;
        tx->packets = 0;
        tx->bytes = 0;
        tx->hw_gso_packets = 0;
        tx->hw_gso_wire_packets = 0;

        /* Count all packets on non-core queues, and packets before last
         * datapath start on core queues.
         */
        efx_for_each_channel(channel, efx) {
                rx_queue = efx_channel_get_rx_queue(channel);
                if (channel->channel >= net_dev->real_num_rx_queues) {
                        rx->packets += rx_queue->rx_packets;
                        rx->bytes += rx_queue->rx_bytes;
                        rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel);
                        rx->hw_drop_overruns += channel->n_rx_nodesc_trunc;
                } else {
                        rx->packets += rx_queue->old_rx_packets;
                        rx->bytes += rx_queue->old_rx_bytes;
                        rx->hw_drops += channel->old_n_rx_hw_drops;
                        rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns;
                }
                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        if (channel->channel < efx->tx_channel_offset ||
                            channel->channel >= efx->tx_channel_offset +
                                                net_dev->real_num_tx_queues) {
                                tx->packets += tx_queue->complete_packets;
                                tx->bytes += tx_queue->complete_bytes;
                                tx->hw_gso_packets += tx_queue->tso_bursts;
                                tx->hw_gso_wire_packets += tx_queue->tso_packets;
                        } else {
                                tx->packets += tx_queue->old_complete_packets;
                                tx->bytes += tx_queue->old_complete_bytes;
                                tx->hw_gso_packets += tx_queue->old_tso_bursts;
                                tx->hw_gso_wire_packets += tx_queue->old_tso_packets;
                        }
                        /* Include XDP TX in device-wide stats */
                        tx->packets += tx_queue->complete_xdp_packets;
                        tx->bytes += tx_queue->complete_xdp_bytes;
                }
        }
}

static const struct netdev_stat_ops efx_stat_ops = {
        .get_queue_stats_rx     = efx_get_queue_stats_rx,
        .get_queue_stats_tx     = efx_get_queue_stats_tx,
        .get_base_stats         = efx_get_base_stats,
};
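
/* The netdev core adds get_base_stats() to the sum of the per-queue
 * callbacks to produce device totals, so the split above must be
 * complete and non-overlapping: per-queue callbacks report traffic
 * since the last datapath start on core queues, and the base stats
 * carry everything else.
 */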

static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
{
        struct bpf_prog *old_prog;

        if (efx->xdp_rxq_info_failed) {
                netif_err(efx, drv, efx->net_dev,
                          "Unable to bind XDP program due to previous failure of rxq_info\n");
                return -EINVAL;
        }

        if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
                netif_err(efx, drv, efx->net_dev,
                          "Unable to configure XDP with MTU of %d (max: %d)\n",
                          efx->net_dev->mtu, efx_xdp_max_mtu(efx));
                return -EINVAL;
        }

        old_prog = rtnl_dereference(efx->xdp_prog);
        rcu_assign_pointer(efx->xdp_prog, prog);
        /* Release the reference that was originally passed by the caller. */
        if (old_prog)
                bpf_prog_put(old_prog);

        return 0;
}
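
/* efx_xdp_max_mtu() (see rx_common) gives the largest MTU for which a
 * whole frame plus the XDP headroom and tailroom still fits in a
 * single RX buffer; attaching a program beyond that is refused above
 * rather than presenting XDP with fragmented packets.
 */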

/* Context: process, rtnl_lock() held. */
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct efx_nic *efx = efx_netdev_priv(dev);

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return efx_xdp_setup_prog(efx, xdp->prog);
        default:
                return -EINVAL;
        }
}

static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
                        u32 flags)
{
        struct efx_nic *efx = efx_netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
}

static void efx_update_name(struct efx_nic *efx)
{
        strcpy(efx->name, efx->net_dev->name);
        efx_mtd_rename(efx);
        efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
                            unsigned long event, void *ptr)
{
        struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

        if ((net_dev->netdev_ops == &efx_netdev_ops) &&
            event == NETDEV_CHANGENAME)
                efx_update_name(efx_netdev_priv(net_dev));

        return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
        .notifier_call = efx_netdev_event,
};
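
/* One module-wide notifier sees every netdev event in the system; a
 * rename (NETDEV_CHANGENAME) of a device recognised as ours by its
 * netdev_ops pointer is propagated through efx_update_name() to
 * efx->name, the MTD partition names and the channel names.
 */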

static ssize_t phy_type_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct efx_nic *efx = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR_RO(phy_type);

static int efx_register_netdev(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        struct efx_channel *channel;
        int rc;

        net_dev->watchdog_timeo = 5 * HZ;
        net_dev->irq = efx->pci_dev->irq;
        net_dev->netdev_ops = &efx_netdev_ops;
        net_dev->stat_ops = &efx_stat_ops;
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                net_dev->priv_flags |= IFF_UNICAST_FLT;
        net_dev->ethtool_ops = &efx_ethtool_ops;
        netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
        net_dev->min_mtu = EFX_MIN_MTU;
        net_dev->max_mtu = EFX_MAX_MTU;

        rtnl_lock();

        /* Enable resets to be scheduled and check whether any were
         * already requested. If so, the NIC is probably hosed so we
         * abort.
         */
        if (efx->reset_pending) {
                pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
                rc = -EIO;
                goto fail_locked;
        }

        rc = dev_alloc_name(net_dev, net_dev->name);
        if (rc < 0)
                goto fail_locked;
        efx_update_name(efx);

        /* Always start with carrier off; PHY events will detect the link */
        netif_carrier_off(net_dev);

        rc = register_netdevice(net_dev);
        if (rc)
                goto fail_locked;

        efx_for_each_channel(channel, efx) {
                struct efx_tx_queue *tx_queue;

                efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_init_tx_queue_core_txq(tx_queue);
        }

        efx_associate(efx);

        efx->state = STATE_NET_DOWN;

        rtnl_unlock();

        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
        if (rc) {
                netif_err(efx, drv, efx->net_dev,
                          "failed to init net dev attributes\n");
                goto fail_registered;
        }

        efx_init_mcdi_logging(efx);

        return 0;

fail_registered:
        rtnl_lock();
        efx_dissociate(efx);
        unregister_netdevice(net_dev);
fail_locked:
        efx->state = STATE_UNINIT;
        rtnl_unlock();
        netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
        return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
        if (!efx->net_dev)
                return;

        if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
                return;

        if (efx_dev_registered(efx)) {
                strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
                efx_fini_mcdi_logging(efx);
                device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
                unregister_netdev(efx->net_dev);
        }
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),  /* SFC9140 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),  /* SFC9220 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),  /* SFC9250 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),  /* SFC9250 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03),  /* X4 PF (FF/LL) */
         .driver_data = (unsigned long)&efx_x4_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03),  /* X4 PF (FF only) */
         .driver_data = (unsigned long)&efx_x4_nic_type},
        {0}                     /* end of list */
};
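
/* Each entry above carries its efx_nic_type in driver_data;
 * efx_pci_probe() casts it back to pick the hardware-specific
 * operations table for the device being bound.
 */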

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
        u64 n_rx_nodesc_trunc = 0;
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
        stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
        stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
        /* Flush reset_work. It can no longer be scheduled since we
         * are not READY.
         */
        WARN_ON(efx_net_active(efx->state));
        efx_flush_reset_workqueue(efx);

        efx_disable_interrupts(efx);
        efx_clear_interrupt_affinity(efx);
        efx_nic_fini_interrupt(efx);
        efx_fini_port(efx);
        efx->type->fini(efx);
        efx_fini_napi(efx);
        efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal). A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
        struct efx_probe_data *probe_data;
        struct efx_nic *efx;

        efx = pci_get_drvdata(pci_dev);
        if (!efx)
                return;

        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
        efx_dissociate(efx);
        dev_close(efx->net_dev);
        efx_disable_interrupts(efx);
        efx->state = STATE_UNINIT;
        rtnl_unlock();

        if (efx->type->sriov_fini)
                efx->type->sriov_fini(efx);

        efx_fini_devlink_lock(efx);
        efx_unregister_netdev(efx);

        efx_mtd_remove(efx);

        efx_pci_remove_main(efx);

        efx_fini_io(efx);
        pci_dbg(efx->pci_dev, "shutdown successful\n");

        efx_fini_devlink_and_unlock(efx);
        efx_fini_struct(efx);
        free_netdev(efx->net_dev);
        probe_data = container_of(efx, struct efx_probe_data, efx);
        kfree(probe_data);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.
 */
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
        struct pci_dev *dev = efx->pci_dev;
        unsigned int vpd_size, kw_len;
        u8 *vpd_data;
        int start;

        vpd_data = pci_vpd_alloc(dev, &vpd_size);
        if (IS_ERR(vpd_data)) {
                pci_warn(dev, "Unable to read VPD\n");
                return;
        }

        start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
                                             PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
        if (start < 0)
                pci_err(dev, "Part number not found or incomplete\n");
        else
                pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);

        start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
                                             PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
        if (start < 0)
                pci_err(dev, "Serial number not found or incomplete\n");
        else
                efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);

        kfree(vpd_data);
}
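
/* The part number is only logged; the serial number is kept in
 * efx->vpd_sn, which efx_same_controller() uses later to associate
 * functions of the same adapter.
 */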

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
        int rc;

        /* Do start-of-day initialisation */
        rc = efx_probe_all(efx);
        if (rc)
                goto fail1;

        efx_init_napi(efx);

        down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
        up_write(&efx->filter_sem);
        if (rc) {
                pci_err(efx->pci_dev, "failed to initialise NIC\n");
                goto fail3;
        }

        rc = efx_init_port(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise port\n");
                goto fail4;
        }

        rc = efx_nic_init_interrupt(efx);
        if (rc)
                goto fail5;

        efx_set_interrupt_affinity(efx);
        rc = efx_enable_interrupts(efx);
        if (rc)
                goto fail6;

        return 0;

fail6:
        efx_clear_interrupt_affinity(efx);
        efx_nic_fini_interrupt(efx);
fail5:
        efx_fini_port(efx);
fail4:
        efx->type->fini(efx);
fail3:
        efx_fini_napi(efx);
        efx_remove_all(efx);
fail1:
        return rc;
}

static int efx_pci_probe_post_io(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        int rc = efx_pci_probe_main(efx);

        if (rc)
                return rc;

        if (efx->type->sriov_init) {
                rc = efx->type->sriov_init(efx);
                if (rc)
                        pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n",
                                rc);
        }

        /* Determine netdevice features */
        net_dev->features |= efx->type->offload_features;

        /* Add TSO features */
        if (efx->type->tso_versions && efx->type->tso_versions(efx))
                net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;

        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
                                   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
                                   NETIF_F_RXCSUM);

        /* Determine user configurable features */
        net_dev->hw_features |= net_dev->features & ~efx->fixed_features;

        /* Disable receiving frames with bad FCS, by default. */
        net_dev->features &= ~NETIF_F_RXALL;

        /* Disable VLAN filtering by default. It may be enforced if
         * the feature is fixed (i.e. VLAN filters are required to
         * receive VLAN tagged packets due to vPort restrictions).
         */
        net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
        net_dev->features |= efx->fixed_features;

        net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
                                NETDEV_XDP_ACT_REDIRECT |
                                NETDEV_XDP_ACT_NDO_XMIT;

        /* devlink creation, registration and lock */
        rc = efx_probe_devlink_and_lock(efx);
        if (rc)
                pci_err(efx->pci_dev, "devlink registration failed");

        rc = efx_register_netdev(efx);
        efx_probe_devlink_unlock(efx);
        if (!rc)
                return 0;

        efx_pci_remove_main(efx);
        return rc;
}
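
/* Everything that follows BAR mapping lives in efx_pci_probe_post_io()
 * so that efx_pci_probe() below can retry the whole post-I/O stage
 * wholesale when a stray scheduled reset aborts the first attempt.
 */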

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
                         const struct pci_device_id *entry)
{
        struct efx_probe_data *probe_data, **probe_ptr;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int rc;

        /* Allocate probe data and struct efx_nic */
        probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
        if (!probe_data)
                return -ENOMEM;
        probe_data->pci_dev = pci_dev;
        efx = &probe_data->efx;

        /* Allocate and initialise a struct net_device */
        net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
        if (!net_dev) {
                rc = -ENOMEM;
                goto fail0;
        }
        probe_ptr = netdev_priv(net_dev);
        *probe_ptr = probe_data;
        efx->net_dev = net_dev;
        efx->type = (const struct efx_nic_type *) entry->driver_data;
        efx->fixed_features |= NETIF_F_HIGHDMA;

        pci_set_drvdata(pci_dev, efx);
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
        rc = efx_init_struct(efx, pci_dev);
        if (rc)
                goto fail1;
        efx->mdio.dev = net_dev;

        pci_info(pci_dev, "Solarflare NIC detected\n");

        if (!efx->type->is_vf)
                efx_probe_vpd_strings(efx);

        /* Set up basic I/O (BAR mappings etc) */
        rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
                         efx->type->mem_map_size(efx));
        if (rc)
                goto fail2;

        rc = efx_pci_probe_post_io(efx);
        if (rc) {
                /* On failure, retry once immediately.
                 * If we aborted probe due to a scheduled reset, dismiss it.
                 */
                efx->reset_pending = 0;
                rc = efx_pci_probe_post_io(efx);
                if (rc) {
                        /* On another failure, retry once more
                         * after a 50-305ms delay.
                         */
                        unsigned char r;

                        get_random_bytes(&r, 1);
                        msleep((unsigned int)r + 50);
                        efx->reset_pending = 0;
                        rc = efx_pci_probe_post_io(efx);
                }
        }
        if (rc)
                goto fail3;

        netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

        /* Try to create MTDs, but allow this to fail */
        rtnl_lock();
        rc = efx_mtd_probe(efx);
        rtnl_unlock();
        if (rc && rc != -EPERM)
                netif_warn(efx, probe, efx->net_dev,
                           "failed to create MTDs (%d)\n", rc);

        if (efx->type->udp_tnl_push_ports)
                efx->type->udp_tnl_push_ports(efx);

        return 0;

fail3:
        efx_fini_io(efx);
fail2:
        efx_fini_struct(efx);
fail1:
        WARN_ON(rc > 0);
        netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
fail0:
        kfree(probe_data);
        return rc;
}

/* efx_pci_sriov_configure returns the actual number of Virtual Functions
 * enabled on success
 */
#ifdef CONFIG_SFC_SRIOV
static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
        int rc;
        struct efx_nic *efx = pci_get_drvdata(dev);

        if (efx->type->sriov_configure) {
                rc = efx->type->sriov_configure(efx, num_vfs);
                if (rc)
                        return rc;
                else
                        return num_vfs;
        } else
                return -EOPNOTSUPP;
}
#endif
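
/* Power management: freeze/thaw quiesce and restart the datapath
 * around a hibernation image write, suspend/poweroff additionally drop
 * the device to D3hot, and resume doubles as restore since both paths
 * need a full reset and re-init (see efx_pm_ops below).
 */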

static int efx_pm_freeze(struct device *dev)
{
        struct efx_nic *efx = dev_get_drvdata(dev);

        rtnl_lock();

        if (efx_net_active(efx->state)) {
                efx_device_detach_sync(efx);

                efx_stop_all(efx);
                efx_disable_interrupts(efx);

                efx->state = efx_freeze(efx->state);
        }

        rtnl_unlock();

        return 0;
}

static void efx_pci_shutdown(struct pci_dev *pci_dev)
{
        struct efx_nic *efx = pci_get_drvdata(pci_dev);

        if (!efx)
                return;

        efx_pm_freeze(&pci_dev->dev);
        pci_disable_device(pci_dev);
}

static int efx_pm_thaw(struct device *dev)
{
        int rc;
        struct efx_nic *efx = dev_get_drvdata(dev);

        rtnl_lock();

        if (efx_frozen(efx->state)) {
                rc = efx_enable_interrupts(efx);
                if (rc)
                        goto fail;

                mutex_lock(&efx->mac_lock);
                efx_mcdi_port_reconfigure(efx);
                mutex_unlock(&efx->mac_lock);

                efx_start_all(efx);

                efx_device_attach_if_not_resetting(efx);

                efx->state = efx_thaw(efx->state);

                efx->type->resume_wol(efx);
        }

        rtnl_unlock();

        /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
        efx_queue_reset_work(efx);

        return 0;

fail:
        rtnl_unlock();

        return rc;
}

static int efx_pm_poweroff(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct efx_nic *efx = pci_get_drvdata(pci_dev);

        efx->type->fini(efx);

        efx->reset_pending = 0;

        pci_save_state(pci_dev);
        return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct efx_nic *efx = pci_get_drvdata(pci_dev);
        int rc;

        rc = pci_set_power_state(pci_dev, PCI_D0);
        if (rc)
                return rc;
        pci_restore_state(pci_dev);
        rc = pci_enable_device(pci_dev);
        if (rc)
                return rc;
        pci_set_master(efx->pci_dev);
        rc = efx->type->reset(efx, RESET_TYPE_ALL);
        if (rc)
                return rc;
        down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
        up_write(&efx->filter_sem);
        if (rc)
                return rc;
        rc = efx_pm_thaw(dev);
        return rc;
}

static int efx_pm_suspend(struct device *dev)
{
        int rc;

        efx_pm_freeze(dev);
        rc = efx_pm_poweroff(dev);
        if (rc)
                efx_pm_resume(dev);
        return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
        .suspend        = efx_pm_suspend,
        .resume         = efx_pm_resume,
        .freeze         = efx_pm_freeze,
        .thaw           = efx_pm_thaw,
        .poweroff       = efx_pm_poweroff,
        .restore        = efx_pm_resume,
};

static struct pci_driver efx_pci_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = efx_pci_table,
        .probe          = efx_pci_probe,
        .remove         = efx_pci_remove,
        .driver.pm      = &efx_pm_ops,
        .shutdown       = efx_pci_shutdown,
        .err_handler    = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
        .sriov_configure = efx_pci_sriov_configure,
#endif
};
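
/* Module setup order matters: the rename notifier and the reset
 * workqueue must exist before either PCI driver can probe a device,
 * and efx_exit_module() unwinds in the reverse order, starting with
 * the EF100 driver that is also registered from this module.
 */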

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

static int __init efx_init_module(void)
{
        int rc;

        printk(KERN_INFO "Solarflare NET driver\n");

        rc = register_netdevice_notifier(&efx_netdev_notifier);
        if (rc)
                goto err_notifier;

        rc = efx_create_reset_workqueue();
        if (rc)
                goto err_reset;

        rc = pci_register_driver(&efx_pci_driver);
        if (rc < 0)
                goto err_pci;

        rc = pci_register_driver(&ef100_pci_driver);
        if (rc < 0)
                goto err_pci_ef100;

        return 0;

err_pci_ef100:
        pci_unregister_driver(&efx_pci_driver);
err_pci:
        efx_destroy_reset_workqueue();
err_reset:
        unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
        return rc;
}

static void __exit efx_exit_module(void)
{
        printk(KERN_INFO "Solarflare NET driver unloading\n");

        pci_unregister_driver(&ef100_pci_driver);
        pci_unregister_driver(&efx_pci_driver);
        efx_destroy_reset_workqueue();
        unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
              "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);