1 // SPDX-License-Identifier: GPL-2.0-only 2 /**************************************************************************** 3 * Driver for Solarflare network controllers and boards 4 * Copyright 2005-2006 Fen Systems Ltd. 5 * Copyright 2005-2013 Solarflare Communications Inc. 6 */ 7 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 #include <linux/netdevice.h> 11 #include <linux/etherdevice.h> 12 #include <linux/delay.h> 13 #include <linux/notifier.h> 14 #include <linux/ip.h> 15 #include <linux/tcp.h> 16 #include <linux/in.h> 17 #include <linux/ethtool.h> 18 #include <linux/topology.h> 19 #include <linux/gfp.h> 20 #include <linux/aer.h> 21 #include <linux/interrupt.h> 22 #include "net_driver.h" 23 #include <net/gre.h> 24 #include <net/udp_tunnel.h> 25 #include "efx.h" 26 #include "efx_common.h" 27 #include "efx_channels.h" 28 #include "rx_common.h" 29 #include "tx_common.h" 30 #include "nic.h" 31 #include "io.h" 32 #include "selftest.h" 33 #include "sriov.h" 34 35 #include "mcdi.h" 36 #include "mcdi_pcol.h" 37 #include "workarounds.h" 38 39 /************************************************************************** 40 * 41 * Type name strings 42 * 43 ************************************************************************** 44 */ 45 46 /* UDP tunnel type names */ 47 static const char *const efx_udp_tunnel_type_names[] = { 48 [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan", 49 [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve", 50 }; 51 52 void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen) 53 { 54 if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) && 55 efx_udp_tunnel_type_names[type] != NULL) 56 snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]); 57 else 58 snprintf(buf, buflen, "type %d", type); 59 } 60 61 /************************************************************************** 62 * 63 * Configurable values 64 * 65 *************************************************************************/ 66 67 module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444); 68 MODULE_PARM_DESC(interrupt_mode, 69 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); 70 71 module_param(rss_cpus, uint, 0444); 72 MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); 73 74 /* 75 * Use separate channels for TX and RX events 76 * 77 * Set this to 1 to use separate channels for TX and RX. It allows us 78 * to control interrupt affinity separately for TX and RX. 79 * 80 * This is only used in MSI-X interrupt mode 81 */ 82 bool efx_separate_tx_channels; 83 module_param(efx_separate_tx_channels, bool, 0444); 84 MODULE_PARM_DESC(efx_separate_tx_channels, 85 "Use separate channels for TX and RX"); 86 87 /* Initial interrupt moderation settings. They can be modified after 88 * module load with ethtool. 89 * 90 * The default for RX should strike a balance between increasing the 91 * round-trip latency and reducing overhead. 92 */ 93 static unsigned int rx_irq_mod_usec = 60; 94 95 /* Initial interrupt moderation settings. They can be modified after 96 * module load with ethtool. 97 * 98 * This default is chosen to ensure that a 10G link does not go idle 99 * while a TX queue is stopped after it has become full. A queue is 100 * restarted when it drops below half full. The time this takes (assuming 101 * worst case 3 descriptors per packet and 1024 descriptors) is 102 * 512 / 3 * 1.2 = 205 usec. 103 */ 104 static unsigned int tx_irq_mod_usec = 150; 105 106 static bool phy_flash_cfg; 107 module_param(phy_flash_cfg, bool, 0644); 108 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); 109 110 static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 111 NETIF_MSG_LINK | NETIF_MSG_IFDOWN | 112 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | 113 NETIF_MSG_TX_ERR | NETIF_MSG_HW); 114 module_param(debug, uint, 0); 115 MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); 116 117 /************************************************************************** 118 * 119 * Utility functions and prototypes 120 * 121 *************************************************************************/ 122 123 static void efx_remove_port(struct efx_nic *efx); 124 static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog); 125 static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp); 126 static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, 127 u32 flags); 128 129 #define EFX_ASSERT_RESET_SERIALISED(efx) \ 130 do { \ 131 if ((efx->state == STATE_READY) || \ 132 (efx->state == STATE_RECOVERY) || \ 133 (efx->state == STATE_DISABLED)) \ 134 ASSERT_RTNL(); \ 135 } while (0) 136 137 /************************************************************************** 138 * 139 * Port handling 140 * 141 **************************************************************************/ 142 143 static void efx_fini_port(struct efx_nic *efx); 144 145 static int efx_probe_port(struct efx_nic *efx) 146 { 147 int rc; 148 149 netif_dbg(efx, probe, efx->net_dev, "create port\n"); 150 151 if (phy_flash_cfg) 152 efx->phy_mode = PHY_MODE_SPECIAL; 153 154 /* Connect up MAC/PHY operations table */ 155 rc = efx->type->probe_port(efx); 156 if (rc) 157 return rc; 158 159 /* Initialise MAC address to permanent address */ 160 ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); 161 162 return 0; 163 } 164 165 static int efx_init_port(struct efx_nic *efx) 166 { 167 int rc; 168 169 netif_dbg(efx, drv, efx->net_dev, "init port\n"); 170 171 mutex_lock(&efx->mac_lock); 172 173 rc = efx->phy_op->init(efx); 174 if (rc) 175 goto fail1; 176 177 efx->port_initialized = true; 178 179 /* Ensure the PHY advertises the correct flow control settings */ 180 rc = efx->phy_op->reconfigure(efx); 181 if (rc && rc != -EPERM) 182 goto fail2; 183 184 mutex_unlock(&efx->mac_lock); 185 return 0; 186 187 fail2: 188 efx->phy_op->fini(efx); 189 fail1: 190 mutex_unlock(&efx->mac_lock); 191 return rc; 192 } 193 194 static void efx_fini_port(struct efx_nic *efx) 195 { 196 netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); 197 198 if (!efx->port_initialized) 199 return; 200 201 efx->phy_op->fini(efx); 202 efx->port_initialized = false; 203 204 efx->link_state.up = false; 205 efx_link_status_changed(efx); 206 } 207 208 static void efx_remove_port(struct efx_nic *efx) 209 { 210 netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); 211 212 efx->type->remove_port(efx); 213 } 214 215 /************************************************************************** 216 * 217 * NIC handling 218 * 219 **************************************************************************/ 220 221 static LIST_HEAD(efx_primary_list); 222 static LIST_HEAD(efx_unassociated_list); 223 224 static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right) 225 { 226 return left->type == right->type && 227 left->vpd_sn && right->vpd_sn && 228 !strcmp(left->vpd_sn, right->vpd_sn); 229 } 230 231 static void efx_associate(struct efx_nic *efx) 232 { 233 struct efx_nic *other, *next; 234 235 if (efx->primary == efx) { 236 /* Adding primary function; look for secondaries */ 237 238 netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); 239 list_add_tail(&efx->node, &efx_primary_list); 240 241 list_for_each_entry_safe(other, next, &efx_unassociated_list, 242 node) { 243 if (efx_same_controller(efx, other)) { 244 list_del(&other->node); 245 netif_dbg(other, probe, other->net_dev, 246 "moving to secondary list of %s %s\n", 247 pci_name(efx->pci_dev), 248 efx->net_dev->name); 249 list_add_tail(&other->node, 250 &efx->secondary_list); 251 other->primary = efx; 252 } 253 } 254 } else { 255 /* Adding secondary function; look for primary */ 256 257 list_for_each_entry(other, &efx_primary_list, node) { 258 if (efx_same_controller(efx, other)) { 259 netif_dbg(efx, probe, efx->net_dev, 260 "adding to secondary list of %s %s\n", 261 pci_name(other->pci_dev), 262 other->net_dev->name); 263 list_add_tail(&efx->node, 264 &other->secondary_list); 265 efx->primary = other; 266 return; 267 } 268 } 269 270 netif_dbg(efx, probe, efx->net_dev, 271 "adding to unassociated list\n"); 272 list_add_tail(&efx->node, &efx_unassociated_list); 273 } 274 } 275 276 static void efx_dissociate(struct efx_nic *efx) 277 { 278 struct efx_nic *other, *next; 279 280 list_del(&efx->node); 281 efx->primary = NULL; 282 283 list_for_each_entry_safe(other, next, &efx->secondary_list, node) { 284 list_del(&other->node); 285 netif_dbg(other, probe, other->net_dev, 286 "moving to unassociated list\n"); 287 list_add_tail(&other->node, &efx_unassociated_list); 288 other->primary = NULL; 289 } 290 } 291 292 static int efx_probe_nic(struct efx_nic *efx) 293 { 294 int rc; 295 296 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); 297 298 /* Carry out hardware-type specific initialisation */ 299 rc = efx->type->probe(efx); 300 if (rc) 301 return rc; 302 303 do { 304 if (!efx->max_channels || !efx->max_tx_channels) { 305 netif_err(efx, drv, efx->net_dev, 306 "Insufficient resources to allocate" 307 " any channels\n"); 308 rc = -ENOSPC; 309 goto fail1; 310 } 311 312 /* Determine the number of channels and queues by trying 313 * to hook in MSI-X interrupts. 314 */ 315 rc = efx_probe_interrupts(efx); 316 if (rc) 317 goto fail1; 318 319 rc = efx_set_channels(efx); 320 if (rc) 321 goto fail1; 322 323 /* dimension_resources can fail with EAGAIN */ 324 rc = efx->type->dimension_resources(efx); 325 if (rc != 0 && rc != -EAGAIN) 326 goto fail2; 327 328 if (rc == -EAGAIN) 329 /* try again with new max_channels */ 330 efx_remove_interrupts(efx); 331 332 } while (rc == -EAGAIN); 333 334 if (efx->n_channels > 1) 335 netdev_rss_key_fill(efx->rss_context.rx_hash_key, 336 sizeof(efx->rss_context.rx_hash_key)); 337 efx_set_default_rx_indir_table(efx, &efx->rss_context); 338 339 /* Initialise the interrupt moderation settings */ 340 efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); 341 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, 342 true); 343 344 return 0; 345 346 fail2: 347 efx_remove_interrupts(efx); 348 fail1: 349 efx->type->remove(efx); 350 return rc; 351 } 352 353 static void efx_remove_nic(struct efx_nic *efx) 354 { 355 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); 356 357 efx_remove_interrupts(efx); 358 efx->type->remove(efx); 359 } 360 361 /************************************************************************** 362 * 363 * NIC startup/shutdown 364 * 365 *************************************************************************/ 366 367 static int efx_probe_all(struct efx_nic *efx) 368 { 369 int rc; 370 371 rc = efx_probe_nic(efx); 372 if (rc) { 373 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); 374 goto fail1; 375 } 376 377 rc = efx_probe_port(efx); 378 if (rc) { 379 netif_err(efx, probe, efx->net_dev, "failed to create port\n"); 380 goto fail2; 381 } 382 383 BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); 384 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { 385 rc = -EINVAL; 386 goto fail3; 387 } 388 389 #ifdef CONFIG_SFC_SRIOV 390 rc = efx->type->vswitching_probe(efx); 391 if (rc) /* not fatal; the PF will still work fine */ 392 netif_warn(efx, probe, efx->net_dev, 393 "failed to setup vswitching rc=%d;" 394 " VFs may not function\n", rc); 395 #endif 396 397 rc = efx_probe_filters(efx); 398 if (rc) { 399 netif_err(efx, probe, efx->net_dev, 400 "failed to create filter tables\n"); 401 goto fail4; 402 } 403 404 rc = efx_probe_channels(efx); 405 if (rc) 406 goto fail5; 407 408 return 0; 409 410 fail5: 411 efx_remove_filters(efx); 412 fail4: 413 #ifdef CONFIG_SFC_SRIOV 414 efx->type->vswitching_remove(efx); 415 #endif 416 fail3: 417 efx_remove_port(efx); 418 fail2: 419 efx_remove_nic(efx); 420 fail1: 421 return rc; 422 } 423 424 static void efx_remove_all(struct efx_nic *efx) 425 { 426 rtnl_lock(); 427 efx_xdp_setup_prog(efx, NULL); 428 rtnl_unlock(); 429 430 efx_remove_channels(efx); 431 efx_remove_filters(efx); 432 #ifdef CONFIG_SFC_SRIOV 433 efx->type->vswitching_remove(efx); 434 #endif 435 efx_remove_port(efx); 436 efx_remove_nic(efx); 437 } 438 439 /************************************************************************** 440 * 441 * Interrupt moderation 442 * 443 **************************************************************************/ 444 unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) 445 { 446 if (usecs == 0) 447 return 0; 448 if (usecs * 1000 < efx->timer_quantum_ns) 449 return 1; /* never round down to 0 */ 450 return usecs * 1000 / efx->timer_quantum_ns; 451 } 452 453 unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks) 454 { 455 /* We must round up when converting ticks to microseconds 456 * because we round down when converting the other way. 457 */ 458 return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); 459 } 460 461 /* Set interrupt moderation parameters */ 462 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, 463 unsigned int rx_usecs, bool rx_adaptive, 464 bool rx_may_override_tx) 465 { 466 struct efx_channel *channel; 467 unsigned int timer_max_us; 468 469 EFX_ASSERT_RESET_SERIALISED(efx); 470 471 timer_max_us = efx->timer_max_ns / 1000; 472 473 if (tx_usecs > timer_max_us || rx_usecs > timer_max_us) 474 return -EINVAL; 475 476 if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && 477 !rx_may_override_tx) { 478 netif_err(efx, drv, efx->net_dev, "Channels are shared. " 479 "RX and TX IRQ moderation must be equal\n"); 480 return -EINVAL; 481 } 482 483 efx->irq_rx_adaptive = rx_adaptive; 484 efx->irq_rx_moderation_us = rx_usecs; 485 efx_for_each_channel(channel, efx) { 486 if (efx_channel_has_rx_queue(channel)) 487 channel->irq_moderation_us = rx_usecs; 488 else if (efx_channel_has_tx_queues(channel)) 489 channel->irq_moderation_us = tx_usecs; 490 else if (efx_channel_is_xdp_tx(channel)) 491 channel->irq_moderation_us = tx_usecs; 492 } 493 494 return 0; 495 } 496 497 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, 498 unsigned int *rx_usecs, bool *rx_adaptive) 499 { 500 *rx_adaptive = efx->irq_rx_adaptive; 501 *rx_usecs = efx->irq_rx_moderation_us; 502 503 /* If channels are shared between RX and TX, so is IRQ 504 * moderation. Otherwise, IRQ moderation is the same for all 505 * TX channels and is not adaptive. 506 */ 507 if (efx->tx_channel_offset == 0) { 508 *tx_usecs = *rx_usecs; 509 } else { 510 struct efx_channel *tx_channel; 511 512 tx_channel = efx->channel[efx->tx_channel_offset]; 513 *tx_usecs = tx_channel->irq_moderation_us; 514 } 515 } 516 517 /************************************************************************** 518 * 519 * ioctls 520 * 521 *************************************************************************/ 522 523 /* Net device ioctl 524 * Context: process, rtnl_lock() held. 525 */ 526 static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) 527 { 528 struct efx_nic *efx = netdev_priv(net_dev); 529 struct mii_ioctl_data *data = if_mii(ifr); 530 531 if (cmd == SIOCSHWTSTAMP) 532 return efx_ptp_set_ts_config(efx, ifr); 533 if (cmd == SIOCGHWTSTAMP) 534 return efx_ptp_get_ts_config(efx, ifr); 535 536 /* Convert phy_id from older PRTAD/DEVAD format */ 537 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 538 (data->phy_id & 0xfc00) == 0x0400) 539 data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; 540 541 return mdio_mii_ioctl(&efx->mdio, data, cmd); 542 } 543 544 /************************************************************************** 545 * 546 * Kernel net device interface 547 * 548 *************************************************************************/ 549 550 /* Context: process, rtnl_lock() held. */ 551 int efx_net_open(struct net_device *net_dev) 552 { 553 struct efx_nic *efx = netdev_priv(net_dev); 554 int rc; 555 556 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", 557 raw_smp_processor_id()); 558 559 rc = efx_check_disabled(efx); 560 if (rc) 561 return rc; 562 if (efx->phy_mode & PHY_MODE_SPECIAL) 563 return -EBUSY; 564 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) 565 return -EIO; 566 567 /* Notify the kernel of the link state polled during driver load, 568 * before the monitor starts running */ 569 efx_link_status_changed(efx); 570 571 efx_start_all(efx); 572 if (efx->state == STATE_DISABLED || efx->reset_pending) 573 netif_device_detach(efx->net_dev); 574 efx_selftest_async_start(efx); 575 return 0; 576 } 577 578 /* Context: process, rtnl_lock() held. 579 * Note that the kernel will ignore our return code; this method 580 * should really be a void. 581 */ 582 int efx_net_stop(struct net_device *net_dev) 583 { 584 struct efx_nic *efx = netdev_priv(net_dev); 585 586 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 587 raw_smp_processor_id()); 588 589 /* Stop the device and flush all the channels */ 590 efx_stop_all(efx); 591 592 return 0; 593 } 594 595 static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) 596 { 597 struct efx_nic *efx = netdev_priv(net_dev); 598 599 if (efx->type->vlan_rx_add_vid) 600 return efx->type->vlan_rx_add_vid(efx, proto, vid); 601 else 602 return -EOPNOTSUPP; 603 } 604 605 static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid) 606 { 607 struct efx_nic *efx = netdev_priv(net_dev); 608 609 if (efx->type->vlan_rx_kill_vid) 610 return efx->type->vlan_rx_kill_vid(efx, proto, vid); 611 else 612 return -EOPNOTSUPP; 613 } 614 615 static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in) 616 { 617 switch (in) { 618 case UDP_TUNNEL_TYPE_VXLAN: 619 return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; 620 case UDP_TUNNEL_TYPE_GENEVE: 621 return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; 622 default: 623 return -1; 624 } 625 } 626 627 static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) 628 { 629 struct efx_nic *efx = netdev_priv(dev); 630 struct efx_udp_tunnel tnl; 631 int efx_tunnel_type; 632 633 efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); 634 if (efx_tunnel_type < 0) 635 return; 636 637 tnl.type = (u16)efx_tunnel_type; 638 tnl.port = ti->port; 639 640 if (efx->type->udp_tnl_add_port) 641 (void)efx->type->udp_tnl_add_port(efx, tnl); 642 } 643 644 static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) 645 { 646 struct efx_nic *efx = netdev_priv(dev); 647 struct efx_udp_tunnel tnl; 648 int efx_tunnel_type; 649 650 efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); 651 if (efx_tunnel_type < 0) 652 return; 653 654 tnl.type = (u16)efx_tunnel_type; 655 tnl.port = ti->port; 656 657 if (efx->type->udp_tnl_del_port) 658 (void)efx->type->udp_tnl_del_port(efx, tnl); 659 } 660 661 static const struct net_device_ops efx_netdev_ops = { 662 .ndo_open = efx_net_open, 663 .ndo_stop = efx_net_stop, 664 .ndo_get_stats64 = efx_net_stats, 665 .ndo_tx_timeout = efx_watchdog, 666 .ndo_start_xmit = efx_hard_start_xmit, 667 .ndo_validate_addr = eth_validate_addr, 668 .ndo_do_ioctl = efx_ioctl, 669 .ndo_change_mtu = efx_change_mtu, 670 .ndo_set_mac_address = efx_set_mac_address, 671 .ndo_set_rx_mode = efx_set_rx_mode, 672 .ndo_set_features = efx_set_features, 673 .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, 674 .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, 675 #ifdef CONFIG_SFC_SRIOV 676 .ndo_set_vf_mac = efx_sriov_set_vf_mac, 677 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, 678 .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, 679 .ndo_get_vf_config = efx_sriov_get_vf_config, 680 .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, 681 #endif 682 .ndo_get_phys_port_id = efx_get_phys_port_id, 683 .ndo_get_phys_port_name = efx_get_phys_port_name, 684 .ndo_setup_tc = efx_setup_tc, 685 #ifdef CONFIG_RFS_ACCEL 686 .ndo_rx_flow_steer = efx_filter_rfs, 687 #endif 688 .ndo_udp_tunnel_add = efx_udp_tunnel_add, 689 .ndo_udp_tunnel_del = efx_udp_tunnel_del, 690 .ndo_xdp_xmit = efx_xdp_xmit, 691 .ndo_bpf = efx_xdp 692 }; 693 694 static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) 695 { 696 struct bpf_prog *old_prog; 697 698 if (efx->xdp_rxq_info_failed) { 699 netif_err(efx, drv, efx->net_dev, 700 "Unable to bind XDP program due to previous failure of rxq_info\n"); 701 return -EINVAL; 702 } 703 704 if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) { 705 netif_err(efx, drv, efx->net_dev, 706 "Unable to configure XDP with MTU of %d (max: %d)\n", 707 efx->net_dev->mtu, efx_xdp_max_mtu(efx)); 708 return -EINVAL; 709 } 710 711 old_prog = rtnl_dereference(efx->xdp_prog); 712 rcu_assign_pointer(efx->xdp_prog, prog); 713 /* Release the reference that was originally passed by the caller. */ 714 if (old_prog) 715 bpf_prog_put(old_prog); 716 717 return 0; 718 } 719 720 /* Context: process, rtnl_lock() held. */ 721 static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp) 722 { 723 struct efx_nic *efx = netdev_priv(dev); 724 struct bpf_prog *xdp_prog; 725 726 switch (xdp->command) { 727 case XDP_SETUP_PROG: 728 return efx_xdp_setup_prog(efx, xdp->prog); 729 case XDP_QUERY_PROG: 730 xdp_prog = rtnl_dereference(efx->xdp_prog); 731 xdp->prog_id = xdp_prog ? xdp_prog->aux->id : 0; 732 return 0; 733 default: 734 return -EINVAL; 735 } 736 } 737 738 static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, 739 u32 flags) 740 { 741 struct efx_nic *efx = netdev_priv(dev); 742 743 if (!netif_running(dev)) 744 return -EINVAL; 745 746 return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH); 747 } 748 749 static void efx_update_name(struct efx_nic *efx) 750 { 751 strcpy(efx->name, efx->net_dev->name); 752 efx_mtd_rename(efx); 753 efx_set_channel_names(efx); 754 } 755 756 static int efx_netdev_event(struct notifier_block *this, 757 unsigned long event, void *ptr) 758 { 759 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); 760 761 if ((net_dev->netdev_ops == &efx_netdev_ops) && 762 event == NETDEV_CHANGENAME) 763 efx_update_name(netdev_priv(net_dev)); 764 765 return NOTIFY_DONE; 766 } 767 768 static struct notifier_block efx_netdev_notifier = { 769 .notifier_call = efx_netdev_event, 770 }; 771 772 static ssize_t 773 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) 774 { 775 struct efx_nic *efx = dev_get_drvdata(dev); 776 return sprintf(buf, "%d\n", efx->phy_type); 777 } 778 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); 779 780 static int efx_register_netdev(struct efx_nic *efx) 781 { 782 struct net_device *net_dev = efx->net_dev; 783 struct efx_channel *channel; 784 int rc; 785 786 net_dev->watchdog_timeo = 5 * HZ; 787 net_dev->irq = efx->pci_dev->irq; 788 net_dev->netdev_ops = &efx_netdev_ops; 789 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) 790 net_dev->priv_flags |= IFF_UNICAST_FLT; 791 net_dev->ethtool_ops = &efx_ethtool_ops; 792 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 793 net_dev->min_mtu = EFX_MIN_MTU; 794 net_dev->max_mtu = EFX_MAX_MTU; 795 796 rtnl_lock(); 797 798 /* Enable resets to be scheduled and check whether any were 799 * already requested. If so, the NIC is probably hosed so we 800 * abort. 801 */ 802 efx->state = STATE_READY; 803 smp_mb(); /* ensure we change state before checking reset_pending */ 804 if (efx->reset_pending) { 805 netif_err(efx, probe, efx->net_dev, 806 "aborting probe due to scheduled reset\n"); 807 rc = -EIO; 808 goto fail_locked; 809 } 810 811 rc = dev_alloc_name(net_dev, net_dev->name); 812 if (rc < 0) 813 goto fail_locked; 814 efx_update_name(efx); 815 816 /* Always start with carrier off; PHY events will detect the link */ 817 netif_carrier_off(net_dev); 818 819 rc = register_netdevice(net_dev); 820 if (rc) 821 goto fail_locked; 822 823 efx_for_each_channel(channel, efx) { 824 struct efx_tx_queue *tx_queue; 825 efx_for_each_channel_tx_queue(tx_queue, channel) 826 efx_init_tx_queue_core_txq(tx_queue); 827 } 828 829 efx_associate(efx); 830 831 rtnl_unlock(); 832 833 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 834 if (rc) { 835 netif_err(efx, drv, efx->net_dev, 836 "failed to init net dev attributes\n"); 837 goto fail_registered; 838 } 839 840 efx_init_mcdi_logging(efx); 841 842 return 0; 843 844 fail_registered: 845 rtnl_lock(); 846 efx_dissociate(efx); 847 unregister_netdevice(net_dev); 848 fail_locked: 849 efx->state = STATE_UNINIT; 850 rtnl_unlock(); 851 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); 852 return rc; 853 } 854 855 static void efx_unregister_netdev(struct efx_nic *efx) 856 { 857 if (!efx->net_dev) 858 return; 859 860 BUG_ON(netdev_priv(efx->net_dev) != efx); 861 862 if (efx_dev_registered(efx)) { 863 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 864 efx_fini_mcdi_logging(efx); 865 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 866 unregister_netdev(efx->net_dev); 867 } 868 } 869 870 /************************************************************************** 871 * 872 * List of NICs we support 873 * 874 **************************************************************************/ 875 876 /* PCI device ID table */ 877 static const struct pci_device_id efx_pci_table[] = { 878 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ 879 .driver_data = (unsigned long) &siena_a0_nic_type}, 880 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ 881 .driver_data = (unsigned long) &siena_a0_nic_type}, 882 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ 883 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 884 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ 885 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, 886 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ 887 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 888 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */ 889 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, 890 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */ 891 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 892 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ 893 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, 894 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */ 895 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 896 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ 897 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, 898 {0} /* end of list */ 899 }; 900 901 /************************************************************************** 902 * 903 * Data housekeeping 904 * 905 **************************************************************************/ 906 907 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) 908 { 909 u64 n_rx_nodesc_trunc = 0; 910 struct efx_channel *channel; 911 912 efx_for_each_channel(channel, efx) 913 n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc; 914 stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc; 915 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); 916 } 917 918 /************************************************************************** 919 * 920 * PCI interface 921 * 922 **************************************************************************/ 923 924 /* Main body of final NIC shutdown code 925 * This is called only at module unload (or hotplug removal). 926 */ 927 static void efx_pci_remove_main(struct efx_nic *efx) 928 { 929 /* Flush reset_work. It can no longer be scheduled since we 930 * are not READY. 931 */ 932 BUG_ON(efx->state == STATE_READY); 933 efx_flush_reset_workqueue(efx); 934 935 efx_disable_interrupts(efx); 936 efx_clear_interrupt_affinity(efx); 937 efx_nic_fini_interrupt(efx); 938 efx_fini_port(efx); 939 efx->type->fini(efx); 940 efx_fini_napi(efx); 941 efx_remove_all(efx); 942 } 943 944 /* Final NIC shutdown 945 * This is called only at module unload (or hotplug removal). A PF can call 946 * this on its VFs to ensure they are unbound first. 947 */ 948 static void efx_pci_remove(struct pci_dev *pci_dev) 949 { 950 struct efx_nic *efx; 951 952 efx = pci_get_drvdata(pci_dev); 953 if (!efx) 954 return; 955 956 /* Mark the NIC as fini, then stop the interface */ 957 rtnl_lock(); 958 efx_dissociate(efx); 959 dev_close(efx->net_dev); 960 efx_disable_interrupts(efx); 961 efx->state = STATE_UNINIT; 962 rtnl_unlock(); 963 964 if (efx->type->sriov_fini) 965 efx->type->sriov_fini(efx); 966 967 efx_unregister_netdev(efx); 968 969 efx_mtd_remove(efx); 970 971 efx_pci_remove_main(efx); 972 973 efx_fini_io(efx); 974 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); 975 976 efx_fini_struct(efx); 977 free_netdev(efx->net_dev); 978 979 pci_disable_pcie_error_reporting(pci_dev); 980 }; 981 982 /* NIC VPD information 983 * Called during probe to display the part number of the 984 * installed NIC. VPD is potentially very large but this should 985 * always appear within the first 512 bytes. 986 */ 987 #define SFC_VPD_LEN 512 988 static void efx_probe_vpd_strings(struct efx_nic *efx) 989 { 990 struct pci_dev *dev = efx->pci_dev; 991 char vpd_data[SFC_VPD_LEN]; 992 ssize_t vpd_size; 993 int ro_start, ro_size, i, j; 994 995 /* Get the vpd data from the device */ 996 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); 997 if (vpd_size <= 0) { 998 netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); 999 return; 1000 } 1001 1002 /* Get the Read only section */ 1003 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); 1004 if (ro_start < 0) { 1005 netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); 1006 return; 1007 } 1008 1009 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); 1010 j = ro_size; 1011 i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1012 if (i + j > vpd_size) 1013 j = vpd_size - i; 1014 1015 /* Get the Part number */ 1016 i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); 1017 if (i < 0) { 1018 netif_err(efx, drv, efx->net_dev, "Part number not found\n"); 1019 return; 1020 } 1021 1022 j = pci_vpd_info_field_size(&vpd_data[i]); 1023 i += PCI_VPD_INFO_FLD_HDR_SIZE; 1024 if (i + j > vpd_size) { 1025 netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); 1026 return; 1027 } 1028 1029 netif_info(efx, drv, efx->net_dev, 1030 "Part Number : %.*s\n", j, &vpd_data[i]); 1031 1032 i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1033 j = ro_size; 1034 i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN"); 1035 if (i < 0) { 1036 netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); 1037 return; 1038 } 1039 1040 j = pci_vpd_info_field_size(&vpd_data[i]); 1041 i += PCI_VPD_INFO_FLD_HDR_SIZE; 1042 if (i + j > vpd_size) { 1043 netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); 1044 return; 1045 } 1046 1047 efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); 1048 if (!efx->vpd_sn) 1049 return; 1050 1051 snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); 1052 } 1053 1054 1055 /* Main body of NIC initialisation 1056 * This is called at module load (or hotplug insertion, theoretically). 1057 */ 1058 static int efx_pci_probe_main(struct efx_nic *efx) 1059 { 1060 int rc; 1061 1062 /* Do start-of-day initialisation */ 1063 rc = efx_probe_all(efx); 1064 if (rc) 1065 goto fail1; 1066 1067 efx_init_napi(efx); 1068 1069 down_write(&efx->filter_sem); 1070 rc = efx->type->init(efx); 1071 up_write(&efx->filter_sem); 1072 if (rc) { 1073 netif_err(efx, probe, efx->net_dev, 1074 "failed to initialise NIC\n"); 1075 goto fail3; 1076 } 1077 1078 rc = efx_init_port(efx); 1079 if (rc) { 1080 netif_err(efx, probe, efx->net_dev, 1081 "failed to initialise port\n"); 1082 goto fail4; 1083 } 1084 1085 rc = efx_nic_init_interrupt(efx); 1086 if (rc) 1087 goto fail5; 1088 1089 efx_set_interrupt_affinity(efx); 1090 rc = efx_enable_interrupts(efx); 1091 if (rc) 1092 goto fail6; 1093 1094 return 0; 1095 1096 fail6: 1097 efx_clear_interrupt_affinity(efx); 1098 efx_nic_fini_interrupt(efx); 1099 fail5: 1100 efx_fini_port(efx); 1101 fail4: 1102 efx->type->fini(efx); 1103 fail3: 1104 efx_fini_napi(efx); 1105 efx_remove_all(efx); 1106 fail1: 1107 return rc; 1108 } 1109 1110 static int efx_pci_probe_post_io(struct efx_nic *efx) 1111 { 1112 struct net_device *net_dev = efx->net_dev; 1113 int rc = efx_pci_probe_main(efx); 1114 1115 if (rc) 1116 return rc; 1117 1118 if (efx->type->sriov_init) { 1119 rc = efx->type->sriov_init(efx); 1120 if (rc) 1121 netif_err(efx, probe, efx->net_dev, 1122 "SR-IOV can't be enabled rc %d\n", rc); 1123 } 1124 1125 /* Determine netdevice features */ 1126 net_dev->features |= (efx->type->offload_features | NETIF_F_SG | 1127 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL); 1128 if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) 1129 net_dev->features |= NETIF_F_TSO6; 1130 /* Check whether device supports TSO */ 1131 if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) 1132 net_dev->features &= ~NETIF_F_ALL_TSO; 1133 /* Mask for features that also apply to VLAN devices */ 1134 net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | 1135 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | 1136 NETIF_F_RXCSUM); 1137 1138 net_dev->hw_features |= net_dev->features & ~efx->fixed_features; 1139 1140 /* Disable receiving frames with bad FCS, by default. */ 1141 net_dev->features &= ~NETIF_F_RXALL; 1142 1143 /* Disable VLAN filtering by default. It may be enforced if 1144 * the feature is fixed (i.e. VLAN filters are required to 1145 * receive VLAN tagged packets due to vPort restrictions). 1146 */ 1147 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 1148 net_dev->features |= efx->fixed_features; 1149 1150 rc = efx_register_netdev(efx); 1151 if (!rc) 1152 return 0; 1153 1154 efx_pci_remove_main(efx); 1155 return rc; 1156 } 1157 1158 /* NIC initialisation 1159 * 1160 * This is called at module load (or hotplug insertion, 1161 * theoretically). It sets up PCI mappings, resets the NIC, 1162 * sets up and registers the network devices with the kernel and hooks 1163 * the interrupt service routine. It does not prepare the device for 1164 * transmission; this is left to the first time one of the network 1165 * interfaces is brought up (i.e. efx_net_open). 1166 */ 1167 static int efx_pci_probe(struct pci_dev *pci_dev, 1168 const struct pci_device_id *entry) 1169 { 1170 struct net_device *net_dev; 1171 struct efx_nic *efx; 1172 int rc; 1173 1174 /* Allocate and initialise a struct net_device and struct efx_nic */ 1175 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, 1176 EFX_MAX_RX_QUEUES); 1177 if (!net_dev) 1178 return -ENOMEM; 1179 efx = netdev_priv(net_dev); 1180 efx->type = (const struct efx_nic_type *) entry->driver_data; 1181 efx->fixed_features |= NETIF_F_HIGHDMA; 1182 1183 pci_set_drvdata(pci_dev, efx); 1184 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 1185 rc = efx_init_struct(efx, pci_dev, net_dev); 1186 if (rc) 1187 goto fail1; 1188 1189 netif_info(efx, probe, efx->net_dev, 1190 "Solarflare NIC detected\n"); 1191 1192 if (!efx->type->is_vf) 1193 efx_probe_vpd_strings(efx); 1194 1195 /* Set up basic I/O (BAR mappings etc) */ 1196 rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask, 1197 efx->type->mem_map_size(efx)); 1198 if (rc) 1199 goto fail2; 1200 1201 rc = efx_pci_probe_post_io(efx); 1202 if (rc) { 1203 /* On failure, retry once immediately. 1204 * If we aborted probe due to a scheduled reset, dismiss it. 1205 */ 1206 efx->reset_pending = 0; 1207 rc = efx_pci_probe_post_io(efx); 1208 if (rc) { 1209 /* On another failure, retry once more 1210 * after a 50-305ms delay. 1211 */ 1212 unsigned char r; 1213 1214 get_random_bytes(&r, 1); 1215 msleep((unsigned int)r + 50); 1216 efx->reset_pending = 0; 1217 rc = efx_pci_probe_post_io(efx); 1218 } 1219 } 1220 if (rc) 1221 goto fail3; 1222 1223 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); 1224 1225 /* Try to create MTDs, but allow this to fail */ 1226 rtnl_lock(); 1227 rc = efx_mtd_probe(efx); 1228 rtnl_unlock(); 1229 if (rc && rc != -EPERM) 1230 netif_warn(efx, probe, efx->net_dev, 1231 "failed to create MTDs (%d)\n", rc); 1232 1233 (void)pci_enable_pcie_error_reporting(pci_dev); 1234 1235 if (efx->type->udp_tnl_push_ports) 1236 efx->type->udp_tnl_push_ports(efx); 1237 1238 return 0; 1239 1240 fail3: 1241 efx_fini_io(efx); 1242 fail2: 1243 efx_fini_struct(efx); 1244 fail1: 1245 WARN_ON(rc > 0); 1246 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); 1247 free_netdev(net_dev); 1248 return rc; 1249 } 1250 1251 /* efx_pci_sriov_configure returns the actual number of Virtual Functions 1252 * enabled on success 1253 */ 1254 #ifdef CONFIG_SFC_SRIOV 1255 static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) 1256 { 1257 int rc; 1258 struct efx_nic *efx = pci_get_drvdata(dev); 1259 1260 if (efx->type->sriov_configure) { 1261 rc = efx->type->sriov_configure(efx, num_vfs); 1262 if (rc) 1263 return rc; 1264 else 1265 return num_vfs; 1266 } else 1267 return -EOPNOTSUPP; 1268 } 1269 #endif 1270 1271 static int efx_pm_freeze(struct device *dev) 1272 { 1273 struct efx_nic *efx = dev_get_drvdata(dev); 1274 1275 rtnl_lock(); 1276 1277 if (efx->state != STATE_DISABLED) { 1278 efx->state = STATE_UNINIT; 1279 1280 efx_device_detach_sync(efx); 1281 1282 efx_stop_all(efx); 1283 efx_disable_interrupts(efx); 1284 } 1285 1286 rtnl_unlock(); 1287 1288 return 0; 1289 } 1290 1291 static int efx_pm_thaw(struct device *dev) 1292 { 1293 int rc; 1294 struct efx_nic *efx = dev_get_drvdata(dev); 1295 1296 rtnl_lock(); 1297 1298 if (efx->state != STATE_DISABLED) { 1299 rc = efx_enable_interrupts(efx); 1300 if (rc) 1301 goto fail; 1302 1303 mutex_lock(&efx->mac_lock); 1304 efx->phy_op->reconfigure(efx); 1305 mutex_unlock(&efx->mac_lock); 1306 1307 efx_start_all(efx); 1308 1309 efx_device_attach_if_not_resetting(efx); 1310 1311 efx->state = STATE_READY; 1312 1313 efx->type->resume_wol(efx); 1314 } 1315 1316 rtnl_unlock(); 1317 1318 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ 1319 efx_queue_reset_work(efx); 1320 1321 return 0; 1322 1323 fail: 1324 rtnl_unlock(); 1325 1326 return rc; 1327 } 1328 1329 static int efx_pm_poweroff(struct device *dev) 1330 { 1331 struct pci_dev *pci_dev = to_pci_dev(dev); 1332 struct efx_nic *efx = pci_get_drvdata(pci_dev); 1333 1334 efx->type->fini(efx); 1335 1336 efx->reset_pending = 0; 1337 1338 pci_save_state(pci_dev); 1339 return pci_set_power_state(pci_dev, PCI_D3hot); 1340 } 1341 1342 /* Used for both resume and restore */ 1343 static int efx_pm_resume(struct device *dev) 1344 { 1345 struct pci_dev *pci_dev = to_pci_dev(dev); 1346 struct efx_nic *efx = pci_get_drvdata(pci_dev); 1347 int rc; 1348 1349 rc = pci_set_power_state(pci_dev, PCI_D0); 1350 if (rc) 1351 return rc; 1352 pci_restore_state(pci_dev); 1353 rc = pci_enable_device(pci_dev); 1354 if (rc) 1355 return rc; 1356 pci_set_master(efx->pci_dev); 1357 rc = efx->type->reset(efx, RESET_TYPE_ALL); 1358 if (rc) 1359 return rc; 1360 down_write(&efx->filter_sem); 1361 rc = efx->type->init(efx); 1362 up_write(&efx->filter_sem); 1363 if (rc) 1364 return rc; 1365 rc = efx_pm_thaw(dev); 1366 return rc; 1367 } 1368 1369 static int efx_pm_suspend(struct device *dev) 1370 { 1371 int rc; 1372 1373 efx_pm_freeze(dev); 1374 rc = efx_pm_poweroff(dev); 1375 if (rc) 1376 efx_pm_resume(dev); 1377 return rc; 1378 } 1379 1380 static const struct dev_pm_ops efx_pm_ops = { 1381 .suspend = efx_pm_suspend, 1382 .resume = efx_pm_resume, 1383 .freeze = efx_pm_freeze, 1384 .thaw = efx_pm_thaw, 1385 .poweroff = efx_pm_poweroff, 1386 .restore = efx_pm_resume, 1387 }; 1388 1389 static struct pci_driver efx_pci_driver = { 1390 .name = KBUILD_MODNAME, 1391 .id_table = efx_pci_table, 1392 .probe = efx_pci_probe, 1393 .remove = efx_pci_remove, 1394 .driver.pm = &efx_pm_ops, 1395 .err_handler = &efx_err_handlers, 1396 #ifdef CONFIG_SFC_SRIOV 1397 .sriov_configure = efx_pci_sriov_configure, 1398 #endif 1399 }; 1400 1401 /************************************************************************** 1402 * 1403 * Kernel module interface 1404 * 1405 *************************************************************************/ 1406 1407 static int __init efx_init_module(void) 1408 { 1409 int rc; 1410 1411 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); 1412 1413 rc = register_netdevice_notifier(&efx_netdev_notifier); 1414 if (rc) 1415 goto err_notifier; 1416 1417 #ifdef CONFIG_SFC_SRIOV 1418 rc = efx_init_sriov(); 1419 if (rc) 1420 goto err_sriov; 1421 #endif 1422 1423 rc = efx_create_reset_workqueue(); 1424 if (rc) 1425 goto err_reset; 1426 1427 rc = pci_register_driver(&efx_pci_driver); 1428 if (rc < 0) 1429 goto err_pci; 1430 1431 return 0; 1432 1433 err_pci: 1434 efx_destroy_reset_workqueue(); 1435 err_reset: 1436 #ifdef CONFIG_SFC_SRIOV 1437 efx_fini_sriov(); 1438 err_sriov: 1439 #endif 1440 unregister_netdevice_notifier(&efx_netdev_notifier); 1441 err_notifier: 1442 return rc; 1443 } 1444 1445 static void __exit efx_exit_module(void) 1446 { 1447 printk(KERN_INFO "Solarflare NET driver unloading\n"); 1448 1449 pci_unregister_driver(&efx_pci_driver); 1450 efx_destroy_reset_workqueue(); 1451 #ifdef CONFIG_SFC_SRIOV 1452 efx_fini_sriov(); 1453 #endif 1454 unregister_netdevice_notifier(&efx_netdev_notifier); 1455 1456 } 1457 1458 module_init(efx_init_module); 1459 module_exit(efx_exit_module); 1460 1461 MODULE_AUTHOR("Solarflare Communications and " 1462 "Michael Brown <mbrown@fensystems.co.uk>"); 1463 MODULE_DESCRIPTION("Solarflare network driver"); 1464 MODULE_LICENSE("GPL"); 1465 MODULE_DEVICE_TABLE(pci, efx_pci_table); 1466 MODULE_VERSION(EFX_DRIVER_VERSION); 1467