1893ce44dSCatherine Sullivan // SPDX-License-Identifier: (GPL-2.0 OR MIT) 2893ce44dSCatherine Sullivan /* Google virtual Ethernet (gve) driver 3893ce44dSCatherine Sullivan * 4893ce44dSCatherine Sullivan * Copyright (C) 2015-2019 Google, Inc. 5893ce44dSCatherine Sullivan */ 6893ce44dSCatherine Sullivan 7893ce44dSCatherine Sullivan #include <linux/cpumask.h> 8893ce44dSCatherine Sullivan #include <linux/etherdevice.h> 9893ce44dSCatherine Sullivan #include <linux/interrupt.h> 10893ce44dSCatherine Sullivan #include <linux/module.h> 11893ce44dSCatherine Sullivan #include <linux/pci.h> 12893ce44dSCatherine Sullivan #include <linux/sched.h> 13893ce44dSCatherine Sullivan #include <linux/timer.h> 149e5f7d26SCatherine Sullivan #include <linux/workqueue.h> 15893ce44dSCatherine Sullivan #include <net/sch_generic.h> 16893ce44dSCatherine Sullivan #include "gve.h" 17893ce44dSCatherine Sullivan #include "gve_adminq.h" 18893ce44dSCatherine Sullivan #include "gve_register.h" 19893ce44dSCatherine Sullivan 20f5cedc84SCatherine Sullivan #define GVE_DEFAULT_RX_COPYBREAK (256) 21f5cedc84SCatherine Sullivan 22893ce44dSCatherine Sullivan #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK) 23893ce44dSCatherine Sullivan #define GVE_VERSION "1.0.0" 24893ce44dSCatherine Sullivan #define GVE_VERSION_PREFIX "GVE-" 25893ce44dSCatherine Sullivan 26e5b845dcSCatherine Sullivan const char gve_version_str[] = GVE_VERSION; 27893ce44dSCatherine Sullivan static const char gve_version_prefix[] = GVE_VERSION_PREFIX; 28893ce44dSCatherine Sullivan 29f5cedc84SCatherine Sullivan static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) 30f5cedc84SCatherine Sullivan { 31f5cedc84SCatherine Sullivan struct gve_priv *priv = netdev_priv(dev); 32f5cedc84SCatherine Sullivan unsigned int start; 33f5cedc84SCatherine Sullivan int ring; 34f5cedc84SCatherine Sullivan 35f5cedc84SCatherine Sullivan if (priv->rx) { 36f5cedc84SCatherine Sullivan for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) 
{ 37f5cedc84SCatherine Sullivan do { 383c13ce74SCatherine Sullivan start = 39f5cedc84SCatherine Sullivan u64_stats_fetch_begin(&priv->rx[ring].statss); 40f5cedc84SCatherine Sullivan s->rx_packets += priv->rx[ring].rpackets; 41f5cedc84SCatherine Sullivan s->rx_bytes += priv->rx[ring].rbytes; 42f5cedc84SCatherine Sullivan } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 43f5cedc84SCatherine Sullivan start)); 44f5cedc84SCatherine Sullivan } 45f5cedc84SCatherine Sullivan } 46f5cedc84SCatherine Sullivan if (priv->tx) { 47f5cedc84SCatherine Sullivan for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { 48f5cedc84SCatherine Sullivan do { 493c13ce74SCatherine Sullivan start = 50f5cedc84SCatherine Sullivan u64_stats_fetch_begin(&priv->tx[ring].statss); 51f5cedc84SCatherine Sullivan s->tx_packets += priv->tx[ring].pkt_done; 52f5cedc84SCatherine Sullivan s->tx_bytes += priv->tx[ring].bytes_done; 53f5cedc84SCatherine Sullivan } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 54f5cedc84SCatherine Sullivan start)); 55f5cedc84SCatherine Sullivan } 56f5cedc84SCatherine Sullivan } 57f5cedc84SCatherine Sullivan } 58f5cedc84SCatherine Sullivan 59893ce44dSCatherine Sullivan static int gve_alloc_counter_array(struct gve_priv *priv) 60893ce44dSCatherine Sullivan { 61893ce44dSCatherine Sullivan priv->counter_array = 62893ce44dSCatherine Sullivan dma_alloc_coherent(&priv->pdev->dev, 63893ce44dSCatherine Sullivan priv->num_event_counters * 64893ce44dSCatherine Sullivan sizeof(*priv->counter_array), 65893ce44dSCatherine Sullivan &priv->counter_array_bus, GFP_KERNEL); 66893ce44dSCatherine Sullivan if (!priv->counter_array) 67893ce44dSCatherine Sullivan return -ENOMEM; 68893ce44dSCatherine Sullivan 69893ce44dSCatherine Sullivan return 0; 70893ce44dSCatherine Sullivan } 71893ce44dSCatherine Sullivan 72893ce44dSCatherine Sullivan static void gve_free_counter_array(struct gve_priv *priv) 73893ce44dSCatherine Sullivan { 74893ce44dSCatherine Sullivan 
dma_free_coherent(&priv->pdev->dev, 75893ce44dSCatherine Sullivan priv->num_event_counters * 76893ce44dSCatherine Sullivan sizeof(*priv->counter_array), 77893ce44dSCatherine Sullivan priv->counter_array, priv->counter_array_bus); 78893ce44dSCatherine Sullivan priv->counter_array = NULL; 79893ce44dSCatherine Sullivan } 80893ce44dSCatherine Sullivan 81893ce44dSCatherine Sullivan static irqreturn_t gve_mgmnt_intr(int irq, void *arg) 82893ce44dSCatherine Sullivan { 839e5f7d26SCatherine Sullivan struct gve_priv *priv = arg; 849e5f7d26SCatherine Sullivan 859e5f7d26SCatherine Sullivan queue_work(priv->gve_wq, &priv->service_task); 86893ce44dSCatherine Sullivan return IRQ_HANDLED; 87893ce44dSCatherine Sullivan } 88893ce44dSCatherine Sullivan 89893ce44dSCatherine Sullivan static irqreturn_t gve_intr(int irq, void *arg) 90893ce44dSCatherine Sullivan { 91f5cedc84SCatherine Sullivan struct gve_notify_block *block = arg; 92f5cedc84SCatherine Sullivan struct gve_priv *priv = block->priv; 93f5cedc84SCatherine Sullivan 94f5cedc84SCatherine Sullivan iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); 95f5cedc84SCatherine Sullivan napi_schedule_irqoff(&block->napi); 96893ce44dSCatherine Sullivan return IRQ_HANDLED; 97893ce44dSCatherine Sullivan } 98893ce44dSCatherine Sullivan 99f5cedc84SCatherine Sullivan static int gve_napi_poll(struct napi_struct *napi, int budget) 100f5cedc84SCatherine Sullivan { 101f5cedc84SCatherine Sullivan struct gve_notify_block *block; 102f5cedc84SCatherine Sullivan __be32 __iomem *irq_doorbell; 103f5cedc84SCatherine Sullivan bool reschedule = false; 104f5cedc84SCatherine Sullivan struct gve_priv *priv; 105f5cedc84SCatherine Sullivan 106f5cedc84SCatherine Sullivan block = container_of(napi, struct gve_notify_block, napi); 107f5cedc84SCatherine Sullivan priv = block->priv; 108f5cedc84SCatherine Sullivan 109f5cedc84SCatherine Sullivan if (block->tx) 110f5cedc84SCatherine Sullivan reschedule |= gve_tx_poll(block, budget); 111f5cedc84SCatherine Sullivan 
if (block->rx) 112f5cedc84SCatherine Sullivan reschedule |= gve_rx_poll(block, budget); 113f5cedc84SCatherine Sullivan 114f5cedc84SCatherine Sullivan if (reschedule) 115f5cedc84SCatherine Sullivan return budget; 116f5cedc84SCatherine Sullivan 117f5cedc84SCatherine Sullivan napi_complete(napi); 118f5cedc84SCatherine Sullivan irq_doorbell = gve_irq_doorbell(priv, block); 119f5cedc84SCatherine Sullivan iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell); 120f5cedc84SCatherine Sullivan 121f5cedc84SCatherine Sullivan /* Double check we have no extra work. 122f5cedc84SCatherine Sullivan * Ensure unmask synchronizes with checking for work. 123f5cedc84SCatherine Sullivan */ 124f5cedc84SCatherine Sullivan dma_rmb(); 125f5cedc84SCatherine Sullivan if (block->tx) 126f5cedc84SCatherine Sullivan reschedule |= gve_tx_poll(block, -1); 127f5cedc84SCatherine Sullivan if (block->rx) 128f5cedc84SCatherine Sullivan reschedule |= gve_rx_poll(block, -1); 129f5cedc84SCatherine Sullivan if (reschedule && napi_reschedule(napi)) 130f5cedc84SCatherine Sullivan iowrite32be(GVE_IRQ_MASK, irq_doorbell); 131f5cedc84SCatherine Sullivan 132f5cedc84SCatherine Sullivan return 0; 133f5cedc84SCatherine Sullivan } 134f5cedc84SCatherine Sullivan 135893ce44dSCatherine Sullivan static int gve_alloc_notify_blocks(struct gve_priv *priv) 136893ce44dSCatherine Sullivan { 137893ce44dSCatherine Sullivan int num_vecs_requested = priv->num_ntfy_blks + 1; 138893ce44dSCatherine Sullivan char *name = priv->dev->name; 139893ce44dSCatherine Sullivan unsigned int active_cpus; 140893ce44dSCatherine Sullivan int vecs_enabled; 141893ce44dSCatherine Sullivan int i, j; 142893ce44dSCatherine Sullivan int err; 143893ce44dSCatherine Sullivan 144893ce44dSCatherine Sullivan priv->msix_vectors = kvzalloc(num_vecs_requested * 145893ce44dSCatherine Sullivan sizeof(*priv->msix_vectors), GFP_KERNEL); 146893ce44dSCatherine Sullivan if (!priv->msix_vectors) 147893ce44dSCatherine Sullivan return -ENOMEM; 148893ce44dSCatherine 
Sullivan for (i = 0; i < num_vecs_requested; i++) 149893ce44dSCatherine Sullivan priv->msix_vectors[i].entry = i; 150893ce44dSCatherine Sullivan vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors, 151893ce44dSCatherine Sullivan GVE_MIN_MSIX, num_vecs_requested); 152893ce44dSCatherine Sullivan if (vecs_enabled < 0) { 153893ce44dSCatherine Sullivan dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n", 154893ce44dSCatherine Sullivan GVE_MIN_MSIX, vecs_enabled); 155893ce44dSCatherine Sullivan err = vecs_enabled; 156893ce44dSCatherine Sullivan goto abort_with_msix_vectors; 157893ce44dSCatherine Sullivan } 158893ce44dSCatherine Sullivan if (vecs_enabled != num_vecs_requested) { 159f5cedc84SCatherine Sullivan int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1; 160f5cedc84SCatherine Sullivan int vecs_per_type = new_num_ntfy_blks / 2; 161f5cedc84SCatherine Sullivan int vecs_left = new_num_ntfy_blks % 2; 162f5cedc84SCatherine Sullivan 163f5cedc84SCatherine Sullivan priv->num_ntfy_blks = new_num_ntfy_blks; 164f5cedc84SCatherine Sullivan priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, 165f5cedc84SCatherine Sullivan vecs_per_type); 166f5cedc84SCatherine Sullivan priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues, 167f5cedc84SCatherine Sullivan vecs_per_type + vecs_left); 168893ce44dSCatherine Sullivan dev_err(&priv->pdev->dev, 169f5cedc84SCatherine Sullivan "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n", 170f5cedc84SCatherine Sullivan vecs_enabled, priv->tx_cfg.max_queues, 171f5cedc84SCatherine Sullivan priv->rx_cfg.max_queues); 172f5cedc84SCatherine Sullivan if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) 173f5cedc84SCatherine Sullivan priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; 174f5cedc84SCatherine Sullivan if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) 175f5cedc84SCatherine Sullivan priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; 
176893ce44dSCatherine Sullivan } 177893ce44dSCatherine Sullivan /* Half the notification blocks go to TX and half to RX */ 178893ce44dSCatherine Sullivan active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus()); 179893ce44dSCatherine Sullivan 180893ce44dSCatherine Sullivan /* Setup Management Vector - the last vector */ 181893ce44dSCatherine Sullivan snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt", 182893ce44dSCatherine Sullivan name); 183893ce44dSCatherine Sullivan err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, 184893ce44dSCatherine Sullivan gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv); 185893ce44dSCatherine Sullivan if (err) { 186893ce44dSCatherine Sullivan dev_err(&priv->pdev->dev, "Did not receive management vector.\n"); 187893ce44dSCatherine Sullivan goto abort_with_msix_enabled; 188893ce44dSCatherine Sullivan } 189893ce44dSCatherine Sullivan priv->ntfy_blocks = 190893ce44dSCatherine Sullivan dma_alloc_coherent(&priv->pdev->dev, 191893ce44dSCatherine Sullivan priv->num_ntfy_blks * 192893ce44dSCatherine Sullivan sizeof(*priv->ntfy_blocks), 193893ce44dSCatherine Sullivan &priv->ntfy_block_bus, GFP_KERNEL); 194893ce44dSCatherine Sullivan if (!priv->ntfy_blocks) { 195893ce44dSCatherine Sullivan err = -ENOMEM; 196893ce44dSCatherine Sullivan goto abort_with_mgmt_vector; 197893ce44dSCatherine Sullivan } 198893ce44dSCatherine Sullivan /* Setup the other blocks - the first n-1 vectors */ 199893ce44dSCatherine Sullivan for (i = 0; i < priv->num_ntfy_blks; i++) { 200893ce44dSCatherine Sullivan struct gve_notify_block *block = &priv->ntfy_blocks[i]; 201893ce44dSCatherine Sullivan int msix_idx = i; 202893ce44dSCatherine Sullivan 203893ce44dSCatherine Sullivan snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d", 204893ce44dSCatherine Sullivan name, i); 205893ce44dSCatherine Sullivan block->priv = priv; 206893ce44dSCatherine Sullivan err = request_irq(priv->msix_vectors[msix_idx].vector, 207893ce44dSCatherine 
Sullivan gve_intr, 0, block->name, block); 208893ce44dSCatherine Sullivan if (err) { 209893ce44dSCatherine Sullivan dev_err(&priv->pdev->dev, 210893ce44dSCatherine Sullivan "Failed to receive msix vector %d\n", i); 211893ce44dSCatherine Sullivan goto abort_with_some_ntfy_blocks; 212893ce44dSCatherine Sullivan } 213893ce44dSCatherine Sullivan irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, 214893ce44dSCatherine Sullivan get_cpu_mask(i % active_cpus)); 215893ce44dSCatherine Sullivan } 216893ce44dSCatherine Sullivan return 0; 217893ce44dSCatherine Sullivan abort_with_some_ntfy_blocks: 218893ce44dSCatherine Sullivan for (j = 0; j < i; j++) { 219893ce44dSCatherine Sullivan struct gve_notify_block *block = &priv->ntfy_blocks[j]; 220893ce44dSCatherine Sullivan int msix_idx = j; 221893ce44dSCatherine Sullivan 222893ce44dSCatherine Sullivan irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, 223893ce44dSCatherine Sullivan NULL); 224893ce44dSCatherine Sullivan free_irq(priv->msix_vectors[msix_idx].vector, block); 225893ce44dSCatherine Sullivan } 226893ce44dSCatherine Sullivan dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * 227893ce44dSCatherine Sullivan sizeof(*priv->ntfy_blocks), 228893ce44dSCatherine Sullivan priv->ntfy_blocks, priv->ntfy_block_bus); 229893ce44dSCatherine Sullivan priv->ntfy_blocks = NULL; 230893ce44dSCatherine Sullivan abort_with_mgmt_vector: 231893ce44dSCatherine Sullivan free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); 232893ce44dSCatherine Sullivan abort_with_msix_enabled: 233893ce44dSCatherine Sullivan pci_disable_msix(priv->pdev); 234893ce44dSCatherine Sullivan abort_with_msix_vectors: 235893ce44dSCatherine Sullivan kfree(priv->msix_vectors); 236893ce44dSCatherine Sullivan priv->msix_vectors = NULL; 237893ce44dSCatherine Sullivan return err; 238893ce44dSCatherine Sullivan } 239893ce44dSCatherine Sullivan 240893ce44dSCatherine Sullivan static void gve_free_notify_blocks(struct gve_priv *priv) 
241893ce44dSCatherine Sullivan { 242893ce44dSCatherine Sullivan int i; 243893ce44dSCatherine Sullivan 244893ce44dSCatherine Sullivan /* Free the irqs */ 245893ce44dSCatherine Sullivan for (i = 0; i < priv->num_ntfy_blks; i++) { 246893ce44dSCatherine Sullivan struct gve_notify_block *block = &priv->ntfy_blocks[i]; 247893ce44dSCatherine Sullivan int msix_idx = i; 248893ce44dSCatherine Sullivan 249893ce44dSCatherine Sullivan irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, 250893ce44dSCatherine Sullivan NULL); 251893ce44dSCatherine Sullivan free_irq(priv->msix_vectors[msix_idx].vector, block); 252893ce44dSCatherine Sullivan } 253893ce44dSCatherine Sullivan dma_free_coherent(&priv->pdev->dev, 254893ce44dSCatherine Sullivan priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks), 255893ce44dSCatherine Sullivan priv->ntfy_blocks, priv->ntfy_block_bus); 256893ce44dSCatherine Sullivan priv->ntfy_blocks = NULL; 257893ce44dSCatherine Sullivan free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); 258893ce44dSCatherine Sullivan pci_disable_msix(priv->pdev); 259893ce44dSCatherine Sullivan kfree(priv->msix_vectors); 260893ce44dSCatherine Sullivan priv->msix_vectors = NULL; 261893ce44dSCatherine Sullivan } 262893ce44dSCatherine Sullivan 263893ce44dSCatherine Sullivan static int gve_setup_device_resources(struct gve_priv *priv) 264893ce44dSCatherine Sullivan { 265893ce44dSCatherine Sullivan int err; 266893ce44dSCatherine Sullivan 267893ce44dSCatherine Sullivan err = gve_alloc_counter_array(priv); 268893ce44dSCatherine Sullivan if (err) 269893ce44dSCatherine Sullivan return err; 270893ce44dSCatherine Sullivan err = gve_alloc_notify_blocks(priv); 271893ce44dSCatherine Sullivan if (err) 272893ce44dSCatherine Sullivan goto abort_with_counter; 273893ce44dSCatherine Sullivan err = gve_adminq_configure_device_resources(priv, 274893ce44dSCatherine Sullivan priv->counter_array_bus, 275893ce44dSCatherine Sullivan priv->num_event_counters, 276893ce44dSCatherine Sullivan 
priv->ntfy_block_bus, 277893ce44dSCatherine Sullivan priv->num_ntfy_blks); 278893ce44dSCatherine Sullivan if (unlikely(err)) { 279893ce44dSCatherine Sullivan dev_err(&priv->pdev->dev, 280893ce44dSCatherine Sullivan "could not setup device_resources: err=%d\n", err); 281893ce44dSCatherine Sullivan err = -ENXIO; 282893ce44dSCatherine Sullivan goto abort_with_ntfy_blocks; 283893ce44dSCatherine Sullivan } 284893ce44dSCatherine Sullivan gve_set_device_resources_ok(priv); 285893ce44dSCatherine Sullivan return 0; 286893ce44dSCatherine Sullivan abort_with_ntfy_blocks: 287893ce44dSCatherine Sullivan gve_free_notify_blocks(priv); 288893ce44dSCatherine Sullivan abort_with_counter: 289893ce44dSCatherine Sullivan gve_free_counter_array(priv); 290893ce44dSCatherine Sullivan return err; 291893ce44dSCatherine Sullivan } 292893ce44dSCatherine Sullivan 2939e5f7d26SCatherine Sullivan static void gve_trigger_reset(struct gve_priv *priv); 2949e5f7d26SCatherine Sullivan 295893ce44dSCatherine Sullivan static void gve_teardown_device_resources(struct gve_priv *priv) 296893ce44dSCatherine Sullivan { 297893ce44dSCatherine Sullivan int err; 298893ce44dSCatherine Sullivan 299893ce44dSCatherine Sullivan /* Tell device its resources are being freed */ 300893ce44dSCatherine Sullivan if (gve_get_device_resources_ok(priv)) { 301893ce44dSCatherine Sullivan err = gve_adminq_deconfigure_device_resources(priv); 302893ce44dSCatherine Sullivan if (err) { 303893ce44dSCatherine Sullivan dev_err(&priv->pdev->dev, 304893ce44dSCatherine Sullivan "Could not deconfigure device resources: err=%d\n", 305893ce44dSCatherine Sullivan err); 3069e5f7d26SCatherine Sullivan gve_trigger_reset(priv); 307893ce44dSCatherine Sullivan } 308893ce44dSCatherine Sullivan } 309893ce44dSCatherine Sullivan gve_free_counter_array(priv); 310893ce44dSCatherine Sullivan gve_free_notify_blocks(priv); 311893ce44dSCatherine Sullivan gve_clear_device_resources_ok(priv); 312893ce44dSCatherine Sullivan } 313893ce44dSCatherine Sullivan 
314f5cedc84SCatherine Sullivan static void gve_add_napi(struct gve_priv *priv, int ntfy_idx) 315f5cedc84SCatherine Sullivan { 316f5cedc84SCatherine Sullivan struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; 317f5cedc84SCatherine Sullivan 318f5cedc84SCatherine Sullivan netif_napi_add(priv->dev, &block->napi, gve_napi_poll, 319f5cedc84SCatherine Sullivan NAPI_POLL_WEIGHT); 320f5cedc84SCatherine Sullivan } 321f5cedc84SCatherine Sullivan 322f5cedc84SCatherine Sullivan static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) 323f5cedc84SCatherine Sullivan { 324f5cedc84SCatherine Sullivan struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; 325f5cedc84SCatherine Sullivan 326f5cedc84SCatherine Sullivan netif_napi_del(&block->napi); 327f5cedc84SCatherine Sullivan } 328f5cedc84SCatherine Sullivan 329f5cedc84SCatherine Sullivan static int gve_register_qpls(struct gve_priv *priv) 330f5cedc84SCatherine Sullivan { 331f5cedc84SCatherine Sullivan int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); 332f5cedc84SCatherine Sullivan int err; 333f5cedc84SCatherine Sullivan int i; 334f5cedc84SCatherine Sullivan 335f5cedc84SCatherine Sullivan for (i = 0; i < num_qpls; i++) { 336f5cedc84SCatherine Sullivan err = gve_adminq_register_page_list(priv, &priv->qpls[i]); 337f5cedc84SCatherine Sullivan if (err) { 338f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev, 339f5cedc84SCatherine Sullivan "failed to register queue page list %d\n", 340f5cedc84SCatherine Sullivan priv->qpls[i].id); 3419e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean 3429e5f7d26SCatherine Sullivan * up 3439e5f7d26SCatherine Sullivan */ 344f5cedc84SCatherine Sullivan return err; 345f5cedc84SCatherine Sullivan } 346f5cedc84SCatherine Sullivan } 347f5cedc84SCatherine Sullivan return 0; 348f5cedc84SCatherine Sullivan } 349f5cedc84SCatherine Sullivan 350f5cedc84SCatherine Sullivan static int gve_unregister_qpls(struct gve_priv *priv) 
351f5cedc84SCatherine Sullivan { 352f5cedc84SCatherine Sullivan int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); 353f5cedc84SCatherine Sullivan int err; 354f5cedc84SCatherine Sullivan int i; 355f5cedc84SCatherine Sullivan 356f5cedc84SCatherine Sullivan for (i = 0; i < num_qpls; i++) { 357f5cedc84SCatherine Sullivan err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); 3589e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean up */ 359f5cedc84SCatherine Sullivan if (err) { 360f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev, 361f5cedc84SCatherine Sullivan "Failed to unregister queue page list %d\n", 362f5cedc84SCatherine Sullivan priv->qpls[i].id); 363f5cedc84SCatherine Sullivan return err; 364f5cedc84SCatherine Sullivan } 365f5cedc84SCatherine Sullivan } 366f5cedc84SCatherine Sullivan return 0; 367f5cedc84SCatherine Sullivan } 368f5cedc84SCatherine Sullivan 369f5cedc84SCatherine Sullivan static int gve_create_rings(struct gve_priv *priv) 370f5cedc84SCatherine Sullivan { 371f5cedc84SCatherine Sullivan int err; 372f5cedc84SCatherine Sullivan int i; 373f5cedc84SCatherine Sullivan 374f5cedc84SCatherine Sullivan for (i = 0; i < priv->tx_cfg.num_queues; i++) { 375f5cedc84SCatherine Sullivan err = gve_adminq_create_tx_queue(priv, i); 376f5cedc84SCatherine Sullivan if (err) { 377f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev, "failed to create tx queue %d\n", 378f5cedc84SCatherine Sullivan i); 3799e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean 3809e5f7d26SCatherine Sullivan * up 3819e5f7d26SCatherine Sullivan */ 382f5cedc84SCatherine Sullivan return err; 383f5cedc84SCatherine Sullivan } 384f5cedc84SCatherine Sullivan netif_dbg(priv, drv, priv->dev, "created tx queue %d\n", i); 385f5cedc84SCatherine Sullivan } 386f5cedc84SCatherine Sullivan for (i = 0; i < priv->rx_cfg.num_queues; i++) { 387f5cedc84SCatherine Sullivan err = gve_adminq_create_rx_queue(priv, i); 
388f5cedc84SCatherine Sullivan if (err) { 389f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev, "failed to create rx queue %d\n", 390f5cedc84SCatherine Sullivan i); 3919e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean 3929e5f7d26SCatherine Sullivan * up 3939e5f7d26SCatherine Sullivan */ 394f5cedc84SCatherine Sullivan return err; 395f5cedc84SCatherine Sullivan } 396f5cedc84SCatherine Sullivan /* Rx data ring has been prefilled with packet buffers at 397f5cedc84SCatherine Sullivan * queue allocation time. 398f5cedc84SCatherine Sullivan * Write the doorbell to provide descriptor slots and packet 399f5cedc84SCatherine Sullivan * buffers to the NIC. 400f5cedc84SCatherine Sullivan */ 401f5cedc84SCatherine Sullivan gve_rx_write_doorbell(priv, &priv->rx[i]); 402f5cedc84SCatherine Sullivan netif_dbg(priv, drv, priv->dev, "created rx queue %d\n", i); 403f5cedc84SCatherine Sullivan } 404f5cedc84SCatherine Sullivan 405f5cedc84SCatherine Sullivan return 0; 406f5cedc84SCatherine Sullivan } 407f5cedc84SCatherine Sullivan 408f5cedc84SCatherine Sullivan static int gve_alloc_rings(struct gve_priv *priv) 409f5cedc84SCatherine Sullivan { 410f5cedc84SCatherine Sullivan int ntfy_idx; 411f5cedc84SCatherine Sullivan int err; 412f5cedc84SCatherine Sullivan int i; 413f5cedc84SCatherine Sullivan 414f5cedc84SCatherine Sullivan /* Setup tx rings */ 415f5cedc84SCatherine Sullivan priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx), 416f5cedc84SCatherine Sullivan GFP_KERNEL); 417f5cedc84SCatherine Sullivan if (!priv->tx) 418f5cedc84SCatherine Sullivan return -ENOMEM; 419f5cedc84SCatherine Sullivan err = gve_tx_alloc_rings(priv); 420f5cedc84SCatherine Sullivan if (err) 421f5cedc84SCatherine Sullivan goto free_tx; 422f5cedc84SCatherine Sullivan /* Setup rx rings */ 423f5cedc84SCatherine Sullivan priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx), 424f5cedc84SCatherine Sullivan GFP_KERNEL); 425f5cedc84SCatherine Sullivan if 
(!priv->rx) { 426f5cedc84SCatherine Sullivan err = -ENOMEM; 427f5cedc84SCatherine Sullivan goto free_tx_queue; 428f5cedc84SCatherine Sullivan } 429f5cedc84SCatherine Sullivan err = gve_rx_alloc_rings(priv); 430f5cedc84SCatherine Sullivan if (err) 431f5cedc84SCatherine Sullivan goto free_rx; 432f5cedc84SCatherine Sullivan /* Add tx napi & init sync stats*/ 433f5cedc84SCatherine Sullivan for (i = 0; i < priv->tx_cfg.num_queues; i++) { 434f5cedc84SCatherine Sullivan u64_stats_init(&priv->tx[i].statss); 435f5cedc84SCatherine Sullivan ntfy_idx = gve_tx_idx_to_ntfy(priv, i); 436f5cedc84SCatherine Sullivan gve_add_napi(priv, ntfy_idx); 437f5cedc84SCatherine Sullivan } 438f5cedc84SCatherine Sullivan /* Add rx napi & init sync stats*/ 439f5cedc84SCatherine Sullivan for (i = 0; i < priv->rx_cfg.num_queues; i++) { 440f5cedc84SCatherine Sullivan u64_stats_init(&priv->rx[i].statss); 441f5cedc84SCatherine Sullivan ntfy_idx = gve_rx_idx_to_ntfy(priv, i); 442f5cedc84SCatherine Sullivan gve_add_napi(priv, ntfy_idx); 443f5cedc84SCatherine Sullivan } 444f5cedc84SCatherine Sullivan 445f5cedc84SCatherine Sullivan return 0; 446f5cedc84SCatherine Sullivan 447f5cedc84SCatherine Sullivan free_rx: 448f5cedc84SCatherine Sullivan kfree(priv->rx); 449f5cedc84SCatherine Sullivan priv->rx = NULL; 450f5cedc84SCatherine Sullivan free_tx_queue: 451f5cedc84SCatherine Sullivan gve_tx_free_rings(priv); 452f5cedc84SCatherine Sullivan free_tx: 453f5cedc84SCatherine Sullivan kfree(priv->tx); 454f5cedc84SCatherine Sullivan priv->tx = NULL; 455f5cedc84SCatherine Sullivan return err; 456f5cedc84SCatherine Sullivan } 457f5cedc84SCatherine Sullivan 458f5cedc84SCatherine Sullivan static int gve_destroy_rings(struct gve_priv *priv) 459f5cedc84SCatherine Sullivan { 460f5cedc84SCatherine Sullivan int err; 461f5cedc84SCatherine Sullivan int i; 462f5cedc84SCatherine Sullivan 463f5cedc84SCatherine Sullivan for (i = 0; i < priv->tx_cfg.num_queues; i++) { 464f5cedc84SCatherine Sullivan err = 
gve_adminq_destroy_tx_queue(priv, i); 465f5cedc84SCatherine Sullivan if (err) { 466f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev, 467f5cedc84SCatherine Sullivan "failed to destroy tx queue %d\n", 468f5cedc84SCatherine Sullivan i); 4699e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean 4709e5f7d26SCatherine Sullivan * up 4719e5f7d26SCatherine Sullivan */ 472f5cedc84SCatherine Sullivan return err; 473f5cedc84SCatherine Sullivan } 474f5cedc84SCatherine Sullivan netif_dbg(priv, drv, priv->dev, "destroyed tx queue %d\n", i); 475f5cedc84SCatherine Sullivan } 476f5cedc84SCatherine Sullivan for (i = 0; i < priv->rx_cfg.num_queues; i++) { 477f5cedc84SCatherine Sullivan err = gve_adminq_destroy_rx_queue(priv, i); 478f5cedc84SCatherine Sullivan if (err) { 479f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev, 480f5cedc84SCatherine Sullivan "failed to destroy rx queue %d\n", 481f5cedc84SCatherine Sullivan i); 4829e5f7d26SCatherine Sullivan /* This failure will trigger a reset - no need to clean 4839e5f7d26SCatherine Sullivan * up 4849e5f7d26SCatherine Sullivan */ 485f5cedc84SCatherine Sullivan return err; 486f5cedc84SCatherine Sullivan } 487f5cedc84SCatherine Sullivan netif_dbg(priv, drv, priv->dev, "destroyed rx queue %d\n", i); 488f5cedc84SCatherine Sullivan } 489f5cedc84SCatherine Sullivan return 0; 490f5cedc84SCatherine Sullivan } 491f5cedc84SCatherine Sullivan 492f5cedc84SCatherine Sullivan static void gve_free_rings(struct gve_priv *priv) 493f5cedc84SCatherine Sullivan { 494f5cedc84SCatherine Sullivan int ntfy_idx; 495f5cedc84SCatherine Sullivan int i; 496f5cedc84SCatherine Sullivan 497f5cedc84SCatherine Sullivan if (priv->tx) { 498f5cedc84SCatherine Sullivan for (i = 0; i < priv->tx_cfg.num_queues; i++) { 499f5cedc84SCatherine Sullivan ntfy_idx = gve_tx_idx_to_ntfy(priv, i); 500f5cedc84SCatherine Sullivan gve_remove_napi(priv, ntfy_idx); 501f5cedc84SCatherine Sullivan } 502f5cedc84SCatherine Sullivan 
gve_tx_free_rings(priv); 503f5cedc84SCatherine Sullivan kfree(priv->tx); 504f5cedc84SCatherine Sullivan priv->tx = NULL; 505f5cedc84SCatherine Sullivan } 506f5cedc84SCatherine Sullivan if (priv->rx) { 507f5cedc84SCatherine Sullivan for (i = 0; i < priv->rx_cfg.num_queues; i++) { 508f5cedc84SCatherine Sullivan ntfy_idx = gve_rx_idx_to_ntfy(priv, i); 509f5cedc84SCatherine Sullivan gve_remove_napi(priv, ntfy_idx); 510f5cedc84SCatherine Sullivan } 511f5cedc84SCatherine Sullivan gve_rx_free_rings(priv); 512f5cedc84SCatherine Sullivan kfree(priv->rx); 513f5cedc84SCatherine Sullivan priv->rx = NULL; 514f5cedc84SCatherine Sullivan } 515f5cedc84SCatherine Sullivan } 516f5cedc84SCatherine Sullivan 517f5cedc84SCatherine Sullivan int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma, 518f5cedc84SCatherine Sullivan enum dma_data_direction dir) 519f5cedc84SCatherine Sullivan { 520f5cedc84SCatherine Sullivan *page = alloc_page(GFP_KERNEL); 521a51df9f8SColin Ian King if (!*page) 522f5cedc84SCatherine Sullivan return -ENOMEM; 523f5cedc84SCatherine Sullivan *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir); 524f5cedc84SCatherine Sullivan if (dma_mapping_error(dev, *dma)) { 525f5cedc84SCatherine Sullivan put_page(*page); 526f5cedc84SCatherine Sullivan return -ENOMEM; 527f5cedc84SCatherine Sullivan } 528f5cedc84SCatherine Sullivan return 0; 529f5cedc84SCatherine Sullivan } 530f5cedc84SCatherine Sullivan 531f5cedc84SCatherine Sullivan static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, 532f5cedc84SCatherine Sullivan int pages) 533f5cedc84SCatherine Sullivan { 534f5cedc84SCatherine Sullivan struct gve_queue_page_list *qpl = &priv->qpls[id]; 535f5cedc84SCatherine Sullivan int err; 536f5cedc84SCatherine Sullivan int i; 537f5cedc84SCatherine Sullivan 538f5cedc84SCatherine Sullivan if (pages + priv->num_registered_pages > priv->max_registered_pages) { 539f5cedc84SCatherine Sullivan netif_err(priv, drv, priv->dev, 540f5cedc84SCatherine Sullivan "Reached 
max number of registered pages %llu > %llu\n", 541f5cedc84SCatherine Sullivan pages + priv->num_registered_pages, 542f5cedc84SCatherine Sullivan priv->max_registered_pages); 543f5cedc84SCatherine Sullivan return -EINVAL; 544f5cedc84SCatherine Sullivan } 545f5cedc84SCatherine Sullivan 546f5cedc84SCatherine Sullivan qpl->id = id; 547f5cedc84SCatherine Sullivan qpl->num_entries = pages; 548f5cedc84SCatherine Sullivan qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL); 549f5cedc84SCatherine Sullivan /* caller handles clean up */ 550f5cedc84SCatherine Sullivan if (!qpl->pages) 551f5cedc84SCatherine Sullivan return -ENOMEM; 552f5cedc84SCatherine Sullivan qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses), 553f5cedc84SCatherine Sullivan GFP_KERNEL); 554f5cedc84SCatherine Sullivan /* caller handles clean up */ 555f5cedc84SCatherine Sullivan if (!qpl->page_buses) 556f5cedc84SCatherine Sullivan return -ENOMEM; 557f5cedc84SCatherine Sullivan 558f5cedc84SCatherine Sullivan for (i = 0; i < pages; i++) { 559f5cedc84SCatherine Sullivan err = gve_alloc_page(&priv->pdev->dev, &qpl->pages[i], 560f5cedc84SCatherine Sullivan &qpl->page_buses[i], 561f5cedc84SCatherine Sullivan gve_qpl_dma_dir(priv, id)); 562f5cedc84SCatherine Sullivan /* caller handles clean up */ 563f5cedc84SCatherine Sullivan if (err) 564f5cedc84SCatherine Sullivan return -ENOMEM; 565f5cedc84SCatherine Sullivan } 566f5cedc84SCatherine Sullivan priv->num_registered_pages += pages; 567f5cedc84SCatherine Sullivan 568f5cedc84SCatherine Sullivan return 0; 569f5cedc84SCatherine Sullivan } 570f5cedc84SCatherine Sullivan 571f5cedc84SCatherine Sullivan void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, 572f5cedc84SCatherine Sullivan enum dma_data_direction dir) 573f5cedc84SCatherine Sullivan { 574f5cedc84SCatherine Sullivan if (!dma_mapping_error(dev, dma)) 575f5cedc84SCatherine Sullivan dma_unmap_page(dev, dma, PAGE_SIZE, dir); 576f5cedc84SCatherine Sullivan if (page) 
577f5cedc84SCatherine Sullivan put_page(page); 578f5cedc84SCatherine Sullivan } 579f5cedc84SCatherine Sullivan 580f5cedc84SCatherine Sullivan static void gve_free_queue_page_list(struct gve_priv *priv, 581f5cedc84SCatherine Sullivan int id) 582f5cedc84SCatherine Sullivan { 583f5cedc84SCatherine Sullivan struct gve_queue_page_list *qpl = &priv->qpls[id]; 584f5cedc84SCatherine Sullivan int i; 585f5cedc84SCatherine Sullivan 586f5cedc84SCatherine Sullivan if (!qpl->pages) 587f5cedc84SCatherine Sullivan return; 588f5cedc84SCatherine Sullivan if (!qpl->page_buses) 589f5cedc84SCatherine Sullivan goto free_pages; 590f5cedc84SCatherine Sullivan 591f5cedc84SCatherine Sullivan for (i = 0; i < qpl->num_entries; i++) 592f5cedc84SCatherine Sullivan gve_free_page(&priv->pdev->dev, qpl->pages[i], 593f5cedc84SCatherine Sullivan qpl->page_buses[i], gve_qpl_dma_dir(priv, id)); 594f5cedc84SCatherine Sullivan 595f5cedc84SCatherine Sullivan kfree(qpl->page_buses); 596f5cedc84SCatherine Sullivan free_pages: 597f5cedc84SCatherine Sullivan kfree(qpl->pages); 598f5cedc84SCatherine Sullivan priv->num_registered_pages -= qpl->num_entries; 599f5cedc84SCatherine Sullivan } 600f5cedc84SCatherine Sullivan 601f5cedc84SCatherine Sullivan static int gve_alloc_qpls(struct gve_priv *priv) 602f5cedc84SCatherine Sullivan { 603f5cedc84SCatherine Sullivan int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); 604f5cedc84SCatherine Sullivan int i, j; 605f5cedc84SCatherine Sullivan int err; 606f5cedc84SCatherine Sullivan 607f5cedc84SCatherine Sullivan priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL); 608f5cedc84SCatherine Sullivan if (!priv->qpls) 609f5cedc84SCatherine Sullivan return -ENOMEM; 610f5cedc84SCatherine Sullivan 611f5cedc84SCatherine Sullivan for (i = 0; i < gve_num_tx_qpls(priv); i++) { 612f5cedc84SCatherine Sullivan err = gve_alloc_queue_page_list(priv, i, 613f5cedc84SCatherine Sullivan priv->tx_pages_per_qpl); 614f5cedc84SCatherine Sullivan if (err) 
615f5cedc84SCatherine Sullivan goto free_qpls; 616f5cedc84SCatherine Sullivan } 617f5cedc84SCatherine Sullivan for (; i < num_qpls; i++) { 618f5cedc84SCatherine Sullivan err = gve_alloc_queue_page_list(priv, i, 619f5cedc84SCatherine Sullivan priv->rx_pages_per_qpl); 620f5cedc84SCatherine Sullivan if (err) 621f5cedc84SCatherine Sullivan goto free_qpls; 622f5cedc84SCatherine Sullivan } 623f5cedc84SCatherine Sullivan 624f5cedc84SCatherine Sullivan priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) * 625f5cedc84SCatherine Sullivan sizeof(unsigned long) * BITS_PER_BYTE; 626f5cedc84SCatherine Sullivan priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) * 627f5cedc84SCatherine Sullivan sizeof(unsigned long), GFP_KERNEL); 628*877cb240SWei Yongjun if (!priv->qpl_cfg.qpl_id_map) { 629*877cb240SWei Yongjun err = -ENOMEM; 630f5cedc84SCatherine Sullivan goto free_qpls; 631*877cb240SWei Yongjun } 632f5cedc84SCatherine Sullivan 633f5cedc84SCatherine Sullivan return 0; 634f5cedc84SCatherine Sullivan 635f5cedc84SCatherine Sullivan free_qpls: 636f5cedc84SCatherine Sullivan for (j = 0; j <= i; j++) 637f5cedc84SCatherine Sullivan gve_free_queue_page_list(priv, j); 638f5cedc84SCatherine Sullivan kfree(priv->qpls); 639f5cedc84SCatherine Sullivan return err; 640f5cedc84SCatherine Sullivan } 641f5cedc84SCatherine Sullivan 642f5cedc84SCatherine Sullivan static void gve_free_qpls(struct gve_priv *priv) 643f5cedc84SCatherine Sullivan { 644f5cedc84SCatherine Sullivan int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); 645f5cedc84SCatherine Sullivan int i; 646f5cedc84SCatherine Sullivan 647f5cedc84SCatherine Sullivan kfree(priv->qpl_cfg.qpl_id_map); 648f5cedc84SCatherine Sullivan 649f5cedc84SCatherine Sullivan for (i = 0; i < num_qpls; i++) 650f5cedc84SCatherine Sullivan gve_free_queue_page_list(priv, i); 651f5cedc84SCatherine Sullivan 652f5cedc84SCatherine Sullivan kfree(priv->qpls); 653f5cedc84SCatherine Sullivan } 654f5cedc84SCatherine Sullivan 6559e5f7d26SCatherine 
/* Use this to schedule a reset when the device is capable of continuing
 * to handle other requests in its current state. If it is not, do a reset
 * in thread instead.
 */
void gve_schedule_reset(struct gve_priv *priv)
{
	gve_set_do_reset(priv);
	queue_work(priv->gve_wq, &priv->service_task);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);

/* ndo_open: allocate QPLs and rings, register them with the device, then
 * enable NAPI/queues. On device-side failure the function resets the NIC
 * itself unless it is already running inside a reset (rtnl held).
 */
static int gve_open(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	err = gve_alloc_qpls(priv);
	if (err)
		return err;
	err = gve_alloc_rings(priv);
	if (err)
		goto free_qpls;

	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
	if (err)
		goto free_rings;
	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
	if (err)
		goto free_rings;

	/* From here on failures mean the device may hold references to our
	 * resources, so plain freeing is not enough -> goto reset.
	 */
	err = gve_register_qpls(priv);
	if (err)
		goto reset;
	err = gve_create_rings(priv);
	if (err)
		goto reset;
	gve_set_device_rings_ok(priv);

	gve_turnup(priv);
	netif_carrier_on(dev);
	return 0;

free_rings:
	gve_free_rings(priv);
free_qpls:
	gve_free_qpls(priv);
	return err;

reset:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	/* if this fails there is nothing we can do so just ignore the return */
	gve_reset_recovery(priv, false);
	/* return the original error */
	return err;
}

/* ndo_stop: tear down device rings/QPLs then free host resources; on a
 * device-side failure falls back to a full reset (unless already in one).
 */
static int gve_close(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);
	if (gve_get_device_rings_ok(priv)) {
		gve_turndown(priv);
		err = gve_destroy_rings(priv);
		if (err)
			goto err;
		err = gve_unregister_qpls(priv);
		if (err)
			goto err;
		gve_clear_device_rings_ok(priv);
	}

	gve_free_rings(priv);
	gve_free_qpls(priv);
	return 0;

err:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	return gve_reset_recovery(priv, false);
}

/* Apply a new TX/RX queue configuration by closing and reopening the
 * device (or just storing it if the device is down).
 */
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config)
{
	int err;

	if (netif_carrier_ok(priv->dev)) {
		/* To make this process as simple as possible we teardown the
		 * device, set the new configuration, and then bring the device
		 * up again.
		 */
		err = gve_close(priv->dev);
		/* we have already tried to reset in close,
		 * just fail at this point
		 */
		if (err)
			return err;
		priv->tx_cfg = new_tx_config;
		priv->rx_cfg = new_rx_config;

		err = gve_open(priv->dev);
		if (err)
			goto err;

		return 0;
	}
	/* Set the config for the next up. */
	priv->tx_cfg = new_tx_config;
	priv->rx_cfg = new_rx_config;

	return 0;
err:
	netif_err(priv, drv, priv->dev,
		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}

/* Quiesce the data path: carrier off, NAPI disabled, TX queues stopped. */
static void gve_turndown(struct gve_priv *priv)
{
	int idx;

	if (netif_carrier_ok(priv->dev))
		netif_carrier_off(priv->dev);

	if (!gve_get_napi_enabled(priv))
		return;

	/* Disable napi to prevent more work from coming in */
	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}

	/* Stop tx queues */
	netif_tx_disable(priv->dev);

	gve_clear_napi_enabled(priv);
}

/* Re-enable the data path: start TX queues, enable NAPI and unmask the
 * per-block interrupts by writing 0 to each doorbell.
 */
static void gve_turnup(struct gve_priv *priv)
{
	int idx;

	/* Start the tx queues */
	netif_tx_start_all_queues(priv->dev);

	/* Enable napi and unmask interrupts for all queues */
	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		iowrite32be(0, gve_irq_doorbell(priv, block));
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		iowrite32be(0, gve_irq_doorbell(priv, block));
	}

	gve_set_napi_enabled(priv);
}

/* ndo_tx_timeout: the stack detected a stuck TX queue; schedule a reset. */
static void gve_tx_timeout(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);

	gve_schedule_reset(priv);
	priv->tx_timeo_cnt++;
}

static const struct net_device_ops gve_netdev_ops = {
	.ndo_start_xmit		=	gve_tx,
	.ndo_open		=	gve_open,
	.ndo_stop		=	gve_close,
	.ndo_get_stats64	=	gve_get_stats,
	.ndo_tx_timeout		=	gve_tx_timeout,
};

/* Latch a reset request if the device status register asks for one. */
static void gve_handle_status(struct gve_priv *priv, u32 status)
{
	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
		dev_info(&priv->pdev->dev, "Device requested reset.\n");
		gve_set_do_reset(priv);
	}
}

static void gve_handle_reset(struct gve_priv *priv)
{
	/* A service task will be scheduled at the end of probe to catch any
	 * resets that need to happen, and we don't want to reset until
	 * probe is done.
	 */
	if (gve_get_probe_in_progress(priv))
		return;

	if (gve_get_do_reset(priv)) {
		rtnl_lock();
		gve_reset(priv, false);
		rtnl_unlock();
	}
}

/* Handle NIC status register changes and reset requests */
static void gve_service_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     service_task);

	gve_handle_status(priv,
			  ioread32be(&priv->reg_bar0->device_status));

	gve_handle_reset(priv);
}

/* Bring up the admin queue and (unless skip_describe_device) query device
 * capabilities, clamp MTU, count MSI-X vectors and size the queue configs,
 * then set up device resources. On any failure the admin queue is freed.
 */
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	int num_ntfy;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(&priv->pdev->dev, priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Failed to alloc admin queue: err=%d\n", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Could not get device information: err=%d\n", err);
		goto err;
	}
	if (priv->dev->max_mtu > PAGE_SIZE) {
		priv->dev->max_mtu = PAGE_SIZE;
		err = gve_adminq_set_mtu(priv, priv->dev->mtu);
		if (err) {
			netif_err(priv, drv, priv->dev, "Could not set mtu");
			goto err;
		}
	}
	priv->dev->mtu = priv->dev->max_mtu;
	num_ntfy = pci_msix_vec_count(priv->pdev);
	if (num_ntfy <= 0) {
		dev_err(&priv->pdev->dev,
			"could not count MSI-x vectors: err=%d\n", num_ntfy);
		err = num_ntfy;
		goto err;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
			GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto err;
	}

	priv->num_registered_pages = 0;
	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	/* Split the notification blocks evenly between TX and RX. */
	priv->tx_cfg.max_queues =
		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
	priv->rx_cfg.max_queues =
		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);

	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->rx_cfg.num_queues);
	}

	netif_info(priv, drv, priv->dev, "TX queues %d, RX queues %d\n",
		   priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
	netif_info(priv, drv, priv->dev, "Max TX queues %d, Max RX queues %d\n",
		   priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err:
	gve_adminq_free(&priv->pdev->dev, priv);
	return err;
}

static void gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(&priv->pdev->dev, priv);
}

static void gve_trigger_reset(struct gve_priv *priv)
{
	/* Reset the device by releasing the AQ */
	gve_adminq_release(priv);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
{
	gve_trigger_reset(priv);
	/* With the reset having already happened, close cannot fail */
	if (was_up)
		gve_close(priv->dev);
	gve_teardown_priv_resources(priv);
}

/* Re-initialize after a teardown and, if the interface was up, reopen it.
 * On failure all queues are turned down and the device is left disabled.
 */
static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
{
	int err;

	err = gve_init_priv(priv, true);
	if (err)
		goto err;
	if (was_up) {
		err = gve_open(priv->dev);
		if (err)
			goto err;
	}
	return 0;
err:
	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}

/* Full device reset. Caller holds rtnl (see gve_handle_reset). With
 * attempt_teardown the driver first tries an orderly close; otherwise it
 * goes straight to turndown + hard reset.
 */
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
	bool was_up = netif_carrier_ok(priv->dev);
	int err;

	dev_info(&priv->pdev->dev, "Performing reset\n");
	gve_clear_do_reset(priv);
	gve_set_reset_in_progress(priv);
	/* If we aren't attempting to teardown normally, just go turndown and
	 * reset right away.
	 */
	if (!attempt_teardown) {
		gve_turndown(priv);
		gve_reset_and_teardown(priv, was_up);
	} else {
		/* Otherwise attempt to close normally */
		if (was_up) {
			err = gve_close(priv->dev);
			/* If that fails reset as we did above */
			if (err)
				gve_reset_and_teardown(priv, was_up);
		}
		/* Clean up any remaining resources */
		gve_teardown_priv_resources(priv);
	}

	/* Set it all back up */
	err = gve_reset_recovery(priv, was_up);
	gve_clear_reset_in_progress(priv);
	return err;
}

/* Write "GVE-<version>\n" one byte at a time to the device's driver
 * version register.
 */
static void gve_write_version(u8 __iomem *driver_version_register)
{
	const char *c = gve_version_prefix;

	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}

	c = gve_version_str;
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}

1069893ce44dSCatherine Sullivan static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1070893ce44dSCatherine Sullivan { 1071893ce44dSCatherine Sullivan int max_tx_queues, max_rx_queues; 1072893ce44dSCatherine Sullivan struct net_device *dev; 1073893ce44dSCatherine Sullivan __be32 __iomem *db_bar; 1074893ce44dSCatherine Sullivan struct gve_registers __iomem *reg_bar; 1075893ce44dSCatherine Sullivan struct gve_priv *priv; 1076893ce44dSCatherine Sullivan int err; 1077893ce44dSCatherine Sullivan 1078893ce44dSCatherine Sullivan err = pci_enable_device(pdev); 1079893ce44dSCatherine Sullivan if (err) 1080893ce44dSCatherine Sullivan return -ENXIO; 1081893ce44dSCatherine Sullivan 1082893ce44dSCatherine Sullivan err = pci_request_regions(pdev, "gvnic-cfg"); 1083893ce44dSCatherine Sullivan if (err) 1084893ce44dSCatherine Sullivan goto abort_with_enabled; 1085893ce44dSCatherine Sullivan 1086893ce44dSCatherine Sullivan pci_set_master(pdev); 1087893ce44dSCatherine Sullivan 1088893ce44dSCatherine Sullivan err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1089893ce44dSCatherine Sullivan if (err) { 1090893ce44dSCatherine Sullivan dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err); 1091893ce44dSCatherine Sullivan goto abort_with_pci_region; 1092893ce44dSCatherine Sullivan } 1093893ce44dSCatherine Sullivan 1094893ce44dSCatherine Sullivan err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1095893ce44dSCatherine Sullivan if (err) { 1096893ce44dSCatherine Sullivan dev_err(&pdev->dev, 1097893ce44dSCatherine Sullivan "Failed to set consistent dma mask: err=%d\n", err); 1098893ce44dSCatherine Sullivan goto abort_with_pci_region; 1099893ce44dSCatherine Sullivan } 1100893ce44dSCatherine Sullivan 1101893ce44dSCatherine Sullivan reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0); 1102893ce44dSCatherine Sullivan if (!reg_bar) { 1103f5cedc84SCatherine Sullivan dev_err(&pdev->dev, "Failed to map pci bar!\n"); 1104893ce44dSCatherine Sullivan err = -ENOMEM; 
1105893ce44dSCatherine Sullivan goto abort_with_pci_region; 1106893ce44dSCatherine Sullivan } 1107893ce44dSCatherine Sullivan 1108893ce44dSCatherine Sullivan db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0); 1109893ce44dSCatherine Sullivan if (!db_bar) { 1110893ce44dSCatherine Sullivan dev_err(&pdev->dev, "Failed to map doorbell bar!\n"); 1111893ce44dSCatherine Sullivan err = -ENOMEM; 1112893ce44dSCatherine Sullivan goto abort_with_reg_bar; 1113893ce44dSCatherine Sullivan } 1114893ce44dSCatherine Sullivan 1115893ce44dSCatherine Sullivan gve_write_version(®_bar->driver_version); 1116893ce44dSCatherine Sullivan /* Get max queues to alloc etherdev */ 1117893ce44dSCatherine Sullivan max_rx_queues = ioread32be(®_bar->max_tx_queues); 1118893ce44dSCatherine Sullivan max_tx_queues = ioread32be(®_bar->max_rx_queues); 1119893ce44dSCatherine Sullivan /* Alloc and setup the netdev and priv */ 1120893ce44dSCatherine Sullivan dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); 1121893ce44dSCatherine Sullivan if (!dev) { 1122893ce44dSCatherine Sullivan dev_err(&pdev->dev, "could not allocate netdev\n"); 1123893ce44dSCatherine Sullivan goto abort_with_db_bar; 1124893ce44dSCatherine Sullivan } 1125893ce44dSCatherine Sullivan SET_NETDEV_DEV(dev, &pdev->dev); 1126893ce44dSCatherine Sullivan pci_set_drvdata(pdev, dev); 1127e5b845dcSCatherine Sullivan dev->ethtool_ops = &gve_ethtool_ops; 1128f5cedc84SCatherine Sullivan dev->netdev_ops = &gve_netdev_ops; 1129893ce44dSCatherine Sullivan /* advertise features */ 1130893ce44dSCatherine Sullivan dev->hw_features = NETIF_F_HIGHDMA; 1131893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_SG; 1132893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_HW_CSUM; 1133893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_TSO; 1134893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_TSO6; 1135893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_TSO_ECN; 1136893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_RXCSUM; 
1137893ce44dSCatherine Sullivan dev->hw_features |= NETIF_F_RXHASH; 1138893ce44dSCatherine Sullivan dev->features = dev->hw_features; 1139f5cedc84SCatherine Sullivan dev->watchdog_timeo = 5 * HZ; 1140893ce44dSCatherine Sullivan dev->min_mtu = ETH_MIN_MTU; 1141893ce44dSCatherine Sullivan netif_carrier_off(dev); 1142893ce44dSCatherine Sullivan 1143893ce44dSCatherine Sullivan priv = netdev_priv(dev); 1144893ce44dSCatherine Sullivan priv->dev = dev; 1145893ce44dSCatherine Sullivan priv->pdev = pdev; 1146893ce44dSCatherine Sullivan priv->msg_enable = DEFAULT_MSG_LEVEL; 1147893ce44dSCatherine Sullivan priv->reg_bar0 = reg_bar; 1148893ce44dSCatherine Sullivan priv->db_bar2 = db_bar; 11499e5f7d26SCatherine Sullivan priv->service_task_flags = 0x0; 1150893ce44dSCatherine Sullivan priv->state_flags = 0x0; 11519e5f7d26SCatherine Sullivan 11529e5f7d26SCatherine Sullivan gve_set_probe_in_progress(priv); 11539e5f7d26SCatherine Sullivan priv->gve_wq = alloc_ordered_workqueue("gve", 0); 11549e5f7d26SCatherine Sullivan if (!priv->gve_wq) { 11559e5f7d26SCatherine Sullivan dev_err(&pdev->dev, "Could not allocate workqueue"); 11569e5f7d26SCatherine Sullivan err = -ENOMEM; 11579e5f7d26SCatherine Sullivan goto abort_with_netdev; 11589e5f7d26SCatherine Sullivan } 11599e5f7d26SCatherine Sullivan INIT_WORK(&priv->service_task, gve_service_task); 1160f5cedc84SCatherine Sullivan priv->tx_cfg.max_queues = max_tx_queues; 1161f5cedc84SCatherine Sullivan priv->rx_cfg.max_queues = max_rx_queues; 1162893ce44dSCatherine Sullivan 1163893ce44dSCatherine Sullivan err = gve_init_priv(priv, false); 1164893ce44dSCatherine Sullivan if (err) 11659e5f7d26SCatherine Sullivan goto abort_with_wq; 1166893ce44dSCatherine Sullivan 1167893ce44dSCatherine Sullivan err = register_netdev(dev); 1168893ce44dSCatherine Sullivan if (err) 11699e5f7d26SCatherine Sullivan goto abort_with_wq; 1170893ce44dSCatherine Sullivan 1171893ce44dSCatherine Sullivan dev_info(&pdev->dev, "GVE version %s\n", gve_version_str); 
11729e5f7d26SCatherine Sullivan gve_clear_probe_in_progress(priv); 11739e5f7d26SCatherine Sullivan queue_work(priv->gve_wq, &priv->service_task); 1174893ce44dSCatherine Sullivan return 0; 1175893ce44dSCatherine Sullivan 11769e5f7d26SCatherine Sullivan abort_with_wq: 11779e5f7d26SCatherine Sullivan destroy_workqueue(priv->gve_wq); 11789e5f7d26SCatherine Sullivan 1179893ce44dSCatherine Sullivan abort_with_netdev: 1180893ce44dSCatherine Sullivan free_netdev(dev); 1181893ce44dSCatherine Sullivan 1182893ce44dSCatherine Sullivan abort_with_db_bar: 1183893ce44dSCatherine Sullivan pci_iounmap(pdev, db_bar); 1184893ce44dSCatherine Sullivan 1185893ce44dSCatherine Sullivan abort_with_reg_bar: 1186893ce44dSCatherine Sullivan pci_iounmap(pdev, reg_bar); 1187893ce44dSCatherine Sullivan 1188893ce44dSCatherine Sullivan abort_with_pci_region: 1189893ce44dSCatherine Sullivan pci_release_regions(pdev); 1190893ce44dSCatherine Sullivan 1191893ce44dSCatherine Sullivan abort_with_enabled: 1192893ce44dSCatherine Sullivan pci_disable_device(pdev); 1193893ce44dSCatherine Sullivan return -ENXIO; 1194893ce44dSCatherine Sullivan } 1195893ce44dSCatherine Sullivan EXPORT_SYMBOL(gve_probe); 1196893ce44dSCatherine Sullivan 1197893ce44dSCatherine Sullivan static void gve_remove(struct pci_dev *pdev) 1198893ce44dSCatherine Sullivan { 1199893ce44dSCatherine Sullivan struct net_device *netdev = pci_get_drvdata(pdev); 1200893ce44dSCatherine Sullivan struct gve_priv *priv = netdev_priv(netdev); 1201893ce44dSCatherine Sullivan __be32 __iomem *db_bar = priv->db_bar2; 1202893ce44dSCatherine Sullivan void __iomem *reg_bar = priv->reg_bar0; 1203893ce44dSCatherine Sullivan 1204893ce44dSCatherine Sullivan unregister_netdev(netdev); 1205893ce44dSCatherine Sullivan gve_teardown_priv_resources(priv); 12069e5f7d26SCatherine Sullivan destroy_workqueue(priv->gve_wq); 1207893ce44dSCatherine Sullivan free_netdev(netdev); 1208893ce44dSCatherine Sullivan pci_iounmap(pdev, db_bar); 1209893ce44dSCatherine Sullivan 
pci_iounmap(pdev, reg_bar); 1210893ce44dSCatherine Sullivan pci_release_regions(pdev); 1211893ce44dSCatherine Sullivan pci_disable_device(pdev); 1212893ce44dSCatherine Sullivan } 1213893ce44dSCatherine Sullivan 1214893ce44dSCatherine Sullivan static const struct pci_device_id gve_id_table[] = { 1215893ce44dSCatherine Sullivan { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) }, 1216893ce44dSCatherine Sullivan { } 1217893ce44dSCatherine Sullivan }; 1218893ce44dSCatherine Sullivan 1219893ce44dSCatherine Sullivan static struct pci_driver gvnic_driver = { 1220893ce44dSCatherine Sullivan .name = "gvnic", 1221893ce44dSCatherine Sullivan .id_table = gve_id_table, 1222893ce44dSCatherine Sullivan .probe = gve_probe, 1223893ce44dSCatherine Sullivan .remove = gve_remove, 1224893ce44dSCatherine Sullivan }; 1225893ce44dSCatherine Sullivan 1226893ce44dSCatherine Sullivan module_pci_driver(gvnic_driver); 1227893ce44dSCatherine Sullivan 1228893ce44dSCatherine Sullivan MODULE_DEVICE_TABLE(pci, gve_id_table); 1229893ce44dSCatherine Sullivan MODULE_AUTHOR("Google, Inc."); 1230893ce44dSCatherine Sullivan MODULE_DESCRIPTION("gVNIC Driver"); 1231893ce44dSCatherine Sullivan MODULE_LICENSE("Dual MIT/GPL"); 1232893ce44dSCatherine Sullivan MODULE_VERSION(GVE_VERSION); 1233