Lines Matching refs:priv

42 static int gve_verify_driver_compatibility(struct gve_priv *priv)  in gve_verify_driver_compatibility()  argument
48 driver_info = dma_alloc_coherent(&priv->pdev->dev, in gve_verify_driver_compatibility()
71 err = gve_adminq_verify_driver_compatibility(priv, in gve_verify_driver_compatibility()
79 dma_free_coherent(&priv->pdev->dev, in gve_verify_driver_compatibility()
89 struct gve_priv *priv = netdev_priv(dev); in gve_features_check() local
91 if (!gve_is_gqi(priv)) in gve_features_check()
99 struct gve_priv *priv = netdev_priv(dev); in gve_start_xmit() local
101 if (gve_is_gqi(priv)) in gve_start_xmit()
109 struct gve_priv *priv = netdev_priv(dev); in gve_get_stats() local
115 num_tx_queues = gve_num_tx_queues(priv); in gve_get_stats()
116 if (priv->rx) { in gve_get_stats()
117 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { in gve_get_stats()
120 u64_stats_fetch_begin(&priv->rx[ring].statss); in gve_get_stats()
121 packets = priv->rx[ring].rpackets; in gve_get_stats()
122 bytes = priv->rx[ring].rbytes; in gve_get_stats()
123 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, in gve_get_stats()
129 if (priv->tx) { in gve_get_stats()
133 u64_stats_fetch_begin(&priv->tx[ring].statss); in gve_get_stats()
134 packets = priv->tx[ring].pkt_done; in gve_get_stats()
135 bytes = priv->tx[ring].bytes_done; in gve_get_stats()
136 } while (u64_stats_fetch_retry(&priv->tx[ring].statss, in gve_get_stats()
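The do/while loops above in gve_get_stats() follow the kernel's u64_stats seqcount pattern: the reader retries until the writer-side sequence is stable, so the packet and byte counters are read as a consistent pair. A minimal illustrative sketch of both sides, using a hypothetical example_ring rather than the gve rx/tx rings:

#include <linux/u64_stats_sync.h>

struct example_ring {
    struct u64_stats_sync statss;   /* protects packets/bytes */
    u64 packets;
    u64 bytes;
};

/* Writer side (datapath): bracket the updates with update_begin/end. */
static void example_ring_account(struct example_ring *ring, unsigned int len)
{
    u64_stats_update_begin(&ring->statss);
    ring->packets++;
    ring->bytes += len;
    u64_stats_update_end(&ring->statss);
}

/* Reader side (e.g. ndo_get_stats64): retry until the sequence is stable. */
static void example_ring_read(const struct example_ring *ring,
                              u64 *packets, u64 *bytes)
{
    unsigned int start;

    do {
        start = u64_stats_fetch_begin(&ring->statss);
        *packets = ring->packets;
        *bytes = ring->bytes;
    } while (u64_stats_fetch_retry(&ring->statss, start));
}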
144 static int gve_alloc_flow_rule_caches(struct gve_priv *priv) in gve_alloc_flow_rule_caches() argument
146 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; in gve_alloc_flow_rule_caches()
149 if (!priv->max_flow_rules) in gve_alloc_flow_rule_caches()
156 dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n"); in gve_alloc_flow_rule_caches()
164 dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n"); in gve_alloc_flow_rule_caches()
177 static void gve_free_flow_rule_caches(struct gve_priv *priv) in gve_free_flow_rule_caches() argument
179 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; in gve_free_flow_rule_caches()
187 static int gve_alloc_counter_array(struct gve_priv *priv) in gve_alloc_counter_array() argument
189 priv->counter_array = in gve_alloc_counter_array()
190 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_counter_array()
191 priv->num_event_counters * in gve_alloc_counter_array()
192 sizeof(*priv->counter_array), in gve_alloc_counter_array()
193 &priv->counter_array_bus, GFP_KERNEL); in gve_alloc_counter_array()
194 if (!priv->counter_array) in gve_alloc_counter_array()
200 static void gve_free_counter_array(struct gve_priv *priv) in gve_free_counter_array() argument
202 if (!priv->counter_array) in gve_free_counter_array()
205 dma_free_coherent(&priv->pdev->dev, in gve_free_counter_array()
206 priv->num_event_counters * in gve_free_counter_array()
207 sizeof(*priv->counter_array), in gve_free_counter_array()
208 priv->counter_array, priv->counter_array_bus); in gve_free_counter_array()
209 priv->counter_array = NULL; in gve_free_counter_array()
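gve_alloc_counter_array()/gve_free_counter_array() above are a straight dma_alloc_coherent()/dma_free_coherent() pairing. A sketch of that pairing with a hypothetical example_counters holder (not the gve_priv layout): the size and bus address passed to the free must match the ones used at allocation, and clearing the CPU pointer keeps the free path safe to call twice.

#include <linux/dma-mapping.h>

struct example_counters {
    __be32 *array;       /* CPU virtual address */
    dma_addr_t bus;      /* device-visible address */
    size_t len;
};

static int example_alloc_counters(struct device *dev,
                                  struct example_counters *c, int n)
{
    c->len = n * sizeof(*c->array);
    c->array = dma_alloc_coherent(dev, c->len, &c->bus, GFP_KERNEL);
    return c->array ? 0 : -ENOMEM;
}

static void example_free_counters(struct device *dev,
                                  struct example_counters *c)
{
    if (!c->array)
        return;
    dma_free_coherent(dev, c->len, c->array, c->bus);
    c->array = NULL;
}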
215 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_stats_report_task() local
217 if (gve_get_do_report_stats(priv)) { in gve_stats_report_task()
218 gve_handle_report_stats(priv); in gve_stats_report_task()
219 gve_clear_do_report_stats(priv); in gve_stats_report_task()
223 static void gve_stats_report_schedule(struct gve_priv *priv) in gve_stats_report_schedule() argument
225 if (!gve_get_probe_in_progress(priv) && in gve_stats_report_schedule()
226 !gve_get_reset_in_progress(priv)) { in gve_stats_report_schedule()
227 gve_set_do_report_stats(priv); in gve_stats_report_schedule()
228 queue_work(priv->gve_wq, &priv->stats_report_task); in gve_stats_report_schedule()
234 struct gve_priv *priv = from_timer(priv, t, stats_report_timer); in gve_stats_report_timer() local
236 mod_timer(&priv->stats_report_timer, in gve_stats_report_timer()
238 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_stats_report_timer()
239 gve_stats_report_schedule(priv); in gve_stats_report_timer()
242 static int gve_alloc_stats_report(struct gve_priv *priv) in gve_alloc_stats_report() argument
247 gve_num_tx_queues(priv); in gve_alloc_stats_report()
249 priv->rx_cfg.num_queues; in gve_alloc_stats_report()
250 priv->stats_report_len = struct_size(priv->stats_report, stats, in gve_alloc_stats_report()
252 priv->stats_report = in gve_alloc_stats_report()
253 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_alloc_stats_report()
254 &priv->stats_report_bus, GFP_KERNEL); in gve_alloc_stats_report()
255 if (!priv->stats_report) in gve_alloc_stats_report()
258 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0); in gve_alloc_stats_report()
259 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD; in gve_alloc_stats_report()
263 static void gve_free_stats_report(struct gve_priv *priv) in gve_free_stats_report() argument
265 if (!priv->stats_report) in gve_free_stats_report()
268 del_timer_sync(&priv->stats_report_timer); in gve_free_stats_report()
269 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_free_stats_report()
270 priv->stats_report, priv->stats_report_bus); in gve_free_stats_report()
271 priv->stats_report = NULL; in gve_free_stats_report()
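The stats-report plumbing above combines a coherent report buffer with a self-rearming timer that kicks a workqueue item. A sketch of that timer pattern with a hypothetical example_priv (not the gve structures): the callback recovers its container with from_timer(), rearms itself, and hands the work to process context; teardown would call del_timer_sync() before freeing the buffer, as gve_free_stats_report() does.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_priv {
    struct timer_list stats_timer;
    struct workqueue_struct *wq;
    struct work_struct stats_work;
    unsigned long period_ms;
};

static void example_stats_timer(struct timer_list *t)
{
    struct example_priv *p = from_timer(p, t, stats_timer);

    /* Rearm first, then queue the actual reporting for process context. */
    mod_timer(&p->stats_timer,
              round_jiffies(jiffies + msecs_to_jiffies(p->period_ms)));
    queue_work(p->wq, &p->stats_work);
}

static void example_start_stats_timer(struct example_priv *p)
{
    timer_setup(&p->stats_timer, example_stats_timer, 0);
    mod_timer(&p->stats_timer,
              round_jiffies(jiffies + msecs_to_jiffies(p->period_ms)));
}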
276 struct gve_priv *priv = arg; in gve_mgmnt_intr() local
278 queue_work(priv->gve_wq, &priv->service_task); in gve_mgmnt_intr()
285 struct gve_priv *priv = block->priv; in gve_intr() local
287 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_intr()
301 static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq) in gve_is_napi_on_home_cpu() argument
318 struct gve_priv *priv; in gve_napi_poll() local
322 priv = block->priv; in gve_napi_poll()
325 if (block->tx->q_num < priv->tx_cfg.num_queues) in gve_napi_poll()
340 if (priv->xdp_prog) in gve_napi_poll()
352 irq_doorbell = gve_irq_doorbell(priv, block); in gve_napi_poll()
361 reschedule |= gve_tx_clean_pending(priv, block->tx); in gve_napi_poll()
375 struct gve_priv *priv = block->priv; in gve_napi_poll_dqo() local
394 if (likely(gve_is_napi_on_home_cpu(priv, block->irq))) in gve_napi_poll_dqo()
415 gve_write_irq_doorbell_dqo(priv, block, in gve_napi_poll_dqo()
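gve_napi_poll() and gve_napi_poll_dqo() above follow the standard NAPI contract. A sketch of that contract with hypothetical helpers (example_clean_tx(), example_poll_rx() and example_rearm_irq() stand in for the driver-specific work; gve re-arms its interrupt by writing the per-block doorbell): do at most budget worth of rx work, and only re-enable the interrupt after napi_complete_done() accepts completion.

#include <linux/netdevice.h>

/* Driver-specific work, assumed to exist for this sketch. */
static bool example_clean_tx(struct napi_struct *napi);
static int example_poll_rx(struct napi_struct *napi, int budget);
static void example_rearm_irq(struct napi_struct *napi);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
    bool tx_pending = example_clean_tx(napi);
    int rx_done = 0;

    if (budget)
        rx_done = example_poll_rx(napi, budget);

    if (rx_done == budget || tx_pending)
        return budget;              /* stay scheduled, keep polling */

    if (napi_complete_done(napi, rx_done))
        example_rearm_irq(napi);    /* interrupt unmasked only here */

    return rx_done;
}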
422 static int gve_alloc_notify_blocks(struct gve_priv *priv) in gve_alloc_notify_blocks() argument
424 int num_vecs_requested = priv->num_ntfy_blks + 1; in gve_alloc_notify_blocks()
430 priv->msix_vectors = kvcalloc(num_vecs_requested, in gve_alloc_notify_blocks()
431 sizeof(*priv->msix_vectors), GFP_KERNEL); in gve_alloc_notify_blocks()
432 if (!priv->msix_vectors) in gve_alloc_notify_blocks()
435 priv->msix_vectors[i].entry = i; in gve_alloc_notify_blocks()
436 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors, in gve_alloc_notify_blocks()
439 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n", in gve_alloc_notify_blocks()
449 priv->num_ntfy_blks = new_num_ntfy_blks; in gve_alloc_notify_blocks()
450 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_alloc_notify_blocks()
451 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
453 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues, in gve_alloc_notify_blocks()
455 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
457 vecs_enabled, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
458 priv->rx_cfg.max_queues); in gve_alloc_notify_blocks()
459 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) in gve_alloc_notify_blocks()
460 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_alloc_notify_blocks()
461 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) in gve_alloc_notify_blocks()
462 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_alloc_notify_blocks()
465 active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus()); in gve_alloc_notify_blocks()
468 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s", in gve_alloc_notify_blocks()
469 pci_name(priv->pdev)); in gve_alloc_notify_blocks()
470 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, in gve_alloc_notify_blocks()
471 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv); in gve_alloc_notify_blocks()
473 dev_err(&priv->pdev->dev, "Did not receive management vector.\n"); in gve_alloc_notify_blocks()
476 priv->irq_db_indices = in gve_alloc_notify_blocks()
477 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_notify_blocks()
478 priv->num_ntfy_blks * in gve_alloc_notify_blocks()
479 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
480 &priv->irq_db_indices_bus, GFP_KERNEL); in gve_alloc_notify_blocks()
481 if (!priv->irq_db_indices) { in gve_alloc_notify_blocks()
486 priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks * in gve_alloc_notify_blocks()
487 sizeof(*priv->ntfy_blocks), GFP_KERNEL); in gve_alloc_notify_blocks()
488 if (!priv->ntfy_blocks) { in gve_alloc_notify_blocks()
494 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_alloc_notify_blocks()
495 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_alloc_notify_blocks()
499 i, pci_name(priv->pdev)); in gve_alloc_notify_blocks()
500 block->priv = priv; in gve_alloc_notify_blocks()
501 err = request_irq(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
502 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo, in gve_alloc_notify_blocks()
505 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
509 block->irq = priv->msix_vectors[msix_idx].vector; in gve_alloc_notify_blocks()
510 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
512 block->irq_db_index = &priv->irq_db_indices[i].index; in gve_alloc_notify_blocks()
517 struct gve_notify_block *block = &priv->ntfy_blocks[j]; in gve_alloc_notify_blocks()
520 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
522 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_alloc_notify_blocks()
525 kvfree(priv->ntfy_blocks); in gve_alloc_notify_blocks()
526 priv->ntfy_blocks = NULL; in gve_alloc_notify_blocks()
528 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_alloc_notify_blocks()
529 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
530 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_alloc_notify_blocks()
531 priv->irq_db_indices = NULL; in gve_alloc_notify_blocks()
533 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_alloc_notify_blocks()
535 pci_disable_msix(priv->pdev); in gve_alloc_notify_blocks()
537 kvfree(priv->msix_vectors); in gve_alloc_notify_blocks()
538 priv->msix_vectors = NULL; in gve_alloc_notify_blocks()
542 static void gve_free_notify_blocks(struct gve_priv *priv) in gve_free_notify_blocks() argument
546 if (!priv->msix_vectors) in gve_free_notify_blocks()
550 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_free_notify_blocks()
551 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_free_notify_blocks()
554 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_free_notify_blocks()
556 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_free_notify_blocks()
559 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_free_notify_blocks()
560 kvfree(priv->ntfy_blocks); in gve_free_notify_blocks()
561 priv->ntfy_blocks = NULL; in gve_free_notify_blocks()
562 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_free_notify_blocks()
563 sizeof(*priv->irq_db_indices), in gve_free_notify_blocks()
564 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_free_notify_blocks()
565 priv->irq_db_indices = NULL; in gve_free_notify_blocks()
566 pci_disable_msix(priv->pdev); in gve_free_notify_blocks()
567 kvfree(priv->msix_vectors); in gve_free_notify_blocks()
568 priv->msix_vectors = NULL; in gve_free_notify_blocks()
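gve_alloc_notify_blocks()/gve_free_notify_blocks() above carry the usual MSI-X bring-up and teardown. A condensed sketch of that sequence with a hypothetical example_dev (per-vector naming, affinity hints and the separate management vector are left out): allocate the msix_entry table, enable a vector range, request one IRQ per vector, and unwind in reverse order on failure.

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct example_dev {
    struct pci_dev *pdev;
    struct msix_entry *vecs;
    int num_vecs;
};

static irqreturn_t example_intr(int irq, void *arg)
{
    /* A real handler would schedule NAPI for its queue block here. */
    return IRQ_HANDLED;
}

static int example_enable_msix(struct example_dev *d, int want)
{
    int i, got, err;

    d->vecs = kvcalloc(want, sizeof(*d->vecs), GFP_KERNEL);
    if (!d->vecs)
        return -ENOMEM;
    for (i = 0; i < want; i++)
        d->vecs[i].entry = i;

    got = pci_enable_msix_range(d->pdev, d->vecs, 1, want);
    if (got < 0) {
        err = got;
        goto free_table;
    }
    d->num_vecs = got;

    for (i = 0; i < got; i++) {
        err = request_irq(d->vecs[i].vector, example_intr, 0,
                          "example", d);
        if (err)
            goto free_irqs;
    }
    return 0;

free_irqs:
    while (--i >= 0)
        free_irq(d->vecs[i].vector, d);
    pci_disable_msix(d->pdev);
free_table:
    kvfree(d->vecs);
    d->vecs = NULL;
    return err;
}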
571 static int gve_setup_device_resources(struct gve_priv *priv) in gve_setup_device_resources() argument
575 err = gve_alloc_flow_rule_caches(priv); in gve_setup_device_resources()
578 err = gve_alloc_counter_array(priv); in gve_setup_device_resources()
581 err = gve_alloc_notify_blocks(priv); in gve_setup_device_resources()
584 err = gve_alloc_stats_report(priv); in gve_setup_device_resources()
587 err = gve_adminq_configure_device_resources(priv, in gve_setup_device_resources()
588 priv->counter_array_bus, in gve_setup_device_resources()
589 priv->num_event_counters, in gve_setup_device_resources()
590 priv->irq_db_indices_bus, in gve_setup_device_resources()
591 priv->num_ntfy_blks); in gve_setup_device_resources()
593 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
599 if (!gve_is_gqi(priv)) { in gve_setup_device_resources()
600 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo), in gve_setup_device_resources()
602 if (!priv->ptype_lut_dqo) { in gve_setup_device_resources()
606 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo); in gve_setup_device_resources()
608 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
614 err = gve_adminq_report_stats(priv, priv->stats_report_len, in gve_setup_device_resources()
615 priv->stats_report_bus, in gve_setup_device_resources()
618 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
620 gve_set_device_resources_ok(priv); in gve_setup_device_resources()
624 kvfree(priv->ptype_lut_dqo); in gve_setup_device_resources()
625 priv->ptype_lut_dqo = NULL; in gve_setup_device_resources()
627 gve_free_stats_report(priv); in gve_setup_device_resources()
629 gve_free_notify_blocks(priv); in gve_setup_device_resources()
631 gve_free_counter_array(priv); in gve_setup_device_resources()
633 gve_free_flow_rule_caches(priv); in gve_setup_device_resources()
638 static void gve_trigger_reset(struct gve_priv *priv);
640 static void gve_teardown_device_resources(struct gve_priv *priv) in gve_teardown_device_resources() argument
645 if (gve_get_device_resources_ok(priv)) { in gve_teardown_device_resources()
646 err = gve_flow_rules_reset(priv); in gve_teardown_device_resources()
648 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
650 gve_trigger_reset(priv); in gve_teardown_device_resources()
653 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD); in gve_teardown_device_resources()
655 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
657 gve_trigger_reset(priv); in gve_teardown_device_resources()
659 err = gve_adminq_deconfigure_device_resources(priv); in gve_teardown_device_resources()
661 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
664 gve_trigger_reset(priv); in gve_teardown_device_resources()
668 kvfree(priv->ptype_lut_dqo); in gve_teardown_device_resources()
669 priv->ptype_lut_dqo = NULL; in gve_teardown_device_resources()
671 gve_free_flow_rule_caches(priv); in gve_teardown_device_resources()
672 gve_free_counter_array(priv); in gve_teardown_device_resources()
673 gve_free_notify_blocks(priv); in gve_teardown_device_resources()
674 gve_free_stats_report(priv); in gve_teardown_device_resources()
675 gve_clear_device_resources_ok(priv); in gve_teardown_device_resources()
678 static int gve_unregister_qpl(struct gve_priv *priv, in gve_unregister_qpl() argument
686 err = gve_adminq_unregister_page_list(priv, qpl->id); in gve_unregister_qpl()
688 netif_err(priv, drv, priv->dev, in gve_unregister_qpl()
694 priv->num_registered_pages -= qpl->num_entries; in gve_unregister_qpl()
698 static int gve_register_qpl(struct gve_priv *priv, in gve_register_qpl() argument
709 if (pages + priv->num_registered_pages > priv->max_registered_pages) { in gve_register_qpl()
710 netif_err(priv, drv, priv->dev, in gve_register_qpl()
712 pages + priv->num_registered_pages, in gve_register_qpl()
713 priv->max_registered_pages); in gve_register_qpl()
717 err = gve_adminq_register_page_list(priv, qpl); in gve_register_qpl()
719 netif_err(priv, drv, priv->dev, in gve_register_qpl()
725 priv->num_registered_pages += pages; in gve_register_qpl()
729 static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx) in gve_tx_get_qpl() argument
731 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_get_qpl()
733 if (gve_is_gqi(priv)) in gve_tx_get_qpl()
739 static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx) in gve_rx_get_qpl() argument
741 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_get_qpl()
743 if (gve_is_gqi(priv)) in gve_rx_get_qpl()
749 static int gve_register_xdp_qpls(struct gve_priv *priv) in gve_register_xdp_qpls() argument
755 start_id = gve_xdp_tx_start_queue_id(priv); in gve_register_xdp_qpls()
756 for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { in gve_register_xdp_qpls()
757 err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i)); in gve_register_xdp_qpls()
765 static int gve_register_qpls(struct gve_priv *priv) in gve_register_qpls() argument
771 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv), in gve_register_qpls()
772 gve_is_qpl(priv)); in gve_register_qpls()
773 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); in gve_register_qpls()
776 err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i)); in gve_register_qpls()
782 err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i)); in gve_register_qpls()
790 static int gve_unregister_xdp_qpls(struct gve_priv *priv) in gve_unregister_xdp_qpls() argument
796 start_id = gve_xdp_tx_start_queue_id(priv); in gve_unregister_xdp_qpls()
797 for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { in gve_unregister_xdp_qpls()
798 err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i)); in gve_unregister_xdp_qpls()
806 static int gve_unregister_qpls(struct gve_priv *priv) in gve_unregister_qpls() argument
812 num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv), in gve_unregister_qpls()
813 gve_is_qpl(priv)); in gve_unregister_qpls()
814 num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); in gve_unregister_qpls()
817 err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i)); in gve_unregister_qpls()
824 err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i)); in gve_unregister_qpls()
832 static int gve_create_xdp_rings(struct gve_priv *priv) in gve_create_xdp_rings() argument
836 err = gve_adminq_create_tx_queues(priv, in gve_create_xdp_rings()
837 gve_xdp_tx_start_queue_id(priv), in gve_create_xdp_rings()
838 priv->num_xdp_queues); in gve_create_xdp_rings()
840 netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n", in gve_create_xdp_rings()
841 priv->num_xdp_queues); in gve_create_xdp_rings()
847 netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n", in gve_create_xdp_rings()
848 priv->num_xdp_queues); in gve_create_xdp_rings()
853 static int gve_create_rings(struct gve_priv *priv) in gve_create_rings() argument
855 int num_tx_queues = gve_num_tx_queues(priv); in gve_create_rings()
859 err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues); in gve_create_rings()
861 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", in gve_create_rings()
868 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", in gve_create_rings()
871 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); in gve_create_rings()
873 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n", in gve_create_rings()
874 priv->rx_cfg.num_queues); in gve_create_rings()
880 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n", in gve_create_rings()
881 priv->rx_cfg.num_queues); in gve_create_rings()
883 if (gve_is_gqi(priv)) { in gve_create_rings()
890 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_create_rings()
891 gve_rx_write_doorbell(priv, &priv->rx[i]); in gve_create_rings()
893 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_create_rings()
895 gve_rx_post_buffers_dqo(&priv->rx[i]); in gve_create_rings()
902 static void init_xdp_sync_stats(struct gve_priv *priv) in init_xdp_sync_stats() argument
904 int start_id = gve_xdp_tx_start_queue_id(priv); in init_xdp_sync_stats()
908 for (i = start_id; i < start_id + priv->num_xdp_queues; i++) { in init_xdp_sync_stats()
909 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); in init_xdp_sync_stats()
911 u64_stats_init(&priv->tx[i].statss); in init_xdp_sync_stats()
912 priv->tx[i].ntfy_id = ntfy_idx; in init_xdp_sync_stats()
916 static void gve_init_sync_stats(struct gve_priv *priv) in gve_init_sync_stats() argument
920 for (i = 0; i < priv->tx_cfg.num_queues; i++) in gve_init_sync_stats()
921 u64_stats_init(&priv->tx[i].statss); in gve_init_sync_stats()
924 init_xdp_sync_stats(priv); in gve_init_sync_stats()
926 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_init_sync_stats()
927 u64_stats_init(&priv->rx[i].statss); in gve_init_sync_stats()
930 static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv, in gve_tx_get_curr_alloc_cfg() argument
933 int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0; in gve_tx_get_curr_alloc_cfg()
935 cfg->qcfg = &priv->tx_cfg; in gve_tx_get_curr_alloc_cfg()
936 cfg->raw_addressing = !gve_is_qpl(priv); in gve_tx_get_curr_alloc_cfg()
937 cfg->ring_size = priv->tx_desc_cnt; in gve_tx_get_curr_alloc_cfg()
939 cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues; in gve_tx_get_curr_alloc_cfg()
940 cfg->tx = priv->tx; in gve_tx_get_curr_alloc_cfg()
943 static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings) in gve_tx_stop_rings() argument
947 if (!priv->tx) in gve_tx_stop_rings()
951 if (gve_is_gqi(priv)) in gve_tx_stop_rings()
952 gve_tx_stop_ring_gqi(priv, i); in gve_tx_stop_rings()
954 gve_tx_stop_ring_dqo(priv, i); in gve_tx_stop_rings()
958 static void gve_tx_start_rings(struct gve_priv *priv, int start_id, in gve_tx_start_rings() argument
964 if (gve_is_gqi(priv)) in gve_tx_start_rings()
965 gve_tx_start_ring_gqi(priv, i); in gve_tx_start_rings()
967 gve_tx_start_ring_dqo(priv, i); in gve_tx_start_rings()
971 static int gve_alloc_xdp_rings(struct gve_priv *priv) in gve_alloc_xdp_rings() argument
976 if (!priv->num_xdp_queues) in gve_alloc_xdp_rings()
979 gve_tx_get_curr_alloc_cfg(priv, &cfg); in gve_alloc_xdp_rings()
980 cfg.start_idx = gve_xdp_tx_start_queue_id(priv); in gve_alloc_xdp_rings()
981 cfg.num_rings = priv->num_xdp_queues; in gve_alloc_xdp_rings()
983 err = gve_tx_alloc_rings_gqi(priv, &cfg); in gve_alloc_xdp_rings()
987 gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings); in gve_alloc_xdp_rings()
988 init_xdp_sync_stats(priv); in gve_alloc_xdp_rings()
993 static int gve_queues_mem_alloc(struct gve_priv *priv, in gve_queues_mem_alloc() argument
999 if (gve_is_gqi(priv)) in gve_queues_mem_alloc()
1000 err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg); in gve_queues_mem_alloc()
1002 err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg); in gve_queues_mem_alloc()
1006 if (gve_is_gqi(priv)) in gve_queues_mem_alloc()
1007 err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg); in gve_queues_mem_alloc()
1009 err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg); in gve_queues_mem_alloc()
1016 if (gve_is_gqi(priv)) in gve_queues_mem_alloc()
1017 gve_tx_free_rings_gqi(priv, tx_alloc_cfg); in gve_queues_mem_alloc()
1019 gve_tx_free_rings_dqo(priv, tx_alloc_cfg); in gve_queues_mem_alloc()
1023 static int gve_destroy_xdp_rings(struct gve_priv *priv) in gve_destroy_xdp_rings() argument
1028 start_id = gve_xdp_tx_start_queue_id(priv); in gve_destroy_xdp_rings()
1029 err = gve_adminq_destroy_tx_queues(priv, in gve_destroy_xdp_rings()
1031 priv->num_xdp_queues); in gve_destroy_xdp_rings()
1033 netif_err(priv, drv, priv->dev, in gve_destroy_xdp_rings()
1038 netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n"); in gve_destroy_xdp_rings()
1043 static int gve_destroy_rings(struct gve_priv *priv) in gve_destroy_rings() argument
1045 int num_tx_queues = gve_num_tx_queues(priv); in gve_destroy_rings()
1048 err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues); in gve_destroy_rings()
1050 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
1055 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n"); in gve_destroy_rings()
1056 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); in gve_destroy_rings()
1058 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
1063 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n"); in gve_destroy_rings()
1067 static void gve_free_xdp_rings(struct gve_priv *priv) in gve_free_xdp_rings() argument
1071 gve_tx_get_curr_alloc_cfg(priv, &cfg); in gve_free_xdp_rings()
1072 cfg.start_idx = gve_xdp_tx_start_queue_id(priv); in gve_free_xdp_rings()
1073 cfg.num_rings = priv->num_xdp_queues; in gve_free_xdp_rings()
1075 if (priv->tx) { in gve_free_xdp_rings()
1076 gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings); in gve_free_xdp_rings()
1077 gve_tx_free_rings_gqi(priv, &cfg); in gve_free_xdp_rings()
1081 static void gve_queues_mem_free(struct gve_priv *priv, in gve_queues_mem_free() argument
1085 if (gve_is_gqi(priv)) { in gve_queues_mem_free()
1086 gve_tx_free_rings_gqi(priv, tx_cfg); in gve_queues_mem_free()
1087 gve_rx_free_rings_gqi(priv, rx_cfg); in gve_queues_mem_free()
1089 gve_tx_free_rings_dqo(priv, tx_cfg); in gve_queues_mem_free()
1090 gve_rx_free_rings_dqo(priv, rx_cfg); in gve_queues_mem_free()
1094 int gve_alloc_page(struct gve_priv *priv, struct device *dev, in gve_alloc_page() argument
1100 priv->page_alloc_fail++; in gve_alloc_page()
1105 priv->dma_mapping_error++; in gve_alloc_page()
1112 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv, in gve_alloc_queue_page_list() argument
1134 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i], in gve_alloc_queue_page_list()
1136 gve_qpl_dma_dir(priv, id), GFP_KERNEL); in gve_alloc_queue_page_list()
1145 gve_free_queue_page_list(priv, qpl, id); in gve_alloc_queue_page_list()
1158 void gve_free_queue_page_list(struct gve_priv *priv, in gve_free_queue_page_list() argument
1172 gve_free_page(&priv->pdev->dev, qpl->pages[i], in gve_free_queue_page_list()
1173 qpl->page_buses[i], gve_qpl_dma_dir(priv, id)); in gve_free_queue_page_list()
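gve_alloc_page() above pairs a page allocation with a streaming DMA mapping and bumps the page_alloc_fail/dma_mapping_error counters on the two failure paths. A generic sketch of that step (counters omitted); the bus address must be checked with dma_mapping_error() before it is handed to the device.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_alloc_rx_page(struct device *dev, struct page **page,
                                 dma_addr_t *dma, enum dma_data_direction dir)
{
    *page = alloc_page(GFP_KERNEL);
    if (!*page)
        return -ENOMEM;

    *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
    if (dma_mapping_error(dev, *dma)) {
        put_page(*page);
        *page = NULL;
        return -ENOMEM;
    }
    return 0;
}

static void example_free_rx_page(struct device *dev, struct page *page,
                                 dma_addr_t dma, enum dma_data_direction dir)
{
    dma_unmap_page(dev, dma, PAGE_SIZE, dir);
    put_page(page);
}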
1188 void gve_schedule_reset(struct gve_priv *priv) in gve_schedule_reset() argument
1190 gve_set_do_reset(priv); in gve_schedule_reset()
1191 queue_work(priv->gve_wq, &priv->service_task); in gve_schedule_reset()
1194 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
1195 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
1196 static void gve_turndown(struct gve_priv *priv);
1197 static void gve_turnup(struct gve_priv *priv);
1199 static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev) in gve_reg_xdp_info() argument
1207 if (!priv->num_xdp_queues) in gve_reg_xdp_info()
1210 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_reg_xdp_info()
1211 rx = &priv->rx[i]; in gve_reg_xdp_info()
1212 napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_reg_xdp_info()
1237 for (i = 0; i < priv->num_xdp_queues; i++) { in gve_reg_xdp_info()
1238 tx_qid = gve_xdp_tx_queue_id(priv, i); in gve_reg_xdp_info()
1239 priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i); in gve_reg_xdp_info()
1245 rx = &priv->rx[j]; in gve_reg_xdp_info()
1254 static void gve_unreg_xdp_info(struct gve_priv *priv) in gve_unreg_xdp_info() argument
1258 if (!priv->num_xdp_queues) in gve_unreg_xdp_info()
1261 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_unreg_xdp_info()
1262 struct gve_rx_ring *rx = &priv->rx[i]; in gve_unreg_xdp_info()
1271 for (i = 0; i < priv->num_xdp_queues; i++) { in gve_unreg_xdp_info()
1272 tx_qid = gve_xdp_tx_queue_id(priv, i); in gve_unreg_xdp_info()
1273 priv->tx[tx_qid].xsk_pool = NULL; in gve_unreg_xdp_info()
1277 static void gve_drain_page_cache(struct gve_priv *priv) in gve_drain_page_cache() argument
1281 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_drain_page_cache()
1282 page_frag_cache_drain(&priv->rx[i].page_cache); in gve_drain_page_cache()
1285 static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv, in gve_rx_get_curr_alloc_cfg() argument
1288 cfg->qcfg = &priv->rx_cfg; in gve_rx_get_curr_alloc_cfg()
1289 cfg->qcfg_tx = &priv->tx_cfg; in gve_rx_get_curr_alloc_cfg()
1290 cfg->raw_addressing = !gve_is_qpl(priv); in gve_rx_get_curr_alloc_cfg()
1291 cfg->enable_header_split = priv->header_split_enabled; in gve_rx_get_curr_alloc_cfg()
1292 cfg->ring_size = priv->rx_desc_cnt; in gve_rx_get_curr_alloc_cfg()
1293 cfg->packet_buffer_size = gve_is_gqi(priv) ? in gve_rx_get_curr_alloc_cfg()
1295 priv->data_buffer_size_dqo; in gve_rx_get_curr_alloc_cfg()
1296 cfg->rx = priv->rx; in gve_rx_get_curr_alloc_cfg()
1299 void gve_get_curr_alloc_cfgs(struct gve_priv *priv, in gve_get_curr_alloc_cfgs() argument
1303 gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg); in gve_get_curr_alloc_cfgs()
1304 gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg); in gve_get_curr_alloc_cfgs()
1307 static void gve_rx_start_ring(struct gve_priv *priv, int i) in gve_rx_start_ring() argument
1309 if (gve_is_gqi(priv)) in gve_rx_start_ring()
1310 gve_rx_start_ring_gqi(priv, i); in gve_rx_start_ring()
1312 gve_rx_start_ring_dqo(priv, i); in gve_rx_start_ring()
1315 static void gve_rx_start_rings(struct gve_priv *priv, int num_rings) in gve_rx_start_rings() argument
1320 gve_rx_start_ring(priv, i); in gve_rx_start_rings()
1323 static void gve_rx_stop_ring(struct gve_priv *priv, int i) in gve_rx_stop_ring() argument
1325 if (gve_is_gqi(priv)) in gve_rx_stop_ring()
1326 gve_rx_stop_ring_gqi(priv, i); in gve_rx_stop_ring()
1328 gve_rx_stop_ring_dqo(priv, i); in gve_rx_stop_ring()
1331 static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings) in gve_rx_stop_rings() argument
1335 if (!priv->rx) in gve_rx_stop_rings()
1339 gve_rx_stop_ring(priv, i); in gve_rx_stop_rings()
1342 static void gve_queues_mem_remove(struct gve_priv *priv) in gve_queues_mem_remove() argument
1347 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_queues_mem_remove()
1348 gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_queues_mem_remove()
1349 priv->tx = NULL; in gve_queues_mem_remove()
1350 priv->rx = NULL; in gve_queues_mem_remove()
1356 static int gve_queues_start(struct gve_priv *priv, in gve_queues_start() argument
1360 struct net_device *dev = priv->dev; in gve_queues_start()
1364 priv->tx = tx_alloc_cfg->tx; in gve_queues_start()
1365 priv->rx = rx_alloc_cfg->rx; in gve_queues_start()
1368 priv->tx_cfg = *tx_alloc_cfg->qcfg; in gve_queues_start()
1369 priv->rx_cfg = *rx_alloc_cfg->qcfg; in gve_queues_start()
1370 priv->tx_desc_cnt = tx_alloc_cfg->ring_size; in gve_queues_start()
1371 priv->rx_desc_cnt = rx_alloc_cfg->ring_size; in gve_queues_start()
1373 if (priv->xdp_prog) in gve_queues_start()
1374 priv->num_xdp_queues = priv->rx_cfg.num_queues; in gve_queues_start()
1376 priv->num_xdp_queues = 0; in gve_queues_start()
1378 gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings); in gve_queues_start()
1379 gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues); in gve_queues_start()
1380 gve_init_sync_stats(priv); in gve_queues_start()
1382 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues); in gve_queues_start()
1385 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues); in gve_queues_start()
1389 err = gve_reg_xdp_info(priv, dev); in gve_queues_start()
1393 err = gve_register_qpls(priv); in gve_queues_start()
1397 priv->header_split_enabled = rx_alloc_cfg->enable_header_split; in gve_queues_start()
1398 priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size; in gve_queues_start()
1400 err = gve_create_rings(priv); in gve_queues_start()
1404 gve_set_device_rings_ok(priv); in gve_queues_start()
1406 if (gve_get_report_stats(priv)) in gve_queues_start()
1407 mod_timer(&priv->stats_report_timer, in gve_queues_start()
1409 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_queues_start()
1411 gve_turnup(priv); in gve_queues_start()
1412 queue_work(priv->gve_wq, &priv->service_task); in gve_queues_start()
1413 priv->interface_up_cnt++; in gve_queues_start()
1417 if (gve_get_reset_in_progress(priv)) in gve_queues_start()
1419 gve_reset_and_teardown(priv, true); in gve_queues_start()
1421 gve_reset_recovery(priv, false); in gve_queues_start()
1425 gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv)); in gve_queues_start()
1426 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues); in gve_queues_start()
1427 gve_queues_mem_remove(priv); in gve_queues_start()
1435 struct gve_priv *priv = netdev_priv(dev); in gve_open() local
1438 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_open()
1440 err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_open()
1447 err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_open()
1454 static int gve_queues_stop(struct gve_priv *priv) in gve_queues_stop() argument
1458 netif_carrier_off(priv->dev); in gve_queues_stop()
1459 if (gve_get_device_rings_ok(priv)) { in gve_queues_stop()
1460 gve_turndown(priv); in gve_queues_stop()
1461 gve_drain_page_cache(priv); in gve_queues_stop()
1462 err = gve_destroy_rings(priv); in gve_queues_stop()
1465 err = gve_unregister_qpls(priv); in gve_queues_stop()
1468 gve_clear_device_rings_ok(priv); in gve_queues_stop()
1470 del_timer_sync(&priv->stats_report_timer); in gve_queues_stop()
1472 gve_unreg_xdp_info(priv); in gve_queues_stop()
1474 gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv)); in gve_queues_stop()
1475 gve_rx_stop_rings(priv, priv->rx_cfg.num_queues); in gve_queues_stop()
1477 priv->interface_down_cnt++; in gve_queues_stop()
1484 if (gve_get_reset_in_progress(priv)) in gve_queues_stop()
1487 gve_reset_and_teardown(priv, true); in gve_queues_stop()
1488 return gve_reset_recovery(priv, false); in gve_queues_stop()
1493 struct gve_priv *priv = netdev_priv(dev); in gve_close() local
1496 err = gve_queues_stop(priv); in gve_close()
1500 gve_queues_mem_remove(priv); in gve_close()
1504 static int gve_remove_xdp_queues(struct gve_priv *priv) in gve_remove_xdp_queues() argument
1508 err = gve_destroy_xdp_rings(priv); in gve_remove_xdp_queues()
1512 err = gve_unregister_xdp_qpls(priv); in gve_remove_xdp_queues()
1516 gve_unreg_xdp_info(priv); in gve_remove_xdp_queues()
1517 gve_free_xdp_rings(priv); in gve_remove_xdp_queues()
1519 priv->num_xdp_queues = 0; in gve_remove_xdp_queues()
1523 static int gve_add_xdp_queues(struct gve_priv *priv) in gve_add_xdp_queues() argument
1527 priv->num_xdp_queues = priv->rx_cfg.num_queues; in gve_add_xdp_queues()
1529 err = gve_alloc_xdp_rings(priv); in gve_add_xdp_queues()
1533 err = gve_reg_xdp_info(priv, priv->dev); in gve_add_xdp_queues()
1537 err = gve_register_xdp_qpls(priv); in gve_add_xdp_queues()
1541 err = gve_create_xdp_rings(priv); in gve_add_xdp_queues()
1548 gve_free_xdp_rings(priv); in gve_add_xdp_queues()
1550 priv->num_xdp_queues = 0; in gve_add_xdp_queues()
1554 static void gve_handle_link_status(struct gve_priv *priv, bool link_status) in gve_handle_link_status() argument
1556 if (!gve_get_napi_enabled(priv)) in gve_handle_link_status()
1559 if (link_status == netif_carrier_ok(priv->dev)) in gve_handle_link_status()
1563 netdev_info(priv->dev, "Device link is up.\n"); in gve_handle_link_status()
1564 netif_carrier_on(priv->dev); in gve_handle_link_status()
1566 netdev_info(priv->dev, "Device link is down.\n"); in gve_handle_link_status()
1567 netif_carrier_off(priv->dev); in gve_handle_link_status()
1571 static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, in gve_set_xdp() argument
1578 old_prog = READ_ONCE(priv->xdp_prog); in gve_set_xdp()
1579 if (!netif_running(priv->dev)) { in gve_set_xdp()
1580 WRITE_ONCE(priv->xdp_prog, prog); in gve_set_xdp()
1586 gve_turndown(priv); in gve_set_xdp()
1590 err = gve_add_xdp_queues(priv); in gve_set_xdp()
1596 err = gve_remove_xdp_queues(priv); in gve_set_xdp()
1600 WRITE_ONCE(priv->xdp_prog, prog); in gve_set_xdp()
1605 gve_turnup(priv); in gve_set_xdp()
1606 status = ioread32be(&priv->reg_bar0->device_status); in gve_set_xdp()
1607 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); in gve_set_xdp()
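gve_set_xdp() above publishes the new program with WRITE_ONCE() so the datapath's READ_ONCE() always sees a consistent pointer, and only reshapes queues when the interface is running. A reduced sketch of that swap with a hypothetical example_xdp_priv (queue add/remove, turndown/turnup and the link-status read are elided; releasing the old program with bpf_prog_put() follows the common convention and is not shown in the lines above).

#include <linux/bpf.h>
#include <linux/netdevice.h>

struct example_xdp_priv {
    struct net_device *dev;
    struct bpf_prog *xdp_prog;
};

static int example_set_xdp(struct example_xdp_priv *p, struct bpf_prog *prog)
{
    struct bpf_prog *old = READ_ONCE(p->xdp_prog);

    if (netif_running(p->dev)) {
        /* Quiesce the datapath and reshape XDP tx queues here. */
    }

    WRITE_ONCE(p->xdp_prog, prog);
    if (old)
        bpf_prog_put(old);

    if (netif_running(p->dev)) {
        /* Re-enable the datapath and re-check link status here. */
    }
    return 0;
}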
1615 struct gve_priv *priv = netdev_priv(dev); in gve_xsk_pool_enable() local
1621 if (qid >= priv->rx_cfg.num_queues) { in gve_xsk_pool_enable()
1622 dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid); in gve_xsk_pool_enable()
1626 priv->dev->max_mtu + sizeof(struct ethhdr)) { in gve_xsk_pool_enable()
1627 dev_err(&priv->pdev->dev, "xsk pool frame_len too small"); in gve_xsk_pool_enable()
1631 err = xsk_pool_dma_map(pool, &priv->pdev->dev, in gve_xsk_pool_enable()
1637 if (!priv->xdp_prog || !netif_running(dev)) in gve_xsk_pool_enable()
1640 rx = &priv->rx[qid]; in gve_xsk_pool_enable()
1641 napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_xsk_pool_enable()
1654 tx_qid = gve_xdp_tx_queue_id(priv, qid); in gve_xsk_pool_enable()
1655 priv->tx[tx_qid].xsk_pool = pool; in gve_xsk_pool_enable()
1670 struct gve_priv *priv = netdev_priv(dev); in gve_xsk_pool_disable() local
1679 if (qid >= priv->rx_cfg.num_queues) in gve_xsk_pool_disable()
1685 if (!priv->xdp_prog || !netif_running(dev)) in gve_xsk_pool_disable()
1688 napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi; in gve_xsk_pool_disable()
1691 tx_qid = gve_xdp_tx_queue_id(priv, qid); in gve_xsk_pool_disable()
1692 napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi; in gve_xsk_pool_disable()
1695 priv->rx[qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1696 xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); in gve_xsk_pool_disable()
1697 priv->tx[tx_qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1701 if (gve_rx_work_pending(&priv->rx[qid])) in gve_xsk_pool_disable()
1705 if (gve_tx_clean_pending(priv, &priv->tx[tx_qid])) in gve_xsk_pool_disable()
1716 struct gve_priv *priv = netdev_priv(dev); in gve_xsk_wakeup() local
1719 if (!gve_get_napi_enabled(priv)) in gve_xsk_wakeup()
1722 if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog) in gve_xsk_wakeup()
1725 napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi; in gve_xsk_wakeup()
1738 struct gve_priv *priv = netdev_priv(dev); in verify_xdp_configuration() local
1745 if (priv->queue_format != GVE_GQI_QPL_FORMAT) { in verify_xdp_configuration()
1747 priv->queue_format); in verify_xdp_configuration()
1757 if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues || in verify_xdp_configuration()
1758 (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) { in verify_xdp_configuration()
1760 priv->rx_cfg.num_queues, in verify_xdp_configuration()
1761 priv->tx_cfg.num_queues, in verify_xdp_configuration()
1762 priv->tx_cfg.max_queues); in verify_xdp_configuration()
1770 struct gve_priv *priv = netdev_priv(dev); in gve_xdp() local
1778 return gve_set_xdp(priv, xdp->prog, xdp->extack); in gve_xdp()
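gve_xdp() above is the driver's .ndo_bpf entry point: it validates the configuration and then dispatches on the requested command. A minimal sketch of that dispatch shape (example_install_prog() is a hypothetical helper, and the AF_XDP branch is only stubbed):

#include <linux/netdevice.h>
#include <linux/bpf.h>

/* Hypothetical helper that performs the actual program swap. */
static int example_install_prog(struct net_device *dev, struct bpf_prog *prog,
                                struct netlink_ext_ack *extack);

static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *xdp)
{
    switch (xdp->command) {
    case XDP_SETUP_PROG:
        return example_install_prog(dev, xdp->prog, xdp->extack);
    case XDP_SETUP_XSK_POOL:
        /* Enable/disable the AF_XDP buffer pool for xdp->xsk.queue_id. */
        return -EOPNOTSUPP;
    default:
        return -EINVAL;
    }
}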
1789 int gve_flow_rules_reset(struct gve_priv *priv) in gve_flow_rules_reset() argument
1791 if (!priv->max_flow_rules) in gve_flow_rules_reset()
1794 return gve_adminq_reset_flow_rules(priv); in gve_flow_rules_reset()
1797 int gve_adjust_config(struct gve_priv *priv, in gve_adjust_config() argument
1804 err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg); in gve_adjust_config()
1806 netif_err(priv, drv, priv->dev, in gve_adjust_config()
1812 err = gve_close(priv->dev); in gve_adjust_config()
1814 netif_err(priv, drv, priv->dev, in gve_adjust_config()
1816 gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg); in gve_adjust_config()
1821 err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg); in gve_adjust_config()
1823 netif_err(priv, drv, priv->dev, in gve_adjust_config()
1828 gve_turndown(priv); in gve_adjust_config()
1835 int gve_adjust_queues(struct gve_priv *priv, in gve_adjust_queues() argument
1844 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_adjust_queues()
1853 num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0; in gve_adjust_queues()
1856 if (netif_running(priv->dev)) { in gve_adjust_queues()
1857 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_adjust_queues()
1861 priv->tx_cfg = new_tx_config; in gve_adjust_queues()
1862 priv->rx_cfg = new_rx_config; in gve_adjust_queues()
1867 static void gve_turndown(struct gve_priv *priv) in gve_turndown() argument
1871 if (netif_carrier_ok(priv->dev)) in gve_turndown()
1872 netif_carrier_off(priv->dev); in gve_turndown()
1874 if (!gve_get_napi_enabled(priv)) in gve_turndown()
1878 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { in gve_turndown()
1879 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turndown()
1880 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1882 if (!gve_tx_was_added_to_block(priv, idx)) in gve_turndown()
1885 if (idx < priv->tx_cfg.num_queues) in gve_turndown()
1886 netif_queue_set_napi(priv->dev, idx, in gve_turndown()
1891 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turndown()
1892 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turndown()
1893 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1895 if (!gve_rx_was_added_to_block(priv, idx)) in gve_turndown()
1898 netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX, in gve_turndown()
1904 netif_tx_disable(priv->dev); in gve_turndown()
1906 gve_clear_napi_enabled(priv); in gve_turndown()
1907 gve_clear_report_stats(priv); in gve_turndown()
1913 static void gve_turnup(struct gve_priv *priv) in gve_turnup() argument
1918 netif_tx_start_all_queues(priv->dev); in gve_turnup()
1921 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { in gve_turnup()
1922 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turnup()
1923 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1925 if (!gve_tx_was_added_to_block(priv, idx)) in gve_turnup()
1930 if (idx < priv->tx_cfg.num_queues) in gve_turnup()
1931 netif_queue_set_napi(priv->dev, idx, in gve_turnup()
1935 if (gve_is_gqi(priv)) { in gve_turnup()
1936 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1938 gve_set_itr_coalesce_usecs_dqo(priv, block, in gve_turnup()
1939 priv->tx_coalesce_usecs); in gve_turnup()
1949 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turnup()
1950 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turnup()
1951 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1953 if (!gve_rx_was_added_to_block(priv, idx)) in gve_turnup()
1957 netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX, in gve_turnup()
1960 if (gve_is_gqi(priv)) { in gve_turnup()
1961 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1963 gve_set_itr_coalesce_usecs_dqo(priv, block, in gve_turnup()
1964 priv->rx_coalesce_usecs); in gve_turnup()
1975 gve_set_napi_enabled(priv); in gve_turnup()
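gve_turndown()/gve_turnup() above quiesce and resume the datapath around reconfiguration. A stripped-down sketch with a hypothetical per-channel array (the netif_queue_set_napi() bookkeeping and the doorbell/ITR re-arming in the real functions are elided): turndown drops the carrier, disables every NAPI instance and stops the tx queues; turnup restarts the tx queues and re-enables NAPI.

#include <linux/netdevice.h>

struct example_channel {
    struct napi_struct napi;
};

static void example_turndown(struct net_device *dev,
                             struct example_channel *ch, int n)
{
    int i;

    netif_carrier_off(dev);
    for (i = 0; i < n; i++)
        napi_disable(&ch[i].napi);
    netif_tx_disable(dev);
}

static void example_turnup(struct net_device *dev,
                           struct example_channel *ch, int n)
{
    int i;

    netif_tx_start_all_queues(dev);
    for (i = 0; i < n; i++)
        napi_enable(&ch[i].napi);
    /* Per-channel interrupts would be unmasked here. */
}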
1978 static void gve_turnup_and_check_status(struct gve_priv *priv) in gve_turnup_and_check_status() argument
1982 gve_turnup(priv); in gve_turnup_and_check_status()
1983 status = ioread32be(&priv->reg_bar0->device_status); in gve_turnup_and_check_status()
1984 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); in gve_turnup_and_check_status()
1991 struct gve_priv *priv; in gve_tx_timeout() local
1997 priv = netdev_priv(dev); in gve_tx_timeout()
1998 if (txqueue > priv->tx_cfg.num_queues) in gve_tx_timeout()
2001 ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); in gve_tx_timeout()
2002 if (ntfy_idx >= priv->num_ntfy_blks) in gve_tx_timeout()
2005 block = &priv->ntfy_blocks[ntfy_idx]; in gve_tx_timeout()
2015 last_nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_timeout()
2018 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_tx_timeout()
2025 gve_schedule_reset(priv); in gve_tx_timeout()
2030 priv->tx_timeo_cnt++; in gve_tx_timeout()
2033 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit) in gve_get_pkt_buf_size() argument
2035 if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE) in gve_get_pkt_buf_size()
2042 bool gve_header_split_supported(const struct gve_priv *priv) in gve_header_split_supported() argument
2044 return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT; in gve_header_split_supported()
2047 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split) in gve_set_hsplit_config() argument
2057 if (!gve_header_split_supported(priv)) { in gve_set_hsplit_config()
2058 dev_err(&priv->pdev->dev, "Header-split not supported\n"); in gve_set_hsplit_config()
2067 if (enable_hdr_split == priv->header_split_enabled) in gve_set_hsplit_config()
2070 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_set_hsplit_config()
2073 rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split); in gve_set_hsplit_config()
2075 if (netif_running(priv->dev)) in gve_set_hsplit_config()
2076 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_set_hsplit_config()
2086 struct gve_priv *priv = netdev_priv(netdev); in gve_set_features() local
2089 gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_set_features()
2094 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); in gve_set_features()
2100 err = gve_flow_rules_reset(priv); in gve_set_features()
2125 static void gve_handle_status(struct gve_priv *priv, u32 status) in gve_handle_status() argument
2128 dev_info(&priv->pdev->dev, "Device requested reset.\n"); in gve_handle_status()
2129 gve_set_do_reset(priv); in gve_handle_status()
2132 priv->stats_report_trigger_cnt++; in gve_handle_status()
2133 gve_set_do_report_stats(priv); in gve_handle_status()
2137 static void gve_handle_reset(struct gve_priv *priv) in gve_handle_reset() argument
2143 if (gve_get_probe_in_progress(priv)) in gve_handle_reset()
2146 if (gve_get_do_reset(priv)) { in gve_handle_reset()
2148 gve_reset(priv, false); in gve_handle_reset()
2153 void gve_handle_report_stats(struct gve_priv *priv) in gve_handle_report_stats() argument
2155 struct stats *stats = priv->stats_report->stats; in gve_handle_report_stats()
2160 if (!gve_get_report_stats(priv)) in gve_handle_report_stats()
2163 be64_add_cpu(&priv->stats_report->written_count, 1); in gve_handle_report_stats()
2165 if (priv->tx) { in gve_handle_report_stats()
2166 for (idx = 0; idx < gve_num_tx_queues(priv); idx++) { in gve_handle_report_stats()
2171 if (gve_is_gqi(priv)) { in gve_handle_report_stats()
2172 last_completion = priv->tx[idx].done; in gve_handle_report_stats()
2173 tx_frames = priv->tx[idx].req; in gve_handle_report_stats()
2177 start = u64_stats_fetch_begin(&priv->tx[idx].statss); in gve_handle_report_stats()
2178 tx_bytes = priv->tx[idx].bytes_done; in gve_handle_report_stats()
2179 } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start)); in gve_handle_report_stats()
2182 .value = cpu_to_be64(priv->tx[idx].wake_queue), in gve_handle_report_stats()
2187 .value = cpu_to_be64(priv->tx[idx].stop_queue), in gve_handle_report_stats()
2207 .value = cpu_to_be64(priv->tx[idx].queue_timeout), in gve_handle_report_stats()
2213 if (priv->rx) { in gve_handle_report_stats()
2214 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_handle_report_stats()
2217 .value = cpu_to_be64(priv->rx[idx].desc.seqno), in gve_handle_report_stats()
2222 .value = cpu_to_be64(priv->rx[0].fill_cnt), in gve_handle_report_stats()
2232 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_service_task() local
2234 u32 status = ioread32be(&priv->reg_bar0->device_status); in gve_service_task()
2236 gve_handle_status(priv, status); in gve_service_task()
2238 gve_handle_reset(priv); in gve_service_task()
2239 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); in gve_service_task()
2242 static void gve_set_netdev_xdp_features(struct gve_priv *priv) in gve_set_netdev_xdp_features() argument
2246 if (priv->queue_format == GVE_GQI_QPL_FORMAT) { in gve_set_netdev_xdp_features()
2255 xdp_set_features_flag(priv->dev, xdp_features); in gve_set_netdev_xdp_features()
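gve_set_netdev_xdp_features() above advertises which XDP operations the current queue format supports. A small sketch of that call; the flag set and the zerocopy knob here are illustrative, not the exact gve policy.

#include <linux/netdevice.h>
#include <net/xdp.h>

static void example_set_xdp_features(struct net_device *dev, bool zerocopy)
{
    xdp_features_t feat = NETDEV_XDP_ACT_BASIC |
                          NETDEV_XDP_ACT_REDIRECT |
                          NETDEV_XDP_ACT_NDO_XMIT;

    if (zerocopy)
        feat |= NETDEV_XDP_ACT_XSK_ZEROCOPY;

    xdp_set_features_flag(dev, feat);
}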
2258 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) in gve_init_priv() argument
2264 err = gve_adminq_alloc(&priv->pdev->dev, priv); in gve_init_priv()
2266 dev_err(&priv->pdev->dev, in gve_init_priv()
2271 err = gve_verify_driver_compatibility(priv); in gve_init_priv()
2273 dev_err(&priv->pdev->dev, in gve_init_priv()
2278 priv->num_registered_pages = 0; in gve_init_priv()
2283 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED; in gve_init_priv()
2285 err = gve_adminq_describe_device(priv); in gve_init_priv()
2287 dev_err(&priv->pdev->dev, in gve_init_priv()
2291 priv->dev->mtu = priv->dev->max_mtu; in gve_init_priv()
2292 num_ntfy = pci_msix_vec_count(priv->pdev); in gve_init_priv()
2294 dev_err(&priv->pdev->dev, in gve_init_priv()
2299 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n", in gve_init_priv()
2306 if (!gve_is_gqi(priv)) in gve_init_priv()
2307 netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX); in gve_init_priv()
2309 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; in gve_init_priv()
2313 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1; in gve_init_priv()
2314 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_init_priv()
2316 priv->tx_cfg.max_queues = in gve_init_priv()
2317 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
2318 priv->rx_cfg.max_queues = in gve_init_priv()
2319 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
2321 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_init_priv()
2322 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_init_priv()
2323 if (priv->default_num_queues > 0) { in gve_init_priv()
2324 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
2325 priv->tx_cfg.num_queues); in gve_init_priv()
2326 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
2327 priv->rx_cfg.num_queues); in gve_init_priv()
2330 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n", in gve_init_priv()
2331 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues); in gve_init_priv()
2332 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n", in gve_init_priv()
2333 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues); in gve_init_priv()
2335 if (!gve_is_gqi(priv)) { in gve_init_priv()
2336 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
2337 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
2341 gve_set_netdev_xdp_features(priv); in gve_init_priv()
2342 err = gve_setup_device_resources(priv); in gve_init_priv()
2346 gve_adminq_free(&priv->pdev->dev, priv); in gve_init_priv()
2350 static void gve_teardown_priv_resources(struct gve_priv *priv) in gve_teardown_priv_resources() argument
2352 gve_teardown_device_resources(priv); in gve_teardown_priv_resources()
2353 gve_adminq_free(&priv->pdev->dev, priv); in gve_teardown_priv_resources()
2356 static void gve_trigger_reset(struct gve_priv *priv) in gve_trigger_reset() argument
2359 gve_adminq_release(priv); in gve_trigger_reset()
2362 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up) in gve_reset_and_teardown() argument
2364 gve_trigger_reset(priv); in gve_reset_and_teardown()
2367 gve_close(priv->dev); in gve_reset_and_teardown()
2368 gve_teardown_priv_resources(priv); in gve_reset_and_teardown()
2371 static int gve_reset_recovery(struct gve_priv *priv, bool was_up) in gve_reset_recovery() argument
2375 err = gve_init_priv(priv, true); in gve_reset_recovery()
2379 err = gve_open(priv->dev); in gve_reset_recovery()
2385 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n"); in gve_reset_recovery()
2386 gve_turndown(priv); in gve_reset_recovery()
2390 int gve_reset(struct gve_priv *priv, bool attempt_teardown) in gve_reset() argument
2392 bool was_up = netif_running(priv->dev); in gve_reset()
2395 dev_info(&priv->pdev->dev, "Performing reset\n"); in gve_reset()
2396 gve_clear_do_reset(priv); in gve_reset()
2397 gve_set_reset_in_progress(priv); in gve_reset()
2402 gve_turndown(priv); in gve_reset()
2403 gve_reset_and_teardown(priv, was_up); in gve_reset()
2407 err = gve_close(priv->dev); in gve_reset()
2410 gve_reset_and_teardown(priv, was_up); in gve_reset()
2413 gve_teardown_priv_resources(priv); in gve_reset()
2417 err = gve_reset_recovery(priv, was_up); in gve_reset()
2418 gve_clear_reset_in_progress(priv); in gve_reset()
2419 priv->reset_cnt++; in gve_reset()
2420 priv->interface_up_cnt = 0; in gve_reset()
2421 priv->interface_down_cnt = 0; in gve_reset()
2422 priv->stats_report_trigger_cnt = 0; in gve_reset()
2445 struct gve_priv *priv = netdev_priv(dev); in gve_rx_queue_stop() local
2449 if (!priv->rx) in gve_rx_queue_stop()
2453 if (!gve_is_gqi(priv) && idx == 0) in gve_rx_queue_stop()
2457 gve_turndown(priv); in gve_rx_queue_stop()
2460 err = gve_adminq_destroy_single_rx_queue(priv, idx); in gve_rx_queue_stop()
2464 if (gve_is_qpl(priv)) { in gve_rx_queue_stop()
2466 err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx)); in gve_rx_queue_stop()
2471 gve_rx_stop_ring(priv, idx); in gve_rx_queue_stop()
2474 gve_turnup_and_check_status(priv); in gve_rx_queue_stop()
2477 *gve_per_q_mem = priv->rx[idx]; in gve_rx_queue_stop()
2478 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx])); in gve_rx_queue_stop()
2484 struct gve_priv *priv = netdev_priv(dev); in gve_rx_queue_mem_free() local
2489 gve_rx_get_curr_alloc_cfg(priv, &cfg); in gve_rx_queue_mem_free()
2491 if (gve_is_gqi(priv)) in gve_rx_queue_mem_free()
2492 gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg); in gve_rx_queue_mem_free()
2494 gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg); in gve_rx_queue_mem_free()
2500 struct gve_priv *priv = netdev_priv(dev); in gve_rx_queue_mem_alloc() local
2505 if (!priv->rx) in gve_rx_queue_mem_alloc()
2509 gve_rx_get_curr_alloc_cfg(priv, &cfg); in gve_rx_queue_mem_alloc()
2511 if (gve_is_gqi(priv)) in gve_rx_queue_mem_alloc()
2512 err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx); in gve_rx_queue_mem_alloc()
2514 err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx); in gve_rx_queue_mem_alloc()
2521 struct gve_priv *priv = netdev_priv(dev); in gve_rx_queue_start() local
2525 if (!priv->rx) in gve_rx_queue_start()
2529 priv->rx[idx] = *gve_per_q_mem; in gve_rx_queue_start()
2532 gve_turndown(priv); in gve_rx_queue_start()
2534 gve_rx_start_ring(priv, idx); in gve_rx_queue_start()
2536 if (gve_is_qpl(priv)) { in gve_rx_queue_start()
2538 err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx)); in gve_rx_queue_start()
2544 err = gve_adminq_create_single_rx_queue(priv, idx); in gve_rx_queue_start()
2548 if (gve_is_gqi(priv)) in gve_rx_queue_start()
2549 gve_rx_write_doorbell(priv, &priv->rx[idx]); in gve_rx_queue_start()
2551 gve_rx_post_buffers_dqo(&priv->rx[idx]); in gve_rx_queue_start()
2554 gve_turnup_and_check_status(priv); in gve_rx_queue_start()
2558 gve_rx_stop_ring(priv, idx); in gve_rx_queue_start()
2565 memset(&priv->rx[idx], 0, sizeof(priv->rx[idx])); in gve_rx_queue_start()
2580 struct gve_priv *priv = netdev_priv(dev); in gve_get_rx_queue_stats() local
2581 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_get_rx_queue_stats()
2596 struct gve_priv *priv = netdev_priv(dev); in gve_get_tx_queue_stats() local
2597 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_get_tx_queue_stats()
2631 struct gve_priv *priv; in gve_probe() local
2669 dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); in gve_probe()
2700 priv = netdev_priv(dev); in gve_probe()
2701 priv->dev = dev; in gve_probe()
2702 priv->pdev = pdev; in gve_probe()
2703 priv->msg_enable = DEFAULT_MSG_LEVEL; in gve_probe()
2704 priv->reg_bar0 = reg_bar; in gve_probe()
2705 priv->db_bar2 = db_bar; in gve_probe()
2706 priv->service_task_flags = 0x0; in gve_probe()
2707 priv->state_flags = 0x0; in gve_probe()
2708 priv->ethtool_flags = 0x0; in gve_probe()
2709 priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE; in gve_probe()
2710 priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; in gve_probe()
2712 gve_set_probe_in_progress(priv); in gve_probe()
2713 priv->gve_wq = alloc_ordered_workqueue("gve", 0); in gve_probe()
2714 if (!priv->gve_wq) { in gve_probe()
2719 INIT_WORK(&priv->service_task, gve_service_task); in gve_probe()
2720 INIT_WORK(&priv->stats_report_task, gve_stats_report_task); in gve_probe()
2721 priv->tx_cfg.max_queues = max_tx_queues; in gve_probe()
2722 priv->rx_cfg.max_queues = max_rx_queues; in gve_probe()
2724 err = gve_init_priv(priv, false); in gve_probe()
2733 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); in gve_probe()
2734 gve_clear_probe_in_progress(priv); in gve_probe()
2735 queue_work(priv->gve_wq, &priv->service_task); in gve_probe()
2739 gve_teardown_priv_resources(priv); in gve_probe()
2742 destroy_workqueue(priv->gve_wq); in gve_probe()
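The probe path above is the common alloc_etherdev_mqs()/netdev_priv() arrangement: the private struct is carved out of the net_device allocation, so netdev_priv() is plain pointer arithmetic and there is nothing separate to free. A bare-bones sketch of that arrangement with a hypothetical example_probe_priv (BAR mapping, device-resource setup and most of gve's error handling are left out):

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

struct example_probe_priv {
    struct net_device *dev;
    struct pci_dev *pdev;
    struct workqueue_struct *wq;
};

static int example_probe(struct pci_dev *pdev, int max_txqs, int max_rxqs)
{
    struct example_probe_priv *priv;
    struct net_device *dev;
    int err;

    dev = alloc_etherdev_mqs(sizeof(*priv), max_txqs, max_rxqs);
    if (!dev)
        return -ENOMEM;
    SET_NETDEV_DEV(dev, &pdev->dev);

    priv = netdev_priv(dev);
    priv->dev = dev;
    priv->pdev = pdev;
    priv->wq = alloc_ordered_workqueue("example", 0);
    if (!priv->wq) {
        err = -ENOMEM;
        goto err_free_netdev;
    }

    err = register_netdev(dev);
    if (err)
        goto err_destroy_wq;
    pci_set_drvdata(pdev, dev);
    return 0;

err_destroy_wq:
    destroy_workqueue(priv->wq);
err_free_netdev:
    free_netdev(dev);
    return err;
}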
2764 struct gve_priv *priv = netdev_priv(netdev); in gve_remove() local
2765 __be32 __iomem *db_bar = priv->db_bar2; in gve_remove()
2766 void __iomem *reg_bar = priv->reg_bar0; in gve_remove()
2769 gve_teardown_priv_resources(priv); in gve_remove()
2770 destroy_workqueue(priv->gve_wq); in gve_remove()
2781 struct gve_priv *priv = netdev_priv(netdev); in gve_shutdown() local
2782 bool was_up = netif_running(priv->dev); in gve_shutdown()
2785 if (was_up && gve_close(priv->dev)) { in gve_shutdown()
2787 gve_reset_and_teardown(priv, was_up); in gve_shutdown()
2790 gve_teardown_priv_resources(priv); in gve_shutdown()
2799 struct gve_priv *priv = netdev_priv(netdev); in gve_suspend() local
2800 bool was_up = netif_running(priv->dev); in gve_suspend()
2802 priv->suspend_cnt++; in gve_suspend()
2804 if (was_up && gve_close(priv->dev)) { in gve_suspend()
2806 gve_reset_and_teardown(priv, was_up); in gve_suspend()
2809 gve_teardown_priv_resources(priv); in gve_suspend()
2811 priv->up_before_suspend = was_up; in gve_suspend()
2819 struct gve_priv *priv = netdev_priv(netdev); in gve_resume() local
2822 priv->resume_cnt++; in gve_resume()
2824 err = gve_reset_recovery(priv, priv->up_before_suspend); in gve_resume()