// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <net/sch_generic.h>
#include <net/xdp_sock_drv.h>
#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_register.h"
#include "gve_utils.h"

#define GVE_DEFAULT_RX_COPYBREAK	(256)

#define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
#define GVE_VERSION		"1.0.0"
#define GVE_VERSION_PREFIX	"GVE-"

/* Minimum amount of time between queue kicks in msec (10 seconds) */
#define MIN_TX_TIMEOUT_GAP (1000 * 10)

char gve_driver_name[] = "gve";
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

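/* Advertise driver and OS details to the device.
 *
 * A gve_driver_info block is allocated with dma_alloc_coherent(), filled
 * in with the OS type, kernel version and driver capability flags, and
 * handed to the device over the admin queue. A device that does not
 * implement the command returns -EOPNOTSUPP, which is treated as success.
 */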
static int gve_verify_driver_compatibility(struct gve_priv *priv)
{
	int err;
	struct gve_driver_info *driver_info;
	dma_addr_t driver_info_bus;

	driver_info = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct gve_driver_info),
					 &driver_info_bus, GFP_KERNEL);
	if (!driver_info)
		return -ENOMEM;

	*driver_info = (struct gve_driver_info) {
		.os_type = 1, /* Linux */
		.os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR),
		.os_version_minor = cpu_to_be32(LINUX_VERSION_SUBLEVEL),
		.os_version_sub = cpu_to_be32(LINUX_VERSION_PATCHLEVEL),
		.driver_capability_flags = {
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
		},
	};
	strscpy(driver_info->os_version_str1, utsname()->release,
		sizeof(driver_info->os_version_str1));
	strscpy(driver_info->os_version_str2, utsname()->version,
		sizeof(driver_info->os_version_str2));

	err = gve_adminq_verify_driver_compatibility(priv,
						     sizeof(struct gve_driver_info),
						     driver_info_bus);

	/* It's ok if the device doesn't support this */
	if (err == -EOPNOTSUPP)
		err = 0;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct gve_driver_info),
			  driver_info, driver_info_bus);
	return err;
}

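/* Per-skb feature check: GQI offers the advertised features as-is, while
 * the DQO format defers to gve_features_check_dqo() for additional
 * validation.
 */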
static netdev_features_t gve_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	struct gve_priv *priv = netdev_priv(dev);

	if (!gve_is_gqi(priv))
		return gve_features_check_dqo(skb, dev, features);

	return features;
}

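/* ndo_start_xmit: hand the skb to the datapath matching the negotiated
 * queue format (GQI or DQO).
 */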
static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);

	if (gve_is_gqi(priv))
		return gve_tx(skb, dev);
	else
		return gve_tx_dqo(skb, dev);
}

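/* ndo_get_stats64: aggregate per-ring packet and byte counters. Each
 * ring's counters are sampled inside a u64_stats retry loop so that a
 * consistent snapshot is read even while the datapath is updating them.
 */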
static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	struct gve_priv *priv = netdev_priv(dev);
	unsigned int start;
	u64 packets, bytes;
	int num_tx_queues;
	int ring;

	num_tx_queues = gve_num_tx_queues(priv);
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				packets = priv->rx[ring].rpackets;
				bytes = priv->rx[ring].rbytes;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			s->rx_packets += packets;
			s->rx_bytes += bytes;
		}
	}
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				packets = priv->tx[ring].pkt_done;
				bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			s->tx_packets += packets;
			s->tx_bytes += bytes;
		}
	}
}

static int gve_alloc_counter_array(struct gve_priv *priv)
{
	priv->counter_array =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_event_counters *
				   sizeof(*priv->counter_array),
				   &priv->counter_array_bus, GFP_KERNEL);
	if (!priv->counter_array)
		return -ENOMEM;

	return 0;
}

static void gve_free_counter_array(struct gve_priv *priv)
{
	if (!priv->counter_array)
		return;

	dma_free_coherent(&priv->pdev->dev,
			  priv->num_event_counters *
			  sizeof(*priv->counter_array),
			  priv->counter_array, priv->counter_array_bus);
	priv->counter_array = NULL;
}

/* NIC requests to report stats */
static void gve_stats_report_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     stats_report_task);
	if (gve_get_do_report_stats(priv)) {
		gve_handle_report_stats(priv);
		gve_clear_do_report_stats(priv);
	}
}

static void gve_stats_report_schedule(struct gve_priv *priv)
{
	if (!gve_get_probe_in_progress(priv) &&
	    !gve_get_reset_in_progress(priv)) {
		gve_set_do_report_stats(priv);
		queue_work(priv->gve_wq, &priv->stats_report_task);
	}
}

static void gve_stats_report_timer(struct timer_list *t)
{
	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

	mod_timer(&priv->stats_report_timer,
		  round_jiffies(jiffies +
		  msecs_to_jiffies(priv->stats_report_timer_period)));
	gve_stats_report_schedule(priv);
}

static int gve_alloc_stats_report(struct gve_priv *priv)
{
	int tx_stats_num, rx_stats_num;

	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
		       gve_num_tx_queues(priv);
	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
		       priv->rx_cfg.num_queues;
	priv->stats_report_len = struct_size(priv->stats_report, stats,
					     size_add(tx_stats_num, rx_stats_num));
	priv->stats_report =
		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
				   &priv->stats_report_bus, GFP_KERNEL);
	if (!priv->stats_report)
		return -ENOMEM;
	/* Set up timer for the report-stats task */
	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
	return 0;
}

static void gve_free_stats_report(struct gve_priv *priv)
{
	if (!priv->stats_report)
		return;

	del_timer_sync(&priv->stats_report_timer);
	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
			  priv->stats_report, priv->stats_report_bus);
	priv->stats_report = NULL;
}

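/* Management interrupt: the actual handling is deferred to the service
 * task on the driver workqueue.
 */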
static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
	struct gve_priv *priv = arg;

	queue_work(priv->gve_wq, &priv->service_task);
	return IRQ_HANDLED;
}

static irqreturn_t gve_intr(int irq, void *arg)
{
	struct gve_notify_block *block = arg;
	struct gve_priv *priv = block->priv;

	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}

static irqreturn_t gve_intr_dqo(int irq, void *arg)
{
	struct gve_notify_block *block = arg;

	/* Interrupts are automatically masked */
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}

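/* NAPI poll for the GQI queue format. TX completions are always
 * processed; with a zero budget (e.g. netpoll) the function returns
 * before touching RX. The IRQ is only unmasked once napi_complete_done()
 * succeeds and the doorbell has been acked.
 */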
int gve_napi_poll(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block;
	__be32 __iomem *irq_doorbell;
	bool reschedule = false;
	struct gve_priv *priv;
	int work_done = 0;

	block = container_of(napi, struct gve_notify_block, napi);
	priv = block->priv;

	if (block->tx) {
		if (block->tx->q_num < priv->tx_cfg.num_queues)
			reschedule |= gve_tx_poll(block, budget);
		else if (budget)
			reschedule |= gve_xdp_poll(block, budget);
	}

	if (!budget)
		return 0;

	if (block->rx) {
		work_done = gve_rx_poll(block, budget);
		reschedule |= work_done == budget;
	}

	if (reschedule)
		return budget;

	/* Complete processing - don't unmask irq if busy polling is enabled */
	if (likely(napi_complete_done(napi, work_done))) {
		irq_doorbell = gve_irq_doorbell(priv, block);
		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

		/* Ensure the IRQ ACK is visible before we check for pending
		 * work. If the queue had issued any updates, they will now be
		 * visible.
		 */
		mb();

		if (block->tx)
			reschedule |= gve_tx_clean_pending(priv, block->tx);
		if (block->rx)
			reschedule |= gve_rx_work_pending(block->rx);

		if (reschedule && napi_schedule(napi))
			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
	}
	return work_done;
}

int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block =
		container_of(napi, struct gve_notify_block, napi);
	struct gve_priv *priv = block->priv;
	bool reschedule = false;
	int work_done = 0;

	if (block->tx)
		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);

	if (!budget)
		return 0;

	if (block->rx) {
		work_done = gve_rx_poll_dqo(block, budget);
		reschedule |= work_done == budget;
	}

	if (reschedule)
		return budget;

	if (likely(napi_complete_done(napi, work_done))) {
		/* Enable interrupts again.
		 *
		 * We don't need to repoll afterwards because HW supports the
		 * PCI MSI-X PBA feature.
		 *
		 * Another interrupt would be triggered if a new event came in
		 * since the last one.
		 */
		gve_write_irq_doorbell_dqo(priv, block,
					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
	}

	return work_done;
}

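/* Allocate one MSI-X vector per notification block plus one management
 * vector. If fewer vectors are granted than requested, the remaining
 * notification blocks are split evenly between TX and RX and the queue
 * maximums are scaled down to match.
 */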
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
	int num_vecs_requested = priv->num_ntfy_blks + 1;
	unsigned int active_cpus;
	int vecs_enabled;
	int i, j;
	int err;

	priv->msix_vectors = kvcalloc(num_vecs_requested,
				      sizeof(*priv->msix_vectors), GFP_KERNEL);
	if (!priv->msix_vectors)
		return -ENOMEM;
	for (i = 0; i < num_vecs_requested; i++)
		priv->msix_vectors[i].entry = i;
	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
					     GVE_MIN_MSIX, num_vecs_requested);
	if (vecs_enabled < 0) {
		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
			GVE_MIN_MSIX, vecs_enabled);
		err = vecs_enabled;
		goto abort_with_msix_vectors;
	}
	if (vecs_enabled != num_vecs_requested) {
		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
		int vecs_per_type = new_num_ntfy_blks / 2;
		int vecs_left = new_num_ntfy_blks % 2;

		priv->num_ntfy_blks = new_num_ntfy_blks;
		priv->mgmt_msix_idx = priv->num_ntfy_blks;
		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
						vecs_per_type);
		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
						vecs_per_type + vecs_left);
		dev_err(&priv->pdev->dev,
			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
			vecs_enabled, priv->tx_cfg.max_queues,
			priv->rx_cfg.max_queues);
		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	}
	/* Half the notification blocks go to TX and half to RX */
	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());

	/* Setup Management Vector - the last vector */
	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
		 pci_name(priv->pdev));
	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
	if (err) {
		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
		goto abort_with_msix_enabled;
	}
	priv->irq_db_indices =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_ntfy_blks *
				   sizeof(*priv->irq_db_indices),
				   &priv->irq_db_indices_bus, GFP_KERNEL);
	if (!priv->irq_db_indices) {
		err = -ENOMEM;
		goto abort_with_mgmt_vector;
	}

	priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
				     sizeof(*priv->ntfy_blocks), GFP_KERNEL);
	if (!priv->ntfy_blocks) {
		err = -ENOMEM;
		goto abort_with_irq_db_indices;
	}

	/* Setup the other blocks - the first n-1 vectors */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
			 i, pci_name(priv->pdev));
		block->priv = priv;
		err = request_irq(priv->msix_vectors[msix_idx].vector,
				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
				  0, block->name, block);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to receive msix vector %d\n", i);
			goto abort_with_some_ntfy_blocks;
		}
		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      get_cpu_mask(i % active_cpus));
		block->irq_db_index = &priv->irq_db_indices[i].index;
	}
	return 0;
abort_with_some_ntfy_blocks:
	for (j = 0; j < i; j++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[j];
		int msix_idx = j;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	kvfree(priv->ntfy_blocks);
	priv->ntfy_blocks = NULL;
abort_with_irq_db_indices:
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->irq_db_indices),
			  priv->irq_db_indices, priv->irq_db_indices_bus);
	priv->irq_db_indices = NULL;
abort_with_mgmt_vector:
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
	pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
	return err;
}

static void gve_free_notify_blocks(struct gve_priv *priv)
{
	int i;

	if (!priv->msix_vectors)
		return;

	/* Free the irqs */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
	kvfree(priv->ntfy_blocks);
	priv->ntfy_blocks = NULL;
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->irq_db_indices),
			  priv->irq_db_indices, priv->irq_db_indices_bus);
	priv->irq_db_indices = NULL;
	pci_disable_msix(priv->pdev);
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
}

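/* Allocate and register the resources the device needs before queues can
 * be created: the event counter array, the notification blocks, the
 * stats report buffer and, for the DQO format, the packet type LUT.
 */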
static int gve_setup_device_resources(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_counter_array(priv);
	if (err)
		return err;
	err = gve_alloc_notify_blocks(priv);
	if (err)
		goto abort_with_counter;
	err = gve_alloc_stats_report(priv);
	if (err)
		goto abort_with_ntfy_blocks;
	err = gve_adminq_configure_device_resources(priv,
						    priv->counter_array_bus,
						    priv->num_event_counters,
						    priv->irq_db_indices_bus,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		dev_err(&priv->pdev->dev,
			"could not setup device_resources: err=%d\n", err);
		err = -ENXIO;
		goto abort_with_stats_report;
	}

	if (!gve_is_gqi(priv)) {
		priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
					       GFP_KERNEL);
		if (!priv->ptype_lut_dqo) {
			err = -ENOMEM;
			goto abort_with_stats_report;
		}
		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to get ptype map: err=%d\n", err);
			goto abort_with_ptype_lut;
		}
	}

	err = gve_adminq_report_stats(priv, priv->stats_report_len,
				      priv->stats_report_bus,
				      GVE_STATS_REPORT_TIMER_PERIOD);
	if (err)
		dev_err(&priv->pdev->dev,
			"Failed to report stats: err=%d\n", err);
	gve_set_device_resources_ok(priv);
	return 0;

abort_with_ptype_lut:
	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;
abort_with_stats_report:
	gve_free_stats_report(priv);
abort_with_ntfy_blocks:
	gve_free_notify_blocks(priv);
abort_with_counter:
	gve_free_counter_array(priv);

	return err;
}

static void gve_trigger_reset(struct gve_priv *priv);

static void gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		/* detach the stats report */
		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to detach stats report: err=%d\n", err);
			gve_trigger_reset(priv);
		}
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Could not deconfigure device resources: err=%d\n",
				err);
			gve_trigger_reset(priv);
		}
	}

	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;

	gve_free_counter_array(priv);
	gve_free_notify_blocks(priv);
	gve_free_stats_report(priv);
	gve_clear_device_resources_ok(priv);
}

static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
{
	int err;

	err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "Failed to unregister queue page list %d\n",
			  priv->qpls[i].id);
		return err;
	}

	priv->num_registered_pages -= priv->qpls[i].num_entries;
	return 0;
}

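/* Register queue page list i with the device, enforcing the device's
 * limit on the total number of registered pages.
 */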
static int gve_register_qpl(struct gve_priv *priv, u32 i)
{
	int num_rx_qpls;
	int pages;
	int err;

	/* Rx QPLs follow Tx QPLs in the priv->qpls array. */
	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
	if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
		netif_err(priv, drv, priv->dev,
			  "Cannot register nonexistent QPL at index %d\n", i);
		return -EINVAL;
	}

	pages = priv->qpls[i].num_entries;

	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
		netif_err(priv, drv, priv->dev,
			  "Reached max number of registered pages %llu > %llu\n",
			  pages + priv->num_registered_pages,
			  priv->max_registered_pages);
		return -EINVAL;
	}

	err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to register queue page list %d\n",
			  priv->qpls[i].id);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}

	priv->num_registered_pages += pages;
	return 0;
}

static int gve_register_xdp_qpls(struct gve_priv *priv)
{
	int start_id;
	int err;
	int i;

	start_id = gve_xdp_tx_start_queue_id(priv);
	for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
		err = gve_register_qpl(priv, i);
		/* This failure will trigger a reset - no need to clean up */
		if (err)
			return err;
	}
	return 0;
}

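/* Register all TX QPLs (including the XDP ones), then all RX QPLs.
 * A failure here triggers a reset, so no unwinding is needed.
 */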
static int gve_register_qpls(struct gve_priv *priv)
{
	int num_tx_qpls, num_rx_qpls;
	int start_id;
	int err;
	int i;

	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
				      gve_is_qpl(priv));
	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));

	for (i = 0; i < num_tx_qpls; i++) {
		err = gve_register_qpl(priv, i);
		if (err)
			return err;
	}

	/* There might be a gap between the tx and rx qpl ids */
	start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
	for (i = 0; i < num_rx_qpls; i++) {
		err = gve_register_qpl(priv, start_id + i);
		if (err)
			return err;
	}

	return 0;
}

static int gve_unregister_xdp_qpls(struct gve_priv *priv)
{
	int start_id;
	int err;
	int i;

	start_id = gve_xdp_tx_start_queue_id(priv);
	for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
		err = gve_unregister_qpl(priv, i);
		/* This failure will trigger a reset - no need to clean up */
		if (err)
			return err;
	}
	return 0;
}

static int gve_unregister_qpls(struct gve_priv *priv)
{
	int num_tx_qpls, num_rx_qpls;
	int start_id;
	int err;
	int i;

	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
				      gve_is_qpl(priv));
	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));

	for (i = 0; i < num_tx_qpls; i++) {
		err = gve_unregister_qpl(priv, i);
		/* This failure will trigger a reset - no need to clean up */
		if (err)
			return err;
	}

	start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
	for (i = 0; i < num_rx_qpls; i++) {
		err = gve_unregister_qpl(priv, start_id + i);
		/* This failure will trigger a reset - no need to clean up */
		if (err)
			return err;
	}
	return 0;
}

static int gve_create_xdp_rings(struct gve_priv *priv)
{
	int err;

	err = gve_adminq_create_tx_queues(priv,
					  gve_xdp_tx_start_queue_id(priv),
					  priv->num_xdp_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
			  priv->num_xdp_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
		  priv->num_xdp_queues);

	return 0;
}

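/* Ask the device to create every TX and RX queue, then prime the RX
 * rings: GQI rings only need a doorbell write because their data rings
 * were prefilled at allocation time, while DQO rings post fresh buffers.
 */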
static int gve_create_rings(struct gve_priv *priv)
{
	int num_tx_queues = gve_num_tx_queues(priv);
	int err;
	int i;

	err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
			  num_tx_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
		  num_tx_queues);

	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
			  priv->rx_cfg.num_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
		  priv->rx_cfg.num_queues);

	if (gve_is_gqi(priv)) {
		/* Rx data ring has been prefilled with packet buffers at queue
		 * allocation time.
		 *
		 * Write the doorbell to provide descriptor slots and packet
		 * buffers to the NIC.
		 */
		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			gve_rx_write_doorbell(priv, &priv->rx[i]);
	} else {
		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			/* Post buffers and ring doorbell. */
			gve_rx_post_buffers_dqo(&priv->rx[i]);
		}
	}

	return 0;
}

static void init_xdp_sync_stats(struct gve_priv *priv)
{
	int start_id = gve_xdp_tx_start_queue_id(priv);
	int i;

	/* Init stats */
	for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);

		u64_stats_init(&priv->tx[i].statss);
		priv->tx[i].ntfy_id = ntfy_idx;
	}
}

static void gve_init_sync_stats(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++)
		u64_stats_init(&priv->tx[i].statss);

	/* Init stats for XDP TX queues */
	init_xdp_sync_stats(priv);

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		u64_stats_init(&priv->rx[i].statss);
}

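/* Snapshot the currently active TX ring allocation parameters so that
 * callers can adjust a copy (e.g. for the XDP queues) without touching
 * the live configuration.
 */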
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
				      struct gve_tx_alloc_rings_cfg *cfg)
{
	cfg->qcfg = &priv->tx_cfg;
	cfg->raw_addressing = !gve_is_qpl(priv);
	cfg->qpls = priv->qpls;
	cfg->qpl_cfg = &priv->qpl_cfg;
	cfg->ring_size = priv->tx_desc_cnt;
	cfg->start_idx = 0;
	cfg->num_rings = gve_num_tx_queues(priv);
	cfg->tx = priv->tx;
}

static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
{
	int i;

	if (!priv->tx)
		return;

	for (i = start_id; i < start_id + num_rings; i++) {
		if (gve_is_gqi(priv))
			gve_tx_stop_ring_gqi(priv, i);
		else
			gve_tx_stop_ring_dqo(priv, i);
	}
}

static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
			       int num_rings)
{
	int i;

	for (i = start_id; i < start_id + num_rings; i++) {
		if (gve_is_gqi(priv))
			gve_tx_start_ring_gqi(priv, i);
		else
			gve_tx_start_ring_dqo(priv, i);
	}
}

static int gve_alloc_xdp_rings(struct gve_priv *priv)
{
	struct gve_tx_alloc_rings_cfg cfg = {0};
	int err = 0;

	if (!priv->num_xdp_queues)
		return 0;

	gve_tx_get_curr_alloc_cfg(priv, &cfg);
	cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
	cfg.num_rings = priv->num_xdp_queues;

	err = gve_tx_alloc_rings_gqi(priv, &cfg);
	if (err)
		return err;

	gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
	init_xdp_sync_stats(priv);

	return 0;
}

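/* Allocate TX rings and then RX rings for the given configs, picking the
 * GQI or DQO allocator to match the queue format. If the RX allocation
 * fails, the TX rings are freed again before returning.
 */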
static int gve_alloc_rings(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
			   struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
	int err;

	if (gve_is_gqi(priv))
		err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
	else
		err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
	if (err)
		return err;

	if (gve_is_gqi(priv))
		err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
	else
		err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
	if (err)
		goto free_tx;

	return 0;

free_tx:
	if (gve_is_gqi(priv))
		gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
	else
		gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
	return err;
}

static int gve_destroy_xdp_rings(struct gve_priv *priv)
{
	int start_id;
	int err;

	start_id = gve_xdp_tx_start_queue_id(priv);
	err = gve_adminq_destroy_tx_queues(priv,
					   start_id,
					   priv->num_xdp_queues);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to destroy XDP queues\n");
		/* This failure will trigger a reset - no need to clean up */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");

	return 0;
}

static int gve_destroy_rings(struct gve_priv *priv)
{
	int num_tx_queues = gve_num_tx_queues(priv);
	int err;

	err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to destroy tx queues\n");
		/* This failure will trigger a reset - no need to clean up */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to destroy rx queues\n");
		/* This failure will trigger a reset - no need to clean up */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
	return 0;
}

static void gve_free_xdp_rings(struct gve_priv *priv)
{
	struct gve_tx_alloc_rings_cfg cfg = {0};

	gve_tx_get_curr_alloc_cfg(priv, &cfg);
	cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
	cfg.num_rings = priv->num_xdp_queues;

	if (priv->tx) {
		gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
		gve_tx_free_rings_gqi(priv, &cfg);
	}
}

static void gve_free_rings(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *tx_cfg,
			   struct gve_rx_alloc_rings_cfg *rx_cfg)
{
	if (gve_is_gqi(priv)) {
		gve_tx_free_rings_gqi(priv, tx_cfg);
		gve_rx_free_rings_gqi(priv, rx_cfg);
	} else {
		gve_tx_free_rings_dqo(priv, tx_cfg);
		gve_rx_free_rings_dqo(priv, rx_cfg);
	}
}

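/* Allocate a page and DMA-map it for device access, counting allocation
 * and mapping failures in the driver stats.
 */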
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction dir, gfp_t gfp_flags)
{
	*page = alloc_page(gfp_flags);
	if (!*page) {
		priv->page_alloc_fail++;
		return -ENOMEM;
	}
	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
	if (dma_mapping_error(dev, *dma)) {
		priv->dma_mapping_error++;
		put_page(*page);
		return -ENOMEM;
	}
	return 0;
}

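/* Allocate and DMA-map the pages backing one queue page list.
 * qpl->num_entries tracks how many pages were successfully set up, so
 * the caller can unwind a partial allocation.
 */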
1008*f13697ccSShailend Chand static int gve_alloc_queue_page_list(struct gve_priv *priv,
1009*f13697ccSShailend Chand 				     struct gve_queue_page_list *qpl,
1010*f13697ccSShailend Chand 				     u32 id, int pages)
1011f5cedc84SCatherine Sullivan {
1012f5cedc84SCatherine Sullivan 	int err;
1013f5cedc84SCatherine Sullivan 	int i;
1014f5cedc84SCatherine Sullivan 
1015f5cedc84SCatherine Sullivan 	qpl->id = id;
1016a95069ecSJeroen de Borst 	qpl->num_entries = 0;
10177fec4d39SGustavo A. R. Silva 	qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
1018f5cedc84SCatherine Sullivan 	/* caller handles clean up */
1019f5cedc84SCatherine Sullivan 	if (!qpl->pages)
1020f5cedc84SCatherine Sullivan 		return -ENOMEM;
10217fec4d39SGustavo A. R. Silva 	qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
1022f5cedc84SCatherine Sullivan 	/* caller handles clean up */
1023f5cedc84SCatherine Sullivan 	if (!qpl->page_buses)
1024f5cedc84SCatherine Sullivan 		return -ENOMEM;
1025f5cedc84SCatherine Sullivan 
1026f5cedc84SCatherine Sullivan 	for (i = 0; i < pages; i++) {
1027433e274bSKuo Zhao 		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
1028f5cedc84SCatherine Sullivan 				     &qpl->page_buses[i],
1029a92f7a6fSCatherine Sullivan 				     gve_qpl_dma_dir(priv, id), GFP_KERNEL);
1030f5cedc84SCatherine Sullivan 		/* caller handles clean up */
1031f5cedc84SCatherine Sullivan 		if (err)
1032f5cedc84SCatherine Sullivan 			return -ENOMEM;
1033a95069ecSJeroen de Borst 		qpl->num_entries++;
1034f5cedc84SCatherine Sullivan 	}
1035f5cedc84SCatherine Sullivan 
1036f5cedc84SCatherine Sullivan 	return 0;
1037f5cedc84SCatherine Sullivan }
1038f5cedc84SCatherine Sullivan 
1039f5cedc84SCatherine Sullivan void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1040f5cedc84SCatherine Sullivan 		   enum dma_data_direction dir)
1041f5cedc84SCatherine Sullivan {
1042f5cedc84SCatherine Sullivan 	if (!dma_mapping_error(dev, dma))
1043f5cedc84SCatherine Sullivan 		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
1044f5cedc84SCatherine Sullivan 	if (page)
1045f5cedc84SCatherine Sullivan 		put_page(page);
1046f5cedc84SCatherine Sullivan }
1047f5cedc84SCatherine Sullivan 
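/* Unmap and free every page of a QPL and release its tracking arrays.
 * Safe to call on a partially initialized QPL.
 */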
1048*f13697ccSShailend Chand static void gve_free_queue_page_list(struct gve_priv *priv,
1049*f13697ccSShailend Chand 				     struct gve_queue_page_list *qpl,
1050*f13697ccSShailend Chand 				     int id)
1051f5cedc84SCatherine Sullivan {
1052f5cedc84SCatherine Sullivan 	int i;
1053f5cedc84SCatherine Sullivan 
1054f5cedc84SCatherine Sullivan 	if (!qpl->pages)
1055f5cedc84SCatherine Sullivan 		return;
1056f5cedc84SCatherine Sullivan 	if (!qpl->page_buses)
1057f5cedc84SCatherine Sullivan 		goto free_pages;
1058f5cedc84SCatherine Sullivan 
1059f5cedc84SCatherine Sullivan 	for (i = 0; i < qpl->num_entries; i++)
1060f5cedc84SCatherine Sullivan 		gve_free_page(&priv->pdev->dev, qpl->pages[i],
1061f5cedc84SCatherine Sullivan 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
1062f5cedc84SCatherine Sullivan 
10638ec1e900SChuhong Yuan 	kvfree(qpl->page_buses);
10647fc2bf78SPraveen Kaligineedi 	qpl->page_buses = NULL;
1065f5cedc84SCatherine Sullivan free_pages:
10668ec1e900SChuhong Yuan 	kvfree(qpl->pages);
10677fc2bf78SPraveen Kaligineedi 	qpl->pages = NULL;
1068f5cedc84SCatherine Sullivan }
1069f5cedc84SCatherine Sullivan 
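/* Free num_qpls consecutive QPLs starting at start_id. */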
1070*f13697ccSShailend Chand static void gve_free_n_qpls(struct gve_priv *priv,
1071*f13697ccSShailend Chand 			    struct gve_queue_page_list *qpls,
1072*f13697ccSShailend Chand 			    int start_id,
1073*f13697ccSShailend Chand 			    int num_qpls)
107475eaae15SPraveen Kaligineedi {
1075*f13697ccSShailend Chand 	int i;
107675eaae15SPraveen Kaligineedi 
1077*f13697ccSShailend Chand 	for (i = start_id; i < start_id + num_qpls; i++)
1078*f13697ccSShailend Chand 		gve_free_queue_page_list(priv, &qpls[i], i);
1079*f13697ccSShailend Chand }
1080*f13697ccSShailend Chand 
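/* Allocate num_qpls consecutive QPLs of page_count pages each, starting at
 * start_id. Cleans up every QPL it touched, including the failing one, if
 * any allocation fails.
 */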
1081*f13697ccSShailend Chand static int gve_alloc_n_qpls(struct gve_priv *priv,
1082*f13697ccSShailend Chand 			    struct gve_queue_page_list *qpls,
1083*f13697ccSShailend Chand 			    int page_count,
1084*f13697ccSShailend Chand 			    int start_id,
1085*f13697ccSShailend Chand 			    int num_qpls)
1086*f13697ccSShailend Chand {
1087*f13697ccSShailend Chand 	int err;
1088*f13697ccSShailend Chand 	int i;
1089*f13697ccSShailend Chand 
1090*f13697ccSShailend Chand 	for (i = start_id; i < start_id + num_qpls; i++) {
1091*f13697ccSShailend Chand 		err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
109275eaae15SPraveen Kaligineedi 		if (err)
109375eaae15SPraveen Kaligineedi 			goto free_qpls;
109475eaae15SPraveen Kaligineedi 	}
109575eaae15SPraveen Kaligineedi 
109675eaae15SPraveen Kaligineedi 	return 0;
109775eaae15SPraveen Kaligineedi 
109875eaae15SPraveen Kaligineedi free_qpls:
1099*f13697ccSShailend Chand 	/* Must include the failing QPL too, since gve_alloc_queue_page_list
1100*f13697ccSShailend Chand 	 * fails without cleaning up after itself.
1101*f13697ccSShailend Chand 	 */
1102*f13697ccSShailend Chand 	gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
110375eaae15SPraveen Kaligineedi 	return err;
110475eaae15SPraveen Kaligineedi }
110575eaae15SPraveen Kaligineedi 
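/* Allocate the QPL array and id bitmap, then the TX QPLs followed by the
 * RX QPLs. A no-op when the device uses raw DMA addressing.
 */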
1106*f13697ccSShailend Chand static int gve_alloc_qpls(struct gve_priv *priv,
1107*f13697ccSShailend Chand 			  struct gve_qpls_alloc_cfg *cfg)
1108f5cedc84SCatherine Sullivan {
1109*f13697ccSShailend Chand 	int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
1110*f13697ccSShailend Chand 	int rx_start_id, tx_num_qpls, rx_num_qpls;
1111*f13697ccSShailend Chand 	struct gve_queue_page_list *qpls;
111266ce8e6bSRushil Gupta 	int page_count;
1113f5cedc84SCatherine Sullivan 	int err;
1114f5cedc84SCatherine Sullivan 
1115*f13697ccSShailend Chand 	if (cfg->raw_addressing)
11164944db80SCatherine Sullivan 		return 0;
11174944db80SCatherine Sullivan 
1118*f13697ccSShailend Chand 	qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
1119*f13697ccSShailend Chand 	if (!qpls)
1120f5cedc84SCatherine Sullivan 		return -ENOMEM;
1121f5cedc84SCatherine Sullivan 
1122*f13697ccSShailend Chand 	cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
1123*f13697ccSShailend Chand 		sizeof(unsigned long) * BITS_PER_BYTE;
1124*f13697ccSShailend Chand 	cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
1125*f13697ccSShailend Chand 					    sizeof(unsigned long), GFP_KERNEL);
1126*f13697ccSShailend Chand 	if (!cfg->qpl_cfg->qpl_id_map) {
1127*f13697ccSShailend Chand 		err = -ENOMEM;
1128*f13697ccSShailend Chand 		goto free_qpl_array;
1129f5cedc84SCatherine Sullivan 	}
11307fc2bf78SPraveen Kaligineedi 
1131*f13697ccSShailend Chand 	/* Allocate TX QPLs */
1132*f13697ccSShailend Chand 	page_count = priv->tx_pages_per_qpl;
1133*f13697ccSShailend Chand 	tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
1134*f13697ccSShailend Chand 				      gve_is_qpl(priv));
1135*f13697ccSShailend Chand 	err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
1136*f13697ccSShailend Chand 	if (err)
1137*f13697ccSShailend Chand 		goto free_qpl_map;
113866ce8e6bSRushil Gupta 
1139*f13697ccSShailend Chand 	/* Allocate RX QPLs */
1140*f13697ccSShailend Chand 	rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
114166ce8e6bSRushil Gupta 	/* For GQI_QPL the number of pages allocated has a 1:1 relationship
114266ce8e6bSRushil Gupta 	 * with the number of descriptors. For DQO, more pages are required
114366ce8e6bSRushil Gupta 	 * than there are descriptors (because of out-of-order completions).
114466ce8e6bSRushil Gupta 	 */
1145*f13697ccSShailend Chand 	page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
1146*f13697ccSShailend Chand 	rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
1147*f13697ccSShailend Chand 	err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
1148f5cedc84SCatherine Sullivan 	if (err)
1149*f13697ccSShailend Chand 		goto free_tx_qpls;
1150f5cedc84SCatherine Sullivan 
1151*f13697ccSShailend Chand 	cfg->qpls = qpls;
1152f5cedc84SCatherine Sullivan 	return 0;
1153f5cedc84SCatherine Sullivan 
1154*f13697ccSShailend Chand free_tx_qpls:
1155*f13697ccSShailend Chand 	gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
1156*f13697ccSShailend Chand free_qpl_map:
1157*f13697ccSShailend Chand 	kvfree(cfg->qpl_cfg->qpl_id_map);
1158*f13697ccSShailend Chand 	cfg->qpl_cfg->qpl_id_map = NULL;
1159*f13697ccSShailend Chand free_qpl_array:
1160*f13697ccSShailend Chand 	kvfree(qpls);
1161f5cedc84SCatherine Sullivan 	return err;
1162f5cedc84SCatherine Sullivan }
1163f5cedc84SCatherine Sullivan 
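/* Tear down everything gve_alloc_qpls() set up: the id bitmap, each QPL,
 * and the QPL array itself.
 */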
1164*f13697ccSShailend Chand static void gve_free_qpls(struct gve_priv *priv,
1165*f13697ccSShailend Chand 			  struct gve_qpls_alloc_cfg *cfg)
116675eaae15SPraveen Kaligineedi {
1167*f13697ccSShailend Chand 	int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
1168*f13697ccSShailend Chand 	struct gve_queue_page_list *qpls = cfg->qpls;
116975eaae15SPraveen Kaligineedi 	int i;
117075eaae15SPraveen Kaligineedi 
1171*f13697ccSShailend Chand 	if (!qpls)
11724944db80SCatherine Sullivan 		return;
11734944db80SCatherine Sullivan 
1174*f13697ccSShailend Chand 	kvfree(cfg->qpl_cfg->qpl_id_map);
1175*f13697ccSShailend Chand 	cfg->qpl_cfg->qpl_id_map = NULL;
1176f5cedc84SCatherine Sullivan 
11777fc2bf78SPraveen Kaligineedi 	for (i = 0; i < max_queues; i++)
1178*f13697ccSShailend Chand 		gve_free_queue_page_list(priv, &qpls[i], i);
1179f5cedc84SCatherine Sullivan 
1180*f13697ccSShailend Chand 	kvfree(qpls);
1181*f13697ccSShailend Chand 	cfg->qpls = NULL;
1182f5cedc84SCatherine Sullivan }
1183f5cedc84SCatherine Sullivan 
11849e5f7d26SCatherine Sullivan /* Use this to schedule a reset when the device is capable of continuing
11859e5f7d26SCatherine Sullivan  * to handle other requests in its current state. If it is not, perform the
11869e5f7d26SCatherine Sullivan  * reset directly in the calling thread instead.
11879e5f7d26SCatherine Sullivan  */
11889e5f7d26SCatherine Sullivan void gve_schedule_reset(struct gve_priv *priv)
11899e5f7d26SCatherine Sullivan {
11909e5f7d26SCatherine Sullivan 	gve_set_do_reset(priv);
11919e5f7d26SCatherine Sullivan 	queue_work(priv->gve_wq, &priv->service_task);
11929e5f7d26SCatherine Sullivan }
11939e5f7d26SCatherine Sullivan 
11949e5f7d26SCatherine Sullivan static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
11959e5f7d26SCatherine Sullivan static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
1196f5cedc84SCatherine Sullivan static void gve_turndown(struct gve_priv *priv);
1197f5cedc84SCatherine Sullivan static void gve_turnup(struct gve_priv *priv);
1198f5cedc84SCatherine Sullivan 
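/* Register XDP rxq info for each RX queue and, where an XSK pool is bound,
 * the XSK rxq info as well; also point the XDP TX queues at their pools.
 * Unwinds any registrations already made if a step fails.
 */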
119975eaae15SPraveen Kaligineedi static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
120075eaae15SPraveen Kaligineedi {
120175eaae15SPraveen Kaligineedi 	struct napi_struct *napi;
120275eaae15SPraveen Kaligineedi 	struct gve_rx_ring *rx;
120375eaae15SPraveen Kaligineedi 	int err = 0;
120475eaae15SPraveen Kaligineedi 	int i, j;
1205fd8e4032SPraveen Kaligineedi 	u32 tx_qid;
120675eaae15SPraveen Kaligineedi 
120775eaae15SPraveen Kaligineedi 	if (!priv->num_xdp_queues)
120875eaae15SPraveen Kaligineedi 		return 0;
120975eaae15SPraveen Kaligineedi 
121075eaae15SPraveen Kaligineedi 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
121175eaae15SPraveen Kaligineedi 		rx = &priv->rx[i];
121275eaae15SPraveen Kaligineedi 		napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
121375eaae15SPraveen Kaligineedi 
121475eaae15SPraveen Kaligineedi 		err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i,
121575eaae15SPraveen Kaligineedi 				       napi->napi_id);
121675eaae15SPraveen Kaligineedi 		if (err)
121775eaae15SPraveen Kaligineedi 			goto err;
121875eaae15SPraveen Kaligineedi 		err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
121975eaae15SPraveen Kaligineedi 						 MEM_TYPE_PAGE_SHARED, NULL);
122075eaae15SPraveen Kaligineedi 		if (err)
122175eaae15SPraveen Kaligineedi 			goto err;
1222fd8e4032SPraveen Kaligineedi 		rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
1223fd8e4032SPraveen Kaligineedi 		if (rx->xsk_pool) {
1224fd8e4032SPraveen Kaligineedi 			err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
1225fd8e4032SPraveen Kaligineedi 					       napi->napi_id);
1226fd8e4032SPraveen Kaligineedi 			if (err)
1227fd8e4032SPraveen Kaligineedi 				goto err;
1228fd8e4032SPraveen Kaligineedi 			err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
1229fd8e4032SPraveen Kaligineedi 							 MEM_TYPE_XSK_BUFF_POOL, NULL);
1230fd8e4032SPraveen Kaligineedi 			if (err)
1231fd8e4032SPraveen Kaligineedi 				goto err;
1232fd8e4032SPraveen Kaligineedi 			xsk_pool_set_rxq_info(rx->xsk_pool,
1233fd8e4032SPraveen Kaligineedi 					      &rx->xsk_rxq);
1234fd8e4032SPraveen Kaligineedi 		}
1235fd8e4032SPraveen Kaligineedi 	}
1236fd8e4032SPraveen Kaligineedi 
1237fd8e4032SPraveen Kaligineedi 	for (i = 0; i < priv->num_xdp_queues; i++) {
1238fd8e4032SPraveen Kaligineedi 		tx_qid = gve_xdp_tx_queue_id(priv, i);
1239fd8e4032SPraveen Kaligineedi 		priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
124075eaae15SPraveen Kaligineedi 	}
124175eaae15SPraveen Kaligineedi 	return 0;
124275eaae15SPraveen Kaligineedi 
124375eaae15SPraveen Kaligineedi err:
124475eaae15SPraveen Kaligineedi 	for (j = i; j >= 0; j--) {
124575eaae15SPraveen Kaligineedi 		rx = &priv->rx[j];
124675eaae15SPraveen Kaligineedi 		if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
124775eaae15SPraveen Kaligineedi 			xdp_rxq_info_unreg(&rx->xdp_rxq);
1248fd8e4032SPraveen Kaligineedi 		if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
1249fd8e4032SPraveen Kaligineedi 			xdp_rxq_info_unreg(&rx->xsk_rxq);
125075eaae15SPraveen Kaligineedi 	}
125175eaae15SPraveen Kaligineedi 	return err;
125275eaae15SPraveen Kaligineedi }
125375eaae15SPraveen Kaligineedi 
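/* Undo gve_reg_xdp_info(): unregister all rxq info and clear the xsk_pool
 * pointers on the RX and XDP TX queues.
 */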
125475eaae15SPraveen Kaligineedi static void gve_unreg_xdp_info(struct gve_priv *priv)
125575eaae15SPraveen Kaligineedi {
1256fd8e4032SPraveen Kaligineedi 	int i, tx_qid;
125775eaae15SPraveen Kaligineedi 
125875eaae15SPraveen Kaligineedi 	if (!priv->num_xdp_queues)
125975eaae15SPraveen Kaligineedi 		return;
126075eaae15SPraveen Kaligineedi 
126175eaae15SPraveen Kaligineedi 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
126275eaae15SPraveen Kaligineedi 		struct gve_rx_ring *rx = &priv->rx[i];
126375eaae15SPraveen Kaligineedi 
126475eaae15SPraveen Kaligineedi 		xdp_rxq_info_unreg(&rx->xdp_rxq);
1265fd8e4032SPraveen Kaligineedi 		if (rx->xsk_pool) {
1266fd8e4032SPraveen Kaligineedi 			xdp_rxq_info_unreg(&rx->xsk_rxq);
1267fd8e4032SPraveen Kaligineedi 			rx->xsk_pool = NULL;
1268fd8e4032SPraveen Kaligineedi 		}
1269fd8e4032SPraveen Kaligineedi 	}
1270fd8e4032SPraveen Kaligineedi 
1271fd8e4032SPraveen Kaligineedi 	for (i = 0; i < priv->num_xdp_queues; i++) {
1272fd8e4032SPraveen Kaligineedi 		tx_qid = gve_xdp_tx_queue_id(priv, i);
1273fd8e4032SPraveen Kaligineedi 		priv->tx[tx_qid].xsk_pool = NULL;
127475eaae15SPraveen Kaligineedi 	}
127575eaae15SPraveen Kaligineedi }
127675eaae15SPraveen Kaligineedi 
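/* Drain the per-RX-queue page frag caches before the rings are freed. */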
127739a7f4aaSPraveen Kaligineedi static void gve_drain_page_cache(struct gve_priv *priv)
127839a7f4aaSPraveen Kaligineedi {
127939a7f4aaSPraveen Kaligineedi 	struct page_frag_cache *nc;
128039a7f4aaSPraveen Kaligineedi 	int i;
128139a7f4aaSPraveen Kaligineedi 
128239a7f4aaSPraveen Kaligineedi 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
128339a7f4aaSPraveen Kaligineedi 		nc = &priv->rx[i].page_cache;
128439a7f4aaSPraveen Kaligineedi 		if (nc->va) {
128539a7f4aaSPraveen Kaligineedi 			__page_frag_cache_drain(virt_to_page(nc->va),
128639a7f4aaSPraveen Kaligineedi 						nc->pagecnt_bias);
128739a7f4aaSPraveen Kaligineedi 			nc->va = NULL;
128839a7f4aaSPraveen Kaligineedi 		}
128939a7f4aaSPraveen Kaligineedi 	}
129039a7f4aaSPraveen Kaligineedi }
129139a7f4aaSPraveen Kaligineedi 
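/* The gve_*_get_curr_alloc_cfg() helpers below snapshot the currently
 * active configuration into the alloc-config structs consumed by the QPL
 * and ring alloc/free paths.
 */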
1292*f13697ccSShailend Chand static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
1293*f13697ccSShailend Chand 					struct gve_qpls_alloc_cfg *cfg)
1294*f13697ccSShailend Chand {
1295*f13697ccSShailend Chand 	cfg->raw_addressing = !gve_is_qpl(priv);
1296*f13697ccSShailend Chand 	cfg->is_gqi = gve_is_gqi(priv);
1297*f13697ccSShailend Chand 	cfg->num_xdp_queues = priv->num_xdp_queues;
1298*f13697ccSShailend Chand 	cfg->qpl_cfg = &priv->qpl_cfg;
1299*f13697ccSShailend Chand 	cfg->tx_cfg = &priv->tx_cfg;
1300*f13697ccSShailend Chand 	cfg->rx_cfg = &priv->rx_cfg;
1301*f13697ccSShailend Chand 	cfg->qpls = priv->qpls;
1302*f13697ccSShailend Chand }
1303*f13697ccSShailend Chand 
1304*f13697ccSShailend Chand static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
1305*f13697ccSShailend Chand 				      struct gve_rx_alloc_rings_cfg *cfg)
1306*f13697ccSShailend Chand {
1307*f13697ccSShailend Chand 	cfg->qcfg = &priv->rx_cfg;
1308*f13697ccSShailend Chand 	cfg->qcfg_tx = &priv->tx_cfg;
1309*f13697ccSShailend Chand 	cfg->raw_addressing = !gve_is_qpl(priv);
1310*f13697ccSShailend Chand 	cfg->qpls = priv->qpls;
1311*f13697ccSShailend Chand 	cfg->qpl_cfg = &priv->qpl_cfg;
1312*f13697ccSShailend Chand 	cfg->ring_size = priv->rx_desc_cnt;
1313*f13697ccSShailend Chand 	cfg->rx = priv->rx;
1314*f13697ccSShailend Chand }
1315*f13697ccSShailend Chand 
1316*f13697ccSShailend Chand static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1317*f13697ccSShailend Chand 				    struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
1318*f13697ccSShailend Chand 				    struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1319*f13697ccSShailend Chand 				    struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1320*f13697ccSShailend Chand {
1321*f13697ccSShailend Chand 	gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
1322*f13697ccSShailend Chand 	gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
1323*f13697ccSShailend Chand 	gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
1324*f13697ccSShailend Chand }
1325*f13697ccSShailend Chand 
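/* Start the first num_rings RX rings, dispatching on the queue format. */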
1326*f13697ccSShailend Chand static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
1327*f13697ccSShailend Chand {
1328*f13697ccSShailend Chand 	int i;
1329*f13697ccSShailend Chand 
1330*f13697ccSShailend Chand 	for (i = 0; i < num_rings; i++) {
1331*f13697ccSShailend Chand 		if (gve_is_gqi(priv))
1332*f13697ccSShailend Chand 			gve_rx_start_ring_gqi(priv, i);
1333*f13697ccSShailend Chand 		else
1334*f13697ccSShailend Chand 			gve_rx_start_ring_dqo(priv, i);
1335*f13697ccSShailend Chand 	}
1336*f13697ccSShailend Chand }
1337*f13697ccSShailend Chand 
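/* Stop the first num_rings RX rings; a no-op if none were allocated. */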
1338*f13697ccSShailend Chand static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
1339*f13697ccSShailend Chand {
1340*f13697ccSShailend Chand 	int i;
1341*f13697ccSShailend Chand 
1342*f13697ccSShailend Chand 	if (!priv->rx)
1343*f13697ccSShailend Chand 		return;
1344*f13697ccSShailend Chand 
1345*f13697ccSShailend Chand 	for (i = 0; i < num_rings; i++) {
1346*f13697ccSShailend Chand 		if (gve_is_gqi(priv))
1347*f13697ccSShailend Chand 			gve_rx_stop_ring_gqi(priv, i);
1348*f13697ccSShailend Chand 		else
1349*f13697ccSShailend Chand 			gve_rx_stop_ring_dqo(priv, i);
1350*f13697ccSShailend Chand 	}
1351*f13697ccSShailend Chand }
1352*f13697ccSShailend Chand 
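/* ndo_open: allocate QPLs and rings from the current configuration,
 * register them with the device, create the device rings, and bring the
 * data path up. Failures after the device has been touched funnel into a
 * reset unless one is already in progress.
 */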
1353f5cedc84SCatherine Sullivan static int gve_open(struct net_device *dev)
1354f5cedc84SCatherine Sullivan {
1355*f13697ccSShailend Chand 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1356*f13697ccSShailend Chand 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1357*f13697ccSShailend Chand 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
1358f5cedc84SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(dev);
1359f5cedc84SCatherine Sullivan 	int err;
1360f5cedc84SCatherine Sullivan 
136175eaae15SPraveen Kaligineedi 	if (priv->xdp_prog)
136275eaae15SPraveen Kaligineedi 		priv->num_xdp_queues = priv->rx_cfg.num_queues;
136375eaae15SPraveen Kaligineedi 	else
136475eaae15SPraveen Kaligineedi 		priv->num_xdp_queues = 0;
136575eaae15SPraveen Kaligineedi 
1366*f13697ccSShailend Chand 	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
1367*f13697ccSShailend Chand 				&tx_alloc_cfg, &rx_alloc_cfg);
1368*f13697ccSShailend Chand 	err = gve_alloc_qpls(priv, &qpls_alloc_cfg);
1369f5cedc84SCatherine Sullivan 	if (err)
1370f5cedc84SCatherine Sullivan 		return err;
1371*f13697ccSShailend Chand 	priv->qpls = qpls_alloc_cfg.qpls;
1372*f13697ccSShailend Chand 	tx_alloc_cfg.qpls = priv->qpls;
1373*f13697ccSShailend Chand 	rx_alloc_cfg.qpls = priv->qpls;
1374*f13697ccSShailend Chand 	err = gve_alloc_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1375f5cedc84SCatherine Sullivan 	if (err)
1376f5cedc84SCatherine Sullivan 		goto free_qpls;
1377f5cedc84SCatherine Sullivan 
1378*f13697ccSShailend Chand 	gve_tx_start_rings(priv, 0, tx_alloc_cfg.num_rings);
1379*f13697ccSShailend Chand 	gve_rx_start_rings(priv, rx_alloc_cfg.qcfg->num_queues);
1380*f13697ccSShailend Chand 	gve_init_sync_stats(priv);
1381*f13697ccSShailend Chand 
1382f5cedc84SCatherine Sullivan 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
1383f5cedc84SCatherine Sullivan 	if (err)
1384f5cedc84SCatherine Sullivan 		goto free_rings;
1385f5cedc84SCatherine Sullivan 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
1386f5cedc84SCatherine Sullivan 	if (err)
1387f5cedc84SCatherine Sullivan 		goto free_rings;
1388f5cedc84SCatherine Sullivan 
138975eaae15SPraveen Kaligineedi 	err = gve_reg_xdp_info(priv, dev);
139075eaae15SPraveen Kaligineedi 	if (err)
139175eaae15SPraveen Kaligineedi 		goto free_rings;
139275eaae15SPraveen Kaligineedi 
1393f5cedc84SCatherine Sullivan 	err = gve_register_qpls(priv);
1394f5cedc84SCatherine Sullivan 	if (err)
13959e5f7d26SCatherine Sullivan 		goto reset;
13965e8c5adfSBailey Forrest 
13975e8c5adfSBailey Forrest 	if (!gve_is_gqi(priv)) {
13985e8c5adfSBailey Forrest 		/* Hard code this for now. This may be tuned in the future for
13995e8c5adfSBailey Forrest 		 * performance.
14005e8c5adfSBailey Forrest 		 */
1401da7d4b42SJohn Fraker 		priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
14025e8c5adfSBailey Forrest 	}
1403f5cedc84SCatherine Sullivan 	err = gve_create_rings(priv);
1404f5cedc84SCatherine Sullivan 	if (err)
14059e5f7d26SCatherine Sullivan 		goto reset;
14065e8c5adfSBailey Forrest 
1407f5cedc84SCatherine Sullivan 	gve_set_device_rings_ok(priv);
1408f5cedc84SCatherine Sullivan 
140924aeb56fSKuo Zhao 	if (gve_get_report_stats(priv))
141024aeb56fSKuo Zhao 		mod_timer(&priv->stats_report_timer,
141124aeb56fSKuo Zhao 			  round_jiffies(jiffies +
141224aeb56fSKuo Zhao 				msecs_to_jiffies(priv->stats_report_timer_period)));
141324aeb56fSKuo Zhao 
1414f5cedc84SCatherine Sullivan 	gve_turnup(priv);
14153b7cc736SPatricio Noyola 	queue_work(priv->gve_wq, &priv->service_task);
1416433e274bSKuo Zhao 	priv->interface_up_cnt++;
1417f5cedc84SCatherine Sullivan 	return 0;
1418f5cedc84SCatherine Sullivan 
1419f5cedc84SCatherine Sullivan free_rings:
1420*f13697ccSShailend Chand 	gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
1421*f13697ccSShailend Chand 	gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
1422*f13697ccSShailend Chand 	gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1423f5cedc84SCatherine Sullivan free_qpls:
1424*f13697ccSShailend Chand 	gve_free_qpls(priv, &qpls_alloc_cfg);
1425f5cedc84SCatherine Sullivan 	return err;
14269e5f7d26SCatherine Sullivan 
14279e5f7d26SCatherine Sullivan reset:
14289e5f7d26SCatherine Sullivan 	/* This must have been called from a reset path, which already holds
14299e5f7d26SCatherine Sullivan 	 * the rtnl lock, so just return at this point.
14309e5f7d26SCatherine Sullivan 	 */
14319e5f7d26SCatherine Sullivan 	if (gve_get_reset_in_progress(priv))
14329e5f7d26SCatherine Sullivan 		return err;
14339e5f7d26SCatherine Sullivan 	/* Otherwise reset before returning */
14349e5f7d26SCatherine Sullivan 	gve_reset_and_teardown(priv, true);
14359e5f7d26SCatherine Sullivan 	/* if this fails there is nothing we can do so just ignore the return */
14369e5f7d26SCatherine Sullivan 	gve_reset_recovery(priv, false);
14379e5f7d26SCatherine Sullivan 	/* return the original error */
14389e5f7d26SCatherine Sullivan 	return err;
1439f5cedc84SCatherine Sullivan }
1440f5cedc84SCatherine Sullivan 
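/* ndo_stop: mirror of gve_open(): destroy the device rings, unregister the
 * QPLs, then free the rings and QPLs. Failures funnel into a reset.
 */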
1441f5cedc84SCatherine Sullivan static int gve_close(struct net_device *dev)
1442f5cedc84SCatherine Sullivan {
1443*f13697ccSShailend Chand 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1444*f13697ccSShailend Chand 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1445*f13697ccSShailend Chand 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
1446f5cedc84SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(dev);
1447f5cedc84SCatherine Sullivan 	int err;
1448f5cedc84SCatherine Sullivan 
1449f5cedc84SCatherine Sullivan 	netif_carrier_off(dev);
1450f5cedc84SCatherine Sullivan 	if (gve_get_device_rings_ok(priv)) {
1451f5cedc84SCatherine Sullivan 		gve_turndown(priv);
145239a7f4aaSPraveen Kaligineedi 		gve_drain_page_cache(priv);
1453f5cedc84SCatherine Sullivan 		err = gve_destroy_rings(priv);
1454f5cedc84SCatherine Sullivan 		if (err)
14559e5f7d26SCatherine Sullivan 			goto err;
1456f5cedc84SCatherine Sullivan 		err = gve_unregister_qpls(priv);
1457f5cedc84SCatherine Sullivan 		if (err)
14589e5f7d26SCatherine Sullivan 			goto err;
1459f5cedc84SCatherine Sullivan 		gve_clear_device_rings_ok(priv);
1460f5cedc84SCatherine Sullivan 	}
146124aeb56fSKuo Zhao 	del_timer_sync(&priv->stats_report_timer);
1462f5cedc84SCatherine Sullivan 
146375eaae15SPraveen Kaligineedi 	gve_unreg_xdp_info(priv);
1464*f13697ccSShailend Chand 
1465*f13697ccSShailend Chand 	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
1466*f13697ccSShailend Chand 				&tx_alloc_cfg, &rx_alloc_cfg);
1467*f13697ccSShailend Chand 	gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
1468*f13697ccSShailend Chand 	gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
1469*f13697ccSShailend Chand 	gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1470*f13697ccSShailend Chand 	gve_free_qpls(priv, &qpls_alloc_cfg);
1471*f13697ccSShailend Chand 
1472433e274bSKuo Zhao 	priv->interface_down_cnt++;
1473f5cedc84SCatherine Sullivan 	return 0;
14749e5f7d26SCatherine Sullivan 
14759e5f7d26SCatherine Sullivan err:
14769e5f7d26SCatherine Sullivan 	/* This must have been called from a reset path, which already holds
14779e5f7d26SCatherine Sullivan 	 * the rtnl lock, so just return at this point.
14789e5f7d26SCatherine Sullivan 	 */
14799e5f7d26SCatherine Sullivan 	if (gve_get_reset_in_progress(priv))
14809e5f7d26SCatherine Sullivan 		return err;
14819e5f7d26SCatherine Sullivan 	/* Otherwise reset before returning */
14829e5f7d26SCatherine Sullivan 	gve_reset_and_teardown(priv, true);
14839e5f7d26SCatherine Sullivan 	return gve_reset_recovery(priv, false);
1484f5cedc84SCatherine Sullivan }
1485f5cedc84SCatherine Sullivan 
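/* Destroy, unregister and free the XDP TX rings and their QPLs. Callers
 * quiesce the data path first (see gve_set_xdp()).
 */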
148675eaae15SPraveen Kaligineedi static int gve_remove_xdp_queues(struct gve_priv *priv)
148775eaae15SPraveen Kaligineedi {
1488*f13697ccSShailend Chand 	int qpl_start_id;
148975eaae15SPraveen Kaligineedi 	int err;
149075eaae15SPraveen Kaligineedi 
1491*f13697ccSShailend Chand 	qpl_start_id = gve_xdp_tx_start_queue_id(priv);
1492*f13697ccSShailend Chand 
149375eaae15SPraveen Kaligineedi 	err = gve_destroy_xdp_rings(priv);
149475eaae15SPraveen Kaligineedi 	if (err)
149575eaae15SPraveen Kaligineedi 		return err;
149675eaae15SPraveen Kaligineedi 
149775eaae15SPraveen Kaligineedi 	err = gve_unregister_xdp_qpls(priv);
149875eaae15SPraveen Kaligineedi 	if (err)
149975eaae15SPraveen Kaligineedi 		return err;
150075eaae15SPraveen Kaligineedi 
150175eaae15SPraveen Kaligineedi 	gve_unreg_xdp_info(priv);
150275eaae15SPraveen Kaligineedi 	gve_free_xdp_rings(priv);
1503*f13697ccSShailend Chand 
1504*f13697ccSShailend Chand 	gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
150575eaae15SPraveen Kaligineedi 	priv->num_xdp_queues = 0;
150675eaae15SPraveen Kaligineedi 	return 0;
150775eaae15SPraveen Kaligineedi }
150875eaae15SPraveen Kaligineedi 
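/* Allocate, register and create one XDP TX ring per RX queue, along with
 * the QPLs backing them.
 */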
150975eaae15SPraveen Kaligineedi static int gve_add_xdp_queues(struct gve_priv *priv)
151075eaae15SPraveen Kaligineedi {
1511*f13697ccSShailend Chand 	int start_id;
151275eaae15SPraveen Kaligineedi 	int err;
151375eaae15SPraveen Kaligineedi 
1514*f13697ccSShailend Chand 	priv->num_xdp_queues = priv->rx_cfg.num_queues;
151575eaae15SPraveen Kaligineedi 
1516*f13697ccSShailend Chand 	start_id = gve_xdp_tx_start_queue_id(priv);
1517*f13697ccSShailend Chand 	err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
1518*f13697ccSShailend Chand 			       start_id, gve_num_xdp_qpls(priv));
151975eaae15SPraveen Kaligineedi 	if (err)
152075eaae15SPraveen Kaligineedi 		goto err;
152175eaae15SPraveen Kaligineedi 
152275eaae15SPraveen Kaligineedi 	err = gve_alloc_xdp_rings(priv);
152375eaae15SPraveen Kaligineedi 	if (err)
152475eaae15SPraveen Kaligineedi 		goto free_xdp_qpls;
152575eaae15SPraveen Kaligineedi 
152675eaae15SPraveen Kaligineedi 	err = gve_reg_xdp_info(priv, priv->dev);
152775eaae15SPraveen Kaligineedi 	if (err)
152875eaae15SPraveen Kaligineedi 		goto free_xdp_rings;
152975eaae15SPraveen Kaligineedi 
153075eaae15SPraveen Kaligineedi 	err = gve_register_xdp_qpls(priv);
153175eaae15SPraveen Kaligineedi 	if (err)
153275eaae15SPraveen Kaligineedi 		goto free_xdp_rings;
153375eaae15SPraveen Kaligineedi 
153475eaae15SPraveen Kaligineedi 	err = gve_create_xdp_rings(priv);
153575eaae15SPraveen Kaligineedi 	if (err)
153675eaae15SPraveen Kaligineedi 		goto free_xdp_rings;
153775eaae15SPraveen Kaligineedi 
153875eaae15SPraveen Kaligineedi 	return 0;
153975eaae15SPraveen Kaligineedi 
154075eaae15SPraveen Kaligineedi free_xdp_rings:
154175eaae15SPraveen Kaligineedi 	gve_free_xdp_rings(priv);
154275eaae15SPraveen Kaligineedi free_xdp_qpls:
1543*f13697ccSShailend Chand 	gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
154475eaae15SPraveen Kaligineedi err:
154575eaae15SPraveen Kaligineedi 	priv->num_xdp_queues = 0;
154675eaae15SPraveen Kaligineedi 	return err;
154775eaae15SPraveen Kaligineedi }
154875eaae15SPraveen Kaligineedi 
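/* Sync the netif carrier state with the device-reported link status. */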
154975eaae15SPraveen Kaligineedi static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
155075eaae15SPraveen Kaligineedi {
155175eaae15SPraveen Kaligineedi 	if (!gve_get_napi_enabled(priv))
155275eaae15SPraveen Kaligineedi 		return;
155375eaae15SPraveen Kaligineedi 
155475eaae15SPraveen Kaligineedi 	if (link_status == netif_carrier_ok(priv->dev))
155575eaae15SPraveen Kaligineedi 		return;
155675eaae15SPraveen Kaligineedi 
155775eaae15SPraveen Kaligineedi 	if (link_status) {
155875eaae15SPraveen Kaligineedi 		netdev_info(priv->dev, "Device link is up.\n");
155975eaae15SPraveen Kaligineedi 		netif_carrier_on(priv->dev);
156075eaae15SPraveen Kaligineedi 	} else {
156175eaae15SPraveen Kaligineedi 		netdev_info(priv->dev, "Device link is down.\n");
156275eaae15SPraveen Kaligineedi 		netif_carrier_off(priv->dev);
156375eaae15SPraveen Kaligineedi 	}
156475eaae15SPraveen Kaligineedi }
156575eaae15SPraveen Kaligineedi 
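/* Install or remove an XDP program. If the link is up, the data path is
 * quiesced with gve_turndown() while the XDP TX queues are added or
 * removed, then restarted with gve_turnup().
 */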
156675eaae15SPraveen Kaligineedi static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
156775eaae15SPraveen Kaligineedi 		       struct netlink_ext_ack *extack)
156875eaae15SPraveen Kaligineedi {
156975eaae15SPraveen Kaligineedi 	struct bpf_prog *old_prog;
157075eaae15SPraveen Kaligineedi 	int err = 0;
157175eaae15SPraveen Kaligineedi 	u32 status;
157275eaae15SPraveen Kaligineedi 
157375eaae15SPraveen Kaligineedi 	old_prog = READ_ONCE(priv->xdp_prog);
157475eaae15SPraveen Kaligineedi 	if (!netif_carrier_ok(priv->dev)) {
157575eaae15SPraveen Kaligineedi 		WRITE_ONCE(priv->xdp_prog, prog);
157675eaae15SPraveen Kaligineedi 		if (old_prog)
157775eaae15SPraveen Kaligineedi 			bpf_prog_put(old_prog);
157875eaae15SPraveen Kaligineedi 		return 0;
157975eaae15SPraveen Kaligineedi 	}
158075eaae15SPraveen Kaligineedi 
158175eaae15SPraveen Kaligineedi 	gve_turndown(priv);
158275eaae15SPraveen Kaligineedi 	if (!old_prog && prog) {
158375eaae15SPraveen Kaligineedi 		// Allocate XDP TX queues if an XDP program is
158475eaae15SPraveen Kaligineedi 		// being installed
158575eaae15SPraveen Kaligineedi 		err = gve_add_xdp_queues(priv);
158675eaae15SPraveen Kaligineedi 		if (err)
158775eaae15SPraveen Kaligineedi 			goto out;
158875eaae15SPraveen Kaligineedi 	} else if (old_prog && !prog) {
158975eaae15SPraveen Kaligineedi 		// Remove XDP TX queues if an XDP program is
159075eaae15SPraveen Kaligineedi 		// being uninstalled
159175eaae15SPraveen Kaligineedi 		err = gve_remove_xdp_queues(priv);
159275eaae15SPraveen Kaligineedi 		if (err)
159375eaae15SPraveen Kaligineedi 			goto out;
159475eaae15SPraveen Kaligineedi 	}
159575eaae15SPraveen Kaligineedi 	WRITE_ONCE(priv->xdp_prog, prog);
159675eaae15SPraveen Kaligineedi 	if (old_prog)
159775eaae15SPraveen Kaligineedi 		bpf_prog_put(old_prog);
159875eaae15SPraveen Kaligineedi 
159975eaae15SPraveen Kaligineedi out:
160075eaae15SPraveen Kaligineedi 	gve_turnup(priv);
160175eaae15SPraveen Kaligineedi 	status = ioread32be(&priv->reg_bar0->device_status);
160275eaae15SPraveen Kaligineedi 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
160375eaae15SPraveen Kaligineedi 	return err;
160475eaae15SPraveen Kaligineedi }
160575eaae15SPraveen Kaligineedi 
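/* Bind an AF_XDP buffer pool to RX queue qid and its paired XDP TX queue,
 * after validating qid and the pool's frame size against the device MTU.
 */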
1606fd8e4032SPraveen Kaligineedi static int gve_xsk_pool_enable(struct net_device *dev,
1607fd8e4032SPraveen Kaligineedi 			       struct xsk_buff_pool *pool,
1608fd8e4032SPraveen Kaligineedi 			       u16 qid)
1609fd8e4032SPraveen Kaligineedi {
1610fd8e4032SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
1611fd8e4032SPraveen Kaligineedi 	struct napi_struct *napi;
1612fd8e4032SPraveen Kaligineedi 	struct gve_rx_ring *rx;
1613fd8e4032SPraveen Kaligineedi 	int tx_qid;
1614fd8e4032SPraveen Kaligineedi 	int err;
1615fd8e4032SPraveen Kaligineedi 
1616fd8e4032SPraveen Kaligineedi 	if (qid >= priv->rx_cfg.num_queues) {
1617fd8e4032SPraveen Kaligineedi 		dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
1618fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1619fd8e4032SPraveen Kaligineedi 	}
1620fd8e4032SPraveen Kaligineedi 	if (xsk_pool_get_rx_frame_size(pool) <
1621fd8e4032SPraveen Kaligineedi 	     priv->dev->max_mtu + sizeof(struct ethhdr)) {
1622fd8e4032SPraveen Kaligineedi 		dev_err(&priv->pdev->dev, "xsk pool frame_len too small");
1623fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1624fd8e4032SPraveen Kaligineedi 	}
1625fd8e4032SPraveen Kaligineedi 
1626fd8e4032SPraveen Kaligineedi 	err = xsk_pool_dma_map(pool, &priv->pdev->dev,
1627fd8e4032SPraveen Kaligineedi 			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1628fd8e4032SPraveen Kaligineedi 	if (err)
1629fd8e4032SPraveen Kaligineedi 		return err;
1630fd8e4032SPraveen Kaligineedi 
1631fd8e4032SPraveen Kaligineedi 	/* If XDP prog is not installed, return */
1632fd8e4032SPraveen Kaligineedi 	if (!priv->xdp_prog)
1633fd8e4032SPraveen Kaligineedi 		return 0;
1634fd8e4032SPraveen Kaligineedi 
1635fd8e4032SPraveen Kaligineedi 	rx = &priv->rx[qid];
1636fd8e4032SPraveen Kaligineedi 	napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
1637fd8e4032SPraveen Kaligineedi 	err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
1638fd8e4032SPraveen Kaligineedi 	if (err)
1639fd8e4032SPraveen Kaligineedi 		goto err;
1640fd8e4032SPraveen Kaligineedi 
1641fd8e4032SPraveen Kaligineedi 	err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
1642fd8e4032SPraveen Kaligineedi 					 MEM_TYPE_XSK_BUFF_POOL, NULL);
1643fd8e4032SPraveen Kaligineedi 	if (err)
1644fd8e4032SPraveen Kaligineedi 		goto err;
1645fd8e4032SPraveen Kaligineedi 
1646fd8e4032SPraveen Kaligineedi 	xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
1647fd8e4032SPraveen Kaligineedi 	rx->xsk_pool = pool;
1648fd8e4032SPraveen Kaligineedi 
1649fd8e4032SPraveen Kaligineedi 	tx_qid = gve_xdp_tx_queue_id(priv, qid);
1650fd8e4032SPraveen Kaligineedi 	priv->tx[tx_qid].xsk_pool = pool;
1651fd8e4032SPraveen Kaligineedi 
1652fd8e4032SPraveen Kaligineedi 	return 0;
1653fd8e4032SPraveen Kaligineedi err:
1654fd8e4032SPraveen Kaligineedi 	if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
1655fd8e4032SPraveen Kaligineedi 		xdp_rxq_info_unreg(&rx->xsk_rxq);
1656fd8e4032SPraveen Kaligineedi 
1657fd8e4032SPraveen Kaligineedi 	xsk_pool_dma_unmap(pool,
1658fd8e4032SPraveen Kaligineedi 			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1659fd8e4032SPraveen Kaligineedi 	return err;
1660fd8e4032SPraveen Kaligineedi }
1661fd8e4032SPraveen Kaligineedi 
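/* Unbind the AF_XDP buffer pool from queue qid, briefly disabling the
 * affected RX and TX NAPI contexts so in-flight polls finish first.
 */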
1662fd8e4032SPraveen Kaligineedi static int gve_xsk_pool_disable(struct net_device *dev,
1663fd8e4032SPraveen Kaligineedi 				u16 qid)
1664fd8e4032SPraveen Kaligineedi {
1665fd8e4032SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
1666fd8e4032SPraveen Kaligineedi 	struct napi_struct *napi_rx;
1667fd8e4032SPraveen Kaligineedi 	struct napi_struct *napi_tx;
1668fd8e4032SPraveen Kaligineedi 	struct xsk_buff_pool *pool;
1669fd8e4032SPraveen Kaligineedi 	int tx_qid;
1670fd8e4032SPraveen Kaligineedi 
1671fd8e4032SPraveen Kaligineedi 	pool = xsk_get_pool_from_qid(dev, qid);
1672fd8e4032SPraveen Kaligineedi 	if (!pool)
1673fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1674fd8e4032SPraveen Kaligineedi 	if (qid >= priv->rx_cfg.num_queues)
1675fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1676fd8e4032SPraveen Kaligineedi 
1677fd8e4032SPraveen Kaligineedi 	/* If XDP prog is not installed, unmap DMA and return */
1678fd8e4032SPraveen Kaligineedi 	if (!priv->xdp_prog)
1679fd8e4032SPraveen Kaligineedi 		goto done;
1680fd8e4032SPraveen Kaligineedi 
1681fd8e4032SPraveen Kaligineedi 	tx_qid = gve_xdp_tx_queue_id(priv, qid);
1682fd8e4032SPraveen Kaligineedi 	if (!netif_running(dev)) {
1683fd8e4032SPraveen Kaligineedi 		priv->rx[qid].xsk_pool = NULL;
1684fd8e4032SPraveen Kaligineedi 		xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
1685fd8e4032SPraveen Kaligineedi 		priv->tx[tx_qid].xsk_pool = NULL;
1686fd8e4032SPraveen Kaligineedi 		goto done;
1687fd8e4032SPraveen Kaligineedi 	}
1688fd8e4032SPraveen Kaligineedi 
1689fd8e4032SPraveen Kaligineedi 	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
1690fd8e4032SPraveen Kaligineedi 	napi_disable(napi_rx); /* make sure current rx poll is done */
1691fd8e4032SPraveen Kaligineedi 
1692fd8e4032SPraveen Kaligineedi 	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
1693fd8e4032SPraveen Kaligineedi 	napi_disable(napi_tx); /* make sure current tx poll is done */
1694fd8e4032SPraveen Kaligineedi 
1695fd8e4032SPraveen Kaligineedi 	priv->rx[qid].xsk_pool = NULL;
1696fd8e4032SPraveen Kaligineedi 	xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
1697fd8e4032SPraveen Kaligineedi 	priv->tx[tx_qid].xsk_pool = NULL;
1698fd8e4032SPraveen Kaligineedi 	smp_mb(); /* Make sure it is visible to the workers on datapath */
1699fd8e4032SPraveen Kaligineedi 
1700fd8e4032SPraveen Kaligineedi 	napi_enable(napi_rx);
1701fd8e4032SPraveen Kaligineedi 	if (gve_rx_work_pending(&priv->rx[qid]))
1702fd8e4032SPraveen Kaligineedi 		napi_schedule(napi_rx);
1703fd8e4032SPraveen Kaligineedi 
1704fd8e4032SPraveen Kaligineedi 	napi_enable(napi_tx);
1705fd8e4032SPraveen Kaligineedi 	if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
1706fd8e4032SPraveen Kaligineedi 		napi_schedule(napi_tx);
1707fd8e4032SPraveen Kaligineedi 
1708fd8e4032SPraveen Kaligineedi done:
1709fd8e4032SPraveen Kaligineedi 	xsk_pool_dma_unmap(pool,
1710fd8e4032SPraveen Kaligineedi 			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1711fd8e4032SPraveen Kaligineedi 	return 0;
1712fd8e4032SPraveen Kaligineedi }
1713fd8e4032SPraveen Kaligineedi 
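/* ndo_xsk_wakeup: kick NAPI on the XDP TX queue paired with queue_id. */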
1714fd8e4032SPraveen Kaligineedi static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
1715fd8e4032SPraveen Kaligineedi {
1716fd8e4032SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
1717fd8e4032SPraveen Kaligineedi 	int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
1718fd8e4032SPraveen Kaligineedi 
1719fd8e4032SPraveen Kaligineedi 	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
1720fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1721fd8e4032SPraveen Kaligineedi 
1722fd8e4032SPraveen Kaligineedi 	if (flags & XDP_WAKEUP_TX) {
1723fd8e4032SPraveen Kaligineedi 		struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
1724fd8e4032SPraveen Kaligineedi 		struct napi_struct *napi =
1725fd8e4032SPraveen Kaligineedi 			&priv->ntfy_blocks[tx->ntfy_id].napi;
1726fd8e4032SPraveen Kaligineedi 
1727fd8e4032SPraveen Kaligineedi 		if (!napi_if_scheduled_mark_missed(napi)) {
1728fd8e4032SPraveen Kaligineedi 			/* Call local_bh_enable to trigger SoftIRQ processing */
1729fd8e4032SPraveen Kaligineedi 			local_bh_disable();
1730fd8e4032SPraveen Kaligineedi 			napi_schedule(napi);
1731fd8e4032SPraveen Kaligineedi 			local_bh_enable();
1732fd8e4032SPraveen Kaligineedi 		}
1733fd8e4032SPraveen Kaligineedi 
1734fd8e4032SPraveen Kaligineedi 		tx->xdp_xsk_wakeup++;
1735fd8e4032SPraveen Kaligineedi 	}
1736fd8e4032SPraveen Kaligineedi 
1737fd8e4032SPraveen Kaligineedi 	return 0;
1738fd8e4032SPraveen Kaligineedi }
1739fd8e4032SPraveen Kaligineedi 
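/* XDP is only supported in GQI-QPL mode, with LRO off, an MTU that fits in
 * a default RX buffer, and no more than half the maximum TX queues in use
 * so the other half can back the XDP TX queues.
 */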
174075eaae15SPraveen Kaligineedi static int verify_xdp_configuration(struct net_device *dev)
174175eaae15SPraveen Kaligineedi {
174275eaae15SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
174375eaae15SPraveen Kaligineedi 
174475eaae15SPraveen Kaligineedi 	if (dev->features & NETIF_F_LRO) {
174575eaae15SPraveen Kaligineedi 		netdev_warn(dev, "XDP is not supported when LRO is on.\n");
174675eaae15SPraveen Kaligineedi 		return -EOPNOTSUPP;
174775eaae15SPraveen Kaligineedi 	}
174875eaae15SPraveen Kaligineedi 
174975eaae15SPraveen Kaligineedi 	if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
175075eaae15SPraveen Kaligineedi 		netdev_warn(dev, "XDP is not supported in mode %d.\n",
175175eaae15SPraveen Kaligineedi 			    priv->queue_format);
175275eaae15SPraveen Kaligineedi 		return -EOPNOTSUPP;
175375eaae15SPraveen Kaligineedi 	}
175475eaae15SPraveen Kaligineedi 
1755da7d4b42SJohn Fraker 	if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
175675eaae15SPraveen Kaligineedi 		netdev_warn(dev, "XDP is not supported for mtu %d.\n",
175775eaae15SPraveen Kaligineedi 			    dev->mtu);
175875eaae15SPraveen Kaligineedi 		return -EOPNOTSUPP;
175975eaae15SPraveen Kaligineedi 	}
176075eaae15SPraveen Kaligineedi 
176175eaae15SPraveen Kaligineedi 	if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
176275eaae15SPraveen Kaligineedi 	    (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
176375eaae15SPraveen Kaligineedi 		netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
176475eaae15SPraveen Kaligineedi 			    priv->rx_cfg.num_queues,
176575eaae15SPraveen Kaligineedi 			    priv->tx_cfg.num_queues,
176675eaae15SPraveen Kaligineedi 			    priv->tx_cfg.max_queues);
176775eaae15SPraveen Kaligineedi 		return -EINVAL;
176875eaae15SPraveen Kaligineedi 	}
176975eaae15SPraveen Kaligineedi 	return 0;
177075eaae15SPraveen Kaligineedi }
177175eaae15SPraveen Kaligineedi 
177275eaae15SPraveen Kaligineedi static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
177375eaae15SPraveen Kaligineedi {
177475eaae15SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
177575eaae15SPraveen Kaligineedi 	int err;
177675eaae15SPraveen Kaligineedi 
177775eaae15SPraveen Kaligineedi 	err = verify_xdp_configuration(dev);
177875eaae15SPraveen Kaligineedi 	if (err)
177975eaae15SPraveen Kaligineedi 		return err;
178075eaae15SPraveen Kaligineedi 	switch (xdp->command) {
178175eaae15SPraveen Kaligineedi 	case XDP_SETUP_PROG:
178275eaae15SPraveen Kaligineedi 		return gve_set_xdp(priv, xdp->prog, xdp->extack);
1783fd8e4032SPraveen Kaligineedi 	case XDP_SETUP_XSK_POOL:
1784fd8e4032SPraveen Kaligineedi 		if (xdp->xsk.pool)
1785fd8e4032SPraveen Kaligineedi 			return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
1786fd8e4032SPraveen Kaligineedi 		else
1787fd8e4032SPraveen Kaligineedi 			return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
178875eaae15SPraveen Kaligineedi 	default:
178975eaae15SPraveen Kaligineedi 		return -EINVAL;
179075eaae15SPraveen Kaligineedi 	}
179175eaae15SPraveen Kaligineedi }
179275eaae15SPraveen Kaligineedi 
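/* Apply new TX/RX queue counts, restarting the interface when it is up. */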
1793e5b845dcSCatherine Sullivan int gve_adjust_queues(struct gve_priv *priv,
1794e5b845dcSCatherine Sullivan 		      struct gve_queue_config new_rx_config,
1795e5b845dcSCatherine Sullivan 		      struct gve_queue_config new_tx_config)
1796e5b845dcSCatherine Sullivan {
1797e5b845dcSCatherine Sullivan 	int err;
1798e5b845dcSCatherine Sullivan 
1799e5b845dcSCatherine Sullivan 	if (netif_carrier_ok(priv->dev)) {
1800e5b845dcSCatherine Sullivan 		/* To make this process as simple as possible we teardown the
1801e5b845dcSCatherine Sullivan 		 * device, set the new configuration, and then bring the device
1802e5b845dcSCatherine Sullivan 		 * up again.
1803e5b845dcSCatherine Sullivan 		 */
1804e5b845dcSCatherine Sullivan 		err = gve_close(priv->dev);
1805e5b845dcSCatherine Sullivan 		/* We have already tried to reset in close,
1806e5b845dcSCatherine Sullivan 		 * so just fail at this point.
1807e5b845dcSCatherine Sullivan 		 */
1808e5b845dcSCatherine Sullivan 		if (err)
1809e5b845dcSCatherine Sullivan 			return err;
1810e5b845dcSCatherine Sullivan 		priv->tx_cfg = new_tx_config;
1811e5b845dcSCatherine Sullivan 		priv->rx_cfg = new_rx_config;
1812e5b845dcSCatherine Sullivan 
1813e5b845dcSCatherine Sullivan 		err = gve_open(priv->dev);
1814e5b845dcSCatherine Sullivan 		if (err)
1815e5b845dcSCatherine Sullivan 			goto err;
1816e5b845dcSCatherine Sullivan 
1817e5b845dcSCatherine Sullivan 		return 0;
1818e5b845dcSCatherine Sullivan 	}
1819e5b845dcSCatherine Sullivan 	/* Set the config for the next up. */
1820e5b845dcSCatherine Sullivan 	priv->tx_cfg = new_tx_config;
1821e5b845dcSCatherine Sullivan 	priv->rx_cfg = new_rx_config;
1822e5b845dcSCatherine Sullivan 
1823e5b845dcSCatherine Sullivan 	return 0;
1824e5b845dcSCatherine Sullivan err:
1825e5b845dcSCatherine Sullivan 	netif_err(priv, drv, priv->dev,
1826e5b845dcSCatherine Sullivan 		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
1827e5b845dcSCatherine Sullivan 	gve_turndown(priv);
1828e5b845dcSCatherine Sullivan 	return err;
1829e5b845dcSCatherine Sullivan }
1830e5b845dcSCatherine Sullivan 
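/* Quiesce the data path: turn the carrier off, disable every NAPI context
 * and stop the TX queues. gve_turnup() reverses this.
 */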
1831f5cedc84SCatherine Sullivan static void gve_turndown(struct gve_priv *priv)
1832f5cedc84SCatherine Sullivan {
1833f5cedc84SCatherine Sullivan 	int idx;
1834f5cedc84SCatherine Sullivan 
1835f5cedc84SCatherine Sullivan 	if (netif_carrier_ok(priv->dev))
1836f5cedc84SCatherine Sullivan 		netif_carrier_off(priv->dev);
1837f5cedc84SCatherine Sullivan 
1838f5cedc84SCatherine Sullivan 	if (!gve_get_napi_enabled(priv))
1839f5cedc84SCatherine Sullivan 		return;
1840f5cedc84SCatherine Sullivan 
1841f5cedc84SCatherine Sullivan 	/* Disable napi to prevent more work from coming in */
18422e80aeaeSPraveen Kaligineedi 	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1843f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1844f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1845f5cedc84SCatherine Sullivan 
1846f5cedc84SCatherine Sullivan 		napi_disable(&block->napi);
1847f5cedc84SCatherine Sullivan 	}
1848f5cedc84SCatherine Sullivan 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1849f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1850f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1851f5cedc84SCatherine Sullivan 
1852f5cedc84SCatherine Sullivan 		napi_disable(&block->napi);
1853f5cedc84SCatherine Sullivan 	}
1854f5cedc84SCatherine Sullivan 
1855f5cedc84SCatherine Sullivan 	/* Stop tx queues */
1856f5cedc84SCatherine Sullivan 	netif_tx_disable(priv->dev);
1857f5cedc84SCatherine Sullivan 
1858f5cedc84SCatherine Sullivan 	gve_clear_napi_enabled(priv);
185924aeb56fSKuo Zhao 	gve_clear_report_stats(priv);
1860f5cedc84SCatherine Sullivan }
1861f5cedc84SCatherine Sullivan 
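/* Restart the data path: start the TX queues, then re-enable NAPI and
 * unmask (GQI) or re-arm with the configured coalescing (DQO) each
 * queue's interrupt.
 */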
1862f5cedc84SCatherine Sullivan static void gve_turnup(struct gve_priv *priv)
1863f5cedc84SCatherine Sullivan {
1864f5cedc84SCatherine Sullivan 	int idx;
1865f5cedc84SCatherine Sullivan 
1866f5cedc84SCatherine Sullivan 	/* Start the tx queues */
1867f5cedc84SCatherine Sullivan 	netif_tx_start_all_queues(priv->dev);
1868f5cedc84SCatherine Sullivan 
1869f5cedc84SCatherine Sullivan 	/* Enable napi and unmask interrupts for all queues */
18702e80aeaeSPraveen Kaligineedi 	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1871f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1872f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1873f5cedc84SCatherine Sullivan 
1874f5cedc84SCatherine Sullivan 		napi_enable(&block->napi);
18750dcc144aSBailey Forrest 		if (gve_is_gqi(priv)) {
1876f5cedc84SCatherine Sullivan 			iowrite32be(0, gve_irq_doorbell(priv, block));
18770dcc144aSBailey Forrest 		} else {
18786081ac20STao Liu 			gve_set_itr_coalesce_usecs_dqo(priv, block,
18796081ac20STao Liu 						       priv->tx_coalesce_usecs);
18800dcc144aSBailey Forrest 		}
1881f5cedc84SCatherine Sullivan 	}
1882f5cedc84SCatherine Sullivan 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1883f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1884f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1885f5cedc84SCatherine Sullivan 
1886f5cedc84SCatherine Sullivan 		napi_enable(&block->napi);
18870dcc144aSBailey Forrest 		if (gve_is_gqi(priv)) {
1888f5cedc84SCatherine Sullivan 			iowrite32be(0, gve_irq_doorbell(priv, block));
18890dcc144aSBailey Forrest 		} else {
18906081ac20STao Liu 			gve_set_itr_coalesce_usecs_dqo(priv, block,
18916081ac20STao Liu 						       priv->rx_coalesce_usecs);
18920dcc144aSBailey Forrest 		}
1893f5cedc84SCatherine Sullivan 	}
1894f5cedc84SCatherine Sullivan 
1895f5cedc84SCatherine Sullivan 	gve_set_napi_enabled(priv);
1896f5cedc84SCatherine Sullivan }
1897f5cedc84SCatherine Sullivan 
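/* ndo_tx_timeout: if the stalled queue has missed completions and was not
 * kicked too recently, kick it by masking its IRQ and scheduling NAPI;
 * otherwise schedule a full device reset.
 */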
18980290bd29SMichael S. Tsirkin static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
1899f5cedc84SCatherine Sullivan {
190087a7f321SJohn Fraker 	struct gve_notify_block *block;
190187a7f321SJohn Fraker 	struct gve_tx_ring *tx = NULL;
190287a7f321SJohn Fraker 	struct gve_priv *priv;
190387a7f321SJohn Fraker 	u32 last_nic_done;
190487a7f321SJohn Fraker 	u32 current_time;
190587a7f321SJohn Fraker 	u32 ntfy_idx;
1906f5cedc84SCatherine Sullivan 
190787a7f321SJohn Fraker 	netdev_info(dev, "Timeout on tx queue %d", txqueue);
190887a7f321SJohn Fraker 	priv = netdev_priv(dev);
190987a7f321SJohn Fraker 	if (txqueue >= priv->tx_cfg.num_queues)
191087a7f321SJohn Fraker 		goto reset;
191187a7f321SJohn Fraker 
191287a7f321SJohn Fraker 	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
19131c360cc1SDan Carpenter 	if (ntfy_idx >= priv->num_ntfy_blks)
191487a7f321SJohn Fraker 		goto reset;
191587a7f321SJohn Fraker 
191687a7f321SJohn Fraker 	block = &priv->ntfy_blocks[ntfy_idx];
191787a7f321SJohn Fraker 	tx = block->tx;
191887a7f321SJohn Fraker 
191987a7f321SJohn Fraker 	current_time = jiffies_to_msecs(jiffies);
192087a7f321SJohn Fraker 	if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
192187a7f321SJohn Fraker 		goto reset;
192287a7f321SJohn Fraker 
192387a7f321SJohn Fraker 	/* Check to see if there are missed completions, which will allow us to
192487a7f321SJohn Fraker 	 * kick the queue.
192587a7f321SJohn Fraker 	 */
192687a7f321SJohn Fraker 	last_nic_done = gve_tx_load_event_counter(priv, tx);
192787a7f321SJohn Fraker 	if (last_nic_done - tx->done) {
192887a7f321SJohn Fraker 		netdev_info(dev, "Kicking queue %d", txqueue);
192987a7f321SJohn Fraker 		iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
193087a7f321SJohn Fraker 		napi_schedule(&block->napi);
193187a7f321SJohn Fraker 		tx->last_kick_msec = current_time;
193287a7f321SJohn Fraker 		goto out;
193387a7f321SJohn Fraker 	} // Else reset.
193487a7f321SJohn Fraker 
193587a7f321SJohn Fraker reset:
19369e5f7d26SCatherine Sullivan 	gve_schedule_reset(priv);
193787a7f321SJohn Fraker 
193887a7f321SJohn Fraker out:
193987a7f321SJohn Fraker 	if (tx)
194087a7f321SJohn Fraker 		tx->queue_timeout++;
1941f5cedc84SCatherine Sullivan 	priv->tx_timeo_cnt++;
1942f5cedc84SCatherine Sullivan }
1943f5cedc84SCatherine Sullivan 
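/* ndo_set_features: toggling LRO requires a full close/open cycle when the
 * interface is up; the feature flags are reverted if that fails.
 */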
19445e8c5adfSBailey Forrest static int gve_set_features(struct net_device *netdev,
19455e8c5adfSBailey Forrest 			    netdev_features_t features)
19465e8c5adfSBailey Forrest {
19475e8c5adfSBailey Forrest 	const netdev_features_t orig_features = netdev->features;
19485e8c5adfSBailey Forrest 	struct gve_priv *priv = netdev_priv(netdev);
19495e8c5adfSBailey Forrest 	int err;
19505e8c5adfSBailey Forrest 
19515e8c5adfSBailey Forrest 	if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
19525e8c5adfSBailey Forrest 		netdev->features ^= NETIF_F_LRO;
19535e8c5adfSBailey Forrest 		if (netif_carrier_ok(netdev)) {
19545e8c5adfSBailey Forrest 			/* To make this process as simple as possible we
19555e8c5adfSBailey Forrest 			 * teardown the device, set the new configuration,
19565e8c5adfSBailey Forrest 			 * and then bring the device up again.
19575e8c5adfSBailey Forrest 			 */
19585e8c5adfSBailey Forrest 			err = gve_close(netdev);
19595e8c5adfSBailey Forrest 			/* We have already tried to reset in close, just fail
19605e8c5adfSBailey Forrest 			 * at this point.
19615e8c5adfSBailey Forrest 			 */
19625e8c5adfSBailey Forrest 			if (err)
19635e8c5adfSBailey Forrest 				goto err;
19645e8c5adfSBailey Forrest 
19655e8c5adfSBailey Forrest 			err = gve_open(netdev);
19665e8c5adfSBailey Forrest 			if (err)
19675e8c5adfSBailey Forrest 				goto err;
19685e8c5adfSBailey Forrest 		}
19695e8c5adfSBailey Forrest 	}
19705e8c5adfSBailey Forrest 
19715e8c5adfSBailey Forrest 	return 0;
19725e8c5adfSBailey Forrest err:
19735e8c5adfSBailey Forrest 	/* Reverts the change on error. */
19745e8c5adfSBailey Forrest 	netdev->features = orig_features;
19755e8c5adfSBailey Forrest 	netif_err(priv, drv, netdev,
19765e8c5adfSBailey Forrest 		  "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
19775e8c5adfSBailey Forrest 	return err;
19785e8c5adfSBailey Forrest }
19795e8c5adfSBailey Forrest 
1980f5cedc84SCatherine Sullivan static const struct net_device_ops gve_netdev_ops = {
19815e8c5adfSBailey Forrest 	.ndo_start_xmit		=	gve_start_xmit,
198218de1e51SEric Dumazet 	.ndo_features_check	=	gve_features_check,
1983f5cedc84SCatherine Sullivan 	.ndo_open		=	gve_open,
1984f5cedc84SCatherine Sullivan 	.ndo_stop		=	gve_close,
1985f5cedc84SCatherine Sullivan 	.ndo_get_stats64	=	gve_get_stats,
1986f5cedc84SCatherine Sullivan 	.ndo_tx_timeout         =       gve_tx_timeout,
19875e8c5adfSBailey Forrest 	.ndo_set_features	=	gve_set_features,
198875eaae15SPraveen Kaligineedi 	.ndo_bpf		=	gve_xdp,
198939a7f4aaSPraveen Kaligineedi 	.ndo_xdp_xmit		=	gve_xdp_xmit,
1990fd8e4032SPraveen Kaligineedi 	.ndo_xsk_wakeup		=	gve_xsk_wakeup,
1991f5cedc84SCatherine Sullivan };
1992f5cedc84SCatherine Sullivan 
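/* Latch device_status bits: reset requests and stats-report triggers are
 * recorded for the service task to act on.
 */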
19939e5f7d26SCatherine Sullivan static void gve_handle_status(struct gve_priv *priv, u32 status)
19949e5f7d26SCatherine Sullivan {
19959e5f7d26SCatherine Sullivan 	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
19969e5f7d26SCatherine Sullivan 		dev_info(&priv->pdev->dev, "Device requested reset.\n");
19979e5f7d26SCatherine Sullivan 		gve_set_do_reset(priv);
19989e5f7d26SCatherine Sullivan 	}
199924aeb56fSKuo Zhao 	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
200024aeb56fSKuo Zhao 		priv->stats_report_trigger_cnt++;
200124aeb56fSKuo Zhao 		gve_set_do_report_stats(priv);
200224aeb56fSKuo Zhao 	}
20039e5f7d26SCatherine Sullivan }
20049e5f7d26SCatherine Sullivan 
20059e5f7d26SCatherine Sullivan static void gve_handle_reset(struct gve_priv *priv)
20069e5f7d26SCatherine Sullivan {
20079e5f7d26SCatherine Sullivan 	/* A service task will be scheduled at the end of probe to catch any
20089e5f7d26SCatherine Sullivan 	 * resets that need to happen, and we don't want to reset until
20099e5f7d26SCatherine Sullivan 	 * probe is done.
20109e5f7d26SCatherine Sullivan 	 */
20119e5f7d26SCatherine Sullivan 	if (gve_get_probe_in_progress(priv))
20129e5f7d26SCatherine Sullivan 		return;
20139e5f7d26SCatherine Sullivan 
20149e5f7d26SCatherine Sullivan 	if (gve_get_do_reset(priv)) {
20159e5f7d26SCatherine Sullivan 		rtnl_lock();
20169e5f7d26SCatherine Sullivan 		gve_reset(priv, false);
20179e5f7d26SCatherine Sullivan 		rtnl_unlock();
20189e5f7d26SCatherine Sullivan 	}
20199e5f7d26SCatherine Sullivan }
20209e5f7d26SCatherine Sullivan 
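/* Fill the shared stats report with per-queue TX and RX counters for the
 * device to collect.
 */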
202124aeb56fSKuo Zhao void gve_handle_report_stats(struct gve_priv *priv)
202224aeb56fSKuo Zhao {
202324aeb56fSKuo Zhao 	struct stats *stats = priv->stats_report->stats;
202417c37d74SEric Dumazet 	int idx, stats_idx = 0;
202517c37d74SEric Dumazet 	unsigned int start = 0;
202617c37d74SEric Dumazet 	u64 tx_bytes;
202724aeb56fSKuo Zhao 
202824aeb56fSKuo Zhao 	if (!gve_get_report_stats(priv))
202924aeb56fSKuo Zhao 		return;
203024aeb56fSKuo Zhao 
203124aeb56fSKuo Zhao 	be64_add_cpu(&priv->stats_report->written_count, 1);
203224aeb56fSKuo Zhao 	/* tx stats */
203324aeb56fSKuo Zhao 	if (priv->tx) {
20342e80aeaeSPraveen Kaligineedi 		for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
20355e8c5adfSBailey Forrest 			u32 last_completion = 0;
20365e8c5adfSBailey Forrest 			u32 tx_frames = 0;
20375e8c5adfSBailey Forrest 
20385e8c5adfSBailey Forrest 			/* DQO doesn't currently support these metrics. */
20395e8c5adfSBailey Forrest 			if (gve_is_gqi(priv)) {
20405e8c5adfSBailey Forrest 				last_completion = priv->tx[idx].done;
20415e8c5adfSBailey Forrest 				tx_frames = priv->tx[idx].req;
20425e8c5adfSBailey Forrest 			}
20435e8c5adfSBailey Forrest 
204424aeb56fSKuo Zhao 			do {
2045068c38adSThomas Gleixner 				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
204624aeb56fSKuo Zhao 				tx_bytes = priv->tx[idx].bytes_done;
2047068c38adSThomas Gleixner 			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
204824aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
204924aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_WAKE_CNT),
205024aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->tx[idx].wake_queue),
205124aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
205224aeb56fSKuo Zhao 			};
205324aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
205424aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_STOP_CNT),
205524aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->tx[idx].stop_queue),
205624aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
205724aeb56fSKuo Zhao 			};
205824aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
205924aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
20605e8c5adfSBailey Forrest 				.value = cpu_to_be64(tx_frames),
206124aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
206224aeb56fSKuo Zhao 			};
206324aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
206424aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_BYTES_SENT),
206524aeb56fSKuo Zhao 				.value = cpu_to_be64(tx_bytes),
206624aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
206724aeb56fSKuo Zhao 			};
206824aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
206924aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
20705e8c5adfSBailey Forrest 				.value = cpu_to_be64(last_completion),
207124aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
207224aeb56fSKuo Zhao 			};
207387a7f321SJohn Fraker 			stats[stats_idx++] = (struct stats) {
207487a7f321SJohn Fraker 				.stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
207587a7f321SJohn Fraker 				.value = cpu_to_be64(priv->tx[idx].queue_timeout),
207687a7f321SJohn Fraker 				.queue_id = cpu_to_be32(idx),
207787a7f321SJohn Fraker 			};
207824aeb56fSKuo Zhao 		}
207924aeb56fSKuo Zhao 	}
208024aeb56fSKuo Zhao 	/* rx stats */
208124aeb56fSKuo Zhao 	if (priv->rx) {
208224aeb56fSKuo Zhao 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
208324aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
208424aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
208524aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
208624aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
208724aeb56fSKuo Zhao 			};
208824aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
208924aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
209024aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
209124aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
209224aeb56fSKuo Zhao 			};
209324aeb56fSKuo Zhao 		}
209424aeb56fSKuo Zhao 	}
209524aeb56fSKuo Zhao }
209624aeb56fSKuo Zhao 
209724aeb56fSKuo Zhao /* Handle NIC status register changes, reset requests and report stats */
20989e5f7d26SCatherine Sullivan static void gve_service_task(struct work_struct *work)
20999e5f7d26SCatherine Sullivan {
21009e5f7d26SCatherine Sullivan 	struct gve_priv *priv = container_of(work, struct gve_priv,
21019e5f7d26SCatherine Sullivan 					     service_task);
21023b7cc736SPatricio Noyola 	u32 status = ioread32be(&priv->reg_bar0->device_status);
21039e5f7d26SCatherine Sullivan 
21043b7cc736SPatricio Noyola 	gve_handle_status(priv, status);
21059e5f7d26SCatherine Sullivan 
21069e5f7d26SCatherine Sullivan 	gve_handle_reset(priv);
21073b7cc736SPatricio Noyola 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
21089e5f7d26SCatherine Sullivan }
21099e5f7d26SCatherine Sullivan 
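/* XDP (basic, redirect, ndo_xmit and AF_XDP zero-copy) is only wired up
 * for the GQI-QPL queue format; other formats advertise no XDP features.
 */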
211075eaae15SPraveen Kaligineedi static void gve_set_netdev_xdp_features(struct gve_priv *priv)
211175eaae15SPraveen Kaligineedi {
211275eaae15SPraveen Kaligineedi 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
211375eaae15SPraveen Kaligineedi 		priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
211439a7f4aaSPraveen Kaligineedi 		priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
211539a7f4aaSPraveen Kaligineedi 		priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
2116fd8e4032SPraveen Kaligineedi 		priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
211775eaae15SPraveen Kaligineedi 	} else {
211875eaae15SPraveen Kaligineedi 		priv->dev->xdp_features = 0;
211975eaae15SPraveen Kaligineedi 	}
212075eaae15SPraveen Kaligineedi }
212175eaae15SPraveen Kaligineedi 
2122893ce44dSCatherine Sullivan static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
2123893ce44dSCatherine Sullivan {
2124893ce44dSCatherine Sullivan 	int num_ntfy;
2125893ce44dSCatherine Sullivan 	int err;
2126893ce44dSCatherine Sullivan 
2127893ce44dSCatherine Sullivan 	/* Set up the adminq */
2128893ce44dSCatherine Sullivan 	err = gve_adminq_alloc(&priv->pdev->dev, priv);
2129893ce44dSCatherine Sullivan 	if (err) {
2130893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev,
2131893ce44dSCatherine Sullivan 			"Failed to alloc admin queue: err=%d\n", err);
2132893ce44dSCatherine Sullivan 		return err;
2133893ce44dSCatherine Sullivan 	}
2134893ce44dSCatherine Sullivan 
2135c2a0c3edSJeroen de Borst 	err = gve_verify_driver_compatibility(priv);
2136c2a0c3edSJeroen de Borst 	if (err) {
2137c2a0c3edSJeroen de Borst 		dev_err(&priv->pdev->dev,
2138c2a0c3edSJeroen de Borst 			"Could not verify driver compatibility: err=%d\n", err);
2139c2a0c3edSJeroen de Borst 		goto err;
2140c2a0c3edSJeroen de Borst 	}
2141c2a0c3edSJeroen de Borst 
2142*f13697ccSShailend Chand 	priv->num_registered_pages = 0;
2143*f13697ccSShailend Chand 
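	/* On reset recovery (skip_describe_device) the device description,
	 * queue limits and MSI-X budget from the original probe are kept,
	 * so skip straight to resource setup.
	 */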
2144893ce44dSCatherine Sullivan 	if (skip_describe_device)
2145893ce44dSCatherine Sullivan 		goto setup_device;
2146893ce44dSCatherine Sullivan 
2147a5886ef4SBailey Forrest 	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
2148893ce44dSCatherine Sullivan 	/* Get the initial information we need from the device */
2149893ce44dSCatherine Sullivan 	err = gve_adminq_describe_device(priv);
2150893ce44dSCatherine Sullivan 	if (err) {
2151893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev,
2152893ce44dSCatherine Sullivan 			"Could not get device information: err=%d\n", err);
2153893ce44dSCatherine Sullivan 		goto err;
2154893ce44dSCatherine Sullivan 	}
2155893ce44dSCatherine Sullivan 	priv->dev->mtu = priv->dev->max_mtu;
2156893ce44dSCatherine Sullivan 	num_ntfy = pci_msix_vec_count(priv->pdev);
2157893ce44dSCatherine Sullivan 	if (num_ntfy <= 0) {
2158893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev,
2159893ce44dSCatherine Sullivan 			"Could not count MSI-x vectors: err=%d\n", num_ntfy);
2160893ce44dSCatherine Sullivan 		err = num_ntfy;
2161893ce44dSCatherine Sullivan 		goto err;
2162893ce44dSCatherine Sullivan 	} else if (num_ntfy < GVE_MIN_MSIX) {
2163893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
2164893ce44dSCatherine Sullivan 			GVE_MIN_MSIX, num_ntfy);
2165893ce44dSCatherine Sullivan 		err = -EINVAL;
2166893ce44dSCatherine Sullivan 		goto err;
2167893ce44dSCatherine Sullivan 	}
2168893ce44dSCatherine Sullivan 
2169a695641cSCoco Li 	/* Big TCP is only supported on DQ */
2170a695641cSCoco Li 	if (!gve_is_gqi(priv))
217166ce8e6bSRushil Gupta 		netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
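	/* netif_set_tso_max_size() lifts the legacy 64KB TSO cap so Big TCP
	 * super-sized packets can be offloaded on DQ.
	 */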
2172a695641cSCoco Li 
2173f5cedc84SCatherine Sullivan 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
2174893ce44dSCatherine Sullivan 	/* gvnic has one Notification Block per MSI-x vector, except for the
2175893ce44dSCatherine Sullivan 	 * management vector
2176893ce44dSCatherine Sullivan 	 */
2177893ce44dSCatherine Sullivan 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
2178893ce44dSCatherine Sullivan 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
2179893ce44dSCatherine Sullivan 
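	/* Split the notification blocks evenly between TX and RX queues;
	 * num_ntfy_blks was rounded down to an even count above.
	 */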
2180f5cedc84SCatherine Sullivan 	priv->tx_cfg.max_queues =
2181f5cedc84SCatherine Sullivan 		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
2182f5cedc84SCatherine Sullivan 	priv->rx_cfg.max_queues =
2183f5cedc84SCatherine Sullivan 		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
2184f5cedc84SCatherine Sullivan 
2185f5cedc84SCatherine Sullivan 	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
2186f5cedc84SCatherine Sullivan 	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
2187f5cedc84SCatherine Sullivan 	if (priv->default_num_queues > 0) {
2188f5cedc84SCatherine Sullivan 		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
2189f5cedc84SCatherine Sullivan 						priv->tx_cfg.num_queues);
2190f5cedc84SCatherine Sullivan 		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
2191f5cedc84SCatherine Sullivan 						priv->rx_cfg.num_queues);
2192f5cedc84SCatherine Sullivan 	}
2193f5cedc84SCatherine Sullivan 
21940d5775d3SCatherine Sullivan 	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
2195f5cedc84SCatherine Sullivan 		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
21960d5775d3SCatherine Sullivan 	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
2197f5cedc84SCatherine Sullivan 		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
2198f5cedc84SCatherine Sullivan 
21996081ac20STao Liu 	if (!gve_is_gqi(priv)) {
22006081ac20STao Liu 		priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
22016081ac20STao Liu 		priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
22026081ac20STao Liu 	}
22036081ac20STao Liu 
2204893ce44dSCatherine Sullivan setup_device:
220575eaae15SPraveen Kaligineedi 	gve_set_netdev_xdp_features(priv);
2206893ce44dSCatherine Sullivan 	err = gve_setup_device_resources(priv);
2207893ce44dSCatherine Sullivan 	if (!err)
2208893ce44dSCatherine Sullivan 		return 0;
2209893ce44dSCatherine Sullivan err:
2210893ce44dSCatherine Sullivan 	gve_adminq_free(&priv->pdev->dev, priv);
2211893ce44dSCatherine Sullivan 	return err;
2212893ce44dSCatherine Sullivan }
2213893ce44dSCatherine Sullivan 
2214893ce44dSCatherine Sullivan static void gve_teardown_priv_resources(struct gve_priv *priv)
2215893ce44dSCatherine Sullivan {
2216893ce44dSCatherine Sullivan 	gve_teardown_device_resources(priv);
2217893ce44dSCatherine Sullivan 	gve_adminq_free(&priv->pdev->dev, priv);
2218893ce44dSCatherine Sullivan }
2219893ce44dSCatherine Sullivan 
22209e5f7d26SCatherine Sullivan static void gve_trigger_reset(struct gve_priv *priv)
22219e5f7d26SCatherine Sullivan {
22229e5f7d26SCatherine Sullivan 	/* Reset the device by releasing the AQ */
22239e5f7d26SCatherine Sullivan 	gve_adminq_release(priv);
22249e5f7d26SCatherine Sullivan }
22259e5f7d26SCatherine Sullivan 
22269e5f7d26SCatherine Sullivan static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
22279e5f7d26SCatherine Sullivan {
22289e5f7d26SCatherine Sullivan 	gve_trigger_reset(priv);
22299e5f7d26SCatherine Sullivan 	/* With the reset having already happened, close cannot fail */
22309e5f7d26SCatherine Sullivan 	if (was_up)
22319e5f7d26SCatherine Sullivan 		gve_close(priv->dev);
22329e5f7d26SCatherine Sullivan 	gve_teardown_priv_resources(priv);
22339e5f7d26SCatherine Sullivan }
22349e5f7d26SCatherine Sullivan 
22359e5f7d26SCatherine Sullivan static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
22369e5f7d26SCatherine Sullivan {
22379e5f7d26SCatherine Sullivan 	int err;
22389e5f7d26SCatherine Sullivan 
22399e5f7d26SCatherine Sullivan 	err = gve_init_priv(priv, true);
22409e5f7d26SCatherine Sullivan 	if (err)
22419e5f7d26SCatherine Sullivan 		goto err;
22429e5f7d26SCatherine Sullivan 	if (was_up) {
22439e5f7d26SCatherine Sullivan 		err = gve_open(priv->dev);
22449e5f7d26SCatherine Sullivan 		if (err)
22459e5f7d26SCatherine Sullivan 			goto err;
22469e5f7d26SCatherine Sullivan 	}
22479e5f7d26SCatherine Sullivan 	return 0;
22489e5f7d26SCatherine Sullivan err:
22499e5f7d26SCatherine Sullivan 	dev_err(&priv->pdev->dev, "Reset failed!!! DISABLING ALL QUEUES !!!\n");
22509e5f7d26SCatherine Sullivan 	gve_turndown(priv);
22519e5f7d26SCatherine Sullivan 	return err;
22529e5f7d26SCatherine Sullivan }
22539e5f7d26SCatherine Sullivan 
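/* Tear the device down and bring it back up. If @attempt_teardown, try a
 * clean gve_close() first and fall back to the abrupt AQ-release reset
 * only if that fails; otherwise turn the queues down and reset right away.
 */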
22549e5f7d26SCatherine Sullivan int gve_reset(struct gve_priv *priv, bool attempt_teardown)
22559e5f7d26SCatherine Sullivan {
22569e5f7d26SCatherine Sullivan 	bool was_up = netif_carrier_ok(priv->dev);
22579e5f7d26SCatherine Sullivan 	int err;
22589e5f7d26SCatherine Sullivan 
22599e5f7d26SCatherine Sullivan 	dev_info(&priv->pdev->dev, "Performing reset\n");
22609e5f7d26SCatherine Sullivan 	gve_clear_do_reset(priv);
22619e5f7d26SCatherine Sullivan 	gve_set_reset_in_progress(priv);
22629e5f7d26SCatherine Sullivan 	/* If we aren't attempting to tear down normally, just turn down and
22639e5f7d26SCatherine Sullivan 	 * reset right away.
22649e5f7d26SCatherine Sullivan 	 */
22659e5f7d26SCatherine Sullivan 	if (!attempt_teardown) {
22669e5f7d26SCatherine Sullivan 		gve_turndown(priv);
22679e5f7d26SCatherine Sullivan 		gve_reset_and_teardown(priv, was_up);
22689e5f7d26SCatherine Sullivan 	} else {
22699e5f7d26SCatherine Sullivan 		/* Otherwise attempt to close normally */
22709e5f7d26SCatherine Sullivan 		if (was_up) {
22719e5f7d26SCatherine Sullivan 			err = gve_close(priv->dev);
22729e5f7d26SCatherine Sullivan 			/* If that fails, reset as we did above */
22739e5f7d26SCatherine Sullivan 			if (err)
22749e5f7d26SCatherine Sullivan 				gve_reset_and_teardown(priv, was_up);
22759e5f7d26SCatherine Sullivan 		}
22769e5f7d26SCatherine Sullivan 		/* Clean up any remaining resources */
22779e5f7d26SCatherine Sullivan 		gve_teardown_priv_resources(priv);
22789e5f7d26SCatherine Sullivan 	}
22799e5f7d26SCatherine Sullivan 
22809e5f7d26SCatherine Sullivan 	/* Set it all back up */
22819e5f7d26SCatherine Sullivan 	err = gve_reset_recovery(priv, was_up);
22829e5f7d26SCatherine Sullivan 	gve_clear_reset_in_progress(priv);
2283433e274bSKuo Zhao 	priv->reset_cnt++;
2284433e274bSKuo Zhao 	priv->interface_up_cnt = 0;
2285433e274bSKuo Zhao 	priv->interface_down_cnt = 0;
228624aeb56fSKuo Zhao 	priv->stats_report_trigger_cnt = 0;
22879e5f7d26SCatherine Sullivan 	return err;
22889e5f7d26SCatherine Sullivan }
22899e5f7d26SCatherine Sullivan 
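/* The device exposes a single byte-wide version register; stream the
 * prefix and version string through it one byte at a time, terminated by
 * a newline.
 */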
2290893ce44dSCatherine Sullivan static void gve_write_version(u8 __iomem *driver_version_register)
2291893ce44dSCatherine Sullivan {
2292893ce44dSCatherine Sullivan 	const char *c = gve_version_prefix;
2293893ce44dSCatherine Sullivan 
2294893ce44dSCatherine Sullivan 	while (*c) {
2295893ce44dSCatherine Sullivan 		writeb(*c, driver_version_register);
2296893ce44dSCatherine Sullivan 		c++;
2297893ce44dSCatherine Sullivan 	}
2298893ce44dSCatherine Sullivan 
2299893ce44dSCatherine Sullivan 	c = gve_version_str;
2300893ce44dSCatherine Sullivan 	while (*c) {
2301893ce44dSCatherine Sullivan 		writeb(*c, driver_version_register);
2302893ce44dSCatherine Sullivan 		c++;
2303893ce44dSCatherine Sullivan 	}
2304893ce44dSCatherine Sullivan 	writeb('\n', driver_version_register);
2305893ce44dSCatherine Sullivan }
2306893ce44dSCatherine Sullivan 
2307893ce44dSCatherine Sullivan static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2308893ce44dSCatherine Sullivan {
2309893ce44dSCatherine Sullivan 	int max_tx_queues, max_rx_queues;
2310893ce44dSCatherine Sullivan 	struct net_device *dev;
2311893ce44dSCatherine Sullivan 	__be32 __iomem *db_bar;
2312893ce44dSCatherine Sullivan 	struct gve_registers __iomem *reg_bar;
2313893ce44dSCatherine Sullivan 	struct gve_priv *priv;
2314893ce44dSCatherine Sullivan 	int err;
2315893ce44dSCatherine Sullivan 
2316893ce44dSCatherine Sullivan 	err = pci_enable_device(pdev);
2317893ce44dSCatherine Sullivan 	if (err)
23186dce38b4SChristophe JAILLET 		return err;
2319893ce44dSCatherine Sullivan 
23209d0aba98SJunfeng Guo 	err = pci_request_regions(pdev, gve_driver_name);
2321893ce44dSCatherine Sullivan 	if (err)
2322893ce44dSCatherine Sullivan 		goto abort_with_enabled;
2323893ce44dSCatherine Sullivan 
2324893ce44dSCatherine Sullivan 	pci_set_master(pdev);
2325893ce44dSCatherine Sullivan 
2326bde3c8ffSChristophe JAILLET 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2327893ce44dSCatherine Sullivan 	if (err) {
2328893ce44dSCatherine Sullivan 		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
2329893ce44dSCatherine Sullivan 		goto abort_with_pci_region;
2330893ce44dSCatherine Sullivan 	}
2331893ce44dSCatherine Sullivan 
2332893ce44dSCatherine Sullivan 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
2333893ce44dSCatherine Sullivan 	if (!reg_bar) {
2334f5cedc84SCatherine Sullivan 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
2335893ce44dSCatherine Sullivan 		err = -ENOMEM;
2336893ce44dSCatherine Sullivan 		goto abort_with_pci_region;
2337893ce44dSCatherine Sullivan 	}
2338893ce44dSCatherine Sullivan 
2339893ce44dSCatherine Sullivan 	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
2340893ce44dSCatherine Sullivan 	if (!db_bar) {
2341893ce44dSCatherine Sullivan 		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
2342893ce44dSCatherine Sullivan 		err = -ENOMEM;
2343893ce44dSCatherine Sullivan 		goto abort_with_reg_bar;
2344893ce44dSCatherine Sullivan 	}
2345893ce44dSCatherine Sullivan 
2346893ce44dSCatherine Sullivan 	gve_write_version(&reg_bar->driver_version);
2347893ce44dSCatherine Sullivan 	/* Get max queues to alloc etherdev */
23481db1a862SBailey Forrest 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
23491db1a862SBailey Forrest 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
2350893ce44dSCatherine Sullivan 	/* Alloc and setup the netdev and priv */
2351893ce44dSCatherine Sullivan 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
2352893ce44dSCatherine Sullivan 	if (!dev) {
2353893ce44dSCatherine Sullivan 		dev_err(&pdev->dev, "could not allocate netdev\n");
23546dce38b4SChristophe JAILLET 		err = -ENOMEM;
2355893ce44dSCatherine Sullivan 		goto abort_with_db_bar;
2356893ce44dSCatherine Sullivan 	}
2357893ce44dSCatherine Sullivan 	SET_NETDEV_DEV(dev, &pdev->dev);
2358893ce44dSCatherine Sullivan 	pci_set_drvdata(pdev, dev);
2359e5b845dcSCatherine Sullivan 	dev->ethtool_ops = &gve_ethtool_ops;
2360f5cedc84SCatherine Sullivan 	dev->netdev_ops = &gve_netdev_ops;
23615e8c5adfSBailey Forrest 
23625e8c5adfSBailey Forrest 	/* Set default and supported features.
23635e8c5adfSBailey Forrest 	 *
23645e8c5adfSBailey Forrest 	 * Features might be set in other locations as well (such as
23655e8c5adfSBailey Forrest 	 * `gve_adminq_describe_device`).
23665e8c5adfSBailey Forrest 	 */
2367893ce44dSCatherine Sullivan 	dev->hw_features = NETIF_F_HIGHDMA;
2368893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_SG;
2369893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_HW_CSUM;
2370893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_TSO;
2371893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_TSO6;
2372893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_TSO_ECN;
2373893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_RXCSUM;
2374893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_RXHASH;
2375893ce44dSCatherine Sullivan 	dev->features = dev->hw_features;
2376f5cedc84SCatherine Sullivan 	dev->watchdog_timeo = 5 * HZ;
2377893ce44dSCatherine Sullivan 	dev->min_mtu = ETH_MIN_MTU;
2378893ce44dSCatherine Sullivan 	netif_carrier_off(dev);
2379893ce44dSCatherine Sullivan 
2380893ce44dSCatherine Sullivan 	priv = netdev_priv(dev);
2381893ce44dSCatherine Sullivan 	priv->dev = dev;
2382893ce44dSCatherine Sullivan 	priv->pdev = pdev;
2383893ce44dSCatherine Sullivan 	priv->msg_enable = DEFAULT_MSG_LEVEL;
2384893ce44dSCatherine Sullivan 	priv->reg_bar0 = reg_bar;
2385893ce44dSCatherine Sullivan 	priv->db_bar2 = db_bar;
23869e5f7d26SCatherine Sullivan 	priv->service_task_flags = 0x0;
2387893ce44dSCatherine Sullivan 	priv->state_flags = 0x0;
238824aeb56fSKuo Zhao 	priv->ethtool_flags = 0x0;
23899e5f7d26SCatherine Sullivan 
23909e5f7d26SCatherine Sullivan 	gve_set_probe_in_progress(priv);
23919e5f7d26SCatherine Sullivan 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
23929e5f7d26SCatherine Sullivan 	if (!priv->gve_wq) {
23939e5f7d26SCatherine Sullivan 		dev_err(&pdev->dev, "Could not allocate workqueue\n");
23949e5f7d26SCatherine Sullivan 		err = -ENOMEM;
23959e5f7d26SCatherine Sullivan 		goto abort_with_netdev;
23969e5f7d26SCatherine Sullivan 	}
23979e5f7d26SCatherine Sullivan 	INIT_WORK(&priv->service_task, gve_service_task);
239824aeb56fSKuo Zhao 	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
2399f5cedc84SCatherine Sullivan 	priv->tx_cfg.max_queues = max_tx_queues;
2400f5cedc84SCatherine Sullivan 	priv->rx_cfg.max_queues = max_rx_queues;
2401893ce44dSCatherine Sullivan 
2402893ce44dSCatherine Sullivan 	err = gve_init_priv(priv, false);
2403893ce44dSCatherine Sullivan 	if (err)
24049e5f7d26SCatherine Sullivan 		goto abort_with_wq;
2405893ce44dSCatherine Sullivan 
2406893ce44dSCatherine Sullivan 	err = register_netdev(dev);
2407893ce44dSCatherine Sullivan 	if (err)
24082342ae10SChristophe JAILLET 		goto abort_with_gve_init;
2409893ce44dSCatherine Sullivan 
2410893ce44dSCatherine Sullivan 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
2411a5886ef4SBailey Forrest 	dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
24129e5f7d26SCatherine Sullivan 	gve_clear_probe_in_progress(priv);
24139e5f7d26SCatherine Sullivan 	queue_work(priv->gve_wq, &priv->service_task);
2414893ce44dSCatherine Sullivan 	return 0;
2415893ce44dSCatherine Sullivan 
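/* Error unwind: each label below undoes one setup step, in reverse order. */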
24162342ae10SChristophe JAILLET abort_with_gve_init:
24172342ae10SChristophe JAILLET 	gve_teardown_priv_resources(priv);
24182342ae10SChristophe JAILLET 
24199e5f7d26SCatherine Sullivan abort_with_wq:
24209e5f7d26SCatherine Sullivan 	destroy_workqueue(priv->gve_wq);
24219e5f7d26SCatherine Sullivan 
2422893ce44dSCatherine Sullivan abort_with_netdev:
2423893ce44dSCatherine Sullivan 	free_netdev(dev);
2424893ce44dSCatherine Sullivan 
2425893ce44dSCatherine Sullivan abort_with_db_bar:
2426893ce44dSCatherine Sullivan 	pci_iounmap(pdev, db_bar);
2427893ce44dSCatherine Sullivan 
2428893ce44dSCatherine Sullivan abort_with_reg_bar:
2429893ce44dSCatherine Sullivan 	pci_iounmap(pdev, reg_bar);
2430893ce44dSCatherine Sullivan 
2431893ce44dSCatherine Sullivan abort_with_pci_region:
2432893ce44dSCatherine Sullivan 	pci_release_regions(pdev);
2433893ce44dSCatherine Sullivan 
2434893ce44dSCatherine Sullivan abort_with_enabled:
2435893ce44dSCatherine Sullivan 	pci_disable_device(pdev);
24366dce38b4SChristophe JAILLET 	return err;
2437893ce44dSCatherine Sullivan }
2438893ce44dSCatherine Sullivan 
2439893ce44dSCatherine Sullivan static void gve_remove(struct pci_dev *pdev)
2440893ce44dSCatherine Sullivan {
2441893ce44dSCatherine Sullivan 	struct net_device *netdev = pci_get_drvdata(pdev);
2442893ce44dSCatherine Sullivan 	struct gve_priv *priv = netdev_priv(netdev);
2443893ce44dSCatherine Sullivan 	__be32 __iomem *db_bar = priv->db_bar2;
2444893ce44dSCatherine Sullivan 	void __iomem *reg_bar = priv->reg_bar0;
2445893ce44dSCatherine Sullivan 
2446893ce44dSCatherine Sullivan 	unregister_netdev(netdev);
2447893ce44dSCatherine Sullivan 	gve_teardown_priv_resources(priv);
24489e5f7d26SCatherine Sullivan 	destroy_workqueue(priv->gve_wq);
2449893ce44dSCatherine Sullivan 	free_netdev(netdev);
2450893ce44dSCatherine Sullivan 	pci_iounmap(pdev, db_bar);
2451893ce44dSCatherine Sullivan 	pci_iounmap(pdev, reg_bar);
2452893ce44dSCatherine Sullivan 	pci_release_regions(pdev);
2453893ce44dSCatherine Sullivan 	pci_disable_device(pdev);
2454893ce44dSCatherine Sullivan }
2455893ce44dSCatherine Sullivan 
2456974365e5SCatherine Sullivan static void gve_shutdown(struct pci_dev *pdev)
2457974365e5SCatherine Sullivan {
2458974365e5SCatherine Sullivan 	struct net_device *netdev = pci_get_drvdata(pdev);
2459974365e5SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(netdev);
2460974365e5SCatherine Sullivan 	bool was_up = netif_carrier_ok(priv->dev);
2461974365e5SCatherine Sullivan 
2462974365e5SCatherine Sullivan 	rtnl_lock();
2463974365e5SCatherine Sullivan 	if (was_up && gve_close(priv->dev)) {
2464974365e5SCatherine Sullivan 		/* If the dev was up, attempt to close; if close fails, reset */
2465974365e5SCatherine Sullivan 		gve_reset_and_teardown(priv, was_up);
2466974365e5SCatherine Sullivan 	} else {
2467974365e5SCatherine Sullivan 		/* If the dev wasn't up or close worked, finish tearing down */
2468974365e5SCatherine Sullivan 		gve_teardown_priv_resources(priv);
2469974365e5SCatherine Sullivan 	}
2470974365e5SCatherine Sullivan 	rtnl_unlock();
2471974365e5SCatherine Sullivan }
2472974365e5SCatherine Sullivan 
2473974365e5SCatherine Sullivan #ifdef CONFIG_PM
2474974365e5SCatherine Sullivan static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
2475974365e5SCatherine Sullivan {
2476974365e5SCatherine Sullivan 	struct net_device *netdev = pci_get_drvdata(pdev);
2477974365e5SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(netdev);
2478974365e5SCatherine Sullivan 	bool was_up = netif_carrier_ok(priv->dev);
2479974365e5SCatherine Sullivan 
2480974365e5SCatherine Sullivan 	priv->suspend_cnt++;
2481974365e5SCatherine Sullivan 	rtnl_lock();
2482974365e5SCatherine Sullivan 	if (was_up && gve_close(priv->dev)) {
2483974365e5SCatherine Sullivan 		/* If the dev was up, attempt to close; if close fails, reset */
2484974365e5SCatherine Sullivan 		gve_reset_and_teardown(priv, was_up);
2485974365e5SCatherine Sullivan 	} else {
2486974365e5SCatherine Sullivan 		/* If the dev wasn't up or close worked, finish tearing down */
2487974365e5SCatherine Sullivan 		gve_teardown_priv_resources(priv);
2488974365e5SCatherine Sullivan 	}
2489974365e5SCatherine Sullivan 	priv->up_before_suspend = was_up;
2490974365e5SCatherine Sullivan 	rtnl_unlock();
2491974365e5SCatherine Sullivan 	return 0;
2492974365e5SCatherine Sullivan }
2493974365e5SCatherine Sullivan 
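/* Resume reuses the reset recovery path: re-init priv and, if the netdev
 * was up before suspend, reopen it.
 */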
2494974365e5SCatherine Sullivan static int gve_resume(struct pci_dev *pdev)
2495974365e5SCatherine Sullivan {
2496974365e5SCatherine Sullivan 	struct net_device *netdev = pci_get_drvdata(pdev);
2497974365e5SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(netdev);
2498974365e5SCatherine Sullivan 	int err;
2499974365e5SCatherine Sullivan 
2500974365e5SCatherine Sullivan 	priv->resume_cnt++;
2501974365e5SCatherine Sullivan 	rtnl_lock();
2502974365e5SCatherine Sullivan 	err = gve_reset_recovery(priv, priv->up_before_suspend);
2503974365e5SCatherine Sullivan 	rtnl_unlock();
2504974365e5SCatherine Sullivan 	return err;
2505974365e5SCatherine Sullivan }
2506974365e5SCatherine Sullivan #endif /* CONFIG_PM */
2507974365e5SCatherine Sullivan 
2508893ce44dSCatherine Sullivan static const struct pci_device_id gve_id_table[] = {
2509893ce44dSCatherine Sullivan 	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
2510893ce44dSCatherine Sullivan 	{ }
2511893ce44dSCatherine Sullivan };
2512893ce44dSCatherine Sullivan 
25139d0aba98SJunfeng Guo static struct pci_driver gve_driver = {
25149d0aba98SJunfeng Guo 	.name		= gve_driver_name,
2515893ce44dSCatherine Sullivan 	.id_table	= gve_id_table,
2516893ce44dSCatherine Sullivan 	.probe		= gve_probe,
2517893ce44dSCatherine Sullivan 	.remove		= gve_remove,
2518974365e5SCatherine Sullivan 	.shutdown	= gve_shutdown,
2519974365e5SCatherine Sullivan #ifdef CONFIG_PM
2520974365e5SCatherine Sullivan 	.suspend        = gve_suspend,
2521974365e5SCatherine Sullivan 	.resume         = gve_resume,
2522974365e5SCatherine Sullivan #endif
2523893ce44dSCatherine Sullivan };
2524893ce44dSCatherine Sullivan 
25259d0aba98SJunfeng Guo module_pci_driver(gve_driver);
2526893ce44dSCatherine Sullivan 
2527893ce44dSCatherine Sullivan MODULE_DEVICE_TABLE(pci, gve_id_table);
2528893ce44dSCatherine Sullivan MODULE_AUTHOR("Google, Inc.");
25299d0aba98SJunfeng Guo MODULE_DESCRIPTION("Google Virtual NIC Driver");
2530893ce44dSCatherine Sullivan MODULE_LICENSE("Dual MIT/GPL");
2531893ce44dSCatherine Sullivan MODULE_VERSION(GVE_VERSION);
2532