xref: /linux/drivers/net/ethernet/google/gve/gve_main.c (revision 2f523dc34ac8c355609e9b847852bf25bbdb30bf)
1893ce44dSCatherine Sullivan // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2893ce44dSCatherine Sullivan /* Google virtual Ethernet (gve) driver
3893ce44dSCatherine Sullivan  *
4893ce44dSCatherine Sullivan  * Copyright (C) 2015-2019 Google, Inc.
5893ce44dSCatherine Sullivan  */
6893ce44dSCatherine Sullivan 
7893ce44dSCatherine Sullivan #include <linux/cpumask.h>
8893ce44dSCatherine Sullivan #include <linux/etherdevice.h>
9893ce44dSCatherine Sullivan #include <linux/interrupt.h>
10893ce44dSCatherine Sullivan #include <linux/module.h>
11893ce44dSCatherine Sullivan #include <linux/pci.h>
12893ce44dSCatherine Sullivan #include <linux/sched.h>
13893ce44dSCatherine Sullivan #include <linux/timer.h>
149e5f7d26SCatherine Sullivan #include <linux/workqueue.h>
15893ce44dSCatherine Sullivan #include <net/sch_generic.h>
16893ce44dSCatherine Sullivan #include "gve.h"
17893ce44dSCatherine Sullivan #include "gve_adminq.h"
18893ce44dSCatherine Sullivan #include "gve_register.h"
19893ce44dSCatherine Sullivan 
20f5cedc84SCatherine Sullivan #define GVE_DEFAULT_RX_COPYBREAK	(256)
21f5cedc84SCatherine Sullivan 
22893ce44dSCatherine Sullivan #define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
23893ce44dSCatherine Sullivan #define GVE_VERSION		"1.0.0"
24893ce44dSCatherine Sullivan #define GVE_VERSION_PREFIX	"GVE-"
25893ce44dSCatherine Sullivan 
26e5b845dcSCatherine Sullivan const char gve_version_str[] = GVE_VERSION;
27893ce44dSCatherine Sullivan static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
28893ce44dSCatherine Sullivan 
29f5cedc84SCatherine Sullivan static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
30f5cedc84SCatherine Sullivan {
31f5cedc84SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(dev);
32f5cedc84SCatherine Sullivan 	unsigned int start;
33f5cedc84SCatherine Sullivan 	int ring;
34f5cedc84SCatherine Sullivan 
35f5cedc84SCatherine Sullivan 	if (priv->rx) {
36f5cedc84SCatherine Sullivan 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
37f5cedc84SCatherine Sullivan 			do {
383c13ce74SCatherine Sullivan 				start =
39f5cedc84SCatherine Sullivan 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
40f5cedc84SCatherine Sullivan 				s->rx_packets += priv->rx[ring].rpackets;
41f5cedc84SCatherine Sullivan 				s->rx_bytes += priv->rx[ring].rbytes;
42f5cedc84SCatherine Sullivan 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
43f5cedc84SCatherine Sullivan 						       start));
44f5cedc84SCatherine Sullivan 		}
45f5cedc84SCatherine Sullivan 	}
46f5cedc84SCatherine Sullivan 	if (priv->tx) {
47f5cedc84SCatherine Sullivan 		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
48f5cedc84SCatherine Sullivan 			do {
493c13ce74SCatherine Sullivan 				start =
50f5cedc84SCatherine Sullivan 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
51f5cedc84SCatherine Sullivan 				s->tx_packets += priv->tx[ring].pkt_done;
52f5cedc84SCatherine Sullivan 				s->tx_bytes += priv->tx[ring].bytes_done;
53cc07db5aSDan Carpenter 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
54f5cedc84SCatherine Sullivan 						       start));
55f5cedc84SCatherine Sullivan 		}
56f5cedc84SCatherine Sullivan 	}
57f5cedc84SCatherine Sullivan }
58f5cedc84SCatherine Sullivan 
59893ce44dSCatherine Sullivan static int gve_alloc_counter_array(struct gve_priv *priv)
60893ce44dSCatherine Sullivan {
61893ce44dSCatherine Sullivan 	priv->counter_array =
62893ce44dSCatherine Sullivan 		dma_alloc_coherent(&priv->pdev->dev,
63893ce44dSCatherine Sullivan 				   priv->num_event_counters *
64893ce44dSCatherine Sullivan 				   sizeof(*priv->counter_array),
65893ce44dSCatherine Sullivan 				   &priv->counter_array_bus, GFP_KERNEL);
66893ce44dSCatherine Sullivan 	if (!priv->counter_array)
67893ce44dSCatherine Sullivan 		return -ENOMEM;
68893ce44dSCatherine Sullivan 
69893ce44dSCatherine Sullivan 	return 0;
70893ce44dSCatherine Sullivan }
71893ce44dSCatherine Sullivan 
72893ce44dSCatherine Sullivan static void gve_free_counter_array(struct gve_priv *priv)
73893ce44dSCatherine Sullivan {
74893ce44dSCatherine Sullivan 	dma_free_coherent(&priv->pdev->dev,
75893ce44dSCatherine Sullivan 			  priv->num_event_counters *
76893ce44dSCatherine Sullivan 			  sizeof(*priv->counter_array),
77893ce44dSCatherine Sullivan 			  priv->counter_array, priv->counter_array_bus);
78893ce44dSCatherine Sullivan 	priv->counter_array = NULL;
79893ce44dSCatherine Sullivan }
80893ce44dSCatherine Sullivan 
8124aeb56fSKuo Zhao /* NIC requests to report stats */
8224aeb56fSKuo Zhao static void gve_stats_report_task(struct work_struct *work)
8324aeb56fSKuo Zhao {
8424aeb56fSKuo Zhao 	struct gve_priv *priv = container_of(work, struct gve_priv,
8524aeb56fSKuo Zhao 					     stats_report_task);
8624aeb56fSKuo Zhao 	if (gve_get_do_report_stats(priv)) {
8724aeb56fSKuo Zhao 		gve_handle_report_stats(priv);
8824aeb56fSKuo Zhao 		gve_clear_do_report_stats(priv);
8924aeb56fSKuo Zhao 	}
9024aeb56fSKuo Zhao }
9124aeb56fSKuo Zhao 
9224aeb56fSKuo Zhao static void gve_stats_report_schedule(struct gve_priv *priv)
9324aeb56fSKuo Zhao {
9424aeb56fSKuo Zhao 	if (!gve_get_probe_in_progress(priv) &&
9524aeb56fSKuo Zhao 	    !gve_get_reset_in_progress(priv)) {
9624aeb56fSKuo Zhao 		gve_set_do_report_stats(priv);
9724aeb56fSKuo Zhao 		queue_work(priv->gve_wq, &priv->stats_report_task);
9824aeb56fSKuo Zhao 	}
9924aeb56fSKuo Zhao }
10024aeb56fSKuo Zhao 
10124aeb56fSKuo Zhao static void gve_stats_report_timer(struct timer_list *t)
10224aeb56fSKuo Zhao {
10324aeb56fSKuo Zhao 	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
10424aeb56fSKuo Zhao 
10524aeb56fSKuo Zhao 	mod_timer(&priv->stats_report_timer,
10624aeb56fSKuo Zhao 		  round_jiffies(jiffies +
10724aeb56fSKuo Zhao 		  msecs_to_jiffies(priv->stats_report_timer_period)));
10824aeb56fSKuo Zhao 	gve_stats_report_schedule(priv);
10924aeb56fSKuo Zhao }
11024aeb56fSKuo Zhao 
11124aeb56fSKuo Zhao static int gve_alloc_stats_report(struct gve_priv *priv)
11224aeb56fSKuo Zhao {
11324aeb56fSKuo Zhao 	int tx_stats_num, rx_stats_num;
11424aeb56fSKuo Zhao 
115*2f523dc3SDavid Awogbemila 	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
11624aeb56fSKuo Zhao 		       priv->tx_cfg.num_queues;
117*2f523dc3SDavid Awogbemila 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
11824aeb56fSKuo Zhao 		       priv->rx_cfg.num_queues;
11924aeb56fSKuo Zhao 	priv->stats_report_len = sizeof(struct gve_stats_report) +
12024aeb56fSKuo Zhao 				 (tx_stats_num + rx_stats_num) *
12124aeb56fSKuo Zhao 				 sizeof(struct stats);
12224aeb56fSKuo Zhao 	priv->stats_report =
12324aeb56fSKuo Zhao 		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
12424aeb56fSKuo Zhao 				   &priv->stats_report_bus, GFP_KERNEL);
12524aeb56fSKuo Zhao 	if (!priv->stats_report)
12624aeb56fSKuo Zhao 		return -ENOMEM;
12724aeb56fSKuo Zhao 	/* Set up timer for the report-stats task */
12824aeb56fSKuo Zhao 	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
12924aeb56fSKuo Zhao 	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
13024aeb56fSKuo Zhao 	return 0;
13124aeb56fSKuo Zhao }
13224aeb56fSKuo Zhao 
/* Stop the periodic report timer and free the stats report DMA buffer. */
static void gve_free_stats_report(struct gve_priv *priv)
{
	/* Wait for any in-flight timer callback before freeing its buffer. */
	del_timer_sync(&priv->stats_report_timer);
	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
			  priv->stats_report, priv->stats_report_bus);
	priv->stats_report = NULL;
}
14024aeb56fSKuo Zhao 
/* Management vector interrupt handler: all real work is deferred to the
 * service task on the driver workqueue.
 */
static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
	struct gve_priv *priv = arg;

	queue_work(priv->gve_wq, &priv->service_task);
	return IRQ_HANDLED;
}
148893ce44dSCatherine Sullivan 
149893ce44dSCatherine Sullivan static irqreturn_t gve_intr(int irq, void *arg)
150893ce44dSCatherine Sullivan {
151f5cedc84SCatherine Sullivan 	struct gve_notify_block *block = arg;
152f5cedc84SCatherine Sullivan 	struct gve_priv *priv = block->priv;
153f5cedc84SCatherine Sullivan 
154f5cedc84SCatherine Sullivan 	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
155f5cedc84SCatherine Sullivan 	napi_schedule_irqoff(&block->napi);
156893ce44dSCatherine Sullivan 	return IRQ_HANDLED;
157893ce44dSCatherine Sullivan }
158893ce44dSCatherine Sullivan 
/* NAPI poll: service this block's tx and/or rx ring within @budget, then
 * re-enable the block's interrupt if all work completed. Returns the
 * budget consumed (full budget means "poll me again").
 */
static int gve_napi_poll(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block;
	__be32 __iomem *irq_doorbell;
	bool reschedule = false;
	struct gve_priv *priv;

	block = container_of(napi, struct gve_notify_block, napi);
	priv = block->priv;

	if (block->tx)
		reschedule |= gve_tx_poll(block, budget);
	if (block->rx)
		reschedule |= gve_rx_poll(block, budget);

	/* More work pending: report full budget so NAPI keeps polling. */
	if (reschedule)
		return budget;

	napi_complete(napi);
	/* Ack and re-enable this block's interrupt via its doorbell. */
	irq_doorbell = gve_irq_doorbell(priv, block);
	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

	/* Double check we have no extra work.
	 * Ensure unmask synchronizes with checking for work.
	 */
	dma_rmb();
	if (block->tx)
		reschedule |= gve_tx_poll(block, -1);
	if (block->rx)
		reschedule |= gve_rx_poll(block, -1);
	/* Work raced in after completion: re-mask the interrupt if we won
	 * the race to reschedule NAPI ourselves.
	 */
	if (reschedule && napi_reschedule(napi))
		iowrite32be(GVE_IRQ_MASK, irq_doorbell);

	return 0;
}
194f5cedc84SCatherine Sullivan 
195893ce44dSCatherine Sullivan static int gve_alloc_notify_blocks(struct gve_priv *priv)
196893ce44dSCatherine Sullivan {
197893ce44dSCatherine Sullivan 	int num_vecs_requested = priv->num_ntfy_blks + 1;
198893ce44dSCatherine Sullivan 	char *name = priv->dev->name;
199893ce44dSCatherine Sullivan 	unsigned int active_cpus;
200893ce44dSCatherine Sullivan 	int vecs_enabled;
201893ce44dSCatherine Sullivan 	int i, j;
202893ce44dSCatherine Sullivan 	int err;
203893ce44dSCatherine Sullivan 
204893ce44dSCatherine Sullivan 	priv->msix_vectors = kvzalloc(num_vecs_requested *
205893ce44dSCatherine Sullivan 				      sizeof(*priv->msix_vectors), GFP_KERNEL);
206893ce44dSCatherine Sullivan 	if (!priv->msix_vectors)
207893ce44dSCatherine Sullivan 		return -ENOMEM;
208893ce44dSCatherine Sullivan 	for (i = 0; i < num_vecs_requested; i++)
209893ce44dSCatherine Sullivan 		priv->msix_vectors[i].entry = i;
210893ce44dSCatherine Sullivan 	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
211893ce44dSCatherine Sullivan 					     GVE_MIN_MSIX, num_vecs_requested);
212893ce44dSCatherine Sullivan 	if (vecs_enabled < 0) {
213893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
214893ce44dSCatherine Sullivan 			GVE_MIN_MSIX, vecs_enabled);
215893ce44dSCatherine Sullivan 		err = vecs_enabled;
216893ce44dSCatherine Sullivan 		goto abort_with_msix_vectors;
217893ce44dSCatherine Sullivan 	}
218893ce44dSCatherine Sullivan 	if (vecs_enabled != num_vecs_requested) {
219f5cedc84SCatherine Sullivan 		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
220f5cedc84SCatherine Sullivan 		int vecs_per_type = new_num_ntfy_blks / 2;
221f5cedc84SCatherine Sullivan 		int vecs_left = new_num_ntfy_blks % 2;
222f5cedc84SCatherine Sullivan 
223f5cedc84SCatherine Sullivan 		priv->num_ntfy_blks = new_num_ntfy_blks;
224f5cedc84SCatherine Sullivan 		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
225f5cedc84SCatherine Sullivan 						vecs_per_type);
226f5cedc84SCatherine Sullivan 		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
227f5cedc84SCatherine Sullivan 						vecs_per_type + vecs_left);
228893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev,
229f5cedc84SCatherine Sullivan 			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
230f5cedc84SCatherine Sullivan 			vecs_enabled, priv->tx_cfg.max_queues,
231f5cedc84SCatherine Sullivan 			priv->rx_cfg.max_queues);
232f5cedc84SCatherine Sullivan 		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
233f5cedc84SCatherine Sullivan 			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
234f5cedc84SCatherine Sullivan 		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
235f5cedc84SCatherine Sullivan 			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
236893ce44dSCatherine Sullivan 	}
237893ce44dSCatherine Sullivan 	/* Half the notification blocks go to TX and half to RX */
238893ce44dSCatherine Sullivan 	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
239893ce44dSCatherine Sullivan 
240893ce44dSCatherine Sullivan 	/* Setup Management Vector  - the last vector */
241893ce44dSCatherine Sullivan 	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
242893ce44dSCatherine Sullivan 		 name);
243893ce44dSCatherine Sullivan 	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
244893ce44dSCatherine Sullivan 			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
245893ce44dSCatherine Sullivan 	if (err) {
246893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
247893ce44dSCatherine Sullivan 		goto abort_with_msix_enabled;
248893ce44dSCatherine Sullivan 	}
249893ce44dSCatherine Sullivan 	priv->ntfy_blocks =
250893ce44dSCatherine Sullivan 		dma_alloc_coherent(&priv->pdev->dev,
251893ce44dSCatherine Sullivan 				   priv->num_ntfy_blks *
252893ce44dSCatherine Sullivan 				   sizeof(*priv->ntfy_blocks),
253893ce44dSCatherine Sullivan 				   &priv->ntfy_block_bus, GFP_KERNEL);
254893ce44dSCatherine Sullivan 	if (!priv->ntfy_blocks) {
255893ce44dSCatherine Sullivan 		err = -ENOMEM;
256893ce44dSCatherine Sullivan 		goto abort_with_mgmt_vector;
257893ce44dSCatherine Sullivan 	}
258893ce44dSCatherine Sullivan 	/* Setup the other blocks - the first n-1 vectors */
259893ce44dSCatherine Sullivan 	for (i = 0; i < priv->num_ntfy_blks; i++) {
260893ce44dSCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
261893ce44dSCatherine Sullivan 		int msix_idx = i;
262893ce44dSCatherine Sullivan 
263893ce44dSCatherine Sullivan 		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
264893ce44dSCatherine Sullivan 			 name, i);
265893ce44dSCatherine Sullivan 		block->priv = priv;
266893ce44dSCatherine Sullivan 		err = request_irq(priv->msix_vectors[msix_idx].vector,
267893ce44dSCatherine Sullivan 				  gve_intr, 0, block->name, block);
268893ce44dSCatherine Sullivan 		if (err) {
269893ce44dSCatherine Sullivan 			dev_err(&priv->pdev->dev,
270893ce44dSCatherine Sullivan 				"Failed to receive msix vector %d\n", i);
271893ce44dSCatherine Sullivan 			goto abort_with_some_ntfy_blocks;
272893ce44dSCatherine Sullivan 		}
273893ce44dSCatherine Sullivan 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
274893ce44dSCatherine Sullivan 				      get_cpu_mask(i % active_cpus));
275893ce44dSCatherine Sullivan 	}
276893ce44dSCatherine Sullivan 	return 0;
277893ce44dSCatherine Sullivan abort_with_some_ntfy_blocks:
278893ce44dSCatherine Sullivan 	for (j = 0; j < i; j++) {
279893ce44dSCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[j];
280893ce44dSCatherine Sullivan 		int msix_idx = j;
281893ce44dSCatherine Sullivan 
282893ce44dSCatherine Sullivan 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
283893ce44dSCatherine Sullivan 				      NULL);
284893ce44dSCatherine Sullivan 		free_irq(priv->msix_vectors[msix_idx].vector, block);
285893ce44dSCatherine Sullivan 	}
286893ce44dSCatherine Sullivan 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
287893ce44dSCatherine Sullivan 			  sizeof(*priv->ntfy_blocks),
288893ce44dSCatherine Sullivan 			  priv->ntfy_blocks, priv->ntfy_block_bus);
289893ce44dSCatherine Sullivan 	priv->ntfy_blocks = NULL;
290893ce44dSCatherine Sullivan abort_with_mgmt_vector:
291893ce44dSCatherine Sullivan 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
292893ce44dSCatherine Sullivan abort_with_msix_enabled:
293893ce44dSCatherine Sullivan 	pci_disable_msix(priv->pdev);
294893ce44dSCatherine Sullivan abort_with_msix_vectors:
2958ec1e900SChuhong Yuan 	kvfree(priv->msix_vectors);
296893ce44dSCatherine Sullivan 	priv->msix_vectors = NULL;
297893ce44dSCatherine Sullivan 	return err;
298893ce44dSCatherine Sullivan }
299893ce44dSCatherine Sullivan 
300893ce44dSCatherine Sullivan static void gve_free_notify_blocks(struct gve_priv *priv)
301893ce44dSCatherine Sullivan {
302893ce44dSCatherine Sullivan 	int i;
303893ce44dSCatherine Sullivan 
304893ce44dSCatherine Sullivan 	/* Free the irqs */
305893ce44dSCatherine Sullivan 	for (i = 0; i < priv->num_ntfy_blks; i++) {
306893ce44dSCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
307893ce44dSCatherine Sullivan 		int msix_idx = i;
308893ce44dSCatherine Sullivan 
309893ce44dSCatherine Sullivan 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
310893ce44dSCatherine Sullivan 				      NULL);
311893ce44dSCatherine Sullivan 		free_irq(priv->msix_vectors[msix_idx].vector, block);
312893ce44dSCatherine Sullivan 	}
313893ce44dSCatherine Sullivan 	dma_free_coherent(&priv->pdev->dev,
314893ce44dSCatherine Sullivan 			  priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
315893ce44dSCatherine Sullivan 			  priv->ntfy_blocks, priv->ntfy_block_bus);
316893ce44dSCatherine Sullivan 	priv->ntfy_blocks = NULL;
317893ce44dSCatherine Sullivan 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
318893ce44dSCatherine Sullivan 	pci_disable_msix(priv->pdev);
3198ec1e900SChuhong Yuan 	kvfree(priv->msix_vectors);
320893ce44dSCatherine Sullivan 	priv->msix_vectors = NULL;
321893ce44dSCatherine Sullivan }
322893ce44dSCatherine Sullivan 
/* Allocate all host-side resources the device needs (event counters,
 * notify blocks + IRQs, stats report buffer) and register them with the
 * device over the admin queue. Returns 0 on success or a negative errno,
 * with partial allocations unwound.
 */
static int gve_setup_device_resources(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_counter_array(priv);
	if (err)
		return err;
	err = gve_alloc_notify_blocks(priv);
	if (err)
		goto abort_with_counter;
	err = gve_alloc_stats_report(priv);
	if (err)
		goto abort_with_ntfy_blocks;
	/* Tell the device where the counter array and notify blocks live. */
	err = gve_adminq_configure_device_resources(priv,
						    priv->counter_array_bus,
						    priv->num_event_counters,
						    priv->ntfy_block_bus,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		dev_err(&priv->pdev->dev,
			"could not setup device_resources: err=%d\n", err);
		err = -ENXIO;
		goto abort_with_stats_report;
	}
	/* Attach the stats report buffer; failure is logged but not fatal. */
	err = gve_adminq_report_stats(priv, priv->stats_report_len,
				      priv->stats_report_bus,
				      GVE_STATS_REPORT_TIMER_PERIOD);
	if (err)
		dev_err(&priv->pdev->dev,
			"Failed to report stats: err=%d\n", err);
	gve_set_device_resources_ok(priv);
	return 0;
abort_with_stats_report:
	gve_free_stats_report(priv);
abort_with_ntfy_blocks:
	gve_free_notify_blocks(priv);
abort_with_counter:
	gve_free_counter_array(priv);
	return err;
}
363893ce44dSCatherine Sullivan 
3649e5f7d26SCatherine Sullivan static void gve_trigger_reset(struct gve_priv *priv);
3659e5f7d26SCatherine Sullivan 
/* Undo gve_setup_device_resources(): detach the stats report and
 * deconfigure device resources (triggering a reset if the device does not
 * acknowledge), then free the host-side allocations.
 */
static void gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		/* detach the stats report */
		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to detach stats report: err=%d\n", err);
			gve_trigger_reset(priv);
		}
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Could not deconfigure device resources: err=%d\n",
				err);
			gve_trigger_reset(priv);
		}
	}
	/* Host-side frees proceed regardless of the admin-queue outcome. */
	gve_free_counter_array(priv);
	gve_free_notify_blocks(priv);
	gve_free_stats_report(priv);
	gve_clear_device_resources_ok(priv);
}
392893ce44dSCatherine Sullivan 
393f5cedc84SCatherine Sullivan static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
394f5cedc84SCatherine Sullivan {
395f5cedc84SCatherine Sullivan 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
396f5cedc84SCatherine Sullivan 
397f5cedc84SCatherine Sullivan 	netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
398f5cedc84SCatherine Sullivan 		       NAPI_POLL_WEIGHT);
399f5cedc84SCatherine Sullivan }
400f5cedc84SCatherine Sullivan 
401f5cedc84SCatherine Sullivan static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
402f5cedc84SCatherine Sullivan {
403f5cedc84SCatherine Sullivan 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
404f5cedc84SCatherine Sullivan 
405f5cedc84SCatherine Sullivan 	netif_napi_del(&block->napi);
406f5cedc84SCatherine Sullivan }
407f5cedc84SCatherine Sullivan 
408f5cedc84SCatherine Sullivan static int gve_register_qpls(struct gve_priv *priv)
409f5cedc84SCatherine Sullivan {
410f5cedc84SCatherine Sullivan 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
411f5cedc84SCatherine Sullivan 	int err;
412f5cedc84SCatherine Sullivan 	int i;
413f5cedc84SCatherine Sullivan 
414f5cedc84SCatherine Sullivan 	for (i = 0; i < num_qpls; i++) {
415f5cedc84SCatherine Sullivan 		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
416f5cedc84SCatherine Sullivan 		if (err) {
417f5cedc84SCatherine Sullivan 			netif_err(priv, drv, priv->dev,
418f5cedc84SCatherine Sullivan 				  "failed to register queue page list %d\n",
419f5cedc84SCatherine Sullivan 				  priv->qpls[i].id);
4209e5f7d26SCatherine Sullivan 			/* This failure will trigger a reset - no need to clean
4219e5f7d26SCatherine Sullivan 			 * up
4229e5f7d26SCatherine Sullivan 			 */
423f5cedc84SCatherine Sullivan 			return err;
424f5cedc84SCatherine Sullivan 		}
425f5cedc84SCatherine Sullivan 	}
426f5cedc84SCatherine Sullivan 	return 0;
427f5cedc84SCatherine Sullivan }
428f5cedc84SCatherine Sullivan 
429f5cedc84SCatherine Sullivan static int gve_unregister_qpls(struct gve_priv *priv)
430f5cedc84SCatherine Sullivan {
431f5cedc84SCatherine Sullivan 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
432f5cedc84SCatherine Sullivan 	int err;
433f5cedc84SCatherine Sullivan 	int i;
434f5cedc84SCatherine Sullivan 
435f5cedc84SCatherine Sullivan 	for (i = 0; i < num_qpls; i++) {
436f5cedc84SCatherine Sullivan 		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
4379e5f7d26SCatherine Sullivan 		/* This failure will trigger a reset - no need to clean up */
438f5cedc84SCatherine Sullivan 		if (err) {
439f5cedc84SCatherine Sullivan 			netif_err(priv, drv, priv->dev,
440f5cedc84SCatherine Sullivan 				  "Failed to unregister queue page list %d\n",
441f5cedc84SCatherine Sullivan 				  priv->qpls[i].id);
442f5cedc84SCatherine Sullivan 			return err;
443f5cedc84SCatherine Sullivan 		}
444f5cedc84SCatherine Sullivan 	}
445f5cedc84SCatherine Sullivan 	return 0;
446f5cedc84SCatherine Sullivan }
447f5cedc84SCatherine Sullivan 
448f5cedc84SCatherine Sullivan static int gve_create_rings(struct gve_priv *priv)
449f5cedc84SCatherine Sullivan {
450f5cedc84SCatherine Sullivan 	int err;
451f5cedc84SCatherine Sullivan 	int i;
452f5cedc84SCatherine Sullivan 
453f5cedc84SCatherine Sullivan 	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
454f5cedc84SCatherine Sullivan 		err = gve_adminq_create_tx_queue(priv, i);
455f5cedc84SCatherine Sullivan 		if (err) {
456f5cedc84SCatherine Sullivan 			netif_err(priv, drv, priv->dev, "failed to create tx queue %d\n",
457f5cedc84SCatherine Sullivan 				  i);
4589e5f7d26SCatherine Sullivan 			/* This failure will trigger a reset - no need to clean
4599e5f7d26SCatherine Sullivan 			 * up
4609e5f7d26SCatherine Sullivan 			 */
461f5cedc84SCatherine Sullivan 			return err;
462f5cedc84SCatherine Sullivan 		}
463f5cedc84SCatherine Sullivan 		netif_dbg(priv, drv, priv->dev, "created tx queue %d\n", i);
464f5cedc84SCatherine Sullivan 	}
465f5cedc84SCatherine Sullivan 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
466f5cedc84SCatherine Sullivan 		err = gve_adminq_create_rx_queue(priv, i);
467f5cedc84SCatherine Sullivan 		if (err) {
468f5cedc84SCatherine Sullivan 			netif_err(priv, drv, priv->dev, "failed to create rx queue %d\n",
469f5cedc84SCatherine Sullivan 				  i);
4709e5f7d26SCatherine Sullivan 			/* This failure will trigger a reset - no need to clean
4719e5f7d26SCatherine Sullivan 			 * up
4729e5f7d26SCatherine Sullivan 			 */
473f5cedc84SCatherine Sullivan 			return err;
474f5cedc84SCatherine Sullivan 		}
475f5cedc84SCatherine Sullivan 		/* Rx data ring has been prefilled with packet buffers at
476f5cedc84SCatherine Sullivan 		 * queue allocation time.
477f5cedc84SCatherine Sullivan 		 * Write the doorbell to provide descriptor slots and packet
478f5cedc84SCatherine Sullivan 		 * buffers to the NIC.
479f5cedc84SCatherine Sullivan 		 */
480f5cedc84SCatherine Sullivan 		gve_rx_write_doorbell(priv, &priv->rx[i]);
481f5cedc84SCatherine Sullivan 		netif_dbg(priv, drv, priv->dev, "created rx queue %d\n", i);
482f5cedc84SCatherine Sullivan 	}
483f5cedc84SCatherine Sullivan 
484f5cedc84SCatherine Sullivan 	return 0;
485f5cedc84SCatherine Sullivan }
486f5cedc84SCatherine Sullivan 
/* Allocate the tx and rx ring arrays and their rings, initialize each
 * ring's u64_stats sync, and attach NAPI to each ring's notification
 * block. Returns 0 on success or a negative errno, with partial
 * allocations unwound.
 */
static int gve_alloc_rings(struct gve_priv *priv)
{
	int ntfy_idx;
	int err;
	int i;

	/* Setup tx rings */
	priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
			    GFP_KERNEL);
	if (!priv->tx)
		return -ENOMEM;
	err = gve_tx_alloc_rings(priv);
	if (err)
		goto free_tx;
	/* Setup rx rings */
	priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
			    GFP_KERNEL);
	if (!priv->rx) {
		err = -ENOMEM;
		goto free_tx_queue;
	}
	err = gve_rx_alloc_rings(priv);
	if (err)
		goto free_rx;
	/* Add tx napi & init sync stats*/
	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		u64_stats_init(&priv->tx[i].statss);
		ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
		gve_add_napi(priv, ntfy_idx);
	}
	/* Add rx napi  & init sync stats*/
	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		u64_stats_init(&priv->rx[i].statss);
		ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
		gve_add_napi(priv, ntfy_idx);
	}

	return 0;

free_rx:
	kvfree(priv->rx);
	priv->rx = NULL;
free_tx_queue:
	gve_tx_free_rings(priv);
free_tx:
	kvfree(priv->tx);
	priv->tx = NULL;
	return err;
}
536f5cedc84SCatherine Sullivan 
537f5cedc84SCatherine Sullivan static int gve_destroy_rings(struct gve_priv *priv)
538f5cedc84SCatherine Sullivan {
539f5cedc84SCatherine Sullivan 	int err;
540f5cedc84SCatherine Sullivan 	int i;
541f5cedc84SCatherine Sullivan 
542f5cedc84SCatherine Sullivan 	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
543f5cedc84SCatherine Sullivan 		err = gve_adminq_destroy_tx_queue(priv, i);
544f5cedc84SCatherine Sullivan 		if (err) {
545f5cedc84SCatherine Sullivan 			netif_err(priv, drv, priv->dev,
546f5cedc84SCatherine Sullivan 				  "failed to destroy tx queue %d\n",
547f5cedc84SCatherine Sullivan 				  i);
5489e5f7d26SCatherine Sullivan 			/* This failure will trigger a reset - no need to clean
5499e5f7d26SCatherine Sullivan 			 * up
5509e5f7d26SCatherine Sullivan 			 */
551f5cedc84SCatherine Sullivan 			return err;
552f5cedc84SCatherine Sullivan 		}
553f5cedc84SCatherine Sullivan 		netif_dbg(priv, drv, priv->dev, "destroyed tx queue %d\n", i);
554f5cedc84SCatherine Sullivan 	}
555f5cedc84SCatherine Sullivan 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
556f5cedc84SCatherine Sullivan 		err = gve_adminq_destroy_rx_queue(priv, i);
557f5cedc84SCatherine Sullivan 		if (err) {
558f5cedc84SCatherine Sullivan 			netif_err(priv, drv, priv->dev,
559f5cedc84SCatherine Sullivan 				  "failed to destroy rx queue %d\n",
560f5cedc84SCatherine Sullivan 				  i);
5619e5f7d26SCatherine Sullivan 			/* This failure will trigger a reset - no need to clean
5629e5f7d26SCatherine Sullivan 			 * up
5639e5f7d26SCatherine Sullivan 			 */
564f5cedc84SCatherine Sullivan 			return err;
565f5cedc84SCatherine Sullivan 		}
566f5cedc84SCatherine Sullivan 		netif_dbg(priv, drv, priv->dev, "destroyed rx queue %d\n", i);
567f5cedc84SCatherine Sullivan 	}
568f5cedc84SCatherine Sullivan 	return 0;
569f5cedc84SCatherine Sullivan }
570f5cedc84SCatherine Sullivan 
571f5cedc84SCatherine Sullivan static void gve_free_rings(struct gve_priv *priv)
572f5cedc84SCatherine Sullivan {
573f5cedc84SCatherine Sullivan 	int ntfy_idx;
574f5cedc84SCatherine Sullivan 	int i;
575f5cedc84SCatherine Sullivan 
576f5cedc84SCatherine Sullivan 	if (priv->tx) {
577f5cedc84SCatherine Sullivan 		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
578f5cedc84SCatherine Sullivan 			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
579f5cedc84SCatherine Sullivan 			gve_remove_napi(priv, ntfy_idx);
580f5cedc84SCatherine Sullivan 		}
581f5cedc84SCatherine Sullivan 		gve_tx_free_rings(priv);
5828ec1e900SChuhong Yuan 		kvfree(priv->tx);
583f5cedc84SCatherine Sullivan 		priv->tx = NULL;
584f5cedc84SCatherine Sullivan 	}
585f5cedc84SCatherine Sullivan 	if (priv->rx) {
586f5cedc84SCatherine Sullivan 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
587f5cedc84SCatherine Sullivan 			ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
588f5cedc84SCatherine Sullivan 			gve_remove_napi(priv, ntfy_idx);
589f5cedc84SCatherine Sullivan 		}
590f5cedc84SCatherine Sullivan 		gve_rx_free_rings(priv);
5918ec1e900SChuhong Yuan 		kvfree(priv->rx);
592f5cedc84SCatherine Sullivan 		priv->rx = NULL;
593f5cedc84SCatherine Sullivan 	}
594f5cedc84SCatherine Sullivan }
595f5cedc84SCatherine Sullivan 
/* Allocate one page and DMA-map it for device access.
 *
 * On success *page and *dma are valid and 0 is returned.  On failure
 * the matching priv error counter is bumped and -ENOMEM is returned.
 * NOTE(review): if only the DMA mapping fails, *page is left pointing
 * at the already-released page — callers must not use it on error.
 */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction dir)
{
	*page = alloc_page(GFP_KERNEL);
	if (!*page) {
		priv->page_alloc_fail++;
		return -ENOMEM;
	}
	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
	if (dma_mapping_error(dev, *dma)) {
		priv->dma_mapping_error++;
		put_page(*page);
		return -ENOMEM;
	}
	return 0;
}
613f5cedc84SCatherine Sullivan 
614f5cedc84SCatherine Sullivan static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
615f5cedc84SCatherine Sullivan 				     int pages)
616f5cedc84SCatherine Sullivan {
617f5cedc84SCatherine Sullivan 	struct gve_queue_page_list *qpl = &priv->qpls[id];
618f5cedc84SCatherine Sullivan 	int err;
619f5cedc84SCatherine Sullivan 	int i;
620f5cedc84SCatherine Sullivan 
621f5cedc84SCatherine Sullivan 	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
622f5cedc84SCatherine Sullivan 		netif_err(priv, drv, priv->dev,
623f5cedc84SCatherine Sullivan 			  "Reached max number of registered pages %llu > %llu\n",
624f5cedc84SCatherine Sullivan 			  pages + priv->num_registered_pages,
625f5cedc84SCatherine Sullivan 			  priv->max_registered_pages);
626f5cedc84SCatherine Sullivan 		return -EINVAL;
627f5cedc84SCatherine Sullivan 	}
628f5cedc84SCatherine Sullivan 
629f5cedc84SCatherine Sullivan 	qpl->id = id;
630a95069ecSJeroen de Borst 	qpl->num_entries = 0;
631f5cedc84SCatherine Sullivan 	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
632f5cedc84SCatherine Sullivan 	/* caller handles clean up */
633f5cedc84SCatherine Sullivan 	if (!qpl->pages)
634f5cedc84SCatherine Sullivan 		return -ENOMEM;
635f5cedc84SCatherine Sullivan 	qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
636f5cedc84SCatherine Sullivan 				   GFP_KERNEL);
637f5cedc84SCatherine Sullivan 	/* caller handles clean up */
638f5cedc84SCatherine Sullivan 	if (!qpl->page_buses)
639f5cedc84SCatherine Sullivan 		return -ENOMEM;
640f5cedc84SCatherine Sullivan 
641f5cedc84SCatherine Sullivan 	for (i = 0; i < pages; i++) {
642433e274bSKuo Zhao 		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
643f5cedc84SCatherine Sullivan 				     &qpl->page_buses[i],
644f5cedc84SCatherine Sullivan 				     gve_qpl_dma_dir(priv, id));
645f5cedc84SCatherine Sullivan 		/* caller handles clean up */
646f5cedc84SCatherine Sullivan 		if (err)
647f5cedc84SCatherine Sullivan 			return -ENOMEM;
648a95069ecSJeroen de Borst 		qpl->num_entries++;
649f5cedc84SCatherine Sullivan 	}
650f5cedc84SCatherine Sullivan 	priv->num_registered_pages += pages;
651f5cedc84SCatherine Sullivan 
652f5cedc84SCatherine Sullivan 	return 0;
653f5cedc84SCatherine Sullivan }
654f5cedc84SCatherine Sullivan 
/* Undo gve_alloc_page(): unmap the DMA mapping if one was established,
 * then drop the page reference.  Tolerates a NULL page / failed mapping
 * so it can be used to clean up partially initialized entries.
 */
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction dir)
{
	if (!dma_mapping_error(dev, dma))
		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
	if (page)
		put_page(page);
}
663f5cedc84SCatherine Sullivan 
664f5cedc84SCatherine Sullivan static void gve_free_queue_page_list(struct gve_priv *priv,
665f5cedc84SCatherine Sullivan 				     int id)
666f5cedc84SCatherine Sullivan {
667f5cedc84SCatherine Sullivan 	struct gve_queue_page_list *qpl = &priv->qpls[id];
668f5cedc84SCatherine Sullivan 	int i;
669f5cedc84SCatherine Sullivan 
670f5cedc84SCatherine Sullivan 	if (!qpl->pages)
671f5cedc84SCatherine Sullivan 		return;
672f5cedc84SCatherine Sullivan 	if (!qpl->page_buses)
673f5cedc84SCatherine Sullivan 		goto free_pages;
674f5cedc84SCatherine Sullivan 
675f5cedc84SCatherine Sullivan 	for (i = 0; i < qpl->num_entries; i++)
676f5cedc84SCatherine Sullivan 		gve_free_page(&priv->pdev->dev, qpl->pages[i],
677f5cedc84SCatherine Sullivan 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
678f5cedc84SCatherine Sullivan 
6798ec1e900SChuhong Yuan 	kvfree(qpl->page_buses);
680f5cedc84SCatherine Sullivan free_pages:
6818ec1e900SChuhong Yuan 	kvfree(qpl->pages);
682f5cedc84SCatherine Sullivan 	priv->num_registered_pages -= qpl->num_entries;
683f5cedc84SCatherine Sullivan }
684f5cedc84SCatherine Sullivan 
685f5cedc84SCatherine Sullivan static int gve_alloc_qpls(struct gve_priv *priv)
686f5cedc84SCatherine Sullivan {
687f5cedc84SCatherine Sullivan 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
688f5cedc84SCatherine Sullivan 	int i, j;
689f5cedc84SCatherine Sullivan 	int err;
690f5cedc84SCatherine Sullivan 
691f5cedc84SCatherine Sullivan 	priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
692f5cedc84SCatherine Sullivan 	if (!priv->qpls)
693f5cedc84SCatherine Sullivan 		return -ENOMEM;
694f5cedc84SCatherine Sullivan 
695f5cedc84SCatherine Sullivan 	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
696f5cedc84SCatherine Sullivan 		err = gve_alloc_queue_page_list(priv, i,
697f5cedc84SCatherine Sullivan 						priv->tx_pages_per_qpl);
698f5cedc84SCatherine Sullivan 		if (err)
699f5cedc84SCatherine Sullivan 			goto free_qpls;
700f5cedc84SCatherine Sullivan 	}
701f5cedc84SCatherine Sullivan 	for (; i < num_qpls; i++) {
702f5cedc84SCatherine Sullivan 		err = gve_alloc_queue_page_list(priv, i,
703f5cedc84SCatherine Sullivan 						priv->rx_pages_per_qpl);
704f5cedc84SCatherine Sullivan 		if (err)
705f5cedc84SCatherine Sullivan 			goto free_qpls;
706f5cedc84SCatherine Sullivan 	}
707f5cedc84SCatherine Sullivan 
708f5cedc84SCatherine Sullivan 	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
709f5cedc84SCatherine Sullivan 				     sizeof(unsigned long) * BITS_PER_BYTE;
710f5cedc84SCatherine Sullivan 	priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
711f5cedc84SCatherine Sullivan 					    sizeof(unsigned long), GFP_KERNEL);
712877cb240SWei Yongjun 	if (!priv->qpl_cfg.qpl_id_map) {
713877cb240SWei Yongjun 		err = -ENOMEM;
714f5cedc84SCatherine Sullivan 		goto free_qpls;
715877cb240SWei Yongjun 	}
716f5cedc84SCatherine Sullivan 
717f5cedc84SCatherine Sullivan 	return 0;
718f5cedc84SCatherine Sullivan 
719f5cedc84SCatherine Sullivan free_qpls:
720f5cedc84SCatherine Sullivan 	for (j = 0; j <= i; j++)
721f5cedc84SCatherine Sullivan 		gve_free_queue_page_list(priv, j);
7228ec1e900SChuhong Yuan 	kvfree(priv->qpls);
723f5cedc84SCatherine Sullivan 	return err;
724f5cedc84SCatherine Sullivan }
725f5cedc84SCatherine Sullivan 
726f5cedc84SCatherine Sullivan static void gve_free_qpls(struct gve_priv *priv)
727f5cedc84SCatherine Sullivan {
728f5cedc84SCatherine Sullivan 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
729f5cedc84SCatherine Sullivan 	int i;
730f5cedc84SCatherine Sullivan 
7318ec1e900SChuhong Yuan 	kvfree(priv->qpl_cfg.qpl_id_map);
732f5cedc84SCatherine Sullivan 
733f5cedc84SCatherine Sullivan 	for (i = 0; i < num_qpls; i++)
734f5cedc84SCatherine Sullivan 		gve_free_queue_page_list(priv, i);
735f5cedc84SCatherine Sullivan 
7368ec1e900SChuhong Yuan 	kvfree(priv->qpls);
737f5cedc84SCatherine Sullivan }
738f5cedc84SCatherine Sullivan 
/* Use this to schedule a reset when the device is capable of continuing
 * to handle other requests in its current state. If it is not, do a reset
 * in thread instead.
 */
void gve_schedule_reset(struct gve_priv *priv)
{
	/* Latch the request, then let the service task perform the reset
	 * under the rtnl lock (see gve_handle_reset()).
	 */
	gve_set_do_reset(priv);
	queue_work(priv->gve_wq, &priv->service_task);
}
7489e5f7d26SCatherine Sullivan 
7499e5f7d26SCatherine Sullivan static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
7509e5f7d26SCatherine Sullivan static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
751f5cedc84SCatherine Sullivan static void gve_turndown(struct gve_priv *priv);
752f5cedc84SCatherine Sullivan static void gve_turnup(struct gve_priv *priv);
753f5cedc84SCatherine Sullivan 
/* ndo_open: allocate queue page lists and rings, register them with the
 * device, create the device-side queues, and enable traffic.
 *
 * Failures before the device is touched unwind locally; failures after
 * (register/create) require a reset.  If we are already inside a reset
 * (rtnl held by the reset path) just return, otherwise reset here.
 */
static int gve_open(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	err = gve_alloc_qpls(priv);
	if (err)
		return err;
	err = gve_alloc_rings(priv);
	if (err)
		goto free_qpls;

	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
	if (err)
		goto free_rings;
	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
	if (err)
		goto free_rings;

	err = gve_register_qpls(priv);
	if (err)
		goto reset;
	err = gve_create_rings(priv);
	if (err)
		goto reset;
	gve_set_device_rings_ok(priv);

	/* Restart periodic stats reporting if the NIC had requested it */
	if (gve_get_report_stats(priv))
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
				msecs_to_jiffies(priv->stats_report_timer_period)));

	gve_turnup(priv);
	netif_carrier_on(dev);
	priv->interface_up_cnt++;
	return 0;

free_rings:
	gve_free_rings(priv);
free_qpls:
	gve_free_qpls(priv);
	return err;

reset:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	/* if this fails there is nothing we can do so just ignore the return */
	gve_reset_recovery(priv, false);
	/* return the original error */
	return err;
}
810f5cedc84SCatherine Sullivan 
/* ndo_stop: quiesce traffic, destroy the device-side queues, and free
 * the host-side ring and QPL state.
 *
 * If the admin queue commands fail a reset is required; if we are
 * already inside a reset just return, otherwise reset here.
 */
static int gve_close(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);
	if (gve_get_device_rings_ok(priv)) {
		gve_turndown(priv);
		err = gve_destroy_rings(priv);
		if (err)
			goto err;
		err = gve_unregister_qpls(priv);
		if (err)
			goto err;
		gve_clear_device_rings_ok(priv);
	}
	del_timer_sync(&priv->stats_report_timer);

	gve_free_rings(priv);
	gve_free_qpls(priv);
	priv->interface_down_cnt++;
	return 0;

err:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	return gve_reset_recovery(priv, false);
}
844f5cedc84SCatherine Sullivan 
845e5b845dcSCatherine Sullivan int gve_adjust_queues(struct gve_priv *priv,
846e5b845dcSCatherine Sullivan 		      struct gve_queue_config new_rx_config,
847e5b845dcSCatherine Sullivan 		      struct gve_queue_config new_tx_config)
848e5b845dcSCatherine Sullivan {
849e5b845dcSCatherine Sullivan 	int err;
850e5b845dcSCatherine Sullivan 
851e5b845dcSCatherine Sullivan 	if (netif_carrier_ok(priv->dev)) {
852e5b845dcSCatherine Sullivan 		/* To make this process as simple as possible we teardown the
853e5b845dcSCatherine Sullivan 		 * device, set the new configuration, and then bring the device
854e5b845dcSCatherine Sullivan 		 * up again.
855e5b845dcSCatherine Sullivan 		 */
856e5b845dcSCatherine Sullivan 		err = gve_close(priv->dev);
857e5b845dcSCatherine Sullivan 		/* we have already tried to reset in close,
858e5b845dcSCatherine Sullivan 		 * just fail at this point
859e5b845dcSCatherine Sullivan 		 */
860e5b845dcSCatherine Sullivan 		if (err)
861e5b845dcSCatherine Sullivan 			return err;
862e5b845dcSCatherine Sullivan 		priv->tx_cfg = new_tx_config;
863e5b845dcSCatherine Sullivan 		priv->rx_cfg = new_rx_config;
864e5b845dcSCatherine Sullivan 
865e5b845dcSCatherine Sullivan 		err = gve_open(priv->dev);
866e5b845dcSCatherine Sullivan 		if (err)
867e5b845dcSCatherine Sullivan 			goto err;
868e5b845dcSCatherine Sullivan 
869e5b845dcSCatherine Sullivan 		return 0;
870e5b845dcSCatherine Sullivan 	}
871e5b845dcSCatherine Sullivan 	/* Set the config for the next up. */
872e5b845dcSCatherine Sullivan 	priv->tx_cfg = new_tx_config;
873e5b845dcSCatherine Sullivan 	priv->rx_cfg = new_rx_config;
874e5b845dcSCatherine Sullivan 
875e5b845dcSCatherine Sullivan 	return 0;
876e5b845dcSCatherine Sullivan err:
877e5b845dcSCatherine Sullivan 	netif_err(priv, drv, priv->dev,
878e5b845dcSCatherine Sullivan 		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
879e5b845dcSCatherine Sullivan 	gve_turndown(priv);
880e5b845dcSCatherine Sullivan 	return err;
881e5b845dcSCatherine Sullivan }
882e5b845dcSCatherine Sullivan 
883f5cedc84SCatherine Sullivan static void gve_turndown(struct gve_priv *priv)
884f5cedc84SCatherine Sullivan {
885f5cedc84SCatherine Sullivan 	int idx;
886f5cedc84SCatherine Sullivan 
887f5cedc84SCatherine Sullivan 	if (netif_carrier_ok(priv->dev))
888f5cedc84SCatherine Sullivan 		netif_carrier_off(priv->dev);
889f5cedc84SCatherine Sullivan 
890f5cedc84SCatherine Sullivan 	if (!gve_get_napi_enabled(priv))
891f5cedc84SCatherine Sullivan 		return;
892f5cedc84SCatherine Sullivan 
893f5cedc84SCatherine Sullivan 	/* Disable napi to prevent more work from coming in */
894f5cedc84SCatherine Sullivan 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
895f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
896f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
897f5cedc84SCatherine Sullivan 
898f5cedc84SCatherine Sullivan 		napi_disable(&block->napi);
899f5cedc84SCatherine Sullivan 	}
900f5cedc84SCatherine Sullivan 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
901f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
902f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
903f5cedc84SCatherine Sullivan 
904f5cedc84SCatherine Sullivan 		napi_disable(&block->napi);
905f5cedc84SCatherine Sullivan 	}
906f5cedc84SCatherine Sullivan 
907f5cedc84SCatherine Sullivan 	/* Stop tx queues */
908f5cedc84SCatherine Sullivan 	netif_tx_disable(priv->dev);
909f5cedc84SCatherine Sullivan 
910f5cedc84SCatherine Sullivan 	gve_clear_napi_enabled(priv);
91124aeb56fSKuo Zhao 	gve_clear_report_stats(priv);
912f5cedc84SCatherine Sullivan }
913f5cedc84SCatherine Sullivan 
914f5cedc84SCatherine Sullivan static void gve_turnup(struct gve_priv *priv)
915f5cedc84SCatherine Sullivan {
916f5cedc84SCatherine Sullivan 	int idx;
917f5cedc84SCatherine Sullivan 
918f5cedc84SCatherine Sullivan 	/* Start the tx queues */
919f5cedc84SCatherine Sullivan 	netif_tx_start_all_queues(priv->dev);
920f5cedc84SCatherine Sullivan 
921f5cedc84SCatherine Sullivan 	/* Enable napi and unmask interrupts for all queues */
922f5cedc84SCatherine Sullivan 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
923f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
924f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
925f5cedc84SCatherine Sullivan 
926f5cedc84SCatherine Sullivan 		napi_enable(&block->napi);
927f5cedc84SCatherine Sullivan 		iowrite32be(0, gve_irq_doorbell(priv, block));
928f5cedc84SCatherine Sullivan 	}
929f5cedc84SCatherine Sullivan 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
930f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
931f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
932f5cedc84SCatherine Sullivan 
933f5cedc84SCatherine Sullivan 		napi_enable(&block->napi);
934f5cedc84SCatherine Sullivan 		iowrite32be(0, gve_irq_doorbell(priv, block));
935f5cedc84SCatherine Sullivan 	}
936f5cedc84SCatherine Sullivan 
937f5cedc84SCatherine Sullivan 	gve_set_napi_enabled(priv);
938f5cedc84SCatherine Sullivan }
939f5cedc84SCatherine Sullivan 
/* ndo_tx_timeout: a tx queue appears stuck, so schedule a device reset
 * via the service task and count the event.
 */
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gve_priv *priv = netdev_priv(dev);

	gve_schedule_reset(priv);
	priv->tx_timeo_cnt++;
}
947f5cedc84SCatherine Sullivan 
/* netdev callbacks exported to the core networking stack */
static const struct net_device_ops gve_netdev_ops = {
	.ndo_start_xmit		=	gve_tx,
	.ndo_open		=	gve_open,
	.ndo_stop		=	gve_close,
	.ndo_get_stats64	=	gve_get_stats,
	.ndo_tx_timeout         =       gve_tx_timeout,
};
955f5cedc84SCatherine Sullivan 
9569e5f7d26SCatherine Sullivan static void gve_handle_status(struct gve_priv *priv, u32 status)
9579e5f7d26SCatherine Sullivan {
9589e5f7d26SCatherine Sullivan 	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
9599e5f7d26SCatherine Sullivan 		dev_info(&priv->pdev->dev, "Device requested reset.\n");
9609e5f7d26SCatherine Sullivan 		gve_set_do_reset(priv);
9619e5f7d26SCatherine Sullivan 	}
96224aeb56fSKuo Zhao 	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
96324aeb56fSKuo Zhao 		priv->stats_report_trigger_cnt++;
96424aeb56fSKuo Zhao 		gve_set_do_report_stats(priv);
96524aeb56fSKuo Zhao 	}
9669e5f7d26SCatherine Sullivan }
9679e5f7d26SCatherine Sullivan 
/* Perform a pending reset (if one was latched) under the rtnl lock. */
static void gve_handle_reset(struct gve_priv *priv)
{
	/* A service task will be scheduled at the end of probe to catch any
	 * resets that need to happen, and we don't want to reset until
	 * probe is done.
	 */
	if (gve_get_probe_in_progress(priv))
		return;

	if (gve_get_do_reset(priv)) {
		rtnl_lock();
		gve_reset(priv, false);
		rtnl_unlock();
	}
}
9839e5f7d26SCatherine Sullivan 
98424aeb56fSKuo Zhao void gve_handle_report_stats(struct gve_priv *priv)
98524aeb56fSKuo Zhao {
98624aeb56fSKuo Zhao 	int idx, stats_idx = 0, tx_bytes;
98724aeb56fSKuo Zhao 	unsigned int start = 0;
98824aeb56fSKuo Zhao 	struct stats *stats = priv->stats_report->stats;
98924aeb56fSKuo Zhao 
99024aeb56fSKuo Zhao 	if (!gve_get_report_stats(priv))
99124aeb56fSKuo Zhao 		return;
99224aeb56fSKuo Zhao 
99324aeb56fSKuo Zhao 	be64_add_cpu(&priv->stats_report->written_count, 1);
99424aeb56fSKuo Zhao 	/* tx stats */
99524aeb56fSKuo Zhao 	if (priv->tx) {
99624aeb56fSKuo Zhao 		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
99724aeb56fSKuo Zhao 			do {
99824aeb56fSKuo Zhao 				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
99924aeb56fSKuo Zhao 				tx_bytes = priv->tx[idx].bytes_done;
100024aeb56fSKuo Zhao 			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
100124aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
100224aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_WAKE_CNT),
100324aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->tx[idx].wake_queue),
100424aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
100524aeb56fSKuo Zhao 			};
100624aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
100724aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_STOP_CNT),
100824aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->tx[idx].stop_queue),
100924aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
101024aeb56fSKuo Zhao 			};
101124aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
101224aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
101324aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->tx[idx].req),
101424aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
101524aeb56fSKuo Zhao 			};
101624aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
101724aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_BYTES_SENT),
101824aeb56fSKuo Zhao 				.value = cpu_to_be64(tx_bytes),
101924aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
102024aeb56fSKuo Zhao 			};
102124aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
102224aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
102324aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->tx[idx].done),
102424aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
102524aeb56fSKuo Zhao 			};
102624aeb56fSKuo Zhao 		}
102724aeb56fSKuo Zhao 	}
102824aeb56fSKuo Zhao 	/* rx stats */
102924aeb56fSKuo Zhao 	if (priv->rx) {
103024aeb56fSKuo Zhao 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
103124aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
103224aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
103324aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
103424aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
103524aeb56fSKuo Zhao 			};
103624aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
103724aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
103824aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->rx[0].fill_cnt),
103924aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
104024aeb56fSKuo Zhao 			};
104124aeb56fSKuo Zhao 		}
104224aeb56fSKuo Zhao 	}
104324aeb56fSKuo Zhao }
104424aeb56fSKuo Zhao 
/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     service_task);

	/* Latch any device-requested actions from the status register */
	gve_handle_status(priv,
			  ioread32be(&priv->reg_bar0->device_status));

	gve_handle_reset(priv);
}
10569e5f7d26SCatherine Sullivan 
/* Initialize per-device private state: allocate the admin queue and,
 * unless skip_describe_device, query the device for its configuration
 * (descriptors, MTU, MSI-x vectors) and size the tx/rx queue configs.
 * Callers on the reset path pass skip_describe_device = true to reuse
 * the previously negotiated configuration.
 *
 * Returns 0 on success; on failure the admin queue is freed again.
 */
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	int num_ntfy;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(&priv->pdev->dev, priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Failed to alloc admin queue: err=%d\n", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Could not get device information: err=%d\n", err);
		goto err;
	}
	/* Data buffers are one page, so cap the MTU accordingly */
	if (priv->dev->max_mtu > PAGE_SIZE) {
		priv->dev->max_mtu = PAGE_SIZE;
		err = gve_adminq_set_mtu(priv, priv->dev->mtu);
		if (err) {
			dev_err(&priv->pdev->dev, "Could not set mtu");
			goto err;
		}
	}
	priv->dev->mtu = priv->dev->max_mtu;
	num_ntfy = pci_msix_vec_count(priv->pdev);
	if (num_ntfy <= 0) {
		dev_err(&priv->pdev->dev,
			"could not count MSI-x vectors: err=%d\n", num_ntfy);
		err = num_ntfy;
		goto err;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
			GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto err;
	}

	priv->num_registered_pages = 0;
	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	/* Split the notification blocks evenly between tx and rx */
	priv->tx_cfg.max_queues =
		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
	priv->rx_cfg.max_queues =
		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);

	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->rx_cfg.num_queues);
	}

	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err:
	gve_adminq_free(&priv->pdev->dev, priv);
	return err;
}
1137893ce44dSCatherine Sullivan 
/* Release everything gve_init_priv() set up: device resources first,
 * then the admin queue.
 */
static void gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(&priv->pdev->dev, priv);
}
1143893ce44dSCatherine Sullivan 
static void gve_trigger_reset(struct gve_priv *priv)
{
	/* Reset the device by releasing the AQ */
	gve_adminq_release(priv);
}
11499e5f7d26SCatherine Sullivan 
11509e5f7d26SCatherine Sullivan static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
11519e5f7d26SCatherine Sullivan {
11529e5f7d26SCatherine Sullivan 	gve_trigger_reset(priv);
11539e5f7d26SCatherine Sullivan 	/* With the reset having already happened, close cannot fail */
11549e5f7d26SCatherine Sullivan 	if (was_up)
11559e5f7d26SCatherine Sullivan 		gve_close(priv->dev);
11569e5f7d26SCatherine Sullivan 	gve_teardown_priv_resources(priv);
11579e5f7d26SCatherine Sullivan }
11589e5f7d26SCatherine Sullivan 
11599e5f7d26SCatherine Sullivan static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
11609e5f7d26SCatherine Sullivan {
11619e5f7d26SCatherine Sullivan 	int err;
11629e5f7d26SCatherine Sullivan 
11639e5f7d26SCatherine Sullivan 	err = gve_init_priv(priv, true);
11649e5f7d26SCatherine Sullivan 	if (err)
11659e5f7d26SCatherine Sullivan 		goto err;
11669e5f7d26SCatherine Sullivan 	if (was_up) {
11679e5f7d26SCatherine Sullivan 		err = gve_open(priv->dev);
11689e5f7d26SCatherine Sullivan 		if (err)
11699e5f7d26SCatherine Sullivan 			goto err;
11709e5f7d26SCatherine Sullivan 	}
11719e5f7d26SCatherine Sullivan 	return 0;
11729e5f7d26SCatherine Sullivan err:
11739e5f7d26SCatherine Sullivan 	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
11749e5f7d26SCatherine Sullivan 	gve_turndown(priv);
11759e5f7d26SCatherine Sullivan 	return err;
11769e5f7d26SCatherine Sullivan }
11779e5f7d26SCatherine Sullivan 
11789e5f7d26SCatherine Sullivan int gve_reset(struct gve_priv *priv, bool attempt_teardown)
11799e5f7d26SCatherine Sullivan {
11809e5f7d26SCatherine Sullivan 	bool was_up = netif_carrier_ok(priv->dev);
11819e5f7d26SCatherine Sullivan 	int err;
11829e5f7d26SCatherine Sullivan 
11839e5f7d26SCatherine Sullivan 	dev_info(&priv->pdev->dev, "Performing reset\n");
11849e5f7d26SCatherine Sullivan 	gve_clear_do_reset(priv);
11859e5f7d26SCatherine Sullivan 	gve_set_reset_in_progress(priv);
11869e5f7d26SCatherine Sullivan 	/* If we aren't attempting to teardown normally, just go turndown and
11879e5f7d26SCatherine Sullivan 	 * reset right away.
11889e5f7d26SCatherine Sullivan 	 */
11899e5f7d26SCatherine Sullivan 	if (!attempt_teardown) {
11909e5f7d26SCatherine Sullivan 		gve_turndown(priv);
11919e5f7d26SCatherine Sullivan 		gve_reset_and_teardown(priv, was_up);
11929e5f7d26SCatherine Sullivan 	} else {
11939e5f7d26SCatherine Sullivan 		/* Otherwise attempt to close normally */
11949e5f7d26SCatherine Sullivan 		if (was_up) {
11959e5f7d26SCatherine Sullivan 			err = gve_close(priv->dev);
11969e5f7d26SCatherine Sullivan 			/* If that fails reset as we did above */
11979e5f7d26SCatherine Sullivan 			if (err)
11989e5f7d26SCatherine Sullivan 				gve_reset_and_teardown(priv, was_up);
11999e5f7d26SCatherine Sullivan 		}
12009e5f7d26SCatherine Sullivan 		/* Clean up any remaining resources */
12019e5f7d26SCatherine Sullivan 		gve_teardown_priv_resources(priv);
12029e5f7d26SCatherine Sullivan 	}
12039e5f7d26SCatherine Sullivan 
12049e5f7d26SCatherine Sullivan 	/* Set it all back up */
12059e5f7d26SCatherine Sullivan 	err = gve_reset_recovery(priv, was_up);
12069e5f7d26SCatherine Sullivan 	gve_clear_reset_in_progress(priv);
1207433e274bSKuo Zhao 	priv->reset_cnt++;
1208433e274bSKuo Zhao 	priv->interface_up_cnt = 0;
1209433e274bSKuo Zhao 	priv->interface_down_cnt = 0;
121024aeb56fSKuo Zhao 	priv->stats_report_trigger_cnt = 0;
12119e5f7d26SCatherine Sullivan 	return err;
12129e5f7d26SCatherine Sullivan }
12139e5f7d26SCatherine Sullivan 
1214893ce44dSCatherine Sullivan static void gve_write_version(u8 __iomem *driver_version_register)
1215893ce44dSCatherine Sullivan {
1216893ce44dSCatherine Sullivan 	const char *c = gve_version_prefix;
1217893ce44dSCatherine Sullivan 
1218893ce44dSCatherine Sullivan 	while (*c) {
1219893ce44dSCatherine Sullivan 		writeb(*c, driver_version_register);
1220893ce44dSCatherine Sullivan 		c++;
1221893ce44dSCatherine Sullivan 	}
1222893ce44dSCatherine Sullivan 
1223893ce44dSCatherine Sullivan 	c = gve_version_str;
1224893ce44dSCatherine Sullivan 	while (*c) {
1225893ce44dSCatherine Sullivan 		writeb(*c, driver_version_register);
1226893ce44dSCatherine Sullivan 		c++;
1227893ce44dSCatherine Sullivan 	}
1228893ce44dSCatherine Sullivan 	writeb('\n', driver_version_register);
1229893ce44dSCatherine Sullivan }
1230893ce44dSCatherine Sullivan 
1231893ce44dSCatherine Sullivan static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1232893ce44dSCatherine Sullivan {
1233893ce44dSCatherine Sullivan 	int max_tx_queues, max_rx_queues;
1234893ce44dSCatherine Sullivan 	struct net_device *dev;
1235893ce44dSCatherine Sullivan 	__be32 __iomem *db_bar;
1236893ce44dSCatherine Sullivan 	struct gve_registers __iomem *reg_bar;
1237893ce44dSCatherine Sullivan 	struct gve_priv *priv;
1238893ce44dSCatherine Sullivan 	int err;
1239893ce44dSCatherine Sullivan 
1240893ce44dSCatherine Sullivan 	err = pci_enable_device(pdev);
1241893ce44dSCatherine Sullivan 	if (err)
1242893ce44dSCatherine Sullivan 		return -ENXIO;
1243893ce44dSCatherine Sullivan 
1244893ce44dSCatherine Sullivan 	err = pci_request_regions(pdev, "gvnic-cfg");
1245893ce44dSCatherine Sullivan 	if (err)
1246893ce44dSCatherine Sullivan 		goto abort_with_enabled;
1247893ce44dSCatherine Sullivan 
1248893ce44dSCatherine Sullivan 	pci_set_master(pdev);
1249893ce44dSCatherine Sullivan 
1250893ce44dSCatherine Sullivan 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1251893ce44dSCatherine Sullivan 	if (err) {
1252893ce44dSCatherine Sullivan 		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1253893ce44dSCatherine Sullivan 		goto abort_with_pci_region;
1254893ce44dSCatherine Sullivan 	}
1255893ce44dSCatherine Sullivan 
1256893ce44dSCatherine Sullivan 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1257893ce44dSCatherine Sullivan 	if (err) {
1258893ce44dSCatherine Sullivan 		dev_err(&pdev->dev,
1259893ce44dSCatherine Sullivan 			"Failed to set consistent dma mask: err=%d\n", err);
1260893ce44dSCatherine Sullivan 		goto abort_with_pci_region;
1261893ce44dSCatherine Sullivan 	}
1262893ce44dSCatherine Sullivan 
1263893ce44dSCatherine Sullivan 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1264893ce44dSCatherine Sullivan 	if (!reg_bar) {
1265f5cedc84SCatherine Sullivan 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
1266893ce44dSCatherine Sullivan 		err = -ENOMEM;
1267893ce44dSCatherine Sullivan 		goto abort_with_pci_region;
1268893ce44dSCatherine Sullivan 	}
1269893ce44dSCatherine Sullivan 
1270893ce44dSCatherine Sullivan 	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1271893ce44dSCatherine Sullivan 	if (!db_bar) {
1272893ce44dSCatherine Sullivan 		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1273893ce44dSCatherine Sullivan 		err = -ENOMEM;
1274893ce44dSCatherine Sullivan 		goto abort_with_reg_bar;
1275893ce44dSCatherine Sullivan 	}
1276893ce44dSCatherine Sullivan 
1277893ce44dSCatherine Sullivan 	gve_write_version(&reg_bar->driver_version);
1278893ce44dSCatherine Sullivan 	/* Get max queues to alloc etherdev */
1279893ce44dSCatherine Sullivan 	max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
1280893ce44dSCatherine Sullivan 	max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
1281893ce44dSCatherine Sullivan 	/* Alloc and setup the netdev and priv */
1282893ce44dSCatherine Sullivan 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1283893ce44dSCatherine Sullivan 	if (!dev) {
1284893ce44dSCatherine Sullivan 		dev_err(&pdev->dev, "could not allocate netdev\n");
1285893ce44dSCatherine Sullivan 		goto abort_with_db_bar;
1286893ce44dSCatherine Sullivan 	}
1287893ce44dSCatherine Sullivan 	SET_NETDEV_DEV(dev, &pdev->dev);
1288893ce44dSCatherine Sullivan 	pci_set_drvdata(pdev, dev);
1289e5b845dcSCatherine Sullivan 	dev->ethtool_ops = &gve_ethtool_ops;
1290f5cedc84SCatherine Sullivan 	dev->netdev_ops = &gve_netdev_ops;
1291893ce44dSCatherine Sullivan 	/* advertise features */
1292893ce44dSCatherine Sullivan 	dev->hw_features = NETIF_F_HIGHDMA;
1293893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_SG;
1294893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_HW_CSUM;
1295893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_TSO;
1296893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_TSO6;
1297893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_TSO_ECN;
1298893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_RXCSUM;
1299893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_RXHASH;
1300893ce44dSCatherine Sullivan 	dev->features = dev->hw_features;
1301f5cedc84SCatherine Sullivan 	dev->watchdog_timeo = 5 * HZ;
1302893ce44dSCatherine Sullivan 	dev->min_mtu = ETH_MIN_MTU;
1303893ce44dSCatherine Sullivan 	netif_carrier_off(dev);
1304893ce44dSCatherine Sullivan 
1305893ce44dSCatherine Sullivan 	priv = netdev_priv(dev);
1306893ce44dSCatherine Sullivan 	priv->dev = dev;
1307893ce44dSCatherine Sullivan 	priv->pdev = pdev;
1308893ce44dSCatherine Sullivan 	priv->msg_enable = DEFAULT_MSG_LEVEL;
1309893ce44dSCatherine Sullivan 	priv->reg_bar0 = reg_bar;
1310893ce44dSCatherine Sullivan 	priv->db_bar2 = db_bar;
13119e5f7d26SCatherine Sullivan 	priv->service_task_flags = 0x0;
1312893ce44dSCatherine Sullivan 	priv->state_flags = 0x0;
131324aeb56fSKuo Zhao 	priv->ethtool_flags = 0x0;
13149e5f7d26SCatherine Sullivan 
13159e5f7d26SCatherine Sullivan 	gve_set_probe_in_progress(priv);
13169e5f7d26SCatherine Sullivan 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
13179e5f7d26SCatherine Sullivan 	if (!priv->gve_wq) {
13189e5f7d26SCatherine Sullivan 		dev_err(&pdev->dev, "Could not allocate workqueue");
13199e5f7d26SCatherine Sullivan 		err = -ENOMEM;
13209e5f7d26SCatherine Sullivan 		goto abort_with_netdev;
13219e5f7d26SCatherine Sullivan 	}
13229e5f7d26SCatherine Sullivan 	INIT_WORK(&priv->service_task, gve_service_task);
132324aeb56fSKuo Zhao 	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1324f5cedc84SCatherine Sullivan 	priv->tx_cfg.max_queues = max_tx_queues;
1325f5cedc84SCatherine Sullivan 	priv->rx_cfg.max_queues = max_rx_queues;
1326893ce44dSCatherine Sullivan 
1327893ce44dSCatherine Sullivan 	err = gve_init_priv(priv, false);
1328893ce44dSCatherine Sullivan 	if (err)
13299e5f7d26SCatherine Sullivan 		goto abort_with_wq;
1330893ce44dSCatherine Sullivan 
1331893ce44dSCatherine Sullivan 	err = register_netdev(dev);
1332893ce44dSCatherine Sullivan 	if (err)
13339e5f7d26SCatherine Sullivan 		goto abort_with_wq;
1334893ce44dSCatherine Sullivan 
1335893ce44dSCatherine Sullivan 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
13369e5f7d26SCatherine Sullivan 	gve_clear_probe_in_progress(priv);
13379e5f7d26SCatherine Sullivan 	queue_work(priv->gve_wq, &priv->service_task);
1338893ce44dSCatherine Sullivan 	return 0;
1339893ce44dSCatherine Sullivan 
13409e5f7d26SCatherine Sullivan abort_with_wq:
13419e5f7d26SCatherine Sullivan 	destroy_workqueue(priv->gve_wq);
13429e5f7d26SCatherine Sullivan 
1343893ce44dSCatherine Sullivan abort_with_netdev:
1344893ce44dSCatherine Sullivan 	free_netdev(dev);
1345893ce44dSCatherine Sullivan 
1346893ce44dSCatherine Sullivan abort_with_db_bar:
1347893ce44dSCatherine Sullivan 	pci_iounmap(pdev, db_bar);
1348893ce44dSCatherine Sullivan 
1349893ce44dSCatherine Sullivan abort_with_reg_bar:
1350893ce44dSCatherine Sullivan 	pci_iounmap(pdev, reg_bar);
1351893ce44dSCatherine Sullivan 
1352893ce44dSCatherine Sullivan abort_with_pci_region:
1353893ce44dSCatherine Sullivan 	pci_release_regions(pdev);
1354893ce44dSCatherine Sullivan 
1355893ce44dSCatherine Sullivan abort_with_enabled:
1356893ce44dSCatherine Sullivan 	pci_disable_device(pdev);
1357893ce44dSCatherine Sullivan 	return -ENXIO;
1358893ce44dSCatherine Sullivan }
1359893ce44dSCatherine Sullivan 
1360893ce44dSCatherine Sullivan static void gve_remove(struct pci_dev *pdev)
1361893ce44dSCatherine Sullivan {
1362893ce44dSCatherine Sullivan 	struct net_device *netdev = pci_get_drvdata(pdev);
1363893ce44dSCatherine Sullivan 	struct gve_priv *priv = netdev_priv(netdev);
1364893ce44dSCatherine Sullivan 	__be32 __iomem *db_bar = priv->db_bar2;
1365893ce44dSCatherine Sullivan 	void __iomem *reg_bar = priv->reg_bar0;
1366893ce44dSCatherine Sullivan 
1367893ce44dSCatherine Sullivan 	unregister_netdev(netdev);
1368893ce44dSCatherine Sullivan 	gve_teardown_priv_resources(priv);
13699e5f7d26SCatherine Sullivan 	destroy_workqueue(priv->gve_wq);
1370893ce44dSCatherine Sullivan 	free_netdev(netdev);
1371893ce44dSCatherine Sullivan 	pci_iounmap(pdev, db_bar);
1372893ce44dSCatherine Sullivan 	pci_iounmap(pdev, reg_bar);
1373893ce44dSCatherine Sullivan 	pci_release_regions(pdev);
1374893ce44dSCatherine Sullivan 	pci_disable_device(pdev);
1375893ce44dSCatherine Sullivan }
1376893ce44dSCatherine Sullivan 
1377893ce44dSCatherine Sullivan static const struct pci_device_id gve_id_table[] = {
1378893ce44dSCatherine Sullivan 	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1379893ce44dSCatherine Sullivan 	{ }
1380893ce44dSCatherine Sullivan };
1381893ce44dSCatherine Sullivan 
1382893ce44dSCatherine Sullivan static struct pci_driver gvnic_driver = {
1383893ce44dSCatherine Sullivan 	.name		= "gvnic",
1384893ce44dSCatherine Sullivan 	.id_table	= gve_id_table,
1385893ce44dSCatherine Sullivan 	.probe		= gve_probe,
1386893ce44dSCatherine Sullivan 	.remove		= gve_remove,
1387893ce44dSCatherine Sullivan };
1388893ce44dSCatherine Sullivan 
1389893ce44dSCatherine Sullivan module_pci_driver(gvnic_driver);
1390893ce44dSCatherine Sullivan 
1391893ce44dSCatherine Sullivan MODULE_DEVICE_TABLE(pci, gve_id_table);
1392893ce44dSCatherine Sullivan MODULE_AUTHOR("Google, Inc.");
1393893ce44dSCatherine Sullivan MODULE_DESCRIPTION("gVNIC Driver");
1394893ce44dSCatherine Sullivan MODULE_LICENSE("Dual MIT/GPL");
1395893ce44dSCatherine Sullivan MODULE_VERSION(GVE_VERSION);
1396