xref: /linux/drivers/net/ethernet/google/gve/gve_main.c (revision 860a9bed265146b10311bcadbbcef59c3af4454d)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #include <linux/bpf.h>
8 #include <linux/cpumask.h>
9 #include <linux/etherdevice.h>
10 #include <linux/filter.h>
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/sched.h>
15 #include <linux/timer.h>
16 #include <linux/workqueue.h>
17 #include <linux/utsname.h>
18 #include <linux/version.h>
19 #include <net/sch_generic.h>
20 #include <net/xdp_sock_drv.h>
21 #include "gve.h"
22 #include "gve_dqo.h"
23 #include "gve_adminq.h"
24 #include "gve_register.h"
25 #include "gve_utils.h"
26 
27 #define GVE_DEFAULT_RX_COPYBREAK	(256)
28 
29 #define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
30 #define GVE_VERSION		"1.0.0"
31 #define GVE_VERSION_PREFIX	"GVE-"
32 
33 // Minimum amount of time between queue kicks in msec (10 seconds)
34 #define MIN_TX_TIMEOUT_GAP (1000 * 10)
35 
36 char gve_driver_name[] = "gve";
37 const char gve_version_str[] = GVE_VERSION;
38 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
39 
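/* Share driver/OS version information with the device so it can check
 * compatibility: the info is written into a DMA-coherent buffer and
 * handed to the device over the admin queue. Devices that predate this
 * command return -EOPNOTSUPP, which is treated as success.
 */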
40 static int gve_verify_driver_compatibility(struct gve_priv *priv)
41 {
42 	int err;
43 	struct gve_driver_info *driver_info;
44 	dma_addr_t driver_info_bus;
45 
46 	driver_info = dma_alloc_coherent(&priv->pdev->dev,
47 					 sizeof(struct gve_driver_info),
48 					 &driver_info_bus, GFP_KERNEL);
49 	if (!driver_info)
50 		return -ENOMEM;
51 
52 	*driver_info = (struct gve_driver_info) {
53 		.os_type = 1, /* Linux */
54 		.os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR),
55 		.os_version_minor = cpu_to_be32(LINUX_VERSION_SUBLEVEL),
56 		.os_version_sub = cpu_to_be32(LINUX_VERSION_PATCHLEVEL),
57 		.driver_capability_flags = {
58 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
59 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
60 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
61 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
62 		},
63 	};
64 	strscpy(driver_info->os_version_str1, utsname()->release,
65 		sizeof(driver_info->os_version_str1));
66 	strscpy(driver_info->os_version_str2, utsname()->version,
67 		sizeof(driver_info->os_version_str2));
68 
69 	err = gve_adminq_verify_driver_compatibility(priv,
70 						     sizeof(struct gve_driver_info),
71 						     driver_info_bus);
72 
73 	/* It's ok if the device doesn't support this */
74 	if (err == -EOPNOTSUPP)
75 		err = 0;
76 
77 	dma_free_coherent(&priv->pdev->dev,
78 			  sizeof(struct gve_driver_info),
79 			  driver_info, driver_info_bus);
80 	return err;
81 }
82 
83 static netdev_features_t gve_features_check(struct sk_buff *skb,
84 					    struct net_device *dev,
85 					    netdev_features_t features)
86 {
87 	struct gve_priv *priv = netdev_priv(dev);
88 
89 	if (!gve_is_gqi(priv))
90 		return gve_features_check_dqo(skb, dev, features);
91 
92 	return features;
93 }
94 
95 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
96 {
97 	struct gve_priv *priv = netdev_priv(dev);
98 
99 	if (gve_is_gqi(priv))
100 		return gve_tx(skb, dev);
101 	else
102 		return gve_tx_dqo(skb, dev);
103 }
104 
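/* Aggregate per-ring packet and byte counters into rtnl_link_stats64.
 * Each ring's counters are sampled under a u64_stats retry loop so a
 * concurrent writer cannot produce a torn read.
 */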
105 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
106 {
107 	struct gve_priv *priv = netdev_priv(dev);
108 	unsigned int start;
109 	u64 packets, bytes;
110 	int num_tx_queues;
111 	int ring;
112 
113 	num_tx_queues = gve_num_tx_queues(priv);
114 	if (priv->rx) {
115 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
116 			do {
117 				start =
118 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
119 				packets = priv->rx[ring].rpackets;
120 				bytes = priv->rx[ring].rbytes;
121 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
122 						       start));
123 			s->rx_packets += packets;
124 			s->rx_bytes += bytes;
125 		}
126 	}
127 	if (priv->tx) {
128 		for (ring = 0; ring < num_tx_queues; ring++) {
129 			do {
130 				start =
131 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
132 				packets = priv->tx[ring].pkt_done;
133 				bytes = priv->tx[ring].bytes_done;
134 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
135 						       start));
136 			s->tx_packets += packets;
137 			s->tx_bytes += bytes;
138 		}
139 	}
140 }
141 
142 static int gve_alloc_counter_array(struct gve_priv *priv)
143 {
144 	priv->counter_array =
145 		dma_alloc_coherent(&priv->pdev->dev,
146 				   priv->num_event_counters *
147 				   sizeof(*priv->counter_array),
148 				   &priv->counter_array_bus, GFP_KERNEL);
149 	if (!priv->counter_array)
150 		return -ENOMEM;
151 
152 	return 0;
153 }
154 
155 static void gve_free_counter_array(struct gve_priv *priv)
156 {
157 	if (!priv->counter_array)
158 		return;
159 
160 	dma_free_coherent(&priv->pdev->dev,
161 			  priv->num_event_counters *
162 			  sizeof(*priv->counter_array),
163 			  priv->counter_array, priv->counter_array_bus);
164 	priv->counter_array = NULL;
165 }
166 
167 /* NIC requests to report stats */
168 static void gve_stats_report_task(struct work_struct *work)
169 {
170 	struct gve_priv *priv = container_of(work, struct gve_priv,
171 					     stats_report_task);
172 	if (gve_get_do_report_stats(priv)) {
173 		gve_handle_report_stats(priv);
174 		gve_clear_do_report_stats(priv);
175 	}
176 }
177 
178 static void gve_stats_report_schedule(struct gve_priv *priv)
179 {
180 	if (!gve_get_probe_in_progress(priv) &&
181 	    !gve_get_reset_in_progress(priv)) {
182 		gve_set_do_report_stats(priv);
183 		queue_work(priv->gve_wq, &priv->stats_report_task);
184 	}
185 }
186 
187 static void gve_stats_report_timer(struct timer_list *t)
188 {
189 	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
190 
191 	mod_timer(&priv->stats_report_timer,
192 		  round_jiffies(jiffies +
193 		  msecs_to_jiffies(priv->stats_report_timer_period)));
194 	gve_stats_report_schedule(priv);
195 }
196 
197 static int gve_alloc_stats_report(struct gve_priv *priv)
198 {
199 	int tx_stats_num, rx_stats_num;
200 
201 	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
202 		       gve_num_tx_queues(priv);
203 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
204 		       priv->rx_cfg.num_queues;
205 	priv->stats_report_len = struct_size(priv->stats_report, stats,
206 					     size_add(tx_stats_num, rx_stats_num));
207 	priv->stats_report =
208 		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
209 				   &priv->stats_report_bus, GFP_KERNEL);
210 	if (!priv->stats_report)
211 		return -ENOMEM;
212 	/* Set up timer for the report-stats task */
213 	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
214 	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
215 	return 0;
216 }
217 
218 static void gve_free_stats_report(struct gve_priv *priv)
219 {
220 	if (!priv->stats_report)
221 		return;
222 
223 	del_timer_sync(&priv->stats_report_timer);
224 	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
225 			  priv->stats_report, priv->stats_report_bus);
226 	priv->stats_report = NULL;
227 }
228 
229 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
230 {
231 	struct gve_priv *priv = arg;
232 
233 	queue_work(priv->gve_wq, &priv->service_task);
234 	return IRQ_HANDLED;
235 }
236 
237 static irqreturn_t gve_intr(int irq, void *arg)
238 {
239 	struct gve_notify_block *block = arg;
240 	struct gve_priv *priv = block->priv;
241 
242 	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
243 	napi_schedule_irqoff(&block->napi);
244 	return IRQ_HANDLED;
245 }
246 
247 static irqreturn_t gve_intr_dqo(int irq, void *arg)
248 {
249 	struct gve_notify_block *block = arg;
250 
251 	/* Interrupts are automatically masked */
252 	napi_schedule_irqoff(&block->napi);
253 	return IRQ_HANDLED;
254 }
255 
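/* NAPI poll handler for the GQI queue formats: clean TX completions,
 * then process up to @budget RX descriptors. Once all work is done,
 * re-arm the interrupt by acking the IRQ doorbell and double-check for
 * work that raced with the ack, rescheduling NAPI if any is found.
 */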
256 int gve_napi_poll(struct napi_struct *napi, int budget)
257 {
258 	struct gve_notify_block *block;
259 	__be32 __iomem *irq_doorbell;
260 	bool reschedule = false;
261 	struct gve_priv *priv;
262 	int work_done = 0;
263 
264 	block = container_of(napi, struct gve_notify_block, napi);
265 	priv = block->priv;
266 
267 	if (block->tx) {
268 		if (block->tx->q_num < priv->tx_cfg.num_queues)
269 			reschedule |= gve_tx_poll(block, budget);
270 		else if (budget)
271 			reschedule |= gve_xdp_poll(block, budget);
272 	}
273 
274 	if (!budget)
275 		return 0;
276 
277 	if (block->rx) {
278 		work_done = gve_rx_poll(block, budget);
279 		reschedule |= work_done == budget;
280 	}
281 
282 	if (reschedule)
283 		return budget;
284 
285 	/* Complete processing - don't unmask irq if busy polling is enabled */
286 	if (likely(napi_complete_done(napi, work_done))) {
287 		irq_doorbell = gve_irq_doorbell(priv, block);
288 		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
289 
290 		/* Ensure the IRQ ACK is visible before we check for pending work.
291 		 * If the queue had issued any updates, they will be visible now.
292 		 */
293 		mb();
294 
295 		if (block->tx)
296 			reschedule |= gve_tx_clean_pending(priv, block->tx);
297 		if (block->rx)
298 			reschedule |= gve_rx_work_pending(block->rx);
299 
300 		if (reschedule && napi_schedule(napi))
301 			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
302 	}
303 	return work_done;
304 }
305 
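/* NAPI poll handler for the DQO queue format. Interrupts were masked
 * automatically in the IRQ handler, so completing NAPI only needs to
 * re-enable them through the IRQ doorbell; the MSI-X PBA guarantees a
 * new interrupt fires if an event arrived in the meantime.
 */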
306 int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
307 {
308 	struct gve_notify_block *block =
309 		container_of(napi, struct gve_notify_block, napi);
310 	struct gve_priv *priv = block->priv;
311 	bool reschedule = false;
312 	int work_done = 0;
313 
314 	if (block->tx)
315 		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
316 
317 	if (!budget)
318 		return 0;
319 
320 	if (block->rx) {
321 		work_done = gve_rx_poll_dqo(block, budget);
322 		reschedule |= work_done == budget;
323 	}
324 
325 	if (reschedule)
326 		return budget;
327 
328 	if (likely(napi_complete_done(napi, work_done))) {
329 		/* Enable interrupts again.
330 		 *
331 		 * We don't need to repoll afterwards because HW supports the
332 		 * PCI MSI-X PBA feature.
333 		 *
334 		 * Another interrupt would be triggered if a new event came in
335 		 * since the last one.
336 		 */
337 		gve_write_irq_doorbell_dqo(priv, block,
338 					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
339 	}
340 
341 	return work_done;
342 }
343 
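/* Allocate MSI-X vectors and notification blocks: one vector per block
 * plus one management vector. If fewer vectors are granted than were
 * requested, split the remainder evenly between TX and RX and shrink
 * the queue maxima to match. IRQ affinity hints are spread round-robin
 * across the active CPUs.
 */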
344 static int gve_alloc_notify_blocks(struct gve_priv *priv)
345 {
346 	int num_vecs_requested = priv->num_ntfy_blks + 1;
347 	unsigned int active_cpus;
348 	int vecs_enabled;
349 	int i, j;
350 	int err;
351 
352 	priv->msix_vectors = kvcalloc(num_vecs_requested,
353 				      sizeof(*priv->msix_vectors), GFP_KERNEL);
354 	if (!priv->msix_vectors)
355 		return -ENOMEM;
356 	for (i = 0; i < num_vecs_requested; i++)
357 		priv->msix_vectors[i].entry = i;
358 	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
359 					     GVE_MIN_MSIX, num_vecs_requested);
360 	if (vecs_enabled < 0) {
361 		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
362 			GVE_MIN_MSIX, vecs_enabled);
363 		err = vecs_enabled;
364 		goto abort_with_msix_vectors;
365 	}
366 	if (vecs_enabled != num_vecs_requested) {
367 		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
368 		int vecs_per_type = new_num_ntfy_blks / 2;
369 		int vecs_left = new_num_ntfy_blks % 2;
370 
371 		priv->num_ntfy_blks = new_num_ntfy_blks;
372 		priv->mgmt_msix_idx = priv->num_ntfy_blks;
373 		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
374 						vecs_per_type);
375 		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
376 						vecs_per_type + vecs_left);
377 		dev_err(&priv->pdev->dev,
378 			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
379 			vecs_enabled, priv->tx_cfg.max_queues,
380 			priv->rx_cfg.max_queues);
381 		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
382 			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
383 		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
384 			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
385 	}
386 	/* Half the notification blocks go to TX and half to RX */
387 	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
388 
389 	/* Set up the management vector - the last vector */
390 	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
391 		 pci_name(priv->pdev));
392 	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
393 			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
394 	if (err) {
395 		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
396 		goto abort_with_msix_enabled;
397 	}
398 	priv->irq_db_indices =
399 		dma_alloc_coherent(&priv->pdev->dev,
400 				   priv->num_ntfy_blks *
401 				   sizeof(*priv->irq_db_indices),
402 				   &priv->irq_db_indices_bus, GFP_KERNEL);
403 	if (!priv->irq_db_indices) {
404 		err = -ENOMEM;
405 		goto abort_with_mgmt_vector;
406 	}
407 
408 	priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
409 				     sizeof(*priv->ntfy_blocks), GFP_KERNEL);
410 	if (!priv->ntfy_blocks) {
411 		err = -ENOMEM;
412 		goto abort_with_irq_db_indices;
413 	}
414 
415 	/* Set up the other blocks - the first n-1 vectors */
416 	for (i = 0; i < priv->num_ntfy_blks; i++) {
417 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
418 		int msix_idx = i;
419 
420 		snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
421 			 i, pci_name(priv->pdev));
422 		block->priv = priv;
423 		err = request_irq(priv->msix_vectors[msix_idx].vector,
424 				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
425 				  0, block->name, block);
426 		if (err) {
427 			dev_err(&priv->pdev->dev,
428 				"Failed to receive msix vector %d\n", i);
429 			goto abort_with_some_ntfy_blocks;
430 		}
431 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
432 				      get_cpu_mask(i % active_cpus));
433 		block->irq_db_index = &priv->irq_db_indices[i].index;
434 	}
435 	return 0;
436 abort_with_some_ntfy_blocks:
437 	for (j = 0; j < i; j++) {
438 		struct gve_notify_block *block = &priv->ntfy_blocks[j];
439 		int msix_idx = j;
440 
441 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
442 				      NULL);
443 		free_irq(priv->msix_vectors[msix_idx].vector, block);
444 	}
445 	kvfree(priv->ntfy_blocks);
446 	priv->ntfy_blocks = NULL;
447 abort_with_irq_db_indices:
448 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
449 			  sizeof(*priv->irq_db_indices),
450 			  priv->irq_db_indices, priv->irq_db_indices_bus);
451 	priv->irq_db_indices = NULL;
452 abort_with_mgmt_vector:
453 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
454 abort_with_msix_enabled:
455 	pci_disable_msix(priv->pdev);
456 abort_with_msix_vectors:
457 	kvfree(priv->msix_vectors);
458 	priv->msix_vectors = NULL;
459 	return err;
460 }
461 
462 static void gve_free_notify_blocks(struct gve_priv *priv)
463 {
464 	int i;
465 
466 	if (!priv->msix_vectors)
467 		return;
468 
469 	/* Free the irqs */
470 	for (i = 0; i < priv->num_ntfy_blks; i++) {
471 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
472 		int msix_idx = i;
473 
474 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
475 				      NULL);
476 		free_irq(priv->msix_vectors[msix_idx].vector, block);
477 	}
478 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
479 	kvfree(priv->ntfy_blocks);
480 	priv->ntfy_blocks = NULL;
481 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
482 			  sizeof(*priv->irq_db_indices),
483 			  priv->irq_db_indices, priv->irq_db_indices_bus);
484 	priv->irq_db_indices = NULL;
485 	pci_disable_msix(priv->pdev);
486 	kvfree(priv->msix_vectors);
487 	priv->msix_vectors = NULL;
488 }
489 
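/* Allocate the event counter array, notification blocks and stats
 * report, then describe them to the device over the admin queue and
 * register the stats report. For DQO formats, also fetch the packet
 * type lookup table.
 */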
490 static int gve_setup_device_resources(struct gve_priv *priv)
491 {
492 	int err;
493 
494 	err = gve_alloc_counter_array(priv);
495 	if (err)
496 		return err;
497 	err = gve_alloc_notify_blocks(priv);
498 	if (err)
499 		goto abort_with_counter;
500 	err = gve_alloc_stats_report(priv);
501 	if (err)
502 		goto abort_with_ntfy_blocks;
503 	err = gve_adminq_configure_device_resources(priv,
504 						    priv->counter_array_bus,
505 						    priv->num_event_counters,
506 						    priv->irq_db_indices_bus,
507 						    priv->num_ntfy_blks);
508 	if (unlikely(err)) {
509 		dev_err(&priv->pdev->dev,
510 			"could not setup device_resources: err=%d\n", err);
511 		err = -ENXIO;
512 		goto abort_with_stats_report;
513 	}
514 
515 	if (!gve_is_gqi(priv)) {
516 		priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
517 					       GFP_KERNEL);
518 		if (!priv->ptype_lut_dqo) {
519 			err = -ENOMEM;
520 			goto abort_with_stats_report;
521 		}
522 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
523 		if (err) {
524 			dev_err(&priv->pdev->dev,
525 				"Failed to get ptype map: err=%d\n", err);
526 			goto abort_with_ptype_lut;
527 		}
528 	}
529 
530 	err = gve_adminq_report_stats(priv, priv->stats_report_len,
531 				      priv->stats_report_bus,
532 				      GVE_STATS_REPORT_TIMER_PERIOD);
533 	if (err)
534 		dev_err(&priv->pdev->dev,
535 			"Failed to report stats: err=%d\n", err);
536 	gve_set_device_resources_ok(priv);
537 	return 0;
538 
539 abort_with_ptype_lut:
540 	kvfree(priv->ptype_lut_dqo);
541 	priv->ptype_lut_dqo = NULL;
542 abort_with_stats_report:
543 	gve_free_stats_report(priv);
544 abort_with_ntfy_blocks:
545 	gve_free_notify_blocks(priv);
546 abort_with_counter:
547 	gve_free_counter_array(priv);
548 
549 	return err;
550 }
551 
552 static void gve_trigger_reset(struct gve_priv *priv);
553 
554 static void gve_teardown_device_resources(struct gve_priv *priv)
555 {
556 	int err;
557 
558 	/* Tell device its resources are being freed */
559 	if (gve_get_device_resources_ok(priv)) {
560 		/* detach the stats report */
561 		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
562 		if (err) {
563 			dev_err(&priv->pdev->dev,
564 				"Failed to detach stats report: err=%d\n", err);
565 			gve_trigger_reset(priv);
566 		}
567 		err = gve_adminq_deconfigure_device_resources(priv);
568 		if (err) {
569 			dev_err(&priv->pdev->dev,
570 				"Could not deconfigure device resources: err=%d\n",
571 				err);
572 			gve_trigger_reset(priv);
573 		}
574 	}
575 
576 	kvfree(priv->ptype_lut_dqo);
577 	priv->ptype_lut_dqo = NULL;
578 
579 	gve_free_counter_array(priv);
580 	gve_free_notify_blocks(priv);
581 	gve_free_stats_report(priv);
582 	gve_clear_device_resources_ok(priv);
583 }
584 
585 static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
586 {
587 	int err;
588 
589 	err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
590 	if (err) {
591 		netif_err(priv, drv, priv->dev,
592 			  "Failed to unregister queue page list %d\n",
593 			  priv->qpls[i].id);
594 		return err;
595 	}
596 
597 	priv->num_registered_pages -= priv->qpls[i].num_entries;
598 	return 0;
599 }
600 
601 static int gve_register_qpl(struct gve_priv *priv, u32 i)
602 {
603 	int num_rx_qpls;
604 	int pages;
605 	int err;
606 
607 	/* Rx QPLs follow Tx QPLs in the priv->qpls array. */
608 	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
609 	if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
610 		netif_err(priv, drv, priv->dev,
611 			  "Cannot register nonexistent QPL at index %d\n", i);
612 		return -EINVAL;
613 	}
614 
615 	pages = priv->qpls[i].num_entries;
616 
617 	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
618 		netif_err(priv, drv, priv->dev,
619 			  "Reached max number of registered pages %llu > %llu\n",
620 			  pages + priv->num_registered_pages,
621 			  priv->max_registered_pages);
622 		return -EINVAL;
623 	}
624 
625 	err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
626 	if (err) {
627 		netif_err(priv, drv, priv->dev,
628 			  "failed to register queue page list %d\n",
629 			  priv->qpls[i].id);
630 		/* This failure will trigger a reset - no need to clean
631 		 * up
632 		 */
633 		return err;
634 	}
635 
636 	priv->num_registered_pages += pages;
637 	return 0;
638 }
639 
640 static int gve_register_xdp_qpls(struct gve_priv *priv)
641 {
642 	int start_id;
643 	int err;
644 	int i;
645 
646 	start_id = gve_xdp_tx_start_queue_id(priv);
647 	for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
648 		err = gve_register_qpl(priv, i);
649 		/* This failure will trigger a reset - no need to clean up */
650 		if (err)
651 			return err;
652 	}
653 	return 0;
654 }
655 
656 static int gve_register_qpls(struct gve_priv *priv)
657 {
658 	int num_tx_qpls, num_rx_qpls;
659 	int start_id;
660 	int err;
661 	int i;
662 
663 	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
664 				      gve_is_qpl(priv));
665 	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
666 
667 	for (i = 0; i < num_tx_qpls; i++) {
668 		err = gve_register_qpl(priv, i);
669 		if (err)
670 			return err;
671 	}
672 
673 	/* there might be a gap between the tx and rx qpl ids */
674 	start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
675 	for (i = 0; i < num_rx_qpls; i++) {
676 		err = gve_register_qpl(priv, start_id + i);
677 		if (err)
678 			return err;
679 	}
680 
681 	return 0;
682 }
683 
684 static int gve_unregister_xdp_qpls(struct gve_priv *priv)
685 {
686 	int start_id;
687 	int err;
688 	int i;
689 
690 	start_id = gve_xdp_tx_start_queue_id(priv);
691 	for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
692 		err = gve_unregister_qpl(priv, i);
693 		/* This failure will trigger a reset - no need to clean up */
694 		if (err)
695 			return err;
696 	}
697 	return 0;
698 }
699 
700 static int gve_unregister_qpls(struct gve_priv *priv)
701 {
702 	int num_tx_qpls, num_rx_qpls;
703 	int start_id;
704 	int err;
705 	int i;
706 
707 	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
708 				      gve_is_qpl(priv));
709 	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
710 
711 	for (i = 0; i < num_tx_qpls; i++) {
712 		err = gve_unregister_qpl(priv, i);
713 		/* This failure will trigger a reset - no need to clean up */
714 		if (err)
715 			return err;
716 	}
717 
718 	start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
719 	for (i = 0; i < num_rx_qpls; i++) {
720 		err = gve_unregister_qpl(priv, start_id + i);
721 		/* This failure will trigger a reset - no need to clean up */
722 		if (err)
723 			return err;
724 	}
725 	return 0;
726 }
727 
728 static int gve_create_xdp_rings(struct gve_priv *priv)
729 {
730 	int err;
731 
732 	err = gve_adminq_create_tx_queues(priv,
733 					  gve_xdp_tx_start_queue_id(priv),
734 					  priv->num_xdp_queues);
735 	if (err) {
736 		netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
737 			  priv->num_xdp_queues);
738 		/* This failure will trigger a reset - no need to clean
739 		 * up
740 		 */
741 		return err;
742 	}
743 	netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
744 		  priv->num_xdp_queues);
745 
746 	return 0;
747 }
748 
749 static int gve_create_rings(struct gve_priv *priv)
750 {
751 	int num_tx_queues = gve_num_tx_queues(priv);
752 	int err;
753 	int i;
754 
755 	err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
756 	if (err) {
757 		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
758 			  num_tx_queues);
759 		/* This failure will trigger a reset - no need to clean
760 		 * up
761 		 */
762 		return err;
763 	}
764 	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
765 		  num_tx_queues);
766 
767 	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
768 	if (err) {
769 		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
770 			  priv->rx_cfg.num_queues);
771 		/* This failure will trigger a reset - no need to clean
772 		 * up
773 		 */
774 		return err;
775 	}
776 	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
777 		  priv->rx_cfg.num_queues);
778 
779 	if (gve_is_gqi(priv)) {
780 		/* Rx data ring has been prefilled with packet buffers at queue
781 		 * allocation time.
782 		 *
783 		 * Write the doorbell to provide descriptor slots and packet
784 		 * buffers to the NIC.
785 		 */
786 		for (i = 0; i < priv->rx_cfg.num_queues; i++)
787 			gve_rx_write_doorbell(priv, &priv->rx[i]);
788 	} else {
789 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
790 			/* Post buffers and ring doorbell. */
791 			gve_rx_post_buffers_dqo(&priv->rx[i]);
792 		}
793 	}
794 
795 	return 0;
796 }
797 
798 static void init_xdp_sync_stats(struct gve_priv *priv)
799 {
800 	int start_id = gve_xdp_tx_start_queue_id(priv);
801 	int i;
802 
803 	/* Init stats */
804 	for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
805 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
806 
807 		u64_stats_init(&priv->tx[i].statss);
808 		priv->tx[i].ntfy_id = ntfy_idx;
809 	}
810 }
811 
812 static void gve_init_sync_stats(struct gve_priv *priv)
813 {
814 	int i;
815 
816 	for (i = 0; i < priv->tx_cfg.num_queues; i++)
817 		u64_stats_init(&priv->tx[i].statss);
818 
819 	/* Init stats for XDP TX queues */
820 	init_xdp_sync_stats(priv);
821 
822 	for (i = 0; i < priv->rx_cfg.num_queues; i++)
823 		u64_stats_init(&priv->rx[i].statss);
824 }
825 
826 static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
827 				      struct gve_tx_alloc_rings_cfg *cfg)
828 {
829 	cfg->qcfg = &priv->tx_cfg;
830 	cfg->raw_addressing = !gve_is_qpl(priv);
831 	cfg->qpls = priv->qpls;
832 	cfg->qpl_cfg = &priv->qpl_cfg;
833 	cfg->ring_size = priv->tx_desc_cnt;
834 	cfg->start_idx = 0;
835 	cfg->num_rings = gve_num_tx_queues(priv);
836 	cfg->tx = priv->tx;
837 }
838 
839 static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
840 {
841 	int i;
842 
843 	if (!priv->tx)
844 		return;
845 
846 	for (i = start_id; i < start_id + num_rings; i++) {
847 		if (gve_is_gqi(priv))
848 			gve_tx_stop_ring_gqi(priv, i);
849 		else
850 			gve_tx_stop_ring_dqo(priv, i);
851 	}
852 }
853 
854 static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
855 			       int num_rings)
856 {
857 	int i;
858 
859 	for (i = start_id; i < start_id + num_rings; i++) {
860 		if (gve_is_gqi(priv))
861 			gve_tx_start_ring_gqi(priv, i);
862 		else
863 			gve_tx_start_ring_dqo(priv, i);
864 	}
865 }
866 
867 static int gve_alloc_xdp_rings(struct gve_priv *priv)
868 {
869 	struct gve_tx_alloc_rings_cfg cfg = {0};
870 	int err = 0;
871 
872 	if (!priv->num_xdp_queues)
873 		return 0;
874 
875 	gve_tx_get_curr_alloc_cfg(priv, &cfg);
876 	cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
877 	cfg.num_rings = priv->num_xdp_queues;
878 
879 	err = gve_tx_alloc_rings_gqi(priv, &cfg);
880 	if (err)
881 		return err;
882 
883 	gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
884 	init_xdp_sync_stats(priv);
885 
886 	return 0;
887 }
888 
889 static int gve_alloc_rings(struct gve_priv *priv,
890 			   struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
891 			   struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
892 {
893 	int err;
894 
895 	if (gve_is_gqi(priv))
896 		err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
897 	else
898 		err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
899 	if (err)
900 		return err;
901 
902 	if (gve_is_gqi(priv))
903 		err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
904 	else
905 		err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
906 	if (err)
907 		goto free_tx;
908 
909 	return 0;
910 
911 free_tx:
912 	if (gve_is_gqi(priv))
913 		gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
914 	else
915 		gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
916 	return err;
917 }
918 
919 static int gve_destroy_xdp_rings(struct gve_priv *priv)
920 {
921 	int start_id;
922 	int err;
923 
924 	start_id = gve_xdp_tx_start_queue_id(priv);
925 	err = gve_adminq_destroy_tx_queues(priv,
926 					   start_id,
927 					   priv->num_xdp_queues);
928 	if (err) {
929 		netif_err(priv, drv, priv->dev,
930 			  "failed to destroy XDP queues\n");
931 		/* This failure will trigger a reset - no need to clean up */
932 		return err;
933 	}
934 	netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
935 
936 	return 0;
937 }
938 
939 static int gve_destroy_rings(struct gve_priv *priv)
940 {
941 	int num_tx_queues = gve_num_tx_queues(priv);
942 	int err;
943 
944 	err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
945 	if (err) {
946 		netif_err(priv, drv, priv->dev,
947 			  "failed to destroy tx queues\n");
948 		/* This failure will trigger a reset - no need to clean up */
949 		return err;
950 	}
951 	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
952 	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
953 	if (err) {
954 		netif_err(priv, drv, priv->dev,
955 			  "failed to destroy rx queues\n");
956 		/* This failure will trigger a reset - no need to clean up */
957 		return err;
958 	}
959 	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
960 	return 0;
961 }
962 
963 static void gve_free_xdp_rings(struct gve_priv *priv)
964 {
965 	struct gve_tx_alloc_rings_cfg cfg = {0};
966 
967 	gve_tx_get_curr_alloc_cfg(priv, &cfg);
968 	cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
969 	cfg.num_rings = priv->num_xdp_queues;
970 
971 	if (priv->tx) {
972 		gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
973 		gve_tx_free_rings_gqi(priv, &cfg);
974 	}
975 }
976 
977 static void gve_free_rings(struct gve_priv *priv,
978 			   struct gve_tx_alloc_rings_cfg *tx_cfg,
979 			   struct gve_rx_alloc_rings_cfg *rx_cfg)
980 {
981 	if (gve_is_gqi(priv)) {
982 		gve_tx_free_rings_gqi(priv, tx_cfg);
983 		gve_rx_free_rings_gqi(priv, rx_cfg);
984 	} else {
985 		gve_tx_free_rings_dqo(priv, tx_cfg);
986 		gve_rx_free_rings_dqo(priv, rx_cfg);
987 	}
988 }
989 
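/* Allocate a single page and DMA-map it, bumping the relevant failure
 * counter if either step fails.
 */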
990 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
991 		   struct page **page, dma_addr_t *dma,
992 		   enum dma_data_direction dir, gfp_t gfp_flags)
993 {
994 	*page = alloc_page(gfp_flags);
995 	if (!*page) {
996 		priv->page_alloc_fail++;
997 		return -ENOMEM;
998 	}
999 	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
1000 	if (dma_mapping_error(dev, *dma)) {
1001 		priv->dma_mapping_error++;
1002 		put_page(*page);
1003 		return -ENOMEM;
1004 	}
1005 	return 0;
1006 }
1007 
1008 static int gve_alloc_queue_page_list(struct gve_priv *priv,
1009 				     struct gve_queue_page_list *qpl,
1010 				     u32 id, int pages)
1011 {
1012 	int err;
1013 	int i;
1014 
1015 	qpl->id = id;
1016 	qpl->num_entries = 0;
1017 	qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
1018 	/* caller handles cleanup */
1019 	if (!qpl->pages)
1020 		return -ENOMEM;
1021 	qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
1022 	/* caller handles cleanup */
1023 	if (!qpl->page_buses)
1024 		return -ENOMEM;
1025 
1026 	for (i = 0; i < pages; i++) {
1027 		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
1028 				     &qpl->page_buses[i],
1029 				     gve_qpl_dma_dir(priv, id), GFP_KERNEL);
1030 		/* caller handles cleanup */
1031 		if (err)
1032 			return -ENOMEM;
1033 		qpl->num_entries++;
1034 	}
1035 
1036 	return 0;
1037 }
1038 
1039 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1040 		   enum dma_data_direction dir)
1041 {
1042 	if (!dma_mapping_error(dev, dma))
1043 		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
1044 	if (page)
1045 		put_page(page);
1046 }
1047 
1048 static void gve_free_queue_page_list(struct gve_priv *priv,
1049 				     struct gve_queue_page_list *qpl,
1050 				     int id)
1051 {
1052 	int i;
1053 
1054 	if (!qpl->pages)
1055 		return;
1056 	if (!qpl->page_buses)
1057 		goto free_pages;
1058 
1059 	for (i = 0; i < qpl->num_entries; i++)
1060 		gve_free_page(&priv->pdev->dev, qpl->pages[i],
1061 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
1062 
1063 	kvfree(qpl->page_buses);
1064 	qpl->page_buses = NULL;
1065 free_pages:
1066 	kvfree(qpl->pages);
1067 	qpl->pages = NULL;
1068 }
1069 
1070 static void gve_free_n_qpls(struct gve_priv *priv,
1071 			    struct gve_queue_page_list *qpls,
1072 			    int start_id,
1073 			    int num_qpls)
1074 {
1075 	int i;
1076 
1077 	for (i = start_id; i < start_id + num_qpls; i++)
1078 		gve_free_queue_page_list(priv, &qpls[i], i);
1079 }
1080 
1081 static int gve_alloc_n_qpls(struct gve_priv *priv,
1082 			    struct gve_queue_page_list *qpls,
1083 			    int page_count,
1084 			    int start_id,
1085 			    int num_qpls)
1086 {
1087 	int err;
1088 	int i;
1089 
1090 	for (i = start_id; i < start_id + num_qpls; i++) {
1091 		err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
1092 		if (err)
1093 			goto free_qpls;
1094 	}
1095 
1096 	return 0;
1097 
1098 free_qpls:
1099 	/* Must include the failing QPL too, because gve_alloc_queue_page_list
1100 	 * fails without cleaning up after itself.
1101 	 */
1102 	gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
1103 	return err;
1104 }
1105 
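/* Allocate queue page lists for the maximum number of TX and RX queues,
 * along with the bitmap used to assign QPL IDs. Raw-addressing (non-QPL)
 * formats need none of this and return early.
 */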
1106 static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
1107 			  struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1108 {
1109 	int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
1110 	int rx_start_id, tx_num_qpls, rx_num_qpls;
1111 	struct gve_queue_page_list *qpls;
1112 	u32 page_count;
1113 	int err;
1114 
1115 	if (cfg->raw_addressing)
1116 		return 0;
1117 
1118 	qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
1119 	if (!qpls)
1120 		return -ENOMEM;
1121 
1122 	cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
1123 		sizeof(unsigned long) * BITS_PER_BYTE;
1124 	cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
1125 					    sizeof(unsigned long), GFP_KERNEL);
1126 	if (!cfg->qpl_cfg->qpl_id_map) {
1127 		err = -ENOMEM;
1128 		goto free_qpl_array;
1129 	}
1130 
1131 	/* Allocate TX QPLs */
1132 	page_count = priv->tx_pages_per_qpl;
1133 	tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
1134 				      gve_is_qpl(priv));
1135 	err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
1136 	if (err)
1137 		goto free_qpl_map;
1138 
1139 	/* Allocate RX QPLs */
1140 	rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
1141 	/* For GQI_QPL, the number of pages allocated has a 1:1 relationship
1142 	 * with the number of descriptors. For DQO, more pages are required
1143 	 * than there are descriptors (because of out-of-order completions),
1144 	 * so set it to twice the number of descriptors.
1145 	 */
1146 	if (cfg->is_gqi)
1147 		page_count = rx_alloc_cfg->ring_size;
1148 	else
1149 		page_count = gve_get_rx_pages_per_qpl_dqo(rx_alloc_cfg->ring_size);
1150 	rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
1151 	err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
1152 	if (err)
1153 		goto free_tx_qpls;
1154 
1155 	cfg->qpls = qpls;
1156 	return 0;
1157 
1158 free_tx_qpls:
1159 	gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
1160 free_qpl_map:
1161 	kvfree(cfg->qpl_cfg->qpl_id_map);
1162 	cfg->qpl_cfg->qpl_id_map = NULL;
1163 free_qpl_array:
1164 	kvfree(qpls);
1165 	return err;
1166 }
1167 
1168 static void gve_free_qpls(struct gve_priv *priv,
1169 			  struct gve_qpls_alloc_cfg *cfg)
1170 {
1171 	int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
1172 	struct gve_queue_page_list *qpls = cfg->qpls;
1173 	int i;
1174 
1175 	if (!qpls)
1176 		return;
1177 
1178 	kvfree(cfg->qpl_cfg->qpl_id_map);
1179 	cfg->qpl_cfg->qpl_id_map = NULL;
1180 
1181 	for (i = 0; i < max_queues; i++)
1182 		gve_free_queue_page_list(priv, &qpls[i], i);
1183 
1184 	kvfree(qpls);
1185 	cfg->qpls = NULL;
1186 }
1187 
1188 /* Use this to schedule a reset when the device is capable of continuing
1189  * to handle other requests in its current state. If it is not, do a reset
1190  * in the calling thread instead.
1191  */
1192 void gve_schedule_reset(struct gve_priv *priv)
1193 {
1194 	gve_set_do_reset(priv);
1195 	queue_work(priv->gve_wq, &priv->service_task);
1196 }
1197 
1198 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
1199 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
1200 static void gve_turndown(struct gve_priv *priv);
1201 static void gve_turnup(struct gve_priv *priv);
1202 
1203 static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
1204 {
1205 	struct napi_struct *napi;
1206 	struct gve_rx_ring *rx;
1207 	int err = 0;
1208 	int i, j;
1209 	u32 tx_qid;
1210 
1211 	if (!priv->num_xdp_queues)
1212 		return 0;
1213 
1214 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
1215 		rx = &priv->rx[i];
1216 		napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
1217 
1218 		err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i,
1219 				       napi->napi_id);
1220 		if (err)
1221 			goto err;
1222 		err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1223 						 MEM_TYPE_PAGE_SHARED, NULL);
1224 		if (err)
1225 			goto err;
1226 		rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
1227 		if (rx->xsk_pool) {
1228 			err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
1229 					       napi->napi_id);
1230 			if (err)
1231 				goto err;
1232 			err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
1233 							 MEM_TYPE_XSK_BUFF_POOL, NULL);
1234 			if (err)
1235 				goto err;
1236 			xsk_pool_set_rxq_info(rx->xsk_pool,
1237 					      &rx->xsk_rxq);
1238 		}
1239 	}
1240 
1241 	for (i = 0; i < priv->num_xdp_queues; i++) {
1242 		tx_qid = gve_xdp_tx_queue_id(priv, i);
1243 		priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
1244 	}
1245 	return 0;
1246 
1247 err:
1248 	for (j = i; j >= 0; j--) {
1249 		rx = &priv->rx[j];
1250 		if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
1251 			xdp_rxq_info_unreg(&rx->xdp_rxq);
1252 		if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
1253 			xdp_rxq_info_unreg(&rx->xsk_rxq);
1254 	}
1255 	return err;
1256 }
1257 
1258 static void gve_unreg_xdp_info(struct gve_priv *priv)
1259 {
1260 	int i, tx_qid;
1261 
1262 	if (!priv->num_xdp_queues)
1263 		return;
1264 
1265 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
1266 		struct gve_rx_ring *rx = &priv->rx[i];
1267 
1268 		xdp_rxq_info_unreg(&rx->xdp_rxq);
1269 		if (rx->xsk_pool) {
1270 			xdp_rxq_info_unreg(&rx->xsk_rxq);
1271 			rx->xsk_pool = NULL;
1272 		}
1273 	}
1274 
1275 	for (i = 0; i < priv->num_xdp_queues; i++) {
1276 		tx_qid = gve_xdp_tx_queue_id(priv, i);
1277 		priv->tx[tx_qid].xsk_pool = NULL;
1278 	}
1279 }
1280 
1281 static void gve_drain_page_cache(struct gve_priv *priv)
1282 {
1283 	int i;
1284 
1285 	for (i = 0; i < priv->rx_cfg.num_queues; i++)
1286 		page_frag_cache_drain(&priv->rx[i].page_cache);
1287 }
1288 
1289 static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
1290 					struct gve_qpls_alloc_cfg *cfg)
1291 {
1292 	  cfg->raw_addressing = !gve_is_qpl(priv);
1293 	  cfg->is_gqi = gve_is_gqi(priv);
1294 	  cfg->num_xdp_queues = priv->num_xdp_queues;
1295 	  cfg->qpl_cfg = &priv->qpl_cfg;
1296 	  cfg->tx_cfg = &priv->tx_cfg;
1297 	  cfg->rx_cfg = &priv->rx_cfg;
1298 	  cfg->qpls = priv->qpls;
1299 }
1300 
1301 static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
1302 				      struct gve_rx_alloc_rings_cfg *cfg)
1303 {
1304 	cfg->qcfg = &priv->rx_cfg;
1305 	cfg->qcfg_tx = &priv->tx_cfg;
1306 	cfg->raw_addressing = !gve_is_qpl(priv);
1307 	cfg->enable_header_split = priv->header_split_enabled;
1308 	cfg->qpls = priv->qpls;
1309 	cfg->qpl_cfg = &priv->qpl_cfg;
1310 	cfg->ring_size = priv->rx_desc_cnt;
1311 	cfg->packet_buffer_size = gve_is_gqi(priv) ?
1312 				  GVE_DEFAULT_RX_BUFFER_SIZE :
1313 				  priv->data_buffer_size_dqo;
1314 	cfg->rx = priv->rx;
1315 }
1316 
1317 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1318 			     struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
1319 			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1320 			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1321 {
1322 	gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
1323 	gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
1324 	gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
1325 }
1326 
1327 static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
1328 {
1329 	int i;
1330 
1331 	for (i = 0; i < num_rings; i++) {
1332 		if (gve_is_gqi(priv))
1333 			gve_rx_start_ring_gqi(priv, i);
1334 		else
1335 			gve_rx_start_ring_dqo(priv, i);
1336 	}
1337 }
1338 
1339 static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
1340 {
1341 	int i;
1342 
1343 	if (!priv->rx)
1344 		return;
1345 
1346 	for (i = 0; i < num_rings; i++) {
1347 		if (gve_is_gqi(priv))
1348 			gve_rx_stop_ring_gqi(priv, i);
1349 		else
1350 			gve_rx_stop_ring_dqo(priv, i);
1351 	}
1352 }
1353 
1354 static void gve_queues_mem_free(struct gve_priv *priv,
1355 				struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
1356 				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1357 				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1358 {
1359 	gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
1360 	gve_free_qpls(priv, qpls_alloc_cfg);
1361 }
1362 
1363 static int gve_queues_mem_alloc(struct gve_priv *priv,
1364 				struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
1365 				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1366 				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1367 {
1368 	int err;
1369 
1370 	err = gve_alloc_qpls(priv, qpls_alloc_cfg, rx_alloc_cfg);
1371 	if (err) {
1372 		netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
1373 		return err;
1374 	}
1375 	tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
1376 	rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
1377 	err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
1378 	if (err) {
1379 		netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
1380 		goto free_qpls;
1381 	}
1382 
1383 	return 0;
1384 
1385 free_qpls:
1386 	gve_free_qpls(priv, qpls_alloc_cfg);
1387 	return err;
1388 }
1389 
1390 static void gve_queues_mem_remove(struct gve_priv *priv)
1391 {
1392 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1393 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1394 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
1395 
1396 	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
1397 				&tx_alloc_cfg, &rx_alloc_cfg);
1398 	gve_queues_mem_free(priv, &qpls_alloc_cfg,
1399 			    &tx_alloc_cfg, &rx_alloc_cfg);
1400 	priv->qpls = NULL;
1401 	priv->tx = NULL;
1402 	priv->rx = NULL;
1403 }
1404 
1405 /* The passed-in queue memory is stored into priv and the queues are made live.
1406  * No memory is allocated. Passed-in memory is freed on error.
1407  */
1408 static int gve_queues_start(struct gve_priv *priv,
1409 			    struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
1410 			    struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1411 			    struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1412 {
1413 	struct net_device *dev = priv->dev;
1414 	int err;
1415 
1416 	/* Record new resources into priv */
1417 	priv->qpls = qpls_alloc_cfg->qpls;
1418 	priv->tx = tx_alloc_cfg->tx;
1419 	priv->rx = rx_alloc_cfg->rx;
1420 
1421 	/* Record new configs into priv */
1422 	priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
1423 	priv->tx_cfg = *tx_alloc_cfg->qcfg;
1424 	priv->rx_cfg = *rx_alloc_cfg->qcfg;
1425 	priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
1426 	priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
1427 
1428 	if (priv->xdp_prog)
1429 		priv->num_xdp_queues = priv->rx_cfg.num_queues;
1430 	else
1431 		priv->num_xdp_queues = 0;
1432 
1433 	gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
1434 	gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
1435 	gve_init_sync_stats(priv);
1436 
1437 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
1438 	if (err)
1439 		goto stop_and_free_rings;
1440 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
1441 	if (err)
1442 		goto stop_and_free_rings;
1443 
1444 	err = gve_reg_xdp_info(priv, dev);
1445 	if (err)
1446 		goto stop_and_free_rings;
1447 
1448 	err = gve_register_qpls(priv);
1449 	if (err)
1450 		goto reset;
1451 
1452 	priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
1453 	priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
1454 
1455 	err = gve_create_rings(priv);
1456 	if (err)
1457 		goto reset;
1458 
1459 	gve_set_device_rings_ok(priv);
1460 
1461 	if (gve_get_report_stats(priv))
1462 		mod_timer(&priv->stats_report_timer,
1463 			  round_jiffies(jiffies +
1464 				msecs_to_jiffies(priv->stats_report_timer_period)));
1465 
1466 	gve_turnup(priv);
1467 	queue_work(priv->gve_wq, &priv->service_task);
1468 	priv->interface_up_cnt++;
1469 	return 0;
1470 
1471 reset:
1472 	if (gve_get_reset_in_progress(priv))
1473 		goto stop_and_free_rings;
1474 	gve_reset_and_teardown(priv, true);
1475 	/* if this fails there is nothing we can do so just ignore the return */
1476 	gve_reset_recovery(priv, false);
1477 	/* return the original error */
1478 	return err;
1479 stop_and_free_rings:
1480 	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
1481 	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
1482 	gve_queues_mem_remove(priv);
1483 	return err;
1484 }
1485 
1486 static int gve_open(struct net_device *dev)
1487 {
1488 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1489 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1490 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
1491 	struct gve_priv *priv = netdev_priv(dev);
1492 	int err;
1493 
1494 	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
1495 				&tx_alloc_cfg, &rx_alloc_cfg);
1496 
1497 	err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
1498 				   &tx_alloc_cfg, &rx_alloc_cfg);
1499 	if (err)
1500 		return err;
1501 
1502 	/* No need to free on error: ownership of resources is lost after
1503 	 * calling gve_queues_start.
1504 	 */
1505 	err = gve_queues_start(priv, &qpls_alloc_cfg,
1506 			       &tx_alloc_cfg, &rx_alloc_cfg);
1507 	if (err)
1508 		return err;
1509 
1510 	return 0;
1511 }
1512 
1513 static int gve_queues_stop(struct gve_priv *priv)
1514 {
1515 	int err;
1516 
1517 	netif_carrier_off(priv->dev);
1518 	if (gve_get_device_rings_ok(priv)) {
1519 		gve_turndown(priv);
1520 		gve_drain_page_cache(priv);
1521 		err = gve_destroy_rings(priv);
1522 		if (err)
1523 			goto err;
1524 		err = gve_unregister_qpls(priv);
1525 		if (err)
1526 			goto err;
1527 		gve_clear_device_rings_ok(priv);
1528 	}
1529 	del_timer_sync(&priv->stats_report_timer);
1530 
1531 	gve_unreg_xdp_info(priv);
1532 
1533 	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
1534 	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
1535 
1536 	priv->interface_down_cnt++;
1537 	return 0;
1538 
1539 err:
1540 	/* This must have been called from a reset due to the rtnl lock
1541 	 * so just return at this point.
1542 	 */
1543 	if (gve_get_reset_in_progress(priv))
1544 		return err;
1545 	/* Otherwise reset before returning */
1546 	gve_reset_and_teardown(priv, true);
1547 	return gve_reset_recovery(priv, false);
1548 }
1549 
1550 static int gve_close(struct net_device *dev)
1551 {
1552 	struct gve_priv *priv = netdev_priv(dev);
1553 	int err;
1554 
1555 	err = gve_queues_stop(priv);
1556 	if (err)
1557 		return err;
1558 
1559 	gve_queues_mem_remove(priv);
1560 	return 0;
1561 }
1562 
1563 static int gve_remove_xdp_queues(struct gve_priv *priv)
1564 {
1565 	int qpl_start_id;
1566 	int err;
1567 
1568 	qpl_start_id = gve_xdp_tx_start_queue_id(priv);
1569 
1570 	err = gve_destroy_xdp_rings(priv);
1571 	if (err)
1572 		return err;
1573 
1574 	err = gve_unregister_xdp_qpls(priv);
1575 	if (err)
1576 		return err;
1577 
1578 	gve_unreg_xdp_info(priv);
1579 	gve_free_xdp_rings(priv);
1580 
1581 	gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
1582 	priv->num_xdp_queues = 0;
1583 	return 0;
1584 }
1585 
1586 static int gve_add_xdp_queues(struct gve_priv *priv)
1587 {
1588 	int start_id;
1589 	int err;
1590 
1591 	priv->num_xdp_queues = priv->rx_cfg.num_queues;
1592 
1593 	start_id = gve_xdp_tx_start_queue_id(priv);
1594 	err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
1595 			       start_id, gve_num_xdp_qpls(priv));
1596 	if (err)
1597 		goto err;
1598 
1599 	err = gve_alloc_xdp_rings(priv);
1600 	if (err)
1601 		goto free_xdp_qpls;
1602 
1603 	err = gve_reg_xdp_info(priv, priv->dev);
1604 	if (err)
1605 		goto free_xdp_rings;
1606 
1607 	err = gve_register_xdp_qpls(priv);
1608 	if (err)
1609 		goto free_xdp_rings;
1610 
1611 	err = gve_create_xdp_rings(priv);
1612 	if (err)
1613 		goto free_xdp_rings;
1614 
1615 	return 0;
1616 
1617 free_xdp_rings:
1618 	gve_free_xdp_rings(priv);
1619 free_xdp_qpls:
1620 	gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
1621 err:
1622 	priv->num_xdp_queues = 0;
1623 	return err;
1624 }
1625 
1626 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1627 {
1628 	if (!gve_get_napi_enabled(priv))
1629 		return;
1630 
1631 	if (link_status == netif_carrier_ok(priv->dev))
1632 		return;
1633 
1634 	if (link_status) {
1635 		netdev_info(priv->dev, "Device link is up.\n");
1636 		netif_carrier_on(priv->dev);
1637 	} else {
1638 		netdev_info(priv->dev, "Device link is down.\n");
1639 		netif_carrier_off(priv->dev);
1640 	}
1641 }
1642 
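/* Install or remove an XDP program. If the carrier is down, only the
 * program pointer is swapped; otherwise the datapath is quiesced with
 * gve_turndown(), dedicated XDP TX queues are added or removed, and the
 * datapath is brought back up with gve_turnup().
 */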
1643 static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
1644 		       struct netlink_ext_ack *extack)
1645 {
1646 	struct bpf_prog *old_prog;
1647 	int err = 0;
1648 	u32 status;
1649 
1650 	old_prog = READ_ONCE(priv->xdp_prog);
1651 	if (!netif_carrier_ok(priv->dev)) {
1652 		WRITE_ONCE(priv->xdp_prog, prog);
1653 		if (old_prog)
1654 			bpf_prog_put(old_prog);
1655 		return 0;
1656 	}
1657 
1658 	gve_turndown(priv);
1659 	if (!old_prog && prog) {
1660 		// Allocate XDP TX queues if an XDP program is
1661 		// being installed
1662 		err = gve_add_xdp_queues(priv);
1663 		if (err)
1664 			goto out;
1665 	} else if (old_prog && !prog) {
1666 		// Remove XDP TX queues if an XDP program is
1667 		// being uninstalled
1668 		err = gve_remove_xdp_queues(priv);
1669 		if (err)
1670 			goto out;
1671 	}
1672 	WRITE_ONCE(priv->xdp_prog, prog);
1673 	if (old_prog)
1674 		bpf_prog_put(old_prog);
1675 
1676 out:
1677 	gve_turnup(priv);
1678 	status = ioread32be(&priv->reg_bar0->device_status);
1679 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1680 	return err;
1681 }
1682 
1683 static int gve_xsk_pool_enable(struct net_device *dev,
1684 			       struct xsk_buff_pool *pool,
1685 			       u16 qid)
1686 {
1687 	struct gve_priv *priv = netdev_priv(dev);
1688 	struct napi_struct *napi;
1689 	struct gve_rx_ring *rx;
1690 	int tx_qid;
1691 	int err;
1692 
1693 	if (qid >= priv->rx_cfg.num_queues) {
1694 		dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
1695 		return -EINVAL;
1696 	}
1697 	if (xsk_pool_get_rx_frame_size(pool) <
1698 	     priv->dev->max_mtu + sizeof(struct ethhdr)) {
1699 		dev_err(&priv->pdev->dev, "xsk pool frame_len too small");
1700 		return -EINVAL;
1701 	}
1702 
1703 	err = xsk_pool_dma_map(pool, &priv->pdev->dev,
1704 			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1705 	if (err)
1706 		return err;
1707 
1708 	/* If XDP prog is not installed, return */
1709 	if (!priv->xdp_prog)
1710 		return 0;
1711 
1712 	rx = &priv->rx[qid];
1713 	napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
1714 	err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
1715 	if (err)
1716 		goto err;
1717 
1718 	err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
1719 					 MEM_TYPE_XSK_BUFF_POOL, NULL);
1720 	if (err)
1721 		goto err;
1722 
1723 	xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
1724 	rx->xsk_pool = pool;
1725 
1726 	tx_qid = gve_xdp_tx_queue_id(priv, qid);
1727 	priv->tx[tx_qid].xsk_pool = pool;
1728 
1729 	return 0;
1730 err:
1731 	if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
1732 		xdp_rxq_info_unreg(&rx->xsk_rxq);
1733 
1734 	xsk_pool_dma_unmap(pool,
1735 			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1736 	return err;
1737 }
1738 
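/* Detach an XSK buffer pool from the given queue. If the interface is
 * down, the pool pointers are simply cleared. Otherwise NAPI is disabled
 * on both the RX queue and the paired XDP TX queue so in-flight polls
 * finish before the pointers are cleared; the memory barrier makes the
 * cleared pointers visible before NAPI is re-enabled.
 */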
1739 static int gve_xsk_pool_disable(struct net_device *dev,
1740 				u16 qid)
1741 {
1742 	struct gve_priv *priv = netdev_priv(dev);
1743 	struct napi_struct *napi_rx;
1744 	struct napi_struct *napi_tx;
1745 	struct xsk_buff_pool *pool;
1746 	int tx_qid;
1747 
1748 	pool = xsk_get_pool_from_qid(dev, qid);
1749 	if (!pool)
1750 		return -EINVAL;
1751 	if (qid >= priv->rx_cfg.num_queues)
1752 		return -EINVAL;
1753 
1754 	/* If XDP prog is not installed, unmap DMA and return */
1755 	if (!priv->xdp_prog)
1756 		goto done;
1757 
1758 	tx_qid = gve_xdp_tx_queue_id(priv, qid);
1759 	if (!netif_running(dev)) {
1760 		priv->rx[qid].xsk_pool = NULL;
1761 		xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
1762 		priv->tx[tx_qid].xsk_pool = NULL;
1763 		goto done;
1764 	}
1765 
1766 	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
1767 	napi_disable(napi_rx); /* make sure current rx poll is done */
1768 
1769 	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
1770 	napi_disable(napi_tx); /* make sure current tx poll is done */
1771 
1772 	priv->rx[qid].xsk_pool = NULL;
1773 	xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
1774 	priv->tx[tx_qid].xsk_pool = NULL;
1775 	smp_mb(); /* Make sure it is visible to the workers on datapath */
1776 
1777 	napi_enable(napi_rx);
1778 	if (gve_rx_work_pending(&priv->rx[qid]))
1779 		napi_schedule(napi_rx);
1780 
1781 	napi_enable(napi_tx);
1782 	if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
1783 		napi_schedule(napi_tx);
1784 
1785 done:
1786 	xsk_pool_dma_unmap(pool,
1787 			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1788 	return 0;
1789 }
1790 
1791 static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
1792 {
1793 	struct gve_priv *priv = netdev_priv(dev);
1794 	int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
1795 
1796 	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
1797 		return -EINVAL;
1798 
1799 	if (flags & XDP_WAKEUP_TX) {
1800 		struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
1801 		struct napi_struct *napi =
1802 			&priv->ntfy_blocks[tx->ntfy_id].napi;
1803 
1804 		if (!napi_if_scheduled_mark_missed(napi)) {
1805 			/* Call local_bh_enable to trigger SoftIRQ processing */
1806 			local_bh_disable();
1807 			napi_schedule(napi);
1808 			local_bh_enable();
1809 		}
1810 
1811 		tx->xdp_xsk_wakeup++;
1812 	}
1813 
1814 	return 0;
1815 }
1816 
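/* XDP is only supported with the GQI-QPL queue format, with LRO off,
 * with an MTU that fits a single RX buffer, and with equal RX and TX
 * queue counts no larger than half the maximum, so that a dedicated
 * XDP TX queue can be paired with every RX queue.
 */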
1817 static int verify_xdp_configuration(struct net_device *dev)
1818 {
1819 	struct gve_priv *priv = netdev_priv(dev);
1820 
1821 	if (dev->features & NETIF_F_LRO) {
1822 		netdev_warn(dev, "XDP is not supported when LRO is on.\n");
1823 		return -EOPNOTSUPP;
1824 	}
1825 
1826 	if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
1827 		netdev_warn(dev, "XDP is not supported in mode %d.\n",
1828 			    priv->queue_format);
1829 		return -EOPNOTSUPP;
1830 	}
1831 
1832 	if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
1833 		netdev_warn(dev, "XDP is not supported for mtu %d.\n",
1834 			    dev->mtu);
1835 		return -EOPNOTSUPP;
1836 	}
1837 
1838 	if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
1839 	    (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
1840 		netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
1841 			    priv->rx_cfg.num_queues,
1842 			    priv->tx_cfg.num_queues,
1843 			    priv->tx_cfg.max_queues);
1844 		return -EINVAL;
1845 	}
1846 	return 0;
1847 }
1848 
1849 static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1850 {
1851 	struct gve_priv *priv = netdev_priv(dev);
1852 	int err;
1853 
1854 	err = verify_xdp_configuration(dev);
1855 	if (err)
1856 		return err;
1857 	switch (xdp->command) {
1858 	case XDP_SETUP_PROG:
1859 		return gve_set_xdp(priv, xdp->prog, xdp->extack);
1860 	case XDP_SETUP_XSK_POOL:
1861 		if (xdp->xsk.pool)
1862 			return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
1863 		else
1864 			return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
1865 	default:
1866 		return -EINVAL;
1867 	}
1868 }
1869 
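/* Apply a new queue configuration in three steps: allocate the new
 * resources first, then tear down the old queues, then start the new
 * ones. A failed allocation therefore leaves the running interface
 * untouched.
 */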
1870 int gve_adjust_config(struct gve_priv *priv,
1871 		      struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
1872 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1873 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1874 {
1875 	int err;
1876 
1877 	/* Allocate resources for the new configuration */
1878 	err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
1879 				   tx_alloc_cfg, rx_alloc_cfg);
1880 	if (err) {
1881 		netif_err(priv, drv, priv->dev,
1882 			  "Adjust config failed to alloc new queues");
1883 		return err;
1884 	}
1885 
1886 	/* Teardown the device and free existing resources */
1887 	err = gve_close(priv->dev);
1888 	if (err) {
1889 		netif_err(priv, drv, priv->dev,
1890 			  "Adjust config failed to close old queues");
1891 		gve_queues_mem_free(priv, qpls_alloc_cfg,
1892 				    tx_alloc_cfg, rx_alloc_cfg);
1893 		return err;
1894 	}
1895 
1896 	/* Bring the device back up again with the new resources. */
1897 	err = gve_queues_start(priv, qpls_alloc_cfg,
1898 			       tx_alloc_cfg, rx_alloc_cfg);
1899 	if (err) {
1900 		netif_err(priv, drv, priv->dev,
1901 			  "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
1902 		/* No need to free on error: ownership of resources is lost after
1903 		 * calling gve_queues_start.
1904 		 */
1905 		gve_turndown(priv);
1906 		return err;
1907 	}
1908 
1909 	return 0;
1910 }
1911 
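/* Entry point for queue count changes from ethtool (e.g. via something
 * like "ethtool -L <iface> rx N tx N"). If the carrier is up the new
 * counts are applied immediately through gve_adjust_config(); otherwise
 * they are staged in priv for the next open.
 */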
1912 int gve_adjust_queues(struct gve_priv *priv,
1913 		      struct gve_queue_config new_rx_config,
1914 		      struct gve_queue_config new_tx_config)
1915 {
1916 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1917 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1918 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
1919 	struct gve_qpl_config new_qpl_cfg;
1920 	int err;
1921 
1922 	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
1923 				&tx_alloc_cfg, &rx_alloc_cfg);
1924 
1925 	/* qpl_cfg is not read-only: it contains a map that is updated as
1926 	 * rings are allocated, which is why we cannot reuse the copy in
1927 	 * priv that has not yet been released.
1928 	 */
1929 	qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
1930 	tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
1931 	rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
1932 
1933 	/* Relay the new config from ethtool */
1934 	qpls_alloc_cfg.tx_cfg = &new_tx_config;
1935 	tx_alloc_cfg.qcfg = &new_tx_config;
1936 	rx_alloc_cfg.qcfg_tx = &new_tx_config;
1937 	qpls_alloc_cfg.rx_cfg = &new_rx_config;
1938 	rx_alloc_cfg.qcfg = &new_rx_config;
1939 	tx_alloc_cfg.num_rings = new_tx_config.num_queues;
1940 
1941 	if (netif_carrier_ok(priv->dev)) {
1942 		err = gve_adjust_config(priv, &qpls_alloc_cfg,
1943 					&tx_alloc_cfg, &rx_alloc_cfg);
1944 		return err;
1945 	}
1946 	/* Set the config for the next up. */
1947 	priv->tx_cfg = new_tx_config;
1948 	priv->rx_cfg = new_rx_config;
1949 
1950 	return 0;
1951 }
1952 
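/* Quiesce the device: drop the carrier, disable NAPI on every TX and RX
 * notification block so no new work comes in, then stop the TX queues.
 */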
1953 static void gve_turndown(struct gve_priv *priv)
1954 {
1955 	int idx;
1956 
1957 	if (netif_carrier_ok(priv->dev))
1958 		netif_carrier_off(priv->dev);
1959 
1960 	if (!gve_get_napi_enabled(priv))
1961 		return;
1962 
1963 	/* Disable napi to prevent more work from coming in */
1964 	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1965 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1966 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1967 
1968 		napi_disable(&block->napi);
1969 	}
1970 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1971 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1972 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1973 
1974 		napi_disable(&block->napi);
1975 	}
1976 
1977 	/* Stop tx queues */
1978 	netif_tx_disable(priv->dev);
1979 
1980 	gve_clear_napi_enabled(priv);
1981 	gve_clear_report_stats(priv);
1982 }
1983 
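/* Reverse of gve_turndown(): restart the TX queues, re-enable NAPI and
 * re-arm interrupts (GQI unmasks via the IRQ doorbell; DQO reprograms the
 * interrupt coalescing timers).
 */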
1984 static void gve_turnup(struct gve_priv *priv)
1985 {
1986 	int idx;
1987 
1988 	/* Start the tx queues */
1989 	netif_tx_start_all_queues(priv->dev);
1990 
1991 	/* Enable napi and unmask interrupts for all queues */
1992 	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1993 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1994 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1995 
1996 		napi_enable(&block->napi);
1997 		if (gve_is_gqi(priv)) {
1998 			iowrite32be(0, gve_irq_doorbell(priv, block));
1999 		} else {
2000 			gve_set_itr_coalesce_usecs_dqo(priv, block,
2001 						       priv->tx_coalesce_usecs);
2002 		}
2003 	}
2004 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
2005 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
2006 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
2007 
2008 		napi_enable(&block->napi);
2009 		if (gve_is_gqi(priv)) {
2010 			iowrite32be(0, gve_irq_doorbell(priv, block));
2011 		} else {
2012 			gve_set_itr_coalesce_usecs_dqo(priv, block,
2013 						       priv->rx_coalesce_usecs);
2014 		}
2015 	}
2016 
2017 	gve_set_napi_enabled(priv);
2018 }
2019 
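/* ndo_tx_timeout handler. If the NIC reports completions we have missed
 * and the queue was not kicked within the last MIN_TX_TIMEOUT_GAP msecs,
 * mask the IRQ and reschedule NAPI to drain them; otherwise schedule a
 * full device reset.
 */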
2020 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
2021 {
2022 	struct gve_notify_block *block;
2023 	struct gve_tx_ring *tx = NULL;
2024 	struct gve_priv *priv;
2025 	u32 last_nic_done;
2026 	u32 current_time;
2027 	u32 ntfy_idx;
2028 
2029 	netdev_info(dev, "Timeout on tx queue %d\n", txqueue);
2030 	priv = netdev_priv(dev);
2031 	if (txqueue >= priv->tx_cfg.num_queues)
2032 		goto reset;
2033 
2034 	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
2035 	if (ntfy_idx >= priv->num_ntfy_blks)
2036 		goto reset;
2037 
2038 	block = &priv->ntfy_blocks[ntfy_idx];
2039 	tx = block->tx;
2040 
2041 	current_time = jiffies_to_msecs(jiffies);
2042 	if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
2043 		goto reset;
2044 
2045 	/* Check to see if there are missed completions, which will allow us to
2046 	 * kick the queue.
2047 	 */
2048 	last_nic_done = gve_tx_load_event_counter(priv, tx);
2049 	if (last_nic_done - tx->done) {
2050 		netdev_info(dev, "Kicking queue %d\n", txqueue);
2051 		iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
2052 		napi_schedule(&block->napi);
2053 		tx->last_kick_msec = current_time;
2054 		goto out;
2055 	} // Else reset.
2056 
2057 reset:
2058 	gve_schedule_reset(priv);
2059 
2060 out:
2061 	if (tx)
2062 		tx->queue_timeout++;
2063 	priv->tx_timeo_cnt++;
2064 }
2065 
2066 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
2067 {
2068 	if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
2069 		return GVE_MAX_RX_BUFFER_SIZE;
2070 	else
2071 		return GVE_DEFAULT_RX_BUFFER_SIZE;
2072 }
2073 
2074 /* Header split is not yet supported on non-DQO_RDA queue formats, even if the device advertises it */
2075 bool gve_header_split_supported(const struct gve_priv *priv)
2076 {
2077 	return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
2078 }
2079 
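/* Apply the ethtool tcp-data-split setting (e.g. via something like
 * "ethtool -G <iface> tcp-data-split on"). Toggling header split changes
 * the RX packet buffer size, so the queues are reallocated through
 * gve_adjust_config() while the interface is running.
 */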
2080 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
2081 {
2082 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
2083 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
2084 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
2085 	bool enable_hdr_split;
2086 	int err = 0;
2087 
2088 	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
2089 		return 0;
2090 
2091 	if (!gve_header_split_supported(priv)) {
2092 		dev_err(&priv->pdev->dev, "Header-split not supported\n");
2093 		return -EOPNOTSUPP;
2094 	}
2095 
2096 	enable_hdr_split = (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED);
2100 
2101 	if (enable_hdr_split == priv->header_split_enabled)
2102 		return 0;
2103 
2104 	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
2105 				&tx_alloc_cfg, &rx_alloc_cfg);
2106 
2107 	rx_alloc_cfg.enable_header_split = enable_hdr_split;
2108 	rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
2109 
2110 	if (netif_running(priv->dev))
2111 		err = gve_adjust_config(priv, &qpls_alloc_cfg,
2112 					&tx_alloc_cfg, &rx_alloc_cfg);
2113 	return err;
2114 }
2115 
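/* ndo_set_features handler: toggling NETIF_F_LRO requires reallocating
 * the queues when the carrier is up; the feature bit is reverted if that
 * fails.
 */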
2116 static int gve_set_features(struct net_device *netdev,
2117 			    netdev_features_t features)
2118 {
2119 	const netdev_features_t orig_features = netdev->features;
2120 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
2121 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
2122 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
2123 	struct gve_priv *priv = netdev_priv(netdev);
2124 	struct gve_qpl_config new_qpl_cfg;
2125 	int err;
2126 
2127 	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
2128 				&tx_alloc_cfg, &rx_alloc_cfg);
2129 	/* qpl_cfg is not read-only: it contains a map that is updated as
2130 	 * rings are allocated, which is why we cannot reuse the copy in
2131 	 * priv that has not yet been released.
2132 	 */
2133 	qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
2134 	tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
2135 	rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
2136 
2137 	if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
2138 		netdev->features ^= NETIF_F_LRO;
2139 		if (netif_carrier_ok(netdev)) {
2140 			err = gve_adjust_config(priv, &qpls_alloc_cfg,
2141 						&tx_alloc_cfg, &rx_alloc_cfg);
2142 			if (err) {
2143 				/* Revert the change on error. */
2144 				netdev->features = orig_features;
2145 				return err;
2146 			}
2147 		}
2148 	}
2149 
2150 	return 0;
2151 }
2152 
2153 static const struct net_device_ops gve_netdev_ops = {
2154 	.ndo_start_xmit		=	gve_start_xmit,
2155 	.ndo_features_check	=	gve_features_check,
2156 	.ndo_open		=	gve_open,
2157 	.ndo_stop		=	gve_close,
2158 	.ndo_get_stats64	=	gve_get_stats,
2159 	.ndo_tx_timeout         =       gve_tx_timeout,
2160 	.ndo_set_features	=	gve_set_features,
2161 	.ndo_bpf		=	gve_xdp,
2162 	.ndo_xdp_xmit		=	gve_xdp_xmit,
2163 	.ndo_xsk_wakeup		=	gve_xsk_wakeup,
2164 };
2165 
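/* Decode the device status register: a reset request or a stats report
 * request is recorded as a flag for the service task to act on.
 */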
2166 static void gve_handle_status(struct gve_priv *priv, u32 status)
2167 {
2168 	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
2169 		dev_info(&priv->pdev->dev, "Device requested reset.\n");
2170 		gve_set_do_reset(priv);
2171 	}
2172 	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
2173 		priv->stats_report_trigger_cnt++;
2174 		gve_set_do_report_stats(priv);
2175 	}
2176 }
2177 
2178 static void gve_handle_reset(struct gve_priv *priv)
2179 {
2180 	/* A service task will be scheduled at the end of probe to catch any
2181 	 * resets that need to happen, and we don't want to reset until
2182 	 * probe is done.
2183 	 */
2184 	if (gve_get_probe_in_progress(priv))
2185 		return;
2186 
2187 	if (gve_get_do_reset(priv)) {
2188 		rtnl_lock();
2189 		gve_reset(priv, false);
2190 		rtnl_unlock();
2191 	}
2192 }
2193 
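/* Populate the stats report shared with the device: bump written_count so
 * the NIC can detect a fresh snapshot, then write per-queue TX and RX
 * counters.
 */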
2194 void gve_handle_report_stats(struct gve_priv *priv)
2195 {
2196 	struct stats *stats = priv->stats_report->stats;
2197 	int idx, stats_idx = 0;
2198 	unsigned int start = 0;
2199 	u64 tx_bytes;
2200 
2201 	if (!gve_get_report_stats(priv))
2202 		return;
2203 
2204 	be64_add_cpu(&priv->stats_report->written_count, 1);
2205 	/* tx stats */
2206 	if (priv->tx) {
2207 		for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
2208 			u32 last_completion = 0;
2209 			u32 tx_frames = 0;
2210 
2211 			/* DQO doesn't currently support these metrics. */
2212 			if (gve_is_gqi(priv)) {
2213 				last_completion = priv->tx[idx].done;
2214 				tx_frames = priv->tx[idx].req;
2215 			}
2216 
2217 			do {
2218 				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
2219 				tx_bytes = priv->tx[idx].bytes_done;
2220 			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
2221 			stats[stats_idx++] = (struct stats) {
2222 				.stat_name = cpu_to_be32(TX_WAKE_CNT),
2223 				.value = cpu_to_be64(priv->tx[idx].wake_queue),
2224 				.queue_id = cpu_to_be32(idx),
2225 			};
2226 			stats[stats_idx++] = (struct stats) {
2227 				.stat_name = cpu_to_be32(TX_STOP_CNT),
2228 				.value = cpu_to_be64(priv->tx[idx].stop_queue),
2229 				.queue_id = cpu_to_be32(idx),
2230 			};
2231 			stats[stats_idx++] = (struct stats) {
2232 				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
2233 				.value = cpu_to_be64(tx_frames),
2234 				.queue_id = cpu_to_be32(idx),
2235 			};
2236 			stats[stats_idx++] = (struct stats) {
2237 				.stat_name = cpu_to_be32(TX_BYTES_SENT),
2238 				.value = cpu_to_be64(tx_bytes),
2239 				.queue_id = cpu_to_be32(idx),
2240 			};
2241 			stats[stats_idx++] = (struct stats) {
2242 				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
2243 				.value = cpu_to_be64(last_completion),
2244 				.queue_id = cpu_to_be32(idx),
2245 			};
2246 			stats[stats_idx++] = (struct stats) {
2247 				.stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
2248 				.value = cpu_to_be64(priv->tx[idx].queue_timeout),
2249 				.queue_id = cpu_to_be32(idx),
2250 			};
2251 		}
2252 	}
2253 	/* rx stats */
2254 	if (priv->rx) {
2255 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
2256 			stats[stats_idx++] = (struct stats) {
2257 				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
2258 				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
2259 				.queue_id = cpu_to_be32(idx),
2260 			};
2261 			stats[stats_idx++] = (struct stats) {
2262 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
2263 				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
2264 				.queue_id = cpu_to_be32(idx),
2265 			};
2266 		}
2267 	}
2268 }
2269 
2270 /* Handle NIC status register changes, reset requests and report stats */
2271 static void gve_service_task(struct work_struct *work)
2272 {
2273 	struct gve_priv *priv = container_of(work, struct gve_priv,
2274 					     service_task);
2275 	u32 status = ioread32be(&priv->reg_bar0->device_status);
2276 
2277 	gve_handle_status(priv, status);
2278 
2279 	gve_handle_reset(priv);
2280 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
2281 }
2282 
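/* XDP features are advertised only for the GQI QPL queue format; the
 * other queue formats do not support XDP yet.
 */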
2283 static void gve_set_netdev_xdp_features(struct gve_priv *priv)
2284 {
2285 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
2286 		priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
2287 		priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
2288 		priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
2289 		priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
2290 	} else {
2291 		priv->dev->xdp_features = 0;
2292 	}
2293 }
2294 
2295 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
2296 {
2297 	int num_ntfy;
2298 	int err;
2299 
2300 	/* Set up the adminq */
2301 	err = gve_adminq_alloc(&priv->pdev->dev, priv);
2302 	if (err) {
2303 		dev_err(&priv->pdev->dev,
2304 			"Failed to alloc admin queue: err=%d\n", err);
2305 		return err;
2306 	}
2307 
2308 	err = gve_verify_driver_compatibility(priv);
2309 	if (err) {
2310 		dev_err(&priv->pdev->dev,
2311 			"Could not verify driver compatibility: err=%d\n", err);
2312 		goto err;
2313 	}
2314 
2315 	priv->num_registered_pages = 0;
2316 
2317 	if (skip_describe_device)
2318 		goto setup_device;
2319 
2320 	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
2321 	/* Get the initial information we need from the device */
2322 	err = gve_adminq_describe_device(priv);
2323 	if (err) {
2324 		dev_err(&priv->pdev->dev,
2325 			"Could not get device information: err=%d\n", err);
2326 		goto err;
2327 	}
2328 	priv->dev->mtu = priv->dev->max_mtu;
2329 	num_ntfy = pci_msix_vec_count(priv->pdev);
2330 	if (num_ntfy <= 0) {
2331 		dev_err(&priv->pdev->dev,
2332 			"could not count MSI-x vectors: err=%d\n", num_ntfy);
2333 		err = num_ntfy;
2334 		goto err;
2335 	} else if (num_ntfy < GVE_MIN_MSIX) {
2336 		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
2337 			GVE_MIN_MSIX, num_ntfy);
2338 		err = -EINVAL;
2339 		goto err;
2340 	}
2341 
2342 	/* Big TCP is only supported on DQO */
2343 	if (!gve_is_gqi(priv))
2344 		netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
2345 
2346 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
2347 	/* gvnic has one Notification Block per MSI-x vector, except for the
2348 	 * management vector; round down to an even count so the blocks can
2349 	 * be split evenly between TX and RX queues. */
2350 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
2351 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
2352 
2353 	priv->tx_cfg.max_queues =
2354 		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
2355 	priv->rx_cfg.max_queues =
2356 		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
2357 
2358 	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
2359 	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
2360 	if (priv->default_num_queues > 0) {
2361 		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
2362 						priv->tx_cfg.num_queues);
2363 		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
2364 						priv->rx_cfg.num_queues);
2365 	}
2366 
2367 	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
2368 		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
2369 	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
2370 		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
2371 
2372 	if (!gve_is_gqi(priv)) {
2373 		priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
2374 		priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
2375 	}
2376 
2377 setup_device:
2378 	gve_set_netdev_xdp_features(priv);
2379 	err = gve_setup_device_resources(priv);
2380 	if (!err)
2381 		return 0;
2382 err:
2383 	gve_adminq_free(&priv->pdev->dev, priv);
2384 	return err;
2385 }
2386 
2387 static void gve_teardown_priv_resources(struct gve_priv *priv)
2388 {
2389 	gve_teardown_device_resources(priv);
2390 	gve_adminq_free(&priv->pdev->dev, priv);
2391 }
2392 
2393 static void gve_trigger_reset(struct gve_priv *priv)
2394 {
2395 	/* Reset the device by releasing the AQ */
2396 	gve_adminq_release(priv);
2397 }
2398 
2399 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
2400 {
2401 	gve_trigger_reset(priv);
2402 	/* With the reset having already happened, close cannot fail */
2403 	if (was_up)
2404 		gve_close(priv->dev);
2405 	gve_teardown_priv_resources(priv);
2406 }
2407 
2408 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
2409 {
2410 	int err;
2411 
2412 	err = gve_init_priv(priv, true);
2413 	if (err)
2414 		goto err;
2415 	if (was_up) {
2416 		err = gve_open(priv->dev);
2417 		if (err)
2418 			goto err;
2419 	}
2420 	return 0;
2421 err:
2422 	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
2423 	gve_turndown(priv);
2424 	return err;
2425 }
2426 
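/* Full device reset. With attempt_teardown, try an orderly close first
 * and fall back to an abrupt reset-and-teardown if that fails; otherwise
 * turn the device down and reset immediately. Recovery re-initializes the
 * device and, if the interface was up, reopens it.
 */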
2427 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
2428 {
2429 	bool was_up = netif_carrier_ok(priv->dev);
2430 	int err;
2431 
2432 	dev_info(&priv->pdev->dev, "Performing reset\n");
2433 	gve_clear_do_reset(priv);
2434 	gve_set_reset_in_progress(priv);
2435 	/* If we aren't attempting a normal teardown, just turn down and
2436 	 * reset right away.
2437 	 */
2438 	if (!attempt_teardown) {
2439 		gve_turndown(priv);
2440 		gve_reset_and_teardown(priv, was_up);
2441 	} else {
2442 		/* Otherwise attempt to close normally */
2443 		if (was_up) {
2444 			err = gve_close(priv->dev);
2445 			/* If that fails, reset as we did above */
2446 			if (err)
2447 				gve_reset_and_teardown(priv, was_up);
2448 		}
2449 		/* Clean up any remaining resources */
2450 		gve_teardown_priv_resources(priv);
2451 	}
2452 
2453 	/* Set it all back up */
2454 	err = gve_reset_recovery(priv, was_up);
2455 	gve_clear_reset_in_progress(priv);
2456 	priv->reset_cnt++;
2457 	priv->interface_up_cnt = 0;
2458 	priv->interface_down_cnt = 0;
2459 	priv->stats_report_trigger_cnt = 0;
2460 	return err;
2461 }
2462 
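/* Report the driver version to the device by streaming the prefix and
 * version strings byte by byte into the version register, terminated with
 * a newline.
 */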
2463 static void gve_write_version(u8 __iomem *driver_version_register)
2464 {
2465 	const char *c = gve_version_prefix;
2466 
2467 	while (*c) {
2468 		writeb(*c, driver_version_register);
2469 		c++;
2470 	}
2471 
2472 	c = gve_version_str;
2473 	while (*c) {
2474 		writeb(*c, driver_version_register);
2475 		c++;
2476 	}
2477 	writeb('\n', driver_version_register);
2478 }
2479 
2480 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2481 {
2482 	int max_tx_queues, max_rx_queues;
2483 	struct net_device *dev;
2484 	__be32 __iomem *db_bar;
2485 	struct gve_registers __iomem *reg_bar;
2486 	struct gve_priv *priv;
2487 	int err;
2488 
2489 	err = pci_enable_device(pdev);
2490 	if (err)
2491 		return err;
2492 
2493 	err = pci_request_regions(pdev, gve_driver_name);
2494 	if (err)
2495 		goto abort_with_enabled;
2496 
2497 	pci_set_master(pdev);
2498 
2499 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2500 	if (err) {
2501 		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
2502 		goto abort_with_pci_region;
2503 	}
2504 
2505 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
2506 	if (!reg_bar) {
2507 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
2508 		err = -ENOMEM;
2509 		goto abort_with_pci_region;
2510 	}
2511 
2512 	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
2513 	if (!db_bar) {
2514 		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
2515 		err = -ENOMEM;
2516 		goto abort_with_reg_bar;
2517 	}
2518 
2519 	gve_write_version(&reg_bar->driver_version);
2520 	/* Get max queues to alloc etherdev */
2521 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
2522 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
2523 	/* Alloc and setup the netdev and priv */
2524 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
2525 	if (!dev) {
2526 		dev_err(&pdev->dev, "could not allocate netdev\n");
2527 		err = -ENOMEM;
2528 		goto abort_with_db_bar;
2529 	}
2530 	SET_NETDEV_DEV(dev, &pdev->dev);
2531 	pci_set_drvdata(pdev, dev);
2532 	dev->ethtool_ops = &gve_ethtool_ops;
2533 	dev->netdev_ops = &gve_netdev_ops;
2534 
2535 	/* Set default and supported features.
2536 	 *
2537 	 * Features might be set in other locations as well (such as
2538 	 * `gve_adminq_describe_device`).
2539 	 */
2540 	dev->hw_features = NETIF_F_HIGHDMA;
2541 	dev->hw_features |= NETIF_F_SG;
2542 	dev->hw_features |= NETIF_F_HW_CSUM;
2543 	dev->hw_features |= NETIF_F_TSO;
2544 	dev->hw_features |= NETIF_F_TSO6;
2545 	dev->hw_features |= NETIF_F_TSO_ECN;
2546 	dev->hw_features |= NETIF_F_RXCSUM;
2547 	dev->hw_features |= NETIF_F_RXHASH;
2548 	dev->features = dev->hw_features;
2549 	dev->watchdog_timeo = 5 * HZ;
2550 	dev->min_mtu = ETH_MIN_MTU;
2551 	netif_carrier_off(dev);
2552 
2553 	priv = netdev_priv(dev);
2554 	priv->dev = dev;
2555 	priv->pdev = pdev;
2556 	priv->msg_enable = DEFAULT_MSG_LEVEL;
2557 	priv->reg_bar0 = reg_bar;
2558 	priv->db_bar2 = db_bar;
2559 	priv->service_task_flags = 0x0;
2560 	priv->state_flags = 0x0;
2561 	priv->ethtool_flags = 0x0;
2562 	priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
2563 	priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
2564 
2565 	gve_set_probe_in_progress(priv);
2566 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
2567 	if (!priv->gve_wq) {
2568 		dev_err(&pdev->dev, "Could not allocate workqueue\n");
2569 		err = -ENOMEM;
2570 		goto abort_with_netdev;
2571 	}
2572 	INIT_WORK(&priv->service_task, gve_service_task);
2573 	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
2574 	priv->tx_cfg.max_queues = max_tx_queues;
2575 	priv->rx_cfg.max_queues = max_rx_queues;
2576 
2577 	err = gve_init_priv(priv, false);
2578 	if (err)
2579 		goto abort_with_wq;
2580 
2581 	err = register_netdev(dev);
2582 	if (err)
2583 		goto abort_with_gve_init;
2584 
2585 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
2586 	dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
2587 	gve_clear_probe_in_progress(priv);
2588 	queue_work(priv->gve_wq, &priv->service_task);
2589 	return 0;
2590 
2591 abort_with_gve_init:
2592 	gve_teardown_priv_resources(priv);
2593 
2594 abort_with_wq:
2595 	destroy_workqueue(priv->gve_wq);
2596 
2597 abort_with_netdev:
2598 	free_netdev(dev);
2599 
2600 abort_with_db_bar:
2601 	pci_iounmap(pdev, db_bar);
2602 
2603 abort_with_reg_bar:
2604 	pci_iounmap(pdev, reg_bar);
2605 
2606 abort_with_pci_region:
2607 	pci_release_regions(pdev);
2608 
2609 abort_with_enabled:
2610 	pci_disable_device(pdev);
2611 	return err;
2612 }
2613 
2614 static void gve_remove(struct pci_dev *pdev)
2615 {
2616 	struct net_device *netdev = pci_get_drvdata(pdev);
2617 	struct gve_priv *priv = netdev_priv(netdev);
2618 	__be32 __iomem *db_bar = priv->db_bar2;
2619 	void __iomem *reg_bar = priv->reg_bar0;
2620 
2621 	unregister_netdev(netdev);
2622 	gve_teardown_priv_resources(priv);
2623 	destroy_workqueue(priv->gve_wq);
2624 	free_netdev(netdev);
2625 	pci_iounmap(pdev, db_bar);
2626 	pci_iounmap(pdev, reg_bar);
2627 	pci_release_regions(pdev);
2628 	pci_disable_device(pdev);
2629 }
2630 
2631 static void gve_shutdown(struct pci_dev *pdev)
2632 {
2633 	struct net_device *netdev = pci_get_drvdata(pdev);
2634 	struct gve_priv *priv = netdev_priv(netdev);
2635 	bool was_up = netif_carrier_ok(priv->dev);
2636 
2637 	rtnl_lock();
2638 	if (was_up && gve_close(priv->dev)) {
2639 		/* If the dev was up, attempt to close; if close fails, reset */
2640 		gve_reset_and_teardown(priv, was_up);
2641 	} else {
2642 		/* If the dev wasn't up or close worked, finish tearing down */
2643 		gve_teardown_priv_resources(priv);
2644 	}
2645 	rtnl_unlock();
2646 }
2647 
2648 #ifdef CONFIG_PM
2649 static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
2650 {
2651 	struct net_device *netdev = pci_get_drvdata(pdev);
2652 	struct gve_priv *priv = netdev_priv(netdev);
2653 	bool was_up = netif_carrier_ok(priv->dev);
2654 
2655 	priv->suspend_cnt++;
2656 	rtnl_lock();
2657 	if (was_up && gve_close(priv->dev)) {
2658 		/* If the dev was up, attempt to close; if close fails, reset */
2659 		gve_reset_and_teardown(priv, was_up);
2660 	} else {
2661 		/* If the dev wasn't up or close worked, finish tearing down */
2662 		gve_teardown_priv_resources(priv);
2663 	}
2664 	priv->up_before_suspend = was_up;
2665 	rtnl_unlock();
2666 	return 0;
2667 }
2668 
2669 static int gve_resume(struct pci_dev *pdev)
2670 {
2671 	struct net_device *netdev = pci_get_drvdata(pdev);
2672 	struct gve_priv *priv = netdev_priv(netdev);
2673 	int err;
2674 
2675 	priv->resume_cnt++;
2676 	rtnl_lock();
2677 	err = gve_reset_recovery(priv, priv->up_before_suspend);
2678 	rtnl_unlock();
2679 	return err;
2680 }
2681 #endif /* CONFIG_PM */
2682 
2683 static const struct pci_device_id gve_id_table[] = {
2684 	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
2685 	{ }
2686 };
2687 
2688 static struct pci_driver gve_driver = {
2689 	.name		= gve_driver_name,
2690 	.id_table	= gve_id_table,
2691 	.probe		= gve_probe,
2692 	.remove		= gve_remove,
2693 	.shutdown	= gve_shutdown,
2694 #ifdef CONFIG_PM
2695 	.suspend        = gve_suspend,
2696 	.resume         = gve_resume,
2697 #endif
2698 };
2699 
2700 module_pci_driver(gve_driver);
2701 
2702 MODULE_DEVICE_TABLE(pci, gve_id_table);
2703 MODULE_AUTHOR("Google, Inc.");
2704 MODULE_DESCRIPTION("Google Virtual NIC Driver");
2705 MODULE_LICENSE("Dual MIT/GPL");
2706 MODULE_VERSION(GVE_VERSION);
2707