xref: /linux/drivers/net/ethernet/google/gve/gve_main.c (revision 67f49869106f78882a8a09b736d4884be85aba18)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <linux/utsname.h>
16 #include <linux/version.h>
17 #include <net/sch_generic.h>
18 #include "gve.h"
19 #include "gve_dqo.h"
20 #include "gve_adminq.h"
21 #include "gve_register.h"
22 
23 #define GVE_DEFAULT_RX_COPYBREAK	(256)
24 
25 #define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
26 #define GVE_VERSION		"1.0.0"
27 #define GVE_VERSION_PREFIX	"GVE-"
28 
29 /* Minimum amount of time between queue kicks in msec (10 seconds) */
30 #define MIN_TX_TIMEOUT_GAP (1000 * 10)
31 
32 const char gve_version_str[] = GVE_VERSION;
33 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
34 
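/* Report driver/OS compatibility information to the device: a
 * gve_driver_info blob (OS type, kernel version strings, capability
 * flags) is placed in DMA-coherent memory and handed to the device over
 * the admin queue.  A device that does not implement the command returns
 * -EOPNOTSUPP, which is treated as success.
 */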
35 static int gve_verify_driver_compatibility(struct gve_priv *priv)
36 {
37 	int err;
38 	struct gve_driver_info *driver_info;
39 	dma_addr_t driver_info_bus;
40 
41 	driver_info = dma_alloc_coherent(&priv->pdev->dev,
42 					 sizeof(struct gve_driver_info),
43 					 &driver_info_bus, GFP_KERNEL);
44 	if (!driver_info)
45 		return -ENOMEM;
46 
47 	*driver_info = (struct gve_driver_info) {
48 		.os_type = 1, /* Linux */
49 		.os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR),
50 		.os_version_minor = cpu_to_be32(LINUX_VERSION_PATCHLEVEL),
51 		.os_version_sub = cpu_to_be32(LINUX_VERSION_SUBLEVEL),
52 		.driver_capability_flags = {
53 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
54 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
55 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
56 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
57 		},
58 	};
59 	strscpy(driver_info->os_version_str1, utsname()->release,
60 		sizeof(driver_info->os_version_str1));
61 	strscpy(driver_info->os_version_str2, utsname()->version,
62 		sizeof(driver_info->os_version_str2));
63 
64 	err = gve_adminq_verify_driver_compatibility(priv,
65 						     sizeof(struct gve_driver_info),
66 						     driver_info_bus);
67 
68 	/* It's ok if the device doesn't support this */
69 	if (err == -EOPNOTSUPP)
70 		err = 0;
71 
72 	dma_free_coherent(&priv->pdev->dev,
73 			  sizeof(struct gve_driver_info),
74 			  driver_info, driver_info_bus);
75 	return err;
76 }
77 
78 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
79 {
80 	struct gve_priv *priv = netdev_priv(dev);
81 
82 	if (gve_is_gqi(priv))
83 		return gve_tx(skb, dev);
84 	else
85 		return gve_tx_dqo(skb, dev);
86 }
87 
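/* ndo_get_stats64 callback: fold the per-ring packet and byte counters
 * into the rtnl stats, using the u64_stats seqcount retry loop to get a
 * consistent snapshot of each ring's counters.
 */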
88 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
89 {
90 	struct gve_priv *priv = netdev_priv(dev);
91 	unsigned int start;
92 	u64 packets, bytes;
93 	int ring;
94 
95 	if (priv->rx) {
96 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
97 			do {
98 				start =
99 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
100 				packets = priv->rx[ring].rpackets;
101 				bytes = priv->rx[ring].rbytes;
102 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
103 						       start));
104 			s->rx_packets += packets;
105 			s->rx_bytes += bytes;
106 		}
107 	}
108 	if (priv->tx) {
109 		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
110 			do {
111 				start =
112 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
113 				packets = priv->tx[ring].pkt_done;
114 				bytes = priv->tx[ring].bytes_done;
115 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
116 						       start));
117 			s->tx_packets += packets;
118 			s->tx_bytes += bytes;
119 		}
120 	}
121 }
122 
123 static int gve_alloc_counter_array(struct gve_priv *priv)
124 {
125 	priv->counter_array =
126 		dma_alloc_coherent(&priv->pdev->dev,
127 				   priv->num_event_counters *
128 				   sizeof(*priv->counter_array),
129 				   &priv->counter_array_bus, GFP_KERNEL);
130 	if (!priv->counter_array)
131 		return -ENOMEM;
132 
133 	return 0;
134 }
135 
136 static void gve_free_counter_array(struct gve_priv *priv)
137 {
138 	if (!priv->counter_array)
139 		return;
140 
141 	dma_free_coherent(&priv->pdev->dev,
142 			  priv->num_event_counters *
143 			  sizeof(*priv->counter_array),
144 			  priv->counter_array, priv->counter_array_bus);
145 	priv->counter_array = NULL;
146 }
147 
148 /* NIC requests to report stats */
149 static void gve_stats_report_task(struct work_struct *work)
150 {
151 	struct gve_priv *priv = container_of(work, struct gve_priv,
152 					     stats_report_task);
153 	if (gve_get_do_report_stats(priv)) {
154 		gve_handle_report_stats(priv);
155 		gve_clear_do_report_stats(priv);
156 	}
157 }
158 
159 static void gve_stats_report_schedule(struct gve_priv *priv)
160 {
161 	if (!gve_get_probe_in_progress(priv) &&
162 	    !gve_get_reset_in_progress(priv)) {
163 		gve_set_do_report_stats(priv);
164 		queue_work(priv->gve_wq, &priv->stats_report_task);
165 	}
166 }
167 
168 static void gve_stats_report_timer(struct timer_list *t)
169 {
170 	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
171 
172 	mod_timer(&priv->stats_report_timer,
173 		  round_jiffies(jiffies +
174 		  msecs_to_jiffies(priv->stats_report_timer_period)));
175 	gve_stats_report_schedule(priv);
176 }
177 
178 static int gve_alloc_stats_report(struct gve_priv *priv)
179 {
180 	int tx_stats_num, rx_stats_num;
181 
182 	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
183 		       priv->tx_cfg.num_queues;
184 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
185 		       priv->rx_cfg.num_queues;
186 	priv->stats_report_len = struct_size(priv->stats_report, stats,
187 					     tx_stats_num + rx_stats_num);
188 	priv->stats_report =
189 		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
190 				   &priv->stats_report_bus, GFP_KERNEL);
191 	if (!priv->stats_report)
192 		return -ENOMEM;
193 	/* Set up timer for the report-stats task */
194 	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
195 	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
196 	return 0;
197 }
198 
199 static void gve_free_stats_report(struct gve_priv *priv)
200 {
201 	if (!priv->stats_report)
202 		return;
203 
204 	del_timer_sync(&priv->stats_report_timer);
205 	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
206 			  priv->stats_report, priv->stats_report_bus);
207 	priv->stats_report = NULL;
208 }
209 
210 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
211 {
212 	struct gve_priv *priv = arg;
213 
214 	queue_work(priv->gve_wq, &priv->service_task);
215 	return IRQ_HANDLED;
216 }
217 
218 static irqreturn_t gve_intr(int irq, void *arg)
219 {
220 	struct gve_notify_block *block = arg;
221 	struct gve_priv *priv = block->priv;
222 
223 	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
224 	napi_schedule_irqoff(&block->napi);
225 	return IRQ_HANDLED;
226 }
227 
228 static irqreturn_t gve_intr_dqo(int irq, void *arg)
229 {
230 	struct gve_notify_block *block = arg;
231 
232 	/* Interrupts are automatically masked */
233 	napi_schedule_irqoff(&block->napi);
234 	return IRQ_HANDLED;
235 }
236 
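/* NAPI poll for the GQI queue format.  TX and RX work for this notify
 * block is processed; if the budget is not exhausted the IRQ is ACKed
 * and unmasked via its doorbell, and if new work arrived after the ACK
 * the IRQ is re-masked and NAPI rescheduled.
 */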
237 static int gve_napi_poll(struct napi_struct *napi, int budget)
238 {
239 	struct gve_notify_block *block;
240 	__be32 __iomem *irq_doorbell;
241 	bool reschedule = false;
242 	struct gve_priv *priv;
243 	int work_done = 0;
244 
245 	block = container_of(napi, struct gve_notify_block, napi);
246 	priv = block->priv;
247 
248 	if (block->tx)
249 		reschedule |= gve_tx_poll(block, budget);
250 	if (block->rx) {
251 		work_done = gve_rx_poll(block, budget);
252 		reschedule |= work_done == budget;
253 	}
254 
255 	if (reschedule)
256 		return budget;
257 
258 	/* Complete processing - don't unmask irq if busy polling is enabled */
259 	if (likely(napi_complete_done(napi, work_done))) {
260 		irq_doorbell = gve_irq_doorbell(priv, block);
261 		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
262 
263 		/* Ensure the IRQ ACK write is ordered before the pending-work
264 		 * checks below, so updates the device posted meanwhile are seen.
265 		 */
266 		mb();
267 
268 		if (block->tx)
269 			reschedule |= gve_tx_clean_pending(priv, block->tx);
270 		if (block->rx)
271 			reschedule |= gve_rx_work_pending(block->rx);
272 
273 		if (reschedule && napi_reschedule(napi))
274 			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
275 	}
276 	return work_done;
277 }
278 
279 static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
280 {
281 	struct gve_notify_block *block =
282 		container_of(napi, struct gve_notify_block, napi);
283 	struct gve_priv *priv = block->priv;
284 	bool reschedule = false;
285 	int work_done = 0;
286 
287 	/* Clear PCI MSI-X Pending Bit Array (PBA)
288 	 *
289 	 * This bit is set if an interrupt event occurs while the vector is
290 	 * masked. If this bit is set and we reenable the interrupt, it will
291 	 * fire again. Since we're just about to poll the queue state, we don't
292 	 * need it to fire again.
293 	 *
294 	 * Under high softirq load, it's possible that the interrupt condition
295 	 * is triggered twice before we got the chance to process it.
296 	 */
297 	gve_write_irq_doorbell_dqo(priv, block,
298 				   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
299 
300 	if (block->tx)
301 		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
302 
303 	if (block->rx) {
304 		work_done = gve_rx_poll_dqo(block, budget);
305 		reschedule |= work_done == budget;
306 	}
307 
308 	if (reschedule)
309 		return budget;
310 
311 	if (likely(napi_complete_done(napi, work_done))) {
312 		/* Enable interrupts again.
313 		 *
314 		 * We don't need to repoll afterwards because HW supports the
315 		 * PCI MSI-X PBA feature.
316 		 *
317 		 * Another interrupt would be triggered if a new event came in
318 		 * since the last one.
319 		 */
320 		gve_write_irq_doorbell_dqo(priv, block,
321 					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
322 	}
323 
324 	return work_done;
325 }
326 
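/* Allocate MSI-X vectors and notification blocks: one vector per block
 * plus one management vector.  If fewer vectors are granted than
 * requested, the block count and the TX/RX max queue counts are scaled
 * down to fit.  The management IRQ, the per-block IRQs (with CPU
 * affinity hints) and the DMA region holding the IRQ doorbell indices
 * are all set up here.
 */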
327 static int gve_alloc_notify_blocks(struct gve_priv *priv)
328 {
329 	int num_vecs_requested = priv->num_ntfy_blks + 1;
330 	char *name = priv->dev->name;
331 	unsigned int active_cpus;
332 	int vecs_enabled;
333 	int i, j;
334 	int err;
335 
336 	priv->msix_vectors = kvcalloc(num_vecs_requested,
337 				      sizeof(*priv->msix_vectors), GFP_KERNEL);
338 	if (!priv->msix_vectors)
339 		return -ENOMEM;
340 	for (i = 0; i < num_vecs_requested; i++)
341 		priv->msix_vectors[i].entry = i;
342 	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
343 					     GVE_MIN_MSIX, num_vecs_requested);
344 	if (vecs_enabled < 0) {
345 		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
346 			GVE_MIN_MSIX, vecs_enabled);
347 		err = vecs_enabled;
348 		goto abort_with_msix_vectors;
349 	}
350 	if (vecs_enabled != num_vecs_requested) {
351 		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
352 		int vecs_per_type = new_num_ntfy_blks / 2;
353 		int vecs_left = new_num_ntfy_blks % 2;
354 
355 		priv->num_ntfy_blks = new_num_ntfy_blks;
356 		priv->mgmt_msix_idx = priv->num_ntfy_blks;
357 		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
358 						vecs_per_type);
359 		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
360 						vecs_per_type + vecs_left);
361 		dev_err(&priv->pdev->dev,
362 			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
363 			vecs_enabled, priv->tx_cfg.max_queues,
364 			priv->rx_cfg.max_queues);
365 		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
366 			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
367 		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
368 			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
369 	}
370 	/* Half the notification blocks go to TX and half to RX */
371 	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
372 
373 	/* Setup Management Vector - the last vector */
374 	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
375 		 name);
376 	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
377 			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
378 	if (err) {
379 		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
380 		goto abort_with_msix_enabled;
381 	}
382 	priv->irq_db_indices =
383 		dma_alloc_coherent(&priv->pdev->dev,
384 				   priv->num_ntfy_blks *
385 				   sizeof(*priv->irq_db_indices),
386 				   &priv->irq_db_indices_bus, GFP_KERNEL);
387 	if (!priv->irq_db_indices) {
388 		err = -ENOMEM;
389 		goto abort_with_mgmt_vector;
390 	}
391 
392 	priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
393 				     sizeof(*priv->ntfy_blocks), GFP_KERNEL);
394 	if (!priv->ntfy_blocks) {
395 		err = -ENOMEM;
396 		goto abort_with_irq_db_indices;
397 	}
398 
399 	/* Setup the other blocks - the first n-1 vectors */
400 	for (i = 0; i < priv->num_ntfy_blks; i++) {
401 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
402 		int msix_idx = i;
403 
404 		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
405 			 name, i);
406 		block->priv = priv;
407 		err = request_irq(priv->msix_vectors[msix_idx].vector,
408 				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
409 				  0, block->name, block);
410 		if (err) {
411 			dev_err(&priv->pdev->dev,
412 				"Failed to receive msix vector %d\n", i);
413 			goto abort_with_some_ntfy_blocks;
414 		}
415 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
416 				      get_cpu_mask(i % active_cpus));
417 		block->irq_db_index = &priv->irq_db_indices[i].index;
418 	}
419 	return 0;
420 abort_with_some_ntfy_blocks:
421 	for (j = 0; j < i; j++) {
422 		struct gve_notify_block *block = &priv->ntfy_blocks[j];
423 		int msix_idx = j;
424 
425 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
426 				      NULL);
427 		free_irq(priv->msix_vectors[msix_idx].vector, block);
428 	}
429 	kvfree(priv->ntfy_blocks);
430 	priv->ntfy_blocks = NULL;
431 abort_with_irq_db_indices:
432 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
433 			  sizeof(*priv->irq_db_indices),
434 			  priv->irq_db_indices, priv->irq_db_indices_bus);
435 	priv->irq_db_indices = NULL;
436 abort_with_mgmt_vector:
437 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
438 abort_with_msix_enabled:
439 	pci_disable_msix(priv->pdev);
440 abort_with_msix_vectors:
441 	kvfree(priv->msix_vectors);
442 	priv->msix_vectors = NULL;
443 	return err;
444 }
445 
446 static void gve_free_notify_blocks(struct gve_priv *priv)
447 {
448 	int i;
449 
450 	if (!priv->msix_vectors)
451 		return;
452 
453 	/* Free the irqs */
454 	for (i = 0; i < priv->num_ntfy_blks; i++) {
455 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
456 		int msix_idx = i;
457 
458 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
459 				      NULL);
460 		free_irq(priv->msix_vectors[msix_idx].vector, block);
461 	}
462 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
463 	kvfree(priv->ntfy_blocks);
464 	priv->ntfy_blocks = NULL;
465 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
466 			  sizeof(*priv->irq_db_indices),
467 			  priv->irq_db_indices, priv->irq_db_indices_bus);
468 	priv->irq_db_indices = NULL;
469 	pci_disable_msix(priv->pdev);
470 	kvfree(priv->msix_vectors);
471 	priv->msix_vectors = NULL;
472 }
473 
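/* Allocate the resources the device needs to run (event counter array,
 * notification blocks, stats report) and describe them to the device
 * over the admin queue.  For the DQO-RDA queue format the packet type
 * lookup table is also fetched here.
 */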
474 static int gve_setup_device_resources(struct gve_priv *priv)
475 {
476 	int err;
477 
478 	err = gve_alloc_counter_array(priv);
479 	if (err)
480 		return err;
481 	err = gve_alloc_notify_blocks(priv);
482 	if (err)
483 		goto abort_with_counter;
484 	err = gve_alloc_stats_report(priv);
485 	if (err)
486 		goto abort_with_ntfy_blocks;
487 	err = gve_adminq_configure_device_resources(priv,
488 						    priv->counter_array_bus,
489 						    priv->num_event_counters,
490 						    priv->irq_db_indices_bus,
491 						    priv->num_ntfy_blks);
492 	if (unlikely(err)) {
493 		dev_err(&priv->pdev->dev,
494 			"could not setup device_resources: err=%d\n", err);
495 		err = -ENXIO;
496 		goto abort_with_stats_report;
497 	}
498 
499 	if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
500 		priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
501 					       GFP_KERNEL);
502 		if (!priv->ptype_lut_dqo) {
503 			err = -ENOMEM;
504 			goto abort_with_stats_report;
505 		}
506 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
507 		if (err) {
508 			dev_err(&priv->pdev->dev,
509 				"Failed to get ptype map: err=%d\n", err);
510 			goto abort_with_ptype_lut;
511 		}
512 	}
513 
514 	err = gve_adminq_report_stats(priv, priv->stats_report_len,
515 				      priv->stats_report_bus,
516 				      GVE_STATS_REPORT_TIMER_PERIOD);
517 	if (err)
518 		dev_err(&priv->pdev->dev,
519 			"Failed to report stats: err=%d\n", err);
520 	gve_set_device_resources_ok(priv);
521 	return 0;
522 
523 abort_with_ptype_lut:
524 	kvfree(priv->ptype_lut_dqo);
525 	priv->ptype_lut_dqo = NULL;
526 abort_with_stats_report:
527 	gve_free_stats_report(priv);
528 abort_with_ntfy_blocks:
529 	gve_free_notify_blocks(priv);
530 abort_with_counter:
531 	gve_free_counter_array(priv);
532 
533 	return err;
534 }
535 
536 static void gve_trigger_reset(struct gve_priv *priv);
537 
538 static void gve_teardown_device_resources(struct gve_priv *priv)
539 {
540 	int err;
541 
542 	/* Tell device its resources are being freed */
543 	if (gve_get_device_resources_ok(priv)) {
544 		/* detach the stats report */
545 		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
546 		if (err) {
547 			dev_err(&priv->pdev->dev,
548 				"Failed to detach stats report: err=%d\n", err);
549 			gve_trigger_reset(priv);
550 		}
551 		err = gve_adminq_deconfigure_device_resources(priv);
552 		if (err) {
553 			dev_err(&priv->pdev->dev,
554 				"Could not deconfigure device resources: err=%d\n",
555 				err);
556 			gve_trigger_reset(priv);
557 		}
558 	}
559 
560 	kvfree(priv->ptype_lut_dqo);
561 	priv->ptype_lut_dqo = NULL;
562 
563 	gve_free_counter_array(priv);
564 	gve_free_notify_blocks(priv);
565 	gve_free_stats_report(priv);
566 	gve_clear_device_resources_ok(priv);
567 }
568 
569 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
570 			 int (*gve_poll)(struct napi_struct *, int))
571 {
572 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
573 
574 	netif_napi_add(priv->dev, &block->napi, gve_poll);
575 }
576 
577 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
578 {
579 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
580 
581 	netif_napi_del(&block->napi);
582 }
583 
584 static int gve_register_qpls(struct gve_priv *priv)
585 {
586 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
587 	int err;
588 	int i;
589 
590 	for (i = 0; i < num_qpls; i++) {
591 		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
592 		if (err) {
593 			netif_err(priv, drv, priv->dev,
594 				  "failed to register queue page list %d\n",
595 				  priv->qpls[i].id);
596 			/* This failure will trigger a reset - no need to clean
597 			 * up
598 			 */
599 			return err;
600 		}
601 	}
602 	return 0;
603 }
604 
605 static int gve_unregister_qpls(struct gve_priv *priv)
606 {
607 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
608 	int err;
609 	int i;
610 
611 	for (i = 0; i < num_qpls; i++) {
612 		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
613 		/* This failure will trigger a reset - no need to clean up */
614 		if (err) {
615 			netif_err(priv, drv, priv->dev,
616 				  "Failed to unregister queue page list %d\n",
617 				  priv->qpls[i].id);
618 			return err;
619 		}
620 	}
621 	return 0;
622 }
623 
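/* Create the TX and RX queues on the device via the admin queue, then
 * hand the initial RX buffers to the NIC: GQI rings were prefilled at
 * allocation time and only need a doorbell write, while DQO rings have
 * their buffers posted here.
 */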
624 static int gve_create_rings(struct gve_priv *priv)
625 {
626 	int err;
627 	int i;
628 
629 	err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
630 	if (err) {
631 		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
632 			  priv->tx_cfg.num_queues);
633 		/* This failure will trigger a reset - no need to clean
634 		 * up
635 		 */
636 		return err;
637 	}
638 	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
639 		  priv->tx_cfg.num_queues);
640 
641 	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
642 	if (err) {
643 		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
644 			  priv->rx_cfg.num_queues);
645 		/* This failure will trigger a reset - no need to clean
646 		 * up
647 		 */
648 		return err;
649 	}
650 	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
651 		  priv->rx_cfg.num_queues);
652 
653 	if (gve_is_gqi(priv)) {
654 		/* Rx data ring has been prefilled with packet buffers at queue
655 		 * allocation time.
656 		 *
657 		 * Write the doorbell to provide descriptor slots and packet
658 		 * buffers to the NIC.
659 		 */
660 		for (i = 0; i < priv->rx_cfg.num_queues; i++)
661 			gve_rx_write_doorbell(priv, &priv->rx[i]);
662 	} else {
663 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
664 			/* Post buffers and ring doorbell. */
665 			gve_rx_post_buffers_dqo(&priv->rx[i]);
666 		}
667 	}
668 
669 	return 0;
670 }
671 
672 static void add_napi_init_sync_stats(struct gve_priv *priv,
673 				     int (*napi_poll)(struct napi_struct *napi,
674 						      int budget))
675 {
676 	int i;
677 
678 	/* Add tx napi & init sync stats */
679 	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
680 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
681 
682 		u64_stats_init(&priv->tx[i].statss);
683 		priv->tx[i].ntfy_id = ntfy_idx;
684 		gve_add_napi(priv, ntfy_idx, napi_poll);
685 	}
686 	/* Add rx napi & init sync stats */
687 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
688 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
689 
690 		u64_stats_init(&priv->rx[i].statss);
691 		priv->rx[i].ntfy_id = ntfy_idx;
692 		gve_add_napi(priv, ntfy_idx, napi_poll);
693 	}
694 }
695 
696 static void gve_tx_free_rings(struct gve_priv *priv)
697 {
698 	if (gve_is_gqi(priv)) {
699 		gve_tx_free_rings_gqi(priv);
700 	} else {
701 		gve_tx_free_rings_dqo(priv);
702 	}
703 }
704 
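/* Allocate the host-side TX and RX ring state (queue-format specific),
 * then register a NAPI instance and initialize the stats seqcount for
 * each ring.
 */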
705 static int gve_alloc_rings(struct gve_priv *priv)
706 {
707 	int err;
708 
709 	/* Setup tx rings */
710 	priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
711 			    GFP_KERNEL);
712 	if (!priv->tx)
713 		return -ENOMEM;
714 
715 	if (gve_is_gqi(priv))
716 		err = gve_tx_alloc_rings(priv);
717 	else
718 		err = gve_tx_alloc_rings_dqo(priv);
719 	if (err)
720 		goto free_tx;
721 
722 	/* Setup rx rings */
723 	priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
724 			    GFP_KERNEL);
725 	if (!priv->rx) {
726 		err = -ENOMEM;
727 		goto free_tx_queue;
728 	}
729 
730 	if (gve_is_gqi(priv))
731 		err = gve_rx_alloc_rings(priv);
732 	else
733 		err = gve_rx_alloc_rings_dqo(priv);
734 	if (err)
735 		goto free_rx;
736 
737 	if (gve_is_gqi(priv))
738 		add_napi_init_sync_stats(priv, gve_napi_poll);
739 	else
740 		add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
741 
742 	return 0;
743 
744 free_rx:
745 	kvfree(priv->rx);
746 	priv->rx = NULL;
747 free_tx_queue:
748 	gve_tx_free_rings(priv);
749 free_tx:
750 	kvfree(priv->tx);
751 	priv->tx = NULL;
752 	return err;
753 }
754 
755 static int gve_destroy_rings(struct gve_priv *priv)
756 {
757 	int err;
758 
759 	err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
760 	if (err) {
761 		netif_err(priv, drv, priv->dev,
762 			  "failed to destroy tx queues\n");
763 		/* This failure will trigger a reset - no need to clean up */
764 		return err;
765 	}
766 	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
767 	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
768 	if (err) {
769 		netif_err(priv, drv, priv->dev,
770 			  "failed to destroy rx queues\n");
771 		/* This failure will trigger a reset - no need to clean up */
772 		return err;
773 	}
774 	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
775 	return 0;
776 }
777 
778 static void gve_rx_free_rings(struct gve_priv *priv)
779 {
780 	if (gve_is_gqi(priv))
781 		gve_rx_free_rings_gqi(priv);
782 	else
783 		gve_rx_free_rings_dqo(priv);
784 }
785 
786 static void gve_free_rings(struct gve_priv *priv)
787 {
788 	int ntfy_idx;
789 	int i;
790 
791 	if (priv->tx) {
792 		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
793 			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
794 			gve_remove_napi(priv, ntfy_idx);
795 		}
796 		gve_tx_free_rings(priv);
797 		kvfree(priv->tx);
798 		priv->tx = NULL;
799 	}
800 	if (priv->rx) {
801 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
802 			ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
803 			gve_remove_napi(priv, ntfy_idx);
804 		}
805 		gve_rx_free_rings(priv);
806 		kvfree(priv->rx);
807 		priv->rx = NULL;
808 	}
809 }
810 
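/* Allocate a page and DMA-map it in the given direction.  On allocation
 * or mapping failure the corresponding error counter in priv is bumped
 * and -ENOMEM is returned.
 */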
811 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
812 		   struct page **page, dma_addr_t *dma,
813 		   enum dma_data_direction dir, gfp_t gfp_flags)
814 {
815 	*page = alloc_page(gfp_flags);
816 	if (!*page) {
817 		priv->page_alloc_fail++;
818 		return -ENOMEM;
819 	}
820 	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
821 	if (dma_mapping_error(dev, *dma)) {
822 		priv->dma_mapping_error++;
823 		put_page(*page);
824 		return -ENOMEM;
825 	}
826 	return 0;
827 }
828 
829 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
830 				     int pages)
831 {
832 	struct gve_queue_page_list *qpl = &priv->qpls[id];
833 	int err;
834 	int i;
835 
836 	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
837 		netif_err(priv, drv, priv->dev,
838 			  "Reached max number of registered pages %llu > %llu\n",
839 			  pages + priv->num_registered_pages,
840 			  priv->max_registered_pages);
841 		return -EINVAL;
842 	}
843 
844 	qpl->id = id;
845 	qpl->num_entries = 0;
846 	qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
847 	/* caller handles clean up */
848 	if (!qpl->pages)
849 		return -ENOMEM;
850 	qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
851 	/* caller handles clean up */
852 	if (!qpl->page_buses)
853 		return -ENOMEM;
854 
855 	for (i = 0; i < pages; i++) {
856 		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
857 				     &qpl->page_buses[i],
858 				     gve_qpl_dma_dir(priv, id), GFP_KERNEL);
859 		/* caller handles clean up */
860 		if (err)
861 			return -ENOMEM;
862 		qpl->num_entries++;
863 	}
864 	priv->num_registered_pages += pages;
865 
866 	return 0;
867 }
868 
869 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
870 		   enum dma_data_direction dir)
871 {
872 	if (!dma_mapping_error(dev, dma))
873 		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
874 	if (page)
875 		put_page(page);
876 }
877 
878 static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
879 {
880 	struct gve_queue_page_list *qpl = &priv->qpls[id];
881 	int i;
882 
883 	if (!qpl->pages)
884 		return;
885 	if (!qpl->page_buses)
886 		goto free_pages;
887 
888 	for (i = 0; i < qpl->num_entries; i++)
889 		gve_free_page(&priv->pdev->dev, qpl->pages[i],
890 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
891 
892 	kvfree(qpl->page_buses);
893 free_pages:
894 	kvfree(qpl->pages);
895 	priv->num_registered_pages -= qpl->num_entries;
896 }
897 
898 static int gve_alloc_qpls(struct gve_priv *priv)
899 {
900 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
901 	int i, j;
902 	int err;
903 
904 	if (num_qpls == 0)
905 		return 0;
906 
907 	priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
908 	if (!priv->qpls)
909 		return -ENOMEM;
910 
911 	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
912 		err = gve_alloc_queue_page_list(priv, i,
913 						priv->tx_pages_per_qpl);
914 		if (err)
915 			goto free_qpls;
916 	}
917 	for (; i < num_qpls; i++) {
918 		err = gve_alloc_queue_page_list(priv, i,
919 						priv->rx_data_slot_cnt);
920 		if (err)
921 			goto free_qpls;
922 	}
923 
924 	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
925 				     sizeof(unsigned long) * BITS_PER_BYTE;
926 	priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
927 					    sizeof(unsigned long), GFP_KERNEL);
928 	if (!priv->qpl_cfg.qpl_id_map) {
929 		err = -ENOMEM;
930 		goto free_qpls;
931 	}
932 
933 	return 0;
934 
935 free_qpls:
936 	for (j = 0; j <= i && j < num_qpls; j++)
937 		gve_free_queue_page_list(priv, j);
938 	kvfree(priv->qpls);
939 	return err;
940 }
941 
942 static void gve_free_qpls(struct gve_priv *priv)
943 {
944 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
945 	int i;
946 
947 	if (num_qpls == 0)
948 		return;
949 
950 	kvfree(priv->qpl_cfg.qpl_id_map);
951 
952 	for (i = 0; i < num_qpls; i++)
953 		gve_free_queue_page_list(priv, i);
954 
955 	kvfree(priv->qpls);
956 }
957 
958 /* Use this to schedule a reset when the device is capable of continuing
959  * to handle other requests in its current state. If it is not, do a reset
960  * in thread instead.
961  */
962 void gve_schedule_reset(struct gve_priv *priv)
963 {
964 	gve_set_do_reset(priv);
965 	queue_work(priv->gve_wq, &priv->service_task);
966 }
967 
968 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
969 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
970 static void gve_turndown(struct gve_priv *priv);
971 static void gve_turnup(struct gve_priv *priv);
972 
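/* ndo_open: allocate queue page lists and rings, register the QPLs and
 * create the queues on the device, then turn the data path up.  If an
 * admin queue command fails, the device is reset before returning unless
 * a reset is already in progress.
 */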
973 static int gve_open(struct net_device *dev)
974 {
975 	struct gve_priv *priv = netdev_priv(dev);
976 	int err;
977 
978 	err = gve_alloc_qpls(priv);
979 	if (err)
980 		return err;
981 
982 	err = gve_alloc_rings(priv);
983 	if (err)
984 		goto free_qpls;
985 
986 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
987 	if (err)
988 		goto free_rings;
989 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
990 	if (err)
991 		goto free_rings;
992 
993 	err = gve_register_qpls(priv);
994 	if (err)
995 		goto reset;
996 
997 	if (!gve_is_gqi(priv)) {
998 		/* Hard code this for now. This may be tuned in the future for
999 		 * performance.
1000 		 */
1001 		priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
1002 	}
1003 	err = gve_create_rings(priv);
1004 	if (err)
1005 		goto reset;
1006 
1007 	gve_set_device_rings_ok(priv);
1008 
1009 	if (gve_get_report_stats(priv))
1010 		mod_timer(&priv->stats_report_timer,
1011 			  round_jiffies(jiffies +
1012 				msecs_to_jiffies(priv->stats_report_timer_period)));
1013 
1014 	gve_turnup(priv);
1015 	queue_work(priv->gve_wq, &priv->service_task);
1016 	priv->interface_up_cnt++;
1017 	return 0;
1018 
1019 free_rings:
1020 	gve_free_rings(priv);
1021 free_qpls:
1022 	gve_free_qpls(priv);
1023 	return err;
1024 
1025 reset:
1026 	/* This must have been called from a reset due to the rtnl lock
1027 	 * so just return at this point.
1028 	 */
1029 	if (gve_get_reset_in_progress(priv))
1030 		return err;
1031 	/* Otherwise reset before returning */
1032 	gve_reset_and_teardown(priv, true);
1033 	/* if this fails there is nothing we can do so just ignore the return */
1034 	gve_reset_recovery(priv, false);
1035 	/* return the original error */
1036 	return err;
1037 }
1038 
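/* ndo_stop: quiesce the data path, destroy the device queues and
 * unregister the QPLs, then free the host-side rings and queue page
 * lists.  Admin queue failures on the way down trigger a reset.
 */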
1039 static int gve_close(struct net_device *dev)
1040 {
1041 	struct gve_priv *priv = netdev_priv(dev);
1042 	int err;
1043 
1044 	netif_carrier_off(dev);
1045 	if (gve_get_device_rings_ok(priv)) {
1046 		gve_turndown(priv);
1047 		err = gve_destroy_rings(priv);
1048 		if (err)
1049 			goto err;
1050 		err = gve_unregister_qpls(priv);
1051 		if (err)
1052 			goto err;
1053 		gve_clear_device_rings_ok(priv);
1054 	}
1055 	del_timer_sync(&priv->stats_report_timer);
1056 
1057 	gve_free_rings(priv);
1058 	gve_free_qpls(priv);
1059 	priv->interface_down_cnt++;
1060 	return 0;
1061 
1062 err:
1063 	/* This must have been called from a reset due to the rtnl lock
1064 	 * so just return at this point.
1065 	 */
1066 	if (gve_get_reset_in_progress(priv))
1067 		return err;
1068 	/* Otherwise reset before returning */
1069 	gve_reset_and_teardown(priv, true);
1070 	return gve_reset_recovery(priv, false);
1071 }
1072 
1073 int gve_adjust_queues(struct gve_priv *priv,
1074 		      struct gve_queue_config new_rx_config,
1075 		      struct gve_queue_config new_tx_config)
1076 {
1077 	int err;
1078 
1079 	if (netif_carrier_ok(priv->dev)) {
1080 		/* To make this process as simple as possible we teardown the
1081 		 * device, set the new configuration, and then bring the device
1082 		 * up again.
1083 		 */
1084 		err = gve_close(priv->dev);
1085 		/* we have already tried to reset in close,
1086 		 * just fail at this point
1087 		 */
1088 		if (err)
1089 			return err;
1090 		priv->tx_cfg = new_tx_config;
1091 		priv->rx_cfg = new_rx_config;
1092 
1093 		err = gve_open(priv->dev);
1094 		if (err)
1095 			goto err;
1096 
1097 		return 0;
1098 	}
1099 	/* Set the config for the next up. */
1100 	priv->tx_cfg = new_tx_config;
1101 	priv->rx_cfg = new_rx_config;
1102 
1103 	return 0;
1104 err:
1105 	netif_err(priv, drv, priv->dev,
1106 		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
1107 	gve_turndown(priv);
1108 	return err;
1109 }
1110 
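/* Quiesce the data path: turn the carrier off, disable NAPI on every
 * notify block and stop the TX queues.  Safe to call when already
 * turned down.
 */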
1111 static void gve_turndown(struct gve_priv *priv)
1112 {
1113 	int idx;
1114 
1115 	if (netif_carrier_ok(priv->dev))
1116 		netif_carrier_off(priv->dev);
1117 
1118 	if (!gve_get_napi_enabled(priv))
1119 		return;
1120 
1121 	/* Disable napi to prevent more work from coming in */
1122 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1123 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1124 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1125 
1126 		napi_disable(&block->napi);
1127 	}
1128 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1129 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1130 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1131 
1132 		napi_disable(&block->napi);
1133 	}
1134 
1135 	/* Stop tx queues */
1136 	netif_tx_disable(priv->dev);
1137 
1138 	gve_clear_napi_enabled(priv);
1139 	gve_clear_report_stats(priv);
1140 }
1141 
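/* Restart the data path: start the TX queues, enable NAPI and unmask
 * the interrupts, either by zeroing the GQI doorbell or by programming
 * the DQO interrupt moderation with the configured coalescing values.
 */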
1142 static void gve_turnup(struct gve_priv *priv)
1143 {
1144 	int idx;
1145 
1146 	/* Start the tx queues */
1147 	netif_tx_start_all_queues(priv->dev);
1148 
1149 	/* Enable napi and unmask interrupts for all queues */
1150 	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1151 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1152 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1153 
1154 		napi_enable(&block->napi);
1155 		if (gve_is_gqi(priv)) {
1156 			iowrite32be(0, gve_irq_doorbell(priv, block));
1157 		} else {
1158 			gve_set_itr_coalesce_usecs_dqo(priv, block,
1159 						       priv->tx_coalesce_usecs);
1160 		}
1161 	}
1162 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1163 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1164 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1165 
1166 		napi_enable(&block->napi);
1167 		if (gve_is_gqi(priv)) {
1168 			iowrite32be(0, gve_irq_doorbell(priv, block));
1169 		} else {
1170 			gve_set_itr_coalesce_usecs_dqo(priv, block,
1171 						       priv->rx_coalesce_usecs);
1172 		}
1173 	}
1174 
1175 	gve_set_napi_enabled(priv);
1176 }
1177 
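/* ndo_tx_timeout: if the NIC has completions the driver has not yet
 * processed, mask the queue's IRQ and schedule NAPI to "kick" it, at
 * most once per MIN_TX_TIMEOUT_GAP per queue.  Otherwise schedule a
 * full device reset.
 */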
1178 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
1179 {
1180 	struct gve_notify_block *block;
1181 	struct gve_tx_ring *tx = NULL;
1182 	struct gve_priv *priv;
1183 	u32 last_nic_done;
1184 	u32 current_time;
1185 	u32 ntfy_idx;
1186 
1187 	netdev_info(dev, "Timeout on tx queue %d\n", txqueue);
1188 	priv = netdev_priv(dev);
1189 	if (txqueue >= priv->tx_cfg.num_queues)
1190 		goto reset;
1191 
1192 	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
1193 	if (ntfy_idx >= priv->num_ntfy_blks)
1194 		goto reset;
1195 
1196 	block = &priv->ntfy_blocks[ntfy_idx];
1197 	tx = block->tx;
1198 
1199 	current_time = jiffies_to_msecs(jiffies);
1200 	if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
1201 		goto reset;
1202 
1203 	/* Check to see if there are missed completions, which will allow us to
1204 	 * kick the queue.
1205 	 */
1206 	last_nic_done = gve_tx_load_event_counter(priv, tx);
1207 	if (last_nic_done - tx->done) {
1208 		netdev_info(dev, "Kicking queue %d\n", txqueue);
1209 		iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
1210 		napi_schedule(&block->napi);
1211 		tx->last_kick_msec = current_time;
1212 		goto out;
1213 	} /* Else reset. */
1214 
1215 reset:
1216 	gve_schedule_reset(priv);
1217 
1218 out:
1219 	if (tx)
1220 		tx->queue_timeout++;
1221 	priv->tx_timeo_cnt++;
1222 }
1223 
1224 static int gve_set_features(struct net_device *netdev,
1225 			    netdev_features_t features)
1226 {
1227 	const netdev_features_t orig_features = netdev->features;
1228 	struct gve_priv *priv = netdev_priv(netdev);
1229 	int err;
1230 
1231 	if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
1232 		netdev->features ^= NETIF_F_LRO;
1233 		if (netif_carrier_ok(netdev)) {
1234 			/* To make this process as simple as possible we
1235 			 * teardown the device, set the new configuration,
1236 			 * and then bring the device up again.
1237 			 */
1238 			err = gve_close(netdev);
1239 			/* We have already tried to reset in close, just fail
1240 			 * at this point.
1241 			 */
1242 			if (err)
1243 				goto err;
1244 
1245 			err = gve_open(netdev);
1246 			if (err)
1247 				goto err;
1248 		}
1249 	}
1250 
1251 	return 0;
1252 err:
1253 	/* Reverts the change on error. */
1254 	netdev->features = orig_features;
1255 	netif_err(priv, drv, netdev,
1256 		  "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
1257 	return err;
1258 }
1259 
1260 static const struct net_device_ops gve_netdev_ops = {
1261 	.ndo_start_xmit		=	gve_start_xmit,
1262 	.ndo_open		=	gve_open,
1263 	.ndo_stop		=	gve_close,
1264 	.ndo_get_stats64	=	gve_get_stats,
1265 	.ndo_tx_timeout		=	gve_tx_timeout,
1266 	.ndo_set_features	=	gve_set_features,
1267 };
1268 
1269 static void gve_handle_status(struct gve_priv *priv, u32 status)
1270 {
1271 	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
1272 		dev_info(&priv->pdev->dev, "Device requested reset.\n");
1273 		gve_set_do_reset(priv);
1274 	}
1275 	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
1276 		priv->stats_report_trigger_cnt++;
1277 		gve_set_do_report_stats(priv);
1278 	}
1279 }
1280 
1281 static void gve_handle_reset(struct gve_priv *priv)
1282 {
1283 	/* A service task will be scheduled at the end of probe to catch any
1284 	 * resets that need to happen, and we don't want to reset until
1285 	 * probe is done.
1286 	 */
1287 	if (gve_get_probe_in_progress(priv))
1288 		return;
1289 
1290 	if (gve_get_do_reset(priv)) {
1291 		rtnl_lock();
1292 		gve_reset(priv, false);
1293 		rtnl_unlock();
1294 	}
1295 }
1296 
1297 void gve_handle_report_stats(struct gve_priv *priv)
1298 {
1299 	struct stats *stats = priv->stats_report->stats;
1300 	int idx, stats_idx = 0;
1301 	unsigned int start = 0;
1302 	u64 tx_bytes;
1303 
1304 	if (!gve_get_report_stats(priv))
1305 		return;
1306 
1307 	be64_add_cpu(&priv->stats_report->written_count, 1);
1308 	/* tx stats */
1309 	if (priv->tx) {
1310 		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1311 			u32 last_completion = 0;
1312 			u32 tx_frames = 0;
1313 
1314 			/* DQO doesn't currently support these metrics. */
1315 			if (gve_is_gqi(priv)) {
1316 				last_completion = priv->tx[idx].done;
1317 				tx_frames = priv->tx[idx].req;
1318 			}
1319 
1320 			do {
1321 				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
1322 				tx_bytes = priv->tx[idx].bytes_done;
1323 			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
1324 			stats[stats_idx++] = (struct stats) {
1325 				.stat_name = cpu_to_be32(TX_WAKE_CNT),
1326 				.value = cpu_to_be64(priv->tx[idx].wake_queue),
1327 				.queue_id = cpu_to_be32(idx),
1328 			};
1329 			stats[stats_idx++] = (struct stats) {
1330 				.stat_name = cpu_to_be32(TX_STOP_CNT),
1331 				.value = cpu_to_be64(priv->tx[idx].stop_queue),
1332 				.queue_id = cpu_to_be32(idx),
1333 			};
1334 			stats[stats_idx++] = (struct stats) {
1335 				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
1336 				.value = cpu_to_be64(tx_frames),
1337 				.queue_id = cpu_to_be32(idx),
1338 			};
1339 			stats[stats_idx++] = (struct stats) {
1340 				.stat_name = cpu_to_be32(TX_BYTES_SENT),
1341 				.value = cpu_to_be64(tx_bytes),
1342 				.queue_id = cpu_to_be32(idx),
1343 			};
1344 			stats[stats_idx++] = (struct stats) {
1345 				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
1346 				.value = cpu_to_be64(last_completion),
1347 				.queue_id = cpu_to_be32(idx),
1348 			};
1349 			stats[stats_idx++] = (struct stats) {
1350 				.stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
1351 				.value = cpu_to_be64(priv->tx[idx].queue_timeout),
1352 				.queue_id = cpu_to_be32(idx),
1353 			};
1354 		}
1355 	}
1356 	/* rx stats */
1357 	if (priv->rx) {
1358 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1359 			stats[stats_idx++] = (struct stats) {
1360 				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
1361 				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
1362 				.queue_id = cpu_to_be32(idx),
1363 			};
1364 			stats[stats_idx++] = (struct stats) {
1365 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
1366 				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
1367 				.queue_id = cpu_to_be32(idx),
1368 			};
1369 		}
1370 	}
1371 }
1372 
1373 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1374 {
1375 	if (!gve_get_napi_enabled(priv))
1376 		return;
1377 
1378 	if (link_status == netif_carrier_ok(priv->dev))
1379 		return;
1380 
1381 	if (link_status) {
1382 		netdev_info(priv->dev, "Device link is up.\n");
1383 		netif_carrier_on(priv->dev);
1384 	} else {
1385 		netdev_info(priv->dev, "Device link is down.\n");
1386 		netif_carrier_off(priv->dev);
1387 	}
1388 }
1389 
1390 /* Handle NIC status register changes, reset requests and report stats */
1391 static void gve_service_task(struct work_struct *work)
1392 {
1393 	struct gve_priv *priv = container_of(work, struct gve_priv,
1394 					     service_task);
1395 	u32 status = ioread32be(&priv->reg_bar0->device_status);
1396 
1397 	gve_handle_status(priv, status);
1398 
1399 	gve_handle_reset(priv);
1400 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1401 }
1402 
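/* Set up the admin queue and verify driver compatibility.  Unless this
 * is a re-init after a reset, the device is also described to size the
 * MSI-X vectors and the default queue counts.  Finally the device
 * resources are set up.
 */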
1403 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1404 {
1405 	int num_ntfy;
1406 	int err;
1407 
1408 	/* Set up the adminq */
1409 	err = gve_adminq_alloc(&priv->pdev->dev, priv);
1410 	if (err) {
1411 		dev_err(&priv->pdev->dev,
1412 			"Failed to alloc admin queue: err=%d\n", err);
1413 		return err;
1414 	}
1415 
1416 	err = gve_verify_driver_compatibility(priv);
1417 	if (err) {
1418 		dev_err(&priv->pdev->dev,
1419 			"Could not verify driver compatibility: err=%d\n", err);
1420 		goto err;
1421 	}
1422 
1423 	if (skip_describe_device)
1424 		goto setup_device;
1425 
1426 	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
1427 	/* Get the initial information we need from the device */
1428 	err = gve_adminq_describe_device(priv);
1429 	if (err) {
1430 		dev_err(&priv->pdev->dev,
1431 			"Could not get device information: err=%d\n", err);
1432 		goto err;
1433 	}
1434 	priv->dev->mtu = priv->dev->max_mtu;
1435 	num_ntfy = pci_msix_vec_count(priv->pdev);
1436 	if (num_ntfy <= 0) {
1437 		dev_err(&priv->pdev->dev,
1438 			"could not count MSI-x vectors: err=%d\n", num_ntfy);
1439 		err = num_ntfy;
1440 		goto err;
1441 	} else if (num_ntfy < GVE_MIN_MSIX) {
1442 		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
1443 			GVE_MIN_MSIX, num_ntfy);
1444 		err = -EINVAL;
1445 		goto err;
1446 	}
1447 
1448 	priv->num_registered_pages = 0;
1449 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
1450 	/* gvnic has one Notification Block per MSI-x vector, except for the
1451 	 * management vector
1452 	 */
1453 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1454 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
1455 
1456 	priv->tx_cfg.max_queues =
1457 		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
1458 	priv->rx_cfg.max_queues =
1459 		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
1460 
1461 	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
1462 	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
1463 	if (priv->default_num_queues > 0) {
1464 		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
1465 						priv->tx_cfg.num_queues);
1466 		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
1467 						priv->rx_cfg.num_queues);
1468 	}
1469 
1470 	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
1471 		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
1472 	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
1473 		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
1474 
1475 	if (!gve_is_gqi(priv)) {
1476 		priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
1477 		priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
1478 	}
1479 
1480 setup_device:
1481 	err = gve_setup_device_resources(priv);
1482 	if (!err)
1483 		return 0;
1484 err:
1485 	gve_adminq_free(&priv->pdev->dev, priv);
1486 	return err;
1487 }
1488 
1489 static void gve_teardown_priv_resources(struct gve_priv *priv)
1490 {
1491 	gve_teardown_device_resources(priv);
1492 	gve_adminq_free(&priv->pdev->dev, priv);
1493 }
1494 
1495 static void gve_trigger_reset(struct gve_priv *priv)
1496 {
1497 	/* Reset the device by releasing the AQ */
1498 	gve_adminq_release(priv);
1499 }
1500 
1501 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1502 {
1503 	gve_trigger_reset(priv);
1504 	/* With the reset having already happened, close cannot fail */
1505 	if (was_up)
1506 		gve_close(priv->dev);
1507 	gve_teardown_priv_resources(priv);
1508 }
1509 
1510 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1511 {
1512 	int err;
1513 
1514 	err = gve_init_priv(priv, true);
1515 	if (err)
1516 		goto err;
1517 	if (was_up) {
1518 		err = gve_open(priv->dev);
1519 		if (err)
1520 			goto err;
1521 	}
1522 	return 0;
1523 err:
1524 	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1525 	gve_turndown(priv);
1526 	return err;
1527 }
1528 
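/* Perform a full device reset.  With attempt_teardown the interface is
 * closed cleanly first (falling back to an immediate reset if close
 * fails); otherwise the data path is turned down and the device is reset
 * right away by releasing the admin queue.  Everything is then
 * reinitialized and, if the interface was up, reopened.
 */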
1529 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1530 {
1531 	bool was_up = netif_carrier_ok(priv->dev);
1532 	int err;
1533 
1534 	dev_info(&priv->pdev->dev, "Performing reset\n");
1535 	gve_clear_do_reset(priv);
1536 	gve_set_reset_in_progress(priv);
1537 	/* If we aren't attempting to teardown normally, just go turndown and
1538 	 * reset right away.
1539 	 */
1540 	if (!attempt_teardown) {
1541 		gve_turndown(priv);
1542 		gve_reset_and_teardown(priv, was_up);
1543 	} else {
1544 		/* Otherwise attempt to close normally */
1545 		if (was_up) {
1546 			err = gve_close(priv->dev);
1547 			/* If that fails reset as we did above */
1548 			if (err)
1549 				gve_reset_and_teardown(priv, was_up);
1550 		}
1551 		/* Clean up any remaining resources */
1552 		gve_teardown_priv_resources(priv);
1553 	}
1554 
1555 	/* Set it all back up */
1556 	err = gve_reset_recovery(priv, was_up);
1557 	gve_clear_reset_in_progress(priv);
1558 	priv->reset_cnt++;
1559 	priv->interface_up_cnt = 0;
1560 	priv->interface_down_cnt = 0;
1561 	priv->stats_report_trigger_cnt = 0;
1562 	return err;
1563 }
1564 
1565 static void gve_write_version(u8 __iomem *driver_version_register)
1566 {
1567 	const char *c = gve_version_prefix;
1568 
1569 	while (*c) {
1570 		writeb(*c, driver_version_register);
1571 		c++;
1572 	}
1573 
1574 	c = gve_version_str;
1575 	while (*c) {
1576 		writeb(*c, driver_version_register);
1577 		c++;
1578 	}
1579 	writeb('\n', driver_version_register);
1580 }
1581 
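/* PCI probe: enable the device, map the register and doorbell BARs,
 * write the driver version, allocate a netdev sized for the maximum
 * queue counts advertised by the device, initialize the private state
 * and register the netdev.  A service task run is queued at the end to
 * catch any reset requests raised during probe.
 */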
1582 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1583 {
1584 	int max_tx_queues, max_rx_queues;
1585 	struct net_device *dev;
1586 	__be32 __iomem *db_bar;
1587 	struct gve_registers __iomem *reg_bar;
1588 	struct gve_priv *priv;
1589 	int err;
1590 
1591 	err = pci_enable_device(pdev);
1592 	if (err)
1593 		return err;
1594 
1595 	err = pci_request_regions(pdev, "gvnic-cfg");
1596 	if (err)
1597 		goto abort_with_enabled;
1598 
1599 	pci_set_master(pdev);
1600 
1601 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1602 	if (err) {
1603 		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1604 		goto abort_with_pci_region;
1605 	}
1606 
1607 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1608 	if (!reg_bar) {
1609 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
1610 		err = -ENOMEM;
1611 		goto abort_with_pci_region;
1612 	}
1613 
1614 	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1615 	if (!db_bar) {
1616 		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1617 		err = -ENOMEM;
1618 		goto abort_with_reg_bar;
1619 	}
1620 
1621 	gve_write_version(&reg_bar->driver_version);
1622 	/* Get max queues to alloc etherdev */
1623 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1624 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1625 	/* Alloc and setup the netdev and priv */
1626 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1627 	if (!dev) {
1628 		dev_err(&pdev->dev, "could not allocate netdev\n");
1629 		err = -ENOMEM;
1630 		goto abort_with_db_bar;
1631 	}
1632 	SET_NETDEV_DEV(dev, &pdev->dev);
1633 	pci_set_drvdata(pdev, dev);
1634 	dev->ethtool_ops = &gve_ethtool_ops;
1635 	dev->netdev_ops = &gve_netdev_ops;
1636 
1637 	/* Set default and supported features.
1638 	 *
1639 	 * Features might be set in other locations as well (such as
1640 	 * `gve_adminq_describe_device`).
1641 	 */
1642 	dev->hw_features = NETIF_F_HIGHDMA;
1643 	dev->hw_features |= NETIF_F_SG;
1644 	dev->hw_features |= NETIF_F_HW_CSUM;
1645 	dev->hw_features |= NETIF_F_TSO;
1646 	dev->hw_features |= NETIF_F_TSO6;
1647 	dev->hw_features |= NETIF_F_TSO_ECN;
1648 	dev->hw_features |= NETIF_F_RXCSUM;
1649 	dev->hw_features |= NETIF_F_RXHASH;
1650 	dev->features = dev->hw_features;
1651 	dev->watchdog_timeo = 5 * HZ;
1652 	dev->min_mtu = ETH_MIN_MTU;
1653 	netif_carrier_off(dev);
1654 
1655 	priv = netdev_priv(dev);
1656 	priv->dev = dev;
1657 	priv->pdev = pdev;
1658 	priv->msg_enable = DEFAULT_MSG_LEVEL;
1659 	priv->reg_bar0 = reg_bar;
1660 	priv->db_bar2 = db_bar;
1661 	priv->service_task_flags = 0x0;
1662 	priv->state_flags = 0x0;
1663 	priv->ethtool_flags = 0x0;
1664 
1665 	gve_set_probe_in_progress(priv);
1666 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1667 	if (!priv->gve_wq) {
1668 		dev_err(&pdev->dev, "Could not allocate workqueue");
1669 		err = -ENOMEM;
1670 		goto abort_with_netdev;
1671 	}
1672 	INIT_WORK(&priv->service_task, gve_service_task);
1673 	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1674 	priv->tx_cfg.max_queues = max_tx_queues;
1675 	priv->rx_cfg.max_queues = max_rx_queues;
1676 
1677 	err = gve_init_priv(priv, false);
1678 	if (err)
1679 		goto abort_with_wq;
1680 
1681 	err = register_netdev(dev);
1682 	if (err)
1683 		goto abort_with_gve_init;
1684 
1685 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1686 	dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
1687 	gve_clear_probe_in_progress(priv);
1688 	queue_work(priv->gve_wq, &priv->service_task);
1689 	return 0;
1690 
1691 abort_with_gve_init:
1692 	gve_teardown_priv_resources(priv);
1693 
1694 abort_with_wq:
1695 	destroy_workqueue(priv->gve_wq);
1696 
1697 abort_with_netdev:
1698 	free_netdev(dev);
1699 
1700 abort_with_db_bar:
1701 	pci_iounmap(pdev, db_bar);
1702 
1703 abort_with_reg_bar:
1704 	pci_iounmap(pdev, reg_bar);
1705 
1706 abort_with_pci_region:
1707 	pci_release_regions(pdev);
1708 
1709 abort_with_enabled:
1710 	pci_disable_device(pdev);
1711 	return err;
1712 }
1713 
1714 static void gve_remove(struct pci_dev *pdev)
1715 {
1716 	struct net_device *netdev = pci_get_drvdata(pdev);
1717 	struct gve_priv *priv = netdev_priv(netdev);
1718 	__be32 __iomem *db_bar = priv->db_bar2;
1719 	void __iomem *reg_bar = priv->reg_bar0;
1720 
1721 	unregister_netdev(netdev);
1722 	gve_teardown_priv_resources(priv);
1723 	destroy_workqueue(priv->gve_wq);
1724 	free_netdev(netdev);
1725 	pci_iounmap(pdev, db_bar);
1726 	pci_iounmap(pdev, reg_bar);
1727 	pci_release_regions(pdev);
1728 	pci_disable_device(pdev);
1729 }
1730 
1731 static void gve_shutdown(struct pci_dev *pdev)
1732 {
1733 	struct net_device *netdev = pci_get_drvdata(pdev);
1734 	struct gve_priv *priv = netdev_priv(netdev);
1735 	bool was_up = netif_carrier_ok(priv->dev);
1736 
1737 	rtnl_lock();
1738 	if (was_up && gve_close(priv->dev)) {
1739 		/* If the dev was up, attempt to close, if close fails, reset */
1740 		gve_reset_and_teardown(priv, was_up);
1741 	} else {
1742 		/* If the dev wasn't up or close worked, finish tearing down */
1743 		gve_teardown_priv_resources(priv);
1744 	}
1745 	rtnl_unlock();
1746 }
1747 
1748 #ifdef CONFIG_PM
1749 static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
1750 {
1751 	struct net_device *netdev = pci_get_drvdata(pdev);
1752 	struct gve_priv *priv = netdev_priv(netdev);
1753 	bool was_up = netif_carrier_ok(priv->dev);
1754 
1755 	priv->suspend_cnt++;
1756 	rtnl_lock();
1757 	if (was_up && gve_close(priv->dev)) {
1758 		/* If the dev was up, attempt to close, if close fails, reset */
1759 		gve_reset_and_teardown(priv, was_up);
1760 	} else {
1761 		/* If the dev wasn't up or close worked, finish tearing down */
1762 		gve_teardown_priv_resources(priv);
1763 	}
1764 	priv->up_before_suspend = was_up;
1765 	rtnl_unlock();
1766 	return 0;
1767 }
1768 
1769 static int gve_resume(struct pci_dev *pdev)
1770 {
1771 	struct net_device *netdev = pci_get_drvdata(pdev);
1772 	struct gve_priv *priv = netdev_priv(netdev);
1773 	int err;
1774 
1775 	priv->resume_cnt++;
1776 	rtnl_lock();
1777 	err = gve_reset_recovery(priv, priv->up_before_suspend);
1778 	rtnl_unlock();
1779 	return err;
1780 }
1781 #endif /* CONFIG_PM */
1782 
1783 static const struct pci_device_id gve_id_table[] = {
1784 	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1785 	{ }
1786 };
1787 
1788 static struct pci_driver gvnic_driver = {
1789 	.name		= "gvnic",
1790 	.id_table	= gve_id_table,
1791 	.probe		= gve_probe,
1792 	.remove		= gve_remove,
1793 	.shutdown	= gve_shutdown,
1794 #ifdef CONFIG_PM
1795 	.suspend        = gve_suspend,
1796 	.resume         = gve_resume,
1797 #endif
1798 };
1799 
1800 module_pci_driver(gvnic_driver);
1801 
1802 MODULE_DEVICE_TABLE(pci, gve_id_table);
1803 MODULE_AUTHOR("Google, Inc.");
1804 MODULE_DESCRIPTION("gVNIC Driver");
1805 MODULE_LICENSE("Dual MIT/GPL");
1806 MODULE_VERSION(GVE_VERSION);
1807