xref: /linux/drivers/net/ethernet/google/gve/gve_main.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2024 Google LLC
5  */
6 
7 #include <linux/bitmap.h>
8 #include <linux/bpf.h>
9 #include <linux/cpumask.h>
10 #include <linux/etherdevice.h>
11 #include <linux/filter.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/workqueue.h>
19 #include <linux/utsname.h>
20 #include <linux/version.h>
21 #include <net/netdev_queues.h>
22 #include <net/sch_generic.h>
23 #include <net/xdp_sock_drv.h>
24 #include "gve.h"
25 #include "gve_dqo.h"
26 #include "gve_adminq.h"
27 #include "gve_register.h"
28 #include "gve_utils.h"
29 
30 #define GVE_DEFAULT_RX_COPYBREAK	(256)
31 
32 #define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
33 #define GVE_VERSION		"1.0.0"
34 #define GVE_VERSION_PREFIX	"GVE-"
35 
36 // Minimum time between queue kicks, in msec (10 seconds)
37 #define MIN_TX_TIMEOUT_GAP (1000 * 10)
38 
39 char gve_driver_name[] = "gve";
40 const char gve_version_str[] = GVE_VERSION;
41 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
42 
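/* Tell the device which OS and driver version it is talking to.  The
 * driver_info struct is DMA'd to the device over the admin queue;
 * devices that do not support the command return -EOPNOTSUPP, which is
 * treated as success.
 */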
43 static int gve_verify_driver_compatibility(struct gve_priv *priv)
44 {
45 	int err;
46 	struct gve_driver_info *driver_info;
47 	dma_addr_t driver_info_bus;
48 
49 	driver_info = dma_alloc_coherent(&priv->pdev->dev,
50 					 sizeof(struct gve_driver_info),
51 					 &driver_info_bus, GFP_KERNEL);
52 	if (!driver_info)
53 		return -ENOMEM;
54 
55 	*driver_info = (struct gve_driver_info) {
56 		.os_type = 1, /* Linux */
57 		.os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR),
58 		.os_version_minor = cpu_to_be32(LINUX_VERSION_SUBLEVEL),
59 		.os_version_sub = cpu_to_be32(LINUX_VERSION_PATCHLEVEL),
60 		.driver_capability_flags = {
61 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
62 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
63 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
64 			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
65 		},
66 	};
67 	strscpy(driver_info->os_version_str1, utsname()->release,
68 		sizeof(driver_info->os_version_str1));
69 	strscpy(driver_info->os_version_str2, utsname()->version,
70 		sizeof(driver_info->os_version_str2));
71 
72 	err = gve_adminq_verify_driver_compatibility(priv,
73 						     sizeof(struct gve_driver_info),
74 						     driver_info_bus);
75 
76 	/* It's ok if the device doesn't support this */
77 	if (err == -EOPNOTSUPP)
78 		err = 0;
79 
80 	dma_free_coherent(&priv->pdev->dev,
81 			  sizeof(struct gve_driver_info),
82 			  driver_info, driver_info_bus);
83 	return err;
84 }
85 
86 static netdev_features_t gve_features_check(struct sk_buff *skb,
87 					    struct net_device *dev,
88 					    netdev_features_t features)
89 {
90 	struct gve_priv *priv = netdev_priv(dev);
91 
92 	if (!gve_is_gqi(priv))
93 		return gve_features_check_dqo(skb, dev, features);
94 
95 	return features;
96 }
97 
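/* ndo_start_xmit: dispatch to the GQI or DQO transmit path based on the
 * negotiated queue format.
 */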
98 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
99 {
100 	struct gve_priv *priv = netdev_priv(dev);
101 
102 	if (gve_is_gqi(priv))
103 		return gve_tx(skb, dev);
104 	else
105 		return gve_tx_dqo(skb, dev);
106 }
107 
108 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
109 {
110 	struct gve_priv *priv = netdev_priv(dev);
111 	unsigned int start;
112 	u64 packets, bytes;
113 	int num_tx_queues;
114 	int ring;
115 
116 	num_tx_queues = gve_num_tx_queues(priv);
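	/* Per-ring counters are read under the u64_stats seqcount so the
	 * 64-bit values stay consistent even where updates are not atomic.
	 */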
117 	if (priv->rx) {
118 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
119 			do {
120 				start =
121 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
122 				packets = priv->rx[ring].rpackets;
123 				bytes = priv->rx[ring].rbytes;
124 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
125 						       start));
126 			s->rx_packets += packets;
127 			s->rx_bytes += bytes;
128 		}
129 	}
130 	if (priv->tx) {
131 		for (ring = 0; ring < num_tx_queues; ring++) {
132 			do {
133 				start =
134 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
135 				packets = priv->tx[ring].pkt_done;
136 				bytes = priv->tx[ring].bytes_done;
137 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
138 						       start));
139 			s->tx_packets += packets;
140 			s->tx_bytes += bytes;
141 		}
142 	}
143 }
144 
145 static int gve_alloc_flow_rule_caches(struct gve_priv *priv)
146 {
147 	struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
148 	int err = 0;
149 
150 	if (!priv->max_flow_rules)
151 		return 0;
152 
153 	flow_rules_cache->rules_cache =
154 		kvzalloc_objs(*flow_rules_cache->rules_cache,
155 			      GVE_FLOW_RULES_CACHE_SIZE);
156 	if (!flow_rules_cache->rules_cache) {
157 		dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n");
158 		return -ENOMEM;
159 	}
160 
161 	flow_rules_cache->rule_ids_cache =
162 		kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache),
163 			 GFP_KERNEL);
164 	if (!flow_rules_cache->rule_ids_cache) {
165 		dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n");
166 		err = -ENOMEM;
167 		goto free_rules_cache;
168 	}
169 
170 	return 0;
171 
172 free_rules_cache:
173 	kvfree(flow_rules_cache->rules_cache);
174 	flow_rules_cache->rules_cache = NULL;
175 	return err;
176 }
177 
178 static void gve_free_flow_rule_caches(struct gve_priv *priv)
179 {
180 	struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
181 
182 	kvfree(flow_rules_cache->rule_ids_cache);
183 	flow_rules_cache->rule_ids_cache = NULL;
184 	kvfree(flow_rules_cache->rules_cache);
185 	flow_rules_cache->rules_cache = NULL;
186 }
187 
188 static int gve_alloc_rss_config_cache(struct gve_priv *priv)
189 {
190 	struct gve_rss_config *rss_config = &priv->rss_config;
191 
192 	if (!priv->cache_rss_config)
193 		return 0;
194 
195 	rss_config->hash_key = kcalloc(priv->rss_key_size,
196 				       sizeof(rss_config->hash_key[0]),
197 				       GFP_KERNEL);
198 	if (!rss_config->hash_key)
199 		return -ENOMEM;
200 
201 	rss_config->hash_lut = kcalloc(priv->rss_lut_size,
202 				       sizeof(rss_config->hash_lut[0]),
203 				       GFP_KERNEL);
204 	if (!rss_config->hash_lut)
205 		goto free_rss_key_cache;
206 
207 	return 0;
208 
209 free_rss_key_cache:
210 	kfree(rss_config->hash_key);
211 	rss_config->hash_key = NULL;
212 	return -ENOMEM;
213 }
214 
215 static void gve_free_rss_config_cache(struct gve_priv *priv)
216 {
217 	struct gve_rss_config *rss_config = &priv->rss_config;
218 
219 	kfree(rss_config->hash_key);
220 	kfree(rss_config->hash_lut);
221 
222 	memset(rss_config, 0, sizeof(*rss_config));
223 }
224 
225 static int gve_alloc_counter_array(struct gve_priv *priv)
226 {
227 	priv->counter_array =
228 		dma_alloc_coherent(&priv->pdev->dev,
229 				   priv->num_event_counters *
230 				   sizeof(*priv->counter_array),
231 				   &priv->counter_array_bus, GFP_KERNEL);
232 	if (!priv->counter_array)
233 		return -ENOMEM;
234 
235 	return 0;
236 }
237 
238 static void gve_free_counter_array(struct gve_priv *priv)
239 {
240 	if (!priv->counter_array)
241 		return;
242 
243 	dma_free_coherent(&priv->pdev->dev,
244 			  priv->num_event_counters *
245 			  sizeof(*priv->counter_array),
246 			  priv->counter_array, priv->counter_array_bus);
247 	priv->counter_array = NULL;
248 }
249 
250 /* NIC requests to report stats */
251 static void gve_stats_report_task(struct work_struct *work)
252 {
253 	struct gve_priv *priv = container_of(work, struct gve_priv,
254 					     stats_report_task);
255 	if (gve_get_do_report_stats(priv)) {
256 		gve_handle_report_stats(priv);
257 		gve_clear_do_report_stats(priv);
258 	}
259 }
260 
261 static void gve_stats_report_schedule(struct gve_priv *priv)
262 {
263 	if (!gve_get_probe_in_progress(priv) &&
264 	    !gve_get_reset_in_progress(priv)) {
265 		gve_set_do_report_stats(priv);
266 		queue_work(priv->gve_wq, &priv->stats_report_task);
267 	}
268 }
269 
270 static void gve_stats_report_timer(struct timer_list *t)
271 {
272 	struct gve_priv *priv = timer_container_of(priv, t,
273 						   stats_report_timer);
274 
275 	mod_timer(&priv->stats_report_timer,
276 		  round_jiffies(jiffies +
277 		  msecs_to_jiffies(priv->stats_report_timer_period)));
278 	gve_stats_report_schedule(priv);
279 }
280 
281 static int gve_alloc_stats_report(struct gve_priv *priv)
282 {
283 	int tx_stats_num, rx_stats_num;
284 
285 	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
286 				priv->tx_cfg.max_queues;
287 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
288 				priv->rx_cfg.max_queues;
289 	priv->stats_report_len = struct_size(priv->stats_report, stats,
290 					     size_add(tx_stats_num, rx_stats_num));
291 	priv->stats_report =
292 		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
293 				   &priv->stats_report_bus, GFP_KERNEL);
294 	if (!priv->stats_report)
295 		return -ENOMEM;
296 	/* Set up timer for the report-stats task */
297 	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
298 	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
299 	return 0;
300 }
301 
302 static void gve_free_stats_report(struct gve_priv *priv)
303 {
304 	if (!priv->stats_report)
305 		return;
306 
307 	timer_delete_sync(&priv->stats_report_timer);
308 	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
309 			  priv->stats_report, priv->stats_report_bus);
310 	priv->stats_report = NULL;
311 }
312 
313 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
314 {
315 	struct gve_priv *priv = arg;
316 
317 	queue_work(priv->gve_wq, &priv->service_task);
318 	return IRQ_HANDLED;
319 }
320 
321 static irqreturn_t gve_intr(int irq, void *arg)
322 {
323 	struct gve_notify_block *block = arg;
324 	struct gve_priv *priv = block->priv;
325 
326 	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
327 	napi_schedule_irqoff(&block->napi);
328 	return IRQ_HANDLED;
329 }
330 
331 static irqreturn_t gve_intr_dqo(int irq, void *arg)
332 {
333 	struct gve_notify_block *block = arg;
334 
335 	/* Interrupts are automatically masked */
336 	napi_schedule_irqoff(&block->napi);
337 	return IRQ_HANDLED;
338 }
339 
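/* Returns nonzero if the current CPU is in the irq's effective affinity
 * mask; a missing mask is treated as a match.
 */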
340 static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
341 {
342 	int cpu_curr = smp_processor_id();
343 	const struct cpumask *aff_mask;
344 
345 	aff_mask = irq_get_effective_affinity_mask(irq);
346 	if (unlikely(!aff_mask))
347 		return 1;
348 
349 	return cpumask_test_cpu(cpu_curr, aff_mask);
350 }
351 
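/* GQI NAPI poll: the hard irq handler masked the interrupt, so poll TX
 * (or XDP) and RX here, then ack/unmask the doorbell on completion and
 * re-check for work that raced with the ack.
 */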
352 int gve_napi_poll(struct napi_struct *napi, int budget)
353 {
354 	struct gve_notify_block *block;
355 	__be32 __iomem *irq_doorbell;
356 	bool reschedule = false;
357 	struct gve_priv *priv;
358 	int work_done = 0;
359 
360 	block = container_of(napi, struct gve_notify_block, napi);
361 	priv = block->priv;
362 
363 	if (block->tx) {
364 		if (block->tx->q_num < priv->tx_cfg.num_queues)
365 			reschedule |= gve_tx_poll(block, budget);
366 		else if (budget)
367 			reschedule |= gve_xdp_poll(block, budget);
368 	}
369 
370 	if (!budget)
371 		return 0;
372 
373 	if (block->rx) {
374 		work_done = gve_rx_poll(block, budget);
375 
376 		/* Poll XSK TX as part of RX NAPI. Setup re-poll based on max of
377 		 * TX and RX work done.
378 		 */
379 		if (priv->xdp_prog)
380 			work_done = max_t(int, work_done,
381 					  gve_xsk_tx_poll(block, budget));
382 
383 		reschedule |= work_done == budget;
384 	}
385 
386 	if (reschedule)
387 		return budget;
388 
389 	/* Complete processing - don't unmask irq if busy polling is enabled */
390 	if (likely(napi_complete_done(napi, work_done))) {
391 		irq_doorbell = gve_irq_doorbell(priv, block);
392 		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
393 
394 		/* Ensure IRQ ACK is visible before we check pending work.
395 		 * If the queue had issued any updates, they will be visible now.
396 		 */
397 		mb();
398 
399 		if (block->tx)
400 			reschedule |= gve_tx_clean_pending(priv, block->tx);
401 		if (block->rx)
402 			reschedule |= gve_rx_work_pending(block->rx);
403 
404 		if (reschedule && napi_schedule(napi))
405 			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
406 	}
407 	return work_done;
408 }
409 
410 int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
411 {
412 	struct gve_notify_block *block =
413 		container_of(napi, struct gve_notify_block, napi);
414 	struct gve_priv *priv = block->priv;
415 	bool reschedule = false;
416 	int work_done = 0;
417 
418 	if (block->tx) {
419 		if (block->tx->q_num < priv->tx_cfg.num_queues)
420 			reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
421 		else
422 			reschedule |= gve_xdp_poll_dqo(block);
423 	}
424 
425 	if (!budget)
426 		return 0;
427 
428 	if (block->rx) {
429 		work_done = gve_rx_poll_dqo(block, budget);
430 
431 		/* Poll XSK TX as part of RX NAPI. Setup re-poll based on if
432 		 * either datapath has more work to do.
433 		 */
434 		if (priv->xdp_prog)
435 			reschedule |= gve_xsk_tx_poll_dqo(block, budget);
436 		reschedule |= work_done == budget;
437 	}
438 
439 	if (reschedule) {
440 		/* Reschedule by returning budget only if already on the correct
441 		 * cpu.
442 		 */
443 		if (likely(gve_is_napi_on_home_cpu(priv, block->irq)))
444 			return budget;
445 
446 		/* If not on the cpu to which this queue's irq has affinity,
447 		 * avoid rescheduling napi and arm the irq instead so that
448 		 * napi eventually gets rescheduled back onto the right
449 		 * cpu.
450 		 */
451 		if (work_done == budget)
452 			work_done--;
453 	}
454 
455 	if (likely(napi_complete_done(napi, work_done))) {
456 		/* Enable interrupts again.
457 		 *
458 		 * We don't need to repoll afterwards because HW supports the
459 		 * PCI MSI-X PBA feature.
460 		 *
461 		 * Another interrupt would be triggered if a new event came in
462 		 * since the last one.
463 		 */
464 		gve_write_irq_doorbell_dqo(priv, block,
465 					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
466 	}
467 
468 	return work_done;
469 }
470 
471 static const struct cpumask *gve_get_node_mask(struct gve_priv *priv)
472 {
473 	if (priv->numa_node == NUMA_NO_NODE)
474 		return cpu_all_mask;
475 	else
476 		return cpumask_of_node(priv->numa_node);
477 }
478 
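/* Allocate one MSI-X vector per notify block plus one management vector,
 * shrinking the TX/RX queue maximums if the PCI core grants fewer
 * vectors than requested.
 */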
479 static int gve_alloc_notify_blocks(struct gve_priv *priv)
480 {
481 	int num_vecs_requested = priv->num_ntfy_blks + 1;
482 	const struct cpumask *node_mask;
483 	unsigned int cur_cpu;
484 	int vecs_enabled;
485 	int i, j;
486 	int err;
487 
488 	priv->msix_vectors = kvzalloc_objs(*priv->msix_vectors,
489 					   num_vecs_requested);
490 	if (!priv->msix_vectors)
491 		return -ENOMEM;
492 	for (i = 0; i < num_vecs_requested; i++)
493 		priv->msix_vectors[i].entry = i;
494 	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
495 					     GVE_MIN_MSIX, num_vecs_requested);
496 	if (vecs_enabled < 0) {
497 		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
498 			GVE_MIN_MSIX, vecs_enabled);
499 		err = vecs_enabled;
500 		goto abort_with_msix_vectors;
501 	}
502 	if (vecs_enabled != num_vecs_requested) {
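		/* One vector stays reserved for management; split the rest
		 * evenly between TX and RX, rounding down to an even count.
		 */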
503 		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
504 		int vecs_per_type = new_num_ntfy_blks / 2;
505 		int vecs_left = new_num_ntfy_blks % 2;
506 
507 		priv->num_ntfy_blks = new_num_ntfy_blks;
508 		priv->mgmt_msix_idx = priv->num_ntfy_blks;
509 		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
510 						vecs_per_type);
511 		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
512 						vecs_per_type + vecs_left);
513 		dev_err(&priv->pdev->dev,
514 			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
515 			vecs_enabled, priv->tx_cfg.max_queues,
516 			priv->rx_cfg.max_queues);
517 		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
518 			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
519 		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
520 			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
521 	}
522 
523 	/* Setup Management Vector  - the last vector */
524 	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
525 		 pci_name(priv->pdev));
526 	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
527 			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
528 	if (err) {
529 		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
530 		goto abort_with_msix_enabled;
531 	}
532 	priv->irq_db_indices =
533 		dma_alloc_coherent(&priv->pdev->dev,
534 				   priv->num_ntfy_blks *
535 				   sizeof(*priv->irq_db_indices),
536 				   &priv->irq_db_indices_bus, GFP_KERNEL);
537 	if (!priv->irq_db_indices) {
538 		err = -ENOMEM;
539 		goto abort_with_mgmt_vector;
540 	}
541 
542 	priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
543 				     sizeof(*priv->ntfy_blocks), GFP_KERNEL);
544 	if (!priv->ntfy_blocks) {
545 		err = -ENOMEM;
546 		goto abort_with_irq_db_indices;
547 	}
548 
549 	/* Setup the other blocks - the first n-1 vectors */
550 	node_mask = gve_get_node_mask(priv);
551 	cur_cpu = cpumask_first(node_mask);
552 	for (i = 0; i < priv->num_ntfy_blks; i++) {
553 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
554 		int msix_idx = i;
555 
556 		snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
557 			 i, pci_name(priv->pdev));
558 		block->priv = priv;
559 		err = request_irq(priv->msix_vectors[msix_idx].vector,
560 				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
561 				  IRQF_NO_AUTOEN, block->name, block);
562 		if (err) {
563 			dev_err(&priv->pdev->dev,
564 				"Failed to receive msix vector %d\n", i);
565 			goto abort_with_some_ntfy_blocks;
566 		}
567 		block->irq = priv->msix_vectors[msix_idx].vector;
568 		irq_set_affinity_and_hint(block->irq,
569 					  cpumask_of(cur_cpu));
570 		block->irq_db_index = &priv->irq_db_indices[i].index;
571 
572 		cur_cpu = cpumask_next(cur_cpu, node_mask);
573 		/* Wrap once CPUs in the node have been exhausted, or when
574 		 * starting RX queue affinities. TX and RX queues of the same
575 		 * index share affinity.
576 		 */
577 		if (cur_cpu >= nr_cpu_ids || (i + 1) == priv->tx_cfg.max_queues)
578 			cur_cpu = cpumask_first(node_mask);
579 	}
580 	return 0;
581 abort_with_some_ntfy_blocks:
582 	for (j = 0; j < i; j++) {
583 		struct gve_notify_block *block = &priv->ntfy_blocks[j];
584 		int msix_idx = j;
585 
586 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
587 				      NULL);
588 		free_irq(priv->msix_vectors[msix_idx].vector, block);
589 		block->irq = 0;
590 	}
591 	kvfree(priv->ntfy_blocks);
592 	priv->ntfy_blocks = NULL;
593 abort_with_irq_db_indices:
594 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
595 			  sizeof(*priv->irq_db_indices),
596 			  priv->irq_db_indices, priv->irq_db_indices_bus);
597 	priv->irq_db_indices = NULL;
598 abort_with_mgmt_vector:
599 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
600 abort_with_msix_enabled:
601 	pci_disable_msix(priv->pdev);
602 abort_with_msix_vectors:
603 	kvfree(priv->msix_vectors);
604 	priv->msix_vectors = NULL;
605 	return err;
606 }
607 
608 static void gve_free_notify_blocks(struct gve_priv *priv)
609 {
610 	int i;
611 
612 	if (!priv->msix_vectors)
613 		return;
614 
615 	/* Free the irqs */
616 	for (i = 0; i < priv->num_ntfy_blks; i++) {
617 		struct gve_notify_block *block = &priv->ntfy_blocks[i];
618 		int msix_idx = i;
619 
620 		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
621 				      NULL);
622 		free_irq(priv->msix_vectors[msix_idx].vector, block);
623 		block->irq = 0;
624 	}
625 	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
626 	kvfree(priv->ntfy_blocks);
627 	priv->ntfy_blocks = NULL;
628 	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
629 			  sizeof(*priv->irq_db_indices),
630 			  priv->irq_db_indices, priv->irq_db_indices_bus);
631 	priv->irq_db_indices = NULL;
632 	pci_disable_msix(priv->pdev);
633 	kvfree(priv->msix_vectors);
634 	priv->msix_vectors = NULL;
635 }
636 
637 static int gve_setup_device_resources(struct gve_priv *priv)
638 {
639 	int err;
640 
641 	err = gve_alloc_flow_rule_caches(priv);
642 	if (err)
643 		return err;
644 	err = gve_alloc_rss_config_cache(priv);
645 	if (err)
646 		goto abort_with_flow_rule_caches;
647 	err = gve_alloc_counter_array(priv);
648 	if (err)
649 		goto abort_with_rss_config_cache;
650 	err = gve_alloc_notify_blocks(priv);
651 	if (err)
652 		goto abort_with_counter;
653 	err = gve_alloc_stats_report(priv);
654 	if (err)
655 		goto abort_with_ntfy_blocks;
656 	err = gve_adminq_configure_device_resources(priv,
657 						    priv->counter_array_bus,
658 						    priv->num_event_counters,
659 						    priv->irq_db_indices_bus,
660 						    priv->num_ntfy_blks);
661 	if (unlikely(err)) {
662 		dev_err(&priv->pdev->dev,
663 			"could not setup device_resources: err=%d\n", err);
664 		err = -ENXIO;
665 		goto abort_with_stats_report;
666 	}
667 
668 	if (!gve_is_gqi(priv)) {
669 		priv->ptype_lut_dqo = kvzalloc_obj(*priv->ptype_lut_dqo);
670 		if (!priv->ptype_lut_dqo) {
671 			err = -ENOMEM;
672 			goto abort_with_stats_report;
673 		}
674 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
675 		if (err) {
676 			dev_err(&priv->pdev->dev,
677 				"Failed to get ptype map: err=%d\n", err);
678 			goto abort_with_ptype_lut;
679 		}
680 	}
681 
682 	if (priv->nic_timestamp_supported) {
683 		err = gve_init_clock(priv);
684 		if (err) {
685 			dev_warn(&priv->pdev->dev, "Failed to init clock, continuing without PTP support\n");
686 			err = 0;
687 		}
688 	}
689 
690 	err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
691 	if (err) {
692 		dev_err(&priv->pdev->dev, "Failed to init RSS config\n");
693 		goto abort_with_clock;
694 	}
695 
696 	err = gve_adminq_report_stats(priv, priv->stats_report_len,
697 				      priv->stats_report_bus,
698 				      GVE_STATS_REPORT_TIMER_PERIOD);
699 	if (err)
700 		dev_err(&priv->pdev->dev,
701 			"Failed to report stats: err=%d\n", err);
702 	gve_set_device_resources_ok(priv);
703 	return 0;
704 
705 abort_with_clock:
706 	gve_teardown_clock(priv);
707 abort_with_ptype_lut:
708 	kvfree(priv->ptype_lut_dqo);
709 	priv->ptype_lut_dqo = NULL;
710 abort_with_stats_report:
711 	gve_free_stats_report(priv);
712 abort_with_ntfy_blocks:
713 	gve_free_notify_blocks(priv);
714 abort_with_counter:
715 	gve_free_counter_array(priv);
716 abort_with_rss_config_cache:
717 	gve_free_rss_config_cache(priv);
718 abort_with_flow_rule_caches:
719 	gve_free_flow_rule_caches(priv);
720 
721 	return err;
722 }
723 
724 static void gve_trigger_reset(struct gve_priv *priv);
725 
726 static void gve_teardown_device_resources(struct gve_priv *priv)
727 {
728 	int err;
729 
730 	/* Tell device its resources are being freed */
731 	if (gve_get_device_resources_ok(priv)) {
732 		err = gve_flow_rules_reset(priv);
733 		if (err) {
734 			dev_err(&priv->pdev->dev,
735 				"Failed to reset flow rules: err=%d\n", err);
736 			gve_trigger_reset(priv);
737 		}
738 		/* detach the stats report */
739 		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
740 		if (err) {
741 			dev_err(&priv->pdev->dev,
742 				"Failed to detach stats report: err=%d\n", err);
743 			gve_trigger_reset(priv);
744 		}
745 		err = gve_adminq_deconfigure_device_resources(priv);
746 		if (err) {
747 			dev_err(&priv->pdev->dev,
748 				"Could not deconfigure device resources: err=%d\n",
749 				err);
750 			gve_trigger_reset(priv);
751 		}
752 	}
753 
754 	kvfree(priv->ptype_lut_dqo);
755 	priv->ptype_lut_dqo = NULL;
756 
757 	gve_free_flow_rule_caches(priv);
758 	gve_free_rss_config_cache(priv);
759 	gve_free_counter_array(priv);
760 	gve_free_notify_blocks(priv);
761 	gve_free_stats_report(priv);
762 	gve_teardown_clock(priv);
763 	gve_clear_device_resources_ok(priv);
764 }
765 
766 static int gve_unregister_qpl(struct gve_priv *priv,
767 			      struct gve_queue_page_list *qpl)
768 {
769 	int err;
770 
771 	if (!qpl)
772 		return 0;
773 
774 	err = gve_adminq_unregister_page_list(priv, qpl->id);
775 	if (err) {
776 		netif_err(priv, drv, priv->dev,
777 			  "Failed to unregister queue page list %d\n",
778 			  qpl->id);
779 		return err;
780 	}
781 
782 	priv->num_registered_pages -= qpl->num_entries;
783 	return 0;
784 }
785 
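/* Register a queue page list with the device, enforcing the device's
 * limit on total registered pages.
 */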
786 static int gve_register_qpl(struct gve_priv *priv,
787 			    struct gve_queue_page_list *qpl)
788 {
789 	int pages;
790 	int err;
791 
792 	if (!qpl)
793 		return 0;
794 
795 	pages = qpl->num_entries;
796 
797 	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
798 		netif_err(priv, drv, priv->dev,
799 			  "Reached max number of registered pages %llu > %llu\n",
800 			  pages + priv->num_registered_pages,
801 			  priv->max_registered_pages);
802 		return -EINVAL;
803 	}
804 
805 	err = gve_adminq_register_page_list(priv, qpl);
806 	if (err) {
807 		netif_err(priv, drv, priv->dev,
808 			  "failed to register queue page list %d\n",
809 			  qpl->id);
810 		return err;
811 	}
812 
813 	priv->num_registered_pages += pages;
814 	return 0;
815 }
816 
817 static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
818 {
819 	struct gve_tx_ring *tx = &priv->tx[idx];
820 
821 	if (gve_is_gqi(priv))
822 		return tx->tx_fifo.qpl;
823 	else
824 		return tx->dqo.qpl;
825 }
826 
827 static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
828 {
829 	struct gve_rx_ring *rx = &priv->rx[idx];
830 
831 	if (gve_is_gqi(priv))
832 		return rx->data.qpl;
833 	else
834 		return rx->dqo.qpl;
835 }
836 
837 static int gve_register_qpls(struct gve_priv *priv)
838 {
839 	int num_tx_qpls, num_rx_qpls;
840 	int err;
841 	int i;
842 
843 	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
844 	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
845 
846 	for (i = 0; i < num_tx_qpls; i++) {
847 		err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
848 		if (err)
849 			return err;
850 	}
851 
852 	for (i = 0; i < num_rx_qpls; i++) {
853 		err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i));
854 		if (err)
855 			return err;
856 	}
857 
858 	return 0;
859 }
860 
861 static int gve_unregister_qpls(struct gve_priv *priv)
862 {
863 	int num_tx_qpls, num_rx_qpls;
864 	int err;
865 	int i;
866 
867 	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
868 	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
869 
870 	for (i = 0; i < num_tx_qpls; i++) {
871 		err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
872 		/* This failure will trigger a reset - no need to clean */
873 		if (err)
874 			return err;
875 	}
876 
877 	for (i = 0; i < num_rx_qpls; i++) {
878 		err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i));
879 		/* This failure will trigger a reset - no need to clean */
880 		if (err)
881 			return err;
882 	}
883 	return 0;
884 }
885 
886 static int gve_create_rings(struct gve_priv *priv)
887 {
888 	int num_tx_queues = gve_num_tx_queues(priv);
889 	int err;
890 	int i;
891 
892 	err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
893 	if (err) {
894 		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
895 			  num_tx_queues);
896 		/* This failure will trigger a reset - no need to clean
897 		 * up
898 		 */
899 		return err;
900 	}
901 	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
902 		  num_tx_queues);
903 
904 	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
905 	if (err) {
906 		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
907 			  priv->rx_cfg.num_queues);
908 		/* This failure will trigger a reset - no need to clean
909 		 * up
910 		 */
911 		return err;
912 	}
913 	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
914 		  priv->rx_cfg.num_queues);
915 
916 	if (gve_is_gqi(priv)) {
917 		/* Rx data ring has been prefilled with packet buffers at queue
918 		 * allocation time.
919 		 *
920 		 * Write the doorbell to provide descriptor slots and packet
921 		 * buffers to the NIC.
922 		 */
923 		for (i = 0; i < priv->rx_cfg.num_queues; i++)
924 			gve_rx_write_doorbell(priv, &priv->rx[i]);
925 	} else {
926 		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
927 			/* Post buffers and ring doorbell. */
928 			gve_rx_post_buffers_dqo(&priv->rx[i]);
929 		}
930 	}
931 
932 	return 0;
933 }
934 
935 static void init_xdp_sync_stats(struct gve_priv *priv)
936 {
937 	int start_id = gve_xdp_tx_start_queue_id(priv);
938 	int i;
939 
940 	/* Init stats */
941 	for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) {
942 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
943 
944 		u64_stats_init(&priv->tx[i].statss);
945 		priv->tx[i].ntfy_id = ntfy_idx;
946 	}
947 }
948 
949 static void gve_init_sync_stats(struct gve_priv *priv)
950 {
951 	int i;
952 
953 	for (i = 0; i < priv->tx_cfg.num_queues; i++)
954 		u64_stats_init(&priv->tx[i].statss);
955 
956 	/* Init stats for XDP TX queues */
957 	init_xdp_sync_stats(priv);
958 
959 	for (i = 0; i < priv->rx_cfg.num_queues; i++)
960 		u64_stats_init(&priv->rx[i].statss);
961 }
962 
963 static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
964 				      struct gve_tx_alloc_rings_cfg *cfg)
965 {
966 	cfg->qcfg = &priv->tx_cfg;
967 	cfg->raw_addressing = !gve_is_qpl(priv);
968 	cfg->ring_size = priv->tx_desc_cnt;
969 	cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
970 	cfg->tx = priv->tx;
971 }
972 
973 static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings)
974 {
975 	int i;
976 
977 	if (!priv->tx)
978 		return;
979 
980 	for (i = 0; i < num_rings; i++) {
981 		if (gve_is_gqi(priv))
982 			gve_tx_stop_ring_gqi(priv, i);
983 		else
984 			gve_tx_stop_ring_dqo(priv, i);
985 	}
986 }
987 
988 static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
989 {
990 	int i;
991 
992 	for (i = 0; i < num_rings; i++) {
993 		if (gve_is_gqi(priv))
994 			gve_tx_start_ring_gqi(priv, i);
995 		else
996 			gve_tx_start_ring_dqo(priv, i);
997 	}
998 }
999 
1000 static int gve_queues_mem_alloc(struct gve_priv *priv,
1001 				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1002 				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1003 {
1004 	int err;
1005 
1006 	if (gve_is_gqi(priv))
1007 		err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
1008 	else
1009 		err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
1010 	if (err)
1011 		return err;
1012 
1013 	if (gve_is_gqi(priv))
1014 		err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
1015 	else
1016 		err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
1017 	if (err)
1018 		goto free_tx;
1019 
1020 	return 0;
1021 
1022 free_tx:
1023 	if (gve_is_gqi(priv))
1024 		gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
1025 	else
1026 		gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
1027 	return err;
1028 }
1029 
1030 static int gve_destroy_rings(struct gve_priv *priv)
1031 {
1032 	int num_tx_queues = gve_num_tx_queues(priv);
1033 	int err;
1034 
1035 	err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
1036 	if (err) {
1037 		netif_err(priv, drv, priv->dev,
1038 			  "failed to destroy tx queues\n");
1039 		/* This failure will trigger a reset - no need to clean up */
1040 		return err;
1041 	}
1042 	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
1043 	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
1044 	if (err) {
1045 		netif_err(priv, drv, priv->dev,
1046 			  "failed to destroy rx queues\n");
1047 		/* This failure will trigger a reset - no need to clean up */
1048 		return err;
1049 	}
1050 	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
1051 	return 0;
1052 }
1053 
1054 static void gve_queues_mem_free(struct gve_priv *priv,
1055 				struct gve_tx_alloc_rings_cfg *tx_cfg,
1056 				struct gve_rx_alloc_rings_cfg *rx_cfg)
1057 {
1058 	if (gve_is_gqi(priv)) {
1059 		gve_tx_free_rings_gqi(priv, tx_cfg);
1060 		gve_rx_free_rings_gqi(priv, rx_cfg);
1061 	} else {
1062 		gve_tx_free_rings_dqo(priv, tx_cfg);
1063 		gve_rx_free_rings_dqo(priv, rx_cfg);
1064 	}
1065 }
1066 
1067 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1068 		   struct page **page, dma_addr_t *dma,
1069 		   enum dma_data_direction dir, gfp_t gfp_flags)
1070 {
1071 	*page = alloc_pages_node(priv->numa_node, gfp_flags, 0);
1072 	if (!*page) {
1073 		priv->page_alloc_fail++;
1074 		return -ENOMEM;
1075 	}
1076 	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
1077 	if (dma_mapping_error(dev, *dma)) {
1078 		priv->dma_mapping_error++;
1079 		put_page(*page);
1080 		return -ENOMEM;
1081 	}
1082 	return 0;
1083 }
1084 
1085 struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1086 						      u32 id, int pages)
1087 {
1088 	struct gve_queue_page_list *qpl;
1089 	int err;
1090 	int i;
1091 
1092 	qpl = kvzalloc_obj(*qpl);
1093 	if (!qpl)
1094 		return NULL;
1095 
1096 	qpl->id = id;
1097 	qpl->num_entries = 0;
1098 	qpl->pages = kvzalloc_objs(*qpl->pages, pages);
1099 	if (!qpl->pages)
1100 		goto abort;
1101 
1102 	qpl->page_buses = kvzalloc_objs(*qpl->page_buses, pages);
1103 	if (!qpl->page_buses)
1104 		goto abort;
1105 
1106 	for (i = 0; i < pages; i++) {
1107 		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
1108 				     &qpl->page_buses[i],
1109 				     gve_qpl_dma_dir(priv, id), GFP_KERNEL);
1110 		if (err)
1111 			goto abort;
1112 		qpl->num_entries++;
1113 	}
1114 
1115 	return qpl;
1116 
1117 abort:
1118 	gve_free_queue_page_list(priv, qpl, id);
1119 	return NULL;
1120 }
1121 
1122 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1123 		   enum dma_data_direction dir)
1124 {
1125 	if (!dma_mapping_error(dev, dma))
1126 		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
1127 	if (page)
1128 		put_page(page);
1129 }
1130 
1131 void gve_free_queue_page_list(struct gve_priv *priv,
1132 			      struct gve_queue_page_list *qpl,
1133 			      u32 id)
1134 {
1135 	int i;
1136 
1137 	if (!qpl)
1138 		return;
1139 	if (!qpl->pages)
1140 		goto free_qpl;
1141 	if (!qpl->page_buses)
1142 		goto free_pages;
1143 
1144 	for (i = 0; i < qpl->num_entries; i++)
1145 		gve_free_page(&priv->pdev->dev, qpl->pages[i],
1146 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
1147 
1148 	kvfree(qpl->page_buses);
1149 	qpl->page_buses = NULL;
1150 free_pages:
1151 	kvfree(qpl->pages);
1152 	qpl->pages = NULL;
1153 free_qpl:
1154 	kvfree(qpl);
1155 }
1156 
1157 /* Use this to schedule a reset when the device is capable of continuing
1158  * to handle other requests in its current state. If it is not, do a reset
1159  * in thread instead.
1160  */
1161 void gve_schedule_reset(struct gve_priv *priv)
1162 {
1163 	gve_set_do_reset(priv);
1164 	queue_work(priv->gve_wq, &priv->service_task);
1165 }
1166 
1167 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
1168 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
1169 static void gve_turndown(struct gve_priv *priv);
1170 static void gve_turnup(struct gve_priv *priv);
1171 
1172 static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
1173 {
1174 	struct gve_rx_ring *rx;
1175 
1176 	if (!priv->rx)
1177 		return;
1178 
1179 	rx = &priv->rx[qid];
1180 	rx->xsk_pool = NULL;
1181 	if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
1182 		xdp_rxq_info_unreg_mem_model(&rx->xdp_rxq);
1183 
1184 	if (!priv->tx)
1185 		return;
1186 	priv->tx[gve_xdp_tx_queue_id(priv, qid)].xsk_pool = NULL;
1187 }
1188 
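/* Attach an XSK buff pool to the RX ring's memory model and mirror the
 * pool on the paired XDP TX queue.
 */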
1189 static int gve_reg_xsk_pool(struct gve_priv *priv, struct net_device *dev,
1190 			    struct xsk_buff_pool *pool, u16 qid)
1191 {
1192 	struct gve_rx_ring *rx;
1193 	u16 tx_qid;
1194 	int err;
1195 
1196 	rx = &priv->rx[qid];
1197 	err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1198 					 MEM_TYPE_XSK_BUFF_POOL, pool);
1199 	if (err) {
1200 		gve_unreg_xsk_pool(priv, qid);
1201 		return err;
1202 	}
1203 
1204 	rx->xsk_pool = pool;
1205 
1206 	tx_qid = gve_xdp_tx_queue_id(priv, qid);
1207 	priv->tx[tx_qid].xsk_pool = pool;
1208 
1209 	return 0;
1210 }
1211 
1212 static void gve_unreg_xdp_info(struct gve_priv *priv)
1213 {
1214 	int i;
1215 
1216 	if (!priv->tx_cfg.num_xdp_queues || !priv->rx)
1217 		return;
1218 
1219 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
1220 		struct gve_rx_ring *rx = &priv->rx[i];
1221 
1222 		if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
1223 			xdp_rxq_info_unreg(&rx->xdp_rxq);
1224 
1225 		gve_unreg_xsk_pool(priv, i);
1226 	}
1227 }
1228 
1229 static struct xsk_buff_pool *gve_get_xsk_pool(struct gve_priv *priv, int qid)
1230 {
1231 	if (!test_bit(qid, priv->xsk_pools))
1232 		return NULL;
1233 
1234 	return xsk_get_pool_from_qid(priv->dev, qid);
1235 }
1236 
1237 static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
1238 {
1239 	struct napi_struct *napi;
1240 	struct gve_rx_ring *rx;
1241 	int err = 0;
1242 	int i;
1243 
1244 	if (!priv->tx_cfg.num_xdp_queues)
1245 		return 0;
1246 
1247 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
1248 		struct xsk_buff_pool *xsk_pool;
1249 
1250 		rx = &priv->rx[i];
1251 		napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
1252 
1253 		err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i,
1254 				       napi->napi_id);
1255 		if (err)
1256 			goto err;
1257 
1258 		xsk_pool = gve_get_xsk_pool(priv, i);
1259 		if (xsk_pool)
1260 			err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
1261 		else if (gve_is_qpl(priv))
1262 			err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1263 							 MEM_TYPE_PAGE_SHARED,
1264 							 NULL);
1265 		else
1266 			err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1267 							 MEM_TYPE_PAGE_POOL,
1268 							 rx->dqo.page_pool);
1269 		if (err)
1270 			goto err;
1271 	}
1272 	return 0;
1273 
1274 err:
1275 	gve_unreg_xdp_info(priv);
1276 	return err;
1277 }
1278 
1279 
1280 static void gve_drain_page_cache(struct gve_priv *priv)
1281 {
1282 	int i;
1283 
1284 	for (i = 0; i < priv->rx_cfg.num_queues; i++)
1285 		page_frag_cache_drain(&priv->rx[i].page_cache);
1286 }
1287 
1288 static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
1289 				      struct gve_rx_alloc_rings_cfg *cfg)
1290 {
1291 	cfg->qcfg_rx = &priv->rx_cfg;
1292 	cfg->qcfg_tx = &priv->tx_cfg;
1293 	cfg->raw_addressing = !gve_is_qpl(priv);
1294 	cfg->enable_header_split = priv->header_split_enabled;
1295 	cfg->ring_size = priv->rx_desc_cnt;
1296 	cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
1297 	cfg->rx = priv->rx;
1298 	cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
1299 }
1300 
1301 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1302 			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1303 			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1304 {
1305 	gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
1306 	gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
1307 }
1308 
1309 static void gve_rx_start_ring(struct gve_priv *priv, int i)
1310 {
1311 	if (gve_is_gqi(priv))
1312 		gve_rx_start_ring_gqi(priv, i);
1313 	else
1314 		gve_rx_start_ring_dqo(priv, i);
1315 }
1316 
1317 static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
1318 {
1319 	int i;
1320 
1321 	for (i = 0; i < num_rings; i++)
1322 		gve_rx_start_ring(priv, i);
1323 }
1324 
1325 static void gve_rx_stop_ring(struct gve_priv *priv, int i)
1326 {
1327 	if (gve_is_gqi(priv))
1328 		gve_rx_stop_ring_gqi(priv, i);
1329 	else
1330 		gve_rx_stop_ring_dqo(priv, i);
1331 }
1332 
1333 static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
1334 {
1335 	int i;
1336 
1337 	if (!priv->rx)
1338 		return;
1339 
1340 	for (i = 0; i < num_rings; i++)
1341 		gve_rx_stop_ring(priv, i);
1342 }
1343 
1344 static void gve_queues_mem_remove(struct gve_priv *priv)
1345 {
1346 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1347 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1348 
1349 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1350 	gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1351 	priv->tx = NULL;
1352 	priv->rx = NULL;
1353 }
1354 
1355 /* The passed-in queue memory is stored into priv and the queues are made live.
1356  * No memory is allocated. Passed-in memory is freed on errors.
1357  */
1358 static int gve_queues_start(struct gve_priv *priv,
1359 			    struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1360 			    struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1361 {
1362 	struct net_device *dev = priv->dev;
1363 	int err;
1364 
1365 	/* Record new resources into priv */
1366 	priv->tx = tx_alloc_cfg->tx;
1367 	priv->rx = rx_alloc_cfg->rx;
1368 
1369 	/* Record new configs into priv */
1370 	priv->tx_cfg = *tx_alloc_cfg->qcfg;
1371 	priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings;
1372 	priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
1373 	priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
1374 	priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
1375 
1376 	gve_tx_start_rings(priv, gve_num_tx_queues(priv));
1377 	gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
1378 	gve_init_sync_stats(priv);
1379 
1380 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
1381 	if (err)
1382 		goto stop_and_free_rings;
1383 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
1384 	if (err)
1385 		goto stop_and_free_rings;
1386 
1387 	err = gve_reg_xdp_info(priv, dev);
1388 	if (err)
1389 		goto stop_and_free_rings;
1390 
1391 	if (rx_alloc_cfg->reset_rss) {
1392 		err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
1393 		if (err)
1394 			goto reset;
1395 	}
1396 
1397 	err = gve_register_qpls(priv);
1398 	if (err)
1399 		goto reset;
1400 
1401 	priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
1402 	priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
1403 
1404 	err = gve_create_rings(priv);
1405 	if (err)
1406 		goto reset;
1407 
1408 	gve_set_device_rings_ok(priv);
1409 
1410 	if (gve_get_report_stats(priv))
1411 		mod_timer(&priv->stats_report_timer,
1412 			  round_jiffies(jiffies +
1413 				msecs_to_jiffies(priv->stats_report_timer_period)));
1414 
1415 	gve_turnup(priv);
1416 	queue_work(priv->gve_wq, &priv->service_task);
1417 	priv->interface_up_cnt++;
1418 	return 0;
1419 
1420 reset:
1421 	if (gve_get_reset_in_progress(priv))
1422 		goto stop_and_free_rings;
1423 	gve_reset_and_teardown(priv, true);
1424 	/* if this fails there is nothing we can do so just ignore the return */
1425 	gve_reset_recovery(priv, false);
1426 	/* return the original error */
1427 	return err;
1428 stop_and_free_rings:
1429 	gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
1430 	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
1431 	gve_queues_mem_remove(priv);
1432 	return err;
1433 }
1434 
1435 static int gve_open(struct net_device *dev)
1436 {
1437 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1438 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1439 	struct gve_priv *priv = netdev_priv(dev);
1440 	int err;
1441 
1442 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1443 
1444 	err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1445 	if (err)
1446 		return err;
1447 
1448 	/* No need to free on error: ownership of resources is lost after
1449 	 * calling gve_queues_start.
1450 	 */
1451 	err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1452 	if (err)
1453 		return err;
1454 
1455 	return 0;
1456 }
1457 
1458 static int gve_queues_stop(struct gve_priv *priv)
1459 {
1460 	int err;
1461 
1462 	netif_carrier_off(priv->dev);
1463 	if (gve_get_device_rings_ok(priv)) {
1464 		gve_turndown(priv);
1465 		gve_drain_page_cache(priv);
1466 		err = gve_destroy_rings(priv);
1467 		if (err)
1468 			goto err;
1469 		err = gve_unregister_qpls(priv);
1470 		if (err)
1471 			goto err;
1472 		gve_clear_device_rings_ok(priv);
1473 	}
1474 	timer_delete_sync(&priv->stats_report_timer);
1475 
1476 	gve_unreg_xdp_info(priv);
1477 
1478 	gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
1479 	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
1480 
1481 	priv->interface_down_cnt++;
1482 	return 0;
1483 
1484 err:
1485 	/* This must have been called from a reset due to the rtnl lock
1486 	 * so just return at this point.
1487 	 */
1488 	if (gve_get_reset_in_progress(priv))
1489 		return err;
1490 	/* Otherwise reset before returning */
1491 	gve_reset_and_teardown(priv, true);
1492 	return gve_reset_recovery(priv, false);
1493 }
1494 
1495 static int gve_close(struct net_device *dev)
1496 {
1497 	struct gve_priv *priv = netdev_priv(dev);
1498 	int err;
1499 
1500 	err = gve_queues_stop(priv);
1501 	if (err)
1502 		return err;
1503 
1504 	gve_queues_mem_remove(priv);
1505 	return 0;
1506 }
1507 
1508 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1509 {
1510 	if (!gve_get_napi_enabled(priv))
1511 		return;
1512 
1513 	if (link_status == netif_carrier_ok(priv->dev))
1514 		return;
1515 
1516 	if (link_status) {
1517 		netdev_info(priv->dev, "Device link is up.\n");
1518 		netif_carrier_on(priv->dev);
1519 	} else {
1520 		netdev_info(priv->dev, "Device link is down.\n");
1521 		netif_carrier_off(priv->dev);
1522 	}
1523 }
1524 
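/* Bring the queues down and back up with num_xdp_rings XDP TX rings via
 * gve_adjust_config().
 */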
1525 static int gve_configure_rings_xdp(struct gve_priv *priv,
1526 				   u16 num_xdp_rings)
1527 {
1528 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1529 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1530 
1531 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1532 	tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
1533 
1534 	rx_alloc_cfg.xdp = !!num_xdp_rings;
1535 	return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1536 }
1537 
1538 static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
1539 		       struct netlink_ext_ack *extack)
1540 {
1541 	struct bpf_prog *old_prog;
1542 	int err = 0;
1543 	u32 status;
1544 
1545 	old_prog = READ_ONCE(priv->xdp_prog);
1546 	if (!netif_running(priv->dev)) {
1547 		WRITE_ONCE(priv->xdp_prog, prog);
1548 		if (old_prog)
1549 			bpf_prog_put(old_prog);
1550 
1551 		/* Update priv XDP queue configuration */
1552 		priv->tx_cfg.num_xdp_queues = priv->xdp_prog ?
1553 			priv->rx_cfg.num_queues : 0;
1554 		return 0;
1555 	}
1556 
1557 	if (!old_prog && prog)
1558 		err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
1559 	else if (old_prog && !prog)
1560 		err = gve_configure_rings_xdp(priv, 0);
1561 
1562 	if (err)
1563 		goto out;
1564 
1565 	WRITE_ONCE(priv->xdp_prog, prog);
1566 	if (old_prog)
1567 		bpf_prog_put(old_prog);
1568 
1569 out:
1570 	status = ioread32be(&priv->reg_bar0->device_status);
1571 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1572 	return err;
1573 }
1574 
1575 static int gve_xdp_xmit(struct net_device *dev, int n,
1576 			struct xdp_frame **frames, u32 flags)
1577 {
1578 	struct gve_priv *priv = netdev_priv(dev);
1579 
1580 	if (priv->queue_format == GVE_GQI_QPL_FORMAT)
1581 		return gve_xdp_xmit_gqi(dev, n, frames, flags);
1582 	else if (priv->queue_format == GVE_DQO_RDA_FORMAT)
1583 		return gve_xdp_xmit_dqo(dev, n, frames, flags);
1584 
1585 	return -EOPNOTSUPP;
1586 }
1587 
1588 static int gve_xsk_pool_enable(struct net_device *dev,
1589 			       struct xsk_buff_pool *pool,
1590 			       u16 qid)
1591 {
1592 	struct gve_priv *priv = netdev_priv(dev);
1593 	int err;
1594 
1595 	if (qid >= priv->rx_cfg.num_queues) {
1596 		dev_err(&priv->pdev->dev, "xsk pool invalid qid %d\n", qid);
1597 		return -EINVAL;
1598 	}
1599 	if (xsk_pool_get_rx_frame_size(pool) <
1600 	     priv->dev->max_mtu + sizeof(struct ethhdr)) {
1601 		dev_err(&priv->pdev->dev, "xsk pool frame_len too small\n");
1602 		return -EINVAL;
1603 	}
1604 
1605 	err = xsk_pool_dma_map(pool, &priv->pdev->dev,
1606 			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1607 	if (err)
1608 		return err;
1609 
1610 	set_bit(qid, priv->xsk_pools);
1611 
1612 	/* If XDP prog is not installed or interface is down, return. */
1613 	if (!priv->xdp_prog || !netif_running(dev))
1614 		return 0;
1615 
1616 	err = gve_reg_xsk_pool(priv, dev, pool, qid);
1617 	if (err)
1618 		goto err_xsk_pool_dma_mapped;
1619 
1620 	/* Stop and start RDA queues to repost buffers. */
1621 	if (!gve_is_qpl(priv)) {
1622 		err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
1623 		if (err)
1624 			goto err_xsk_pool_registered;
1625 	}
1626 	return 0;
1627 
1628 err_xsk_pool_registered:
1629 	gve_unreg_xsk_pool(priv, qid);
1630 err_xsk_pool_dma_mapped:
1631 	clear_bit(qid, priv->xsk_pools);
1632 	xsk_pool_dma_unmap(pool,
1633 			   DMA_ATTR_SKIP_CPU_SYNC |
1634 			   DMA_ATTR_WEAK_ORDERING);
1635 	return err;
1636 }
1637 
1638 static int gve_xsk_pool_disable(struct net_device *dev,
1639 				u16 qid)
1640 {
1641 	struct gve_priv *priv = netdev_priv(dev);
1642 	struct napi_struct *napi_rx;
1643 	struct napi_struct *napi_tx;
1644 	struct xsk_buff_pool *pool;
1645 	int tx_qid;
1646 	int err;
1647 
1648 	if (qid >= priv->rx_cfg.num_queues)
1649 		return -EINVAL;
1650 
1651 	clear_bit(qid, priv->xsk_pools);
1652 
1653 	pool = xsk_get_pool_from_qid(dev, qid);
1654 	if (pool)
1655 		xsk_pool_dma_unmap(pool,
1656 				   DMA_ATTR_SKIP_CPU_SYNC |
1657 				   DMA_ATTR_WEAK_ORDERING);
1658 
1659 	if (!netif_running(dev) || !priv->tx_cfg.num_xdp_queues)
1660 		return 0;
1661 
1662 	/* Stop and start RDA queues to repost buffers. */
1663 	if (!gve_is_qpl(priv) && priv->xdp_prog) {
1664 		err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
1665 		if (err)
1666 			return err;
1667 	}
1668 
1669 	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
1670 	napi_disable(napi_rx); /* make sure current rx poll is done */
1671 
1672 	tx_qid = gve_xdp_tx_queue_id(priv, qid);
1673 	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
1674 	napi_disable(napi_tx); /* make sure current tx poll is done */
1675 
1676 	gve_unreg_xsk_pool(priv, qid);
1677 	smp_mb(); /* Make sure it is visible to the workers on datapath */
1678 
1679 	napi_enable(napi_rx);
1680 	napi_enable(napi_tx);
1681 	if (gve_is_gqi(priv)) {
1682 		if (gve_rx_work_pending(&priv->rx[qid]))
1683 			napi_schedule(napi_rx);
1684 
1685 		if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
1686 			napi_schedule(napi_tx);
1687 	}
1688 
1689 	return 0;
1690 }
1691 
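/* ndo_xsk_wakeup: schedule NAPI for the queue so pending XSK work is
 * processed; if NAPI is already running, mark it missed instead.
 */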
1692 static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
1693 {
1694 	struct gve_priv *priv = netdev_priv(dev);
1695 	struct napi_struct *napi;
1696 
1697 	if (!gve_get_napi_enabled(priv))
1698 		return -ENETDOWN;
1699 
1700 	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
1701 		return -EINVAL;
1702 
1703 	napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
1704 	if (!napi_if_scheduled_mark_missed(napi)) {
1705 		/* Call local_bh_enable to trigger SoftIRQ processing */
1706 		local_bh_disable();
1707 		napi_schedule(napi);
1708 		local_bh_enable();
1709 	}
1710 
1711 	return 0;
1712 }
1713 
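/* XDP requires LRO and header split to be off, the default 2K RX
 * buffers, an MTU that fits in one buffer, and RX/TX queue counts that
 * are equal and no more than half the maximum, so every RX queue can be
 * paired with a dedicated XDP TX queue.
 */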
1714 static int gve_verify_xdp_configuration(struct net_device *dev,
1715 					struct netlink_ext_ack *extack)
1716 {
1717 	struct gve_priv *priv = netdev_priv(dev);
1718 	u16 max_xdp_mtu;
1719 
1720 	if (dev->features & NETIF_F_LRO) {
1721 		NL_SET_ERR_MSG_MOD(extack,
1722 				   "XDP is not supported when LRO is on.");
1723 		return -EOPNOTSUPP;
1724 	}
1725 
1726 	if (priv->header_split_enabled) {
1727 		NL_SET_ERR_MSG_MOD(extack,
1728 				   "XDP is not supported when header-data split is enabled.");
1729 		return -EOPNOTSUPP;
1730 	}
1731 
1732 	if (priv->rx_cfg.packet_buffer_size != SZ_2K) {
1733 		NL_SET_ERR_MSG_FMT_MOD(extack,
1734 				       "XDP is not supported for Rx buf len %d, only %d supported.",
1735 				       priv->rx_cfg.packet_buffer_size, SZ_2K);
1736 		return -EOPNOTSUPP;
1737 	}
1738 
1739 	max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
1740 	if (priv->queue_format == GVE_GQI_QPL_FORMAT)
1741 		max_xdp_mtu -= GVE_RX_PAD;
1742 
1743 	if (dev->mtu > max_xdp_mtu) {
1744 		NL_SET_ERR_MSG_FMT_MOD(extack,
1745 				       "XDP is not supported for mtu %d.",
1746 				       dev->mtu);
1747 		return -EOPNOTSUPP;
1748 	}
1749 
1750 	if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
1751 	    (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
1752 		netdev_warn(dev,
1753 			    "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d.",
1754 			    priv->rx_cfg.num_queues, priv->tx_cfg.num_queues,
1755 			    priv->tx_cfg.max_queues);
1756 		NL_SET_ERR_MSG_MOD(extack,
1757 				   "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
1758 		return -EINVAL;
1759 	}
1760 	return 0;
1761 }
1762 
1763 static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1764 {
1765 	struct gve_priv *priv = netdev_priv(dev);
1766 	int err;
1767 
1768 	err = gve_verify_xdp_configuration(dev, xdp->extack);
1769 	if (err)
1770 		return err;
1771 	switch (xdp->command) {
1772 	case XDP_SETUP_PROG:
1773 		return gve_set_xdp(priv, xdp->prog, xdp->extack);
1774 	case XDP_SETUP_XSK_POOL:
1775 		if (xdp->xsk.pool)
1776 			return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
1777 		else
1778 			return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
1779 	default:
1780 		return -EINVAL;
1781 	}
1782 }
1783 
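/* Rebuild the cached RSS state for a new queue count: the indirection
 * table gets the stock round-robin spread from ethtool_rxfh_indir_default()
 * plus a fresh random Toeplitz hash key, and both are pushed to the device
 * over the admin queue.
 */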
1784 int gve_init_rss_config(struct gve_priv *priv, u16 num_queues)
1785 {
1786 	struct gve_rss_config *rss_config = &priv->rss_config;
1787 	struct ethtool_rxfh_param rxfh = {0};
1788 	u16 i;
1789 
1790 	if (!priv->cache_rss_config)
1791 		return 0;
1792 
1793 	for (i = 0; i < priv->rss_lut_size; i++)
1794 		rss_config->hash_lut[i] =
1795 			ethtool_rxfh_indir_default(i, num_queues);
1796 
1797 	netdev_rss_key_fill(rss_config->hash_key, priv->rss_key_size);
1798 
1799 	rxfh.hfunc = ETH_RSS_HASH_TOP;
1800 
1801 	return gve_adminq_configure_rss(priv, &rxfh);
1802 }
1803 
1804 int gve_flow_rules_reset(struct gve_priv *priv)
1805 {
1806 	if (!priv->max_flow_rules)
1807 		return 0;
1808 
1809 	return gve_adminq_reset_flow_rules(priv);
1810 }
1811 
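/* Apply a new ring configuration, allocate-first: memory for the new
 * queues is allocated before the old ones are torn down, so an allocation
 * failure leaves the running configuration untouched. Only a failure to
 * restart the queues afterwards is fatal and turns the device down.
 */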
1812 int gve_adjust_config(struct gve_priv *priv,
1813 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1814 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1815 {
1816 	int err;
1817 
1818 	/* Allocate resources for the new configuration */
1819 	err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
1820 	if (err) {
1821 		netif_err(priv, drv, priv->dev,
1822 			  "Adjust config failed to alloc new queues");
1823 		return err;
1824 	}
1825 
1826 	/* Teardown the device and free existing resources */
1827 	err = gve_close(priv->dev);
1828 	if (err) {
1829 		netif_err(priv, drv, priv->dev,
1830 			  "Adjust config failed to close old queues");
1831 		gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg);
1832 		return err;
1833 	}
1834 
1835 	/* Bring the device back up again with the new resources. */
1836 	err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg);
1837 	if (err) {
1838 		netif_err(priv, drv, priv->dev,
1839 			  "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
1840 		/* No need to free on error: ownership of resources is lost after
1841 		 * calling gve_queues_start.
1842 		 */
1843 		gve_turndown(priv);
1844 		return err;
1845 	}
1846 
1847 	return 0;
1848 }
1849 
1850 int gve_adjust_queues(struct gve_priv *priv,
1851 		      struct gve_rx_queue_config new_rx_config,
1852 		      struct gve_tx_queue_config new_tx_config,
1853 		      bool reset_rss)
1854 {
1855 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1856 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1857 	int err;
1858 
1859 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1860 
1861 	/* Relay the new config from ethtool */
1862 	tx_alloc_cfg.qcfg = &new_tx_config;
1863 	rx_alloc_cfg.qcfg_tx = &new_tx_config;
1864 	rx_alloc_cfg.qcfg_rx = &new_rx_config;
1865 	rx_alloc_cfg.reset_rss = reset_rss;
1866 
1867 	if (netif_running(priv->dev)) {
1868 		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1869 		return err;
1870 	}
1871 	/* Set the config for the next up. */
1872 	if (reset_rss) {
1873 		err = gve_init_rss_config(priv, new_rx_config.num_queues);
1874 		if (err)
1875 			return err;
1876 	}
1877 	priv->tx_cfg = new_tx_config;
1878 	priv->rx_cfg = new_rx_config;
1879 
1880 	return 0;
1881 }
1882 
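/* Quiesce the datapath: drop the carrier, disable NAPI on every TX and RX
 * queue (detaching the queues from their NAPI instances), stop the TX
 * stack, and synchronize_net() to let in-flight processing drain. Paired
 * with gve_turnup() below.
 */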
1883 static void gve_turndown(struct gve_priv *priv)
1884 {
1885 	int idx;
1886 
1887 	if (netif_carrier_ok(priv->dev))
1888 		netif_carrier_off(priv->dev);
1889 
1890 	if (!gve_get_napi_enabled(priv))
1891 		return;
1892 
1893 	/* Disable napi to prevent more work from coming in */
1894 	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1895 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1896 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1897 
1898 		if (!gve_tx_was_added_to_block(priv, idx))
1899 			continue;
1900 
1901 		if (idx < priv->tx_cfg.num_queues)
1902 			netif_queue_set_napi(priv->dev, idx,
1903 					     NETDEV_QUEUE_TYPE_TX, NULL);
1904 
1905 		napi_disable_locked(&block->napi);
1906 	}
1907 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1908 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1909 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1910 
1911 		if (!gve_rx_was_added_to_block(priv, idx))
1912 			continue;
1913 
1914 		netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
1915 				     NULL);
1916 		napi_disable_locked(&block->napi);
1917 	}
1918 
1919 	/* Stop tx queues */
1920 	netif_tx_disable(priv->dev);
1921 
1922 	xdp_features_clear_redirect_target_locked(priv->dev);
1923 
1924 	gve_clear_napi_enabled(priv);
1925 	gve_clear_report_stats(priv);
1926 
1927 	/* Make sure that all traffic is finished processing. */
1928 	synchronize_net();
1929 }
1930 
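/* Undo gve_turndown(): restart the TX stack, re-enable and re-attach NAPI
 * for every queue, unmask interrupts (GQI) or re-arm interrupt moderation
 * (DQO), and give each queue a one-off napi_schedule() to catch any
 * descriptors that were posted while interrupts were masked.
 */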
1931 static void gve_turnup(struct gve_priv *priv)
1932 {
1933 	int idx;
1934 
1935 	/* Start the tx queues */
1936 	netif_tx_start_all_queues(priv->dev);
1937 
1938 	/* Enable napi and unmask interrupts for all queues */
1939 	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1940 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1941 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1942 
1943 		if (!gve_tx_was_added_to_block(priv, idx))
1944 			continue;
1945 
1946 		napi_enable_locked(&block->napi);
1947 
1948 		if (idx < priv->tx_cfg.num_queues)
1949 			netif_queue_set_napi(priv->dev, idx,
1950 					     NETDEV_QUEUE_TYPE_TX,
1951 					     &block->napi);
1952 
1953 		if (gve_is_gqi(priv)) {
1954 			iowrite32be(0, gve_irq_doorbell(priv, block));
1955 		} else {
1956 			gve_set_itr_coalesce_usecs_dqo(priv, block,
1957 						       priv->tx_coalesce_usecs);
1958 		}
1959 
1960 		/* Any descs written by the NIC before this barrier will be
1961 		 * handled by the one-off napi schedule below. Whereas any
1962 		 * descs after the barrier will generate interrupts.
1963 		 */
1964 		mb();
1965 		napi_schedule(&block->napi);
1966 	}
1967 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1968 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1969 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1970 
1971 		if (!gve_rx_was_added_to_block(priv, idx))
1972 			continue;
1973 
1974 		napi_enable_locked(&block->napi);
1975 		netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
1976 				     &block->napi);
1977 
1978 		if (gve_is_gqi(priv)) {
1979 			iowrite32be(0, gve_irq_doorbell(priv, block));
1980 		} else {
1981 			gve_set_itr_coalesce_usecs_dqo(priv, block,
1982 						       priv->rx_coalesce_usecs);
1983 		}
1984 
1985 		/* Any descs written by the NIC before this barrier will be
1986 		 * handled by the one-off napi schedule below. Whereas any
1987 		 * descs after the barrier will generate interrupts.
1988 		 */
1989 		mb();
1990 		napi_schedule(&block->napi);
1991 	}
1992 
1993 	if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
1994 		xdp_features_set_redirect_target_locked(priv->dev, false);
1995 
1996 	gve_set_napi_enabled(priv);
1997 }
1998 
1999 static void gve_turnup_and_check_status(struct gve_priv *priv)
2000 {
2001 	u32 status;
2002 
2003 	gve_turnup(priv);
2004 	status = ioread32be(&priv->reg_bar0->device_status);
2005 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
2006 }
2007 
2008 static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
2009 							unsigned int txqueue)
2010 {
2011 	u32 ntfy_idx;
2012 
2013 	if (txqueue > priv->tx_cfg.num_queues)
2014 		return NULL;
2015 
2016 	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
2017 	if (ntfy_idx >= priv->num_ntfy_blks)
2018 		return NULL;
2019 
2020 	return &priv->ntfy_blocks[ntfy_idx];
2021 }
2022 
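/* Before escalating a TX timeout to a full device reset, try kicking the
 * stuck queue's NAPI once. Kicks are rate-limited per queue by
 * MIN_TX_TIMEOUT_GAP; returns true if a kick was issued, in which case the
 * caller skips the reset.
 */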
2023 static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
2024 				      unsigned int txqueue)
2025 {
2026 	struct gve_notify_block *block;
2027 	u32 current_time;
2028 
2029 	block = gve_get_tx_notify_block(priv, txqueue);
2030 
2031 	if (!block)
2032 		return false;
2033 
2034 	current_time = jiffies_to_msecs(jiffies);
2035 	if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
2036 		return false;
2037 
2038 	netdev_info(priv->dev, "Kicking queue %d", txqueue);
2039 	napi_schedule(&block->napi);
2040 	block->tx->last_kick_msec = current_time;
2041 	return true;
2042 }
2043 
2044 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
2045 {
2046 	struct gve_notify_block *block;
2047 	struct gve_priv *priv;
2048 
2049 	netdev_info(dev, "Timeout on tx queue %d", txqueue);
2050 	priv = netdev_priv(dev);
2051 
2052 	if (!gve_tx_timeout_try_q_kick(priv, txqueue))
2053 		gve_schedule_reset(priv);
2054 
2055 	block = gve_get_tx_notify_block(priv, txqueue);
2056 	if (block)
2057 		block->tx->queue_timeout++;
2058 	priv->tx_timeo_cnt++;
2059 }
2060 
2061 /* Header split is only supported on the DQO RDA queue format. If XDP is
2062  * enabled, header split is not allowed.
2063  */
2064 bool gve_header_split_supported(const struct gve_priv *priv)
2065 {
2066 	return priv->header_buf_size &&
2067 		priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
2068 }
2069 
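/* Validate and stage a new Rx buffer length, as requested for example via
 * `ethtool -G <dev> rx-buf-len <2048|4096>` (illustrative userspace usage,
 * not something this function parses). Only DQO devices advertising 4K
 * buffer support may change it, and XDP pins the length at 2K.
 */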
2070 int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
2071 			      struct netlink_ext_ack *extack,
2072 			      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
2073 {
2074 	u32 old_rx_buf_len = rx_alloc_cfg->packet_buffer_size;
2075 
2076 	if (rx_buf_len == old_rx_buf_len)
2077 		return 0;
2078 
2079 	/* device options may not always contain support for 4K buffers */
2080 	if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K) {
2081 		NL_SET_ERR_MSG_MOD(extack,
2082 				   "Modifying Rx buf len is not supported");
2083 		return -EOPNOTSUPP;
2084 	}
2085 
2086 	if (priv->xdp_prog && rx_buf_len != SZ_2K) {
2087 		NL_SET_ERR_MSG_MOD(extack,
2088 				   "Rx buf len can only be 2048 when XDP is on");
2089 		return -EINVAL;
2090 	}
2091 
2092 	if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K) {
2093 		NL_SET_ERR_MSG_MOD(extack,
2094 				   "Rx buf len can only be 2048 or 4096");
2095 		return -EINVAL;
2096 	}
2097 	rx_alloc_cfg->packet_buffer_size = rx_buf_len;
2098 
2099 	return 0;
2100 }
2101 
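/* Stage a header-split change. ETHTOOL_TCP_DATA_SPLIT_UNKNOWN means "leave
 * as is"; anything else requires gve_header_split_supported(). Userspace
 * would typically toggle this with something like
 * `ethtool -G <dev> tcp-data-split on` (illustrative usage).
 */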
2102 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
2103 			  struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
2104 {
2105 	bool enable_hdr_split;
2106 
2107 	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
2108 		return 0;
2109 
2110 	if (!gve_header_split_supported(priv)) {
2111 		dev_err(&priv->pdev->dev, "Header-split not supported\n");
2112 		return -EOPNOTSUPP;
2113 	}
2114 
2115 	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
2116 		enable_hdr_split = true;
2117 	else
2118 		enable_hdr_split = false;
2119 
2120 	if (enable_hdr_split == priv->header_split_enabled)
2121 		return 0;
2122 
2123 	rx_alloc_cfg->enable_header_split = enable_hdr_split;
2124 
2125 	return 0;
2126 }
2127 
2128 static int gve_set_features(struct net_device *netdev,
2129 			    netdev_features_t features)
2130 {
2131 	const netdev_features_t orig_features = netdev->features;
2132 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
2133 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
2134 	struct gve_priv *priv = netdev_priv(netdev);
2135 	int err;
2136 
2137 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2138 
2139 	if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
2140 		netdev->features ^= NETIF_F_LRO;
2141 		if (priv->xdp_prog && (netdev->features & NETIF_F_LRO)) {
2142 			netdev_warn(netdev,
2143 				    "XDP is not supported when LRO is on.\n");
2144 			err = -EOPNOTSUPP;
2145 			goto revert_features;
2146 		}
2147 		if (netif_running(netdev)) {
2148 			err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2149 			if (err)
2150 				goto revert_features;
2151 		}
2152 	}
2153 	if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) {
2154 		err = gve_flow_rules_reset(priv);
2155 		if (err)
2156 			goto revert_features;
2157 	}
2158 
2159 	return 0;
2160 
2161 revert_features:
2162 	netdev->features = orig_features;
2163 	return err;
2164 }
2165 
2166 static int gve_get_ts_config(struct net_device *dev,
2167 			     struct kernel_hwtstamp_config *kernel_config)
2168 {
2169 	struct gve_priv *priv = netdev_priv(dev);
2170 
2171 	*kernel_config = priv->ts_config;
2172 	return 0;
2173 }
2174 
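/* ndo_hwtstamp_set handler: TX timestamping is never supported, and RX
 * timestamping requires the device clock. Any RX filter other than
 * HWTSTAMP_FILTER_NONE is widened to HWTSTAMP_FILTER_ALL, which is
 * reported back to the SIOCSHWTSTAMP caller (e.g. hwstamp_ctl).
 */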
2175 static int gve_set_ts_config(struct net_device *dev,
2176 			     struct kernel_hwtstamp_config *kernel_config,
2177 			     struct netlink_ext_ack *extack)
2178 {
2179 	struct gve_priv *priv = netdev_priv(dev);
2180 
2181 	if (kernel_config->tx_type != HWTSTAMP_TX_OFF) {
2182 		NL_SET_ERR_MSG_MOD(extack, "TX timestamping is not supported");
2183 		return -ERANGE;
2184 	}
2185 
2186 	if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) {
2187 		if (!gve_is_clock_enabled(priv)) {
2188 			NL_SET_ERR_MSG_MOD(extack,
2189 					   "RX timestamping is not supported");
2190 			kernel_config->rx_filter = HWTSTAMP_FILTER_NONE;
2191 			return -EOPNOTSUPP;
2192 		}
2193 
2194 		kernel_config->rx_filter = HWTSTAMP_FILTER_ALL;
2195 	}
2196 
2197 	priv->ts_config.rx_filter = kernel_config->rx_filter;
2198 
2199 	return 0;
2200 }
2201 
2202 static const struct net_device_ops gve_netdev_ops = {
2203 	.ndo_start_xmit		=	gve_start_xmit,
2204 	.ndo_features_check	=	gve_features_check,
2205 	.ndo_open		=	gve_open,
2206 	.ndo_stop		=	gve_close,
2207 	.ndo_get_stats64	=	gve_get_stats,
2208 	.ndo_tx_timeout         =       gve_tx_timeout,
2209 	.ndo_set_features	=	gve_set_features,
2210 	.ndo_bpf		=	gve_xdp,
2211 	.ndo_xdp_xmit		=	gve_xdp_xmit,
2212 	.ndo_xsk_wakeup		=	gve_xsk_wakeup,
2213 	.ndo_hwtstamp_get	=	gve_get_ts_config,
2214 	.ndo_hwtstamp_set	=	gve_set_ts_config,
2215 };
2216 
2217 static void gve_handle_status(struct gve_priv *priv, u32 status)
2218 {
2219 	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
2220 		dev_info(&priv->pdev->dev, "Device requested reset.\n");
2221 		gve_set_do_reset(priv);
2222 	}
2223 	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
2224 		priv->stats_report_trigger_cnt++;
2225 		gve_set_do_report_stats(priv);
2226 	}
2227 }
2228 
2229 static void gve_handle_reset(struct gve_priv *priv)
2230 {
2231 	/* A service task will be scheduled at the end of probe to catch any
2232 	 * resets that need to happen, and we don't want to reset until
2233 	 * probe is done.
2234 	 */
2235 	if (gve_get_probe_in_progress(priv))
2236 		return;
2237 
2238 	if (gve_get_do_reset(priv)) {
2239 		rtnl_lock();
2240 		netdev_lock(priv->dev);
2241 		gve_reset(priv, false);
2242 		netdev_unlock(priv->dev);
2243 		rtnl_unlock();
2244 	}
2245 }
2246 
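/* Fill the DMA stats report shared with the device: written_count is
 * bumped first (presumably so the device can spot an in-progress update),
 * then one big-endian {stat_name, value, queue_id} record is written per
 * metric per TX/RX queue.
 */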
2247 void gve_handle_report_stats(struct gve_priv *priv)
2248 {
2249 	struct stats *stats = priv->stats_report->stats;
2250 	int idx, stats_idx = 0;
2251 	unsigned int start = 0;
2252 	u64 tx_bytes;
2253 
2254 	if (!gve_get_report_stats(priv))
2255 		return;
2256 
2257 	be64_add_cpu(&priv->stats_report->written_count, 1);
2258 	/* tx stats */
2259 	if (priv->tx) {
2260 		for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
2261 			u32 last_completion = 0;
2262 			u32 tx_frames = 0;
2263 
2264 			/* DQO doesn't currently support these metrics. */
2265 			if (gve_is_gqi(priv)) {
2266 				last_completion = priv->tx[idx].done;
2267 				tx_frames = priv->tx[idx].req;
2268 			}
2269 
2270 			do {
2271 				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
2272 				tx_bytes = priv->tx[idx].bytes_done;
2273 			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
2274 			stats[stats_idx++] = (struct stats) {
2275 				.stat_name = cpu_to_be32(TX_WAKE_CNT),
2276 				.value = cpu_to_be64(priv->tx[idx].wake_queue),
2277 				.queue_id = cpu_to_be32(idx),
2278 			};
2279 			stats[stats_idx++] = (struct stats) {
2280 				.stat_name = cpu_to_be32(TX_STOP_CNT),
2281 				.value = cpu_to_be64(priv->tx[idx].stop_queue),
2282 				.queue_id = cpu_to_be32(idx),
2283 			};
2284 			stats[stats_idx++] = (struct stats) {
2285 				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
2286 				.value = cpu_to_be64(tx_frames),
2287 				.queue_id = cpu_to_be32(idx),
2288 			};
2289 			stats[stats_idx++] = (struct stats) {
2290 				.stat_name = cpu_to_be32(TX_BYTES_SENT),
2291 				.value = cpu_to_be64(tx_bytes),
2292 				.queue_id = cpu_to_be32(idx),
2293 			};
2294 			stats[stats_idx++] = (struct stats) {
2295 				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
2296 				.value = cpu_to_be64(last_completion),
2297 				.queue_id = cpu_to_be32(idx),
2298 			};
2299 			stats[stats_idx++] = (struct stats) {
2300 				.stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
2301 				.value = cpu_to_be64(priv->tx[idx].queue_timeout),
2302 				.queue_id = cpu_to_be32(idx),
2303 			};
2304 		}
2305 	}
2306 	/* rx stats */
2307 	if (priv->rx) {
2308 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
2309 			stats[stats_idx++] = (struct stats) {
2310 				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
2311 				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
2312 				.queue_id = cpu_to_be32(idx),
2313 			};
2314 			stats[stats_idx++] = (struct stats) {
2315 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
2316 				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
2317 				.queue_id = cpu_to_be32(idx),
2318 			};
2319 		}
2320 	}
2321 }
2322 
2323 /* Handle NIC status register changes, reset requests and report stats */
2324 static void gve_service_task(struct work_struct *work)
2325 {
2326 	struct gve_priv *priv = container_of(work, struct gve_priv,
2327 					     service_task);
2328 	u32 status = ioread32be(&priv->reg_bar0->device_status);
2329 
2330 	gve_handle_status(priv, status);
2331 
2332 	gve_handle_reset(priv);
2333 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
2334 }
2335 
2336 static void gve_set_netdev_xdp_features(struct gve_priv *priv)
2337 {
2338 	xdp_features_t xdp_features;
2339 
2340 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
2341 		xdp_features = NETDEV_XDP_ACT_BASIC;
2342 		xdp_features |= NETDEV_XDP_ACT_REDIRECT;
2343 		xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
2344 	} else if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
2345 		xdp_features = NETDEV_XDP_ACT_BASIC;
2346 		xdp_features |= NETDEV_XDP_ACT_REDIRECT;
2347 		xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
2348 	} else {
2349 		xdp_features = 0;
2350 	}
2351 
2352 	xdp_set_features_flag_locked(priv->dev, xdp_features);
2353 }
2354 
2355 static const struct xdp_metadata_ops gve_xdp_metadata_ops = {
2356 	.xmo_rx_timestamp	= gve_xdp_rx_timestamp,
2357 };
2358 
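/* One-time (and post-reset) device initialization: bring up the admin
 * queue, verify driver compatibility, describe the device, and size the
 * notify-block/queue layout from the available MSI-X vectors before
 * allocating device resources. The reset path passes skip_describe_device
 * to keep the previously negotiated configuration.
 */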
2359 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
2360 {
2361 	int num_ntfy;
2362 	int err;
2363 
2364 	/* Set up the adminq */
2365 	err = gve_adminq_alloc(&priv->pdev->dev, priv);
2366 	if (err) {
2367 		dev_err(&priv->pdev->dev,
2368 			"Failed to alloc admin queue: err=%d\n", err);
2369 		return err;
2370 	}
2371 
2372 	err = gve_verify_driver_compatibility(priv);
2373 	if (err) {
2374 		dev_err(&priv->pdev->dev,
2375 			"Could not verify driver compatibility: err=%d\n", err);
2376 		goto err;
2377 	}
2378 
2379 	priv->num_registered_pages = 0;
2380 
2381 	if (skip_describe_device)
2382 		goto setup_device;
2383 
2384 	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
2385 	/* Get the initial information we need from the device */
2386 	err = gve_adminq_describe_device(priv);
2387 	if (err) {
2388 		dev_err(&priv->pdev->dev,
2389 			"Could not get device information: err=%d\n", err);
2390 		goto err;
2391 	}
2392 	priv->dev->mtu = priv->dev->max_mtu;
2393 	num_ntfy = pci_msix_vec_count(priv->pdev);
2394 	if (num_ntfy <= 0) {
2395 		dev_err(&priv->pdev->dev,
2396 			"could not count MSI-x vectors: err=%d\n", num_ntfy);
2397 		err = num_ntfy;
2398 		goto err;
2399 	} else if (num_ntfy < GVE_MIN_MSIX) {
2400 		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
2401 			GVE_MIN_MSIX, num_ntfy);
2402 		err = -EINVAL;
2403 		goto err;
2404 	}
2405 
2406 	/* Big TCP is only supported on DQO */
2407 	if (!gve_is_gqi(priv))
2408 		netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
2409 
2410 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
2411 	/* gvnic has one Notification Block per MSI-x vector, except for the
2412 	 * management vector
2413 	 */
2414 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
2415 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
2416 	priv->numa_node = dev_to_node(&priv->pdev->dev);
2417 
2418 	priv->tx_cfg.max_queues =
2419 		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
2420 	priv->rx_cfg.max_queues =
2421 		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
2422 
2423 	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
2424 	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
2425 	if (priv->default_num_queues > 0) {
2426 		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
2427 						priv->tx_cfg.num_queues);
2428 		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
2429 						priv->rx_cfg.num_queues);
2430 	}
2431 	priv->tx_cfg.num_xdp_queues = 0;
2432 
2433 	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
2434 		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
2435 	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
2436 		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
2437 
2438 	if (!gve_is_gqi(priv)) {
2439 		priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
2440 		priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
2441 	}
2442 
2443 	priv->ts_config.tx_type = HWTSTAMP_TX_OFF;
2444 	priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE;
2445 
2446 setup_device:
2447 	priv->xsk_pools = bitmap_zalloc(priv->rx_cfg.max_queues, GFP_KERNEL);
2448 	if (!priv->xsk_pools) {
2449 		err = -ENOMEM;
2450 		goto err;
2451 	}
2452 
2453 	gve_set_netdev_xdp_features(priv);
2454 	if (!gve_is_gqi(priv))
2455 		priv->dev->xdp_metadata_ops = &gve_xdp_metadata_ops;
2456 
2457 	err = gve_setup_device_resources(priv);
2458 	if (err)
2459 		goto err_free_xsk_bitmap;
2460 
2461 	return 0;
2462 
2463 err_free_xsk_bitmap:
2464 	bitmap_free(priv->xsk_pools);
2465 	priv->xsk_pools = NULL;
2466 err:
2467 	gve_adminq_free(&priv->pdev->dev, priv);
2468 	return err;
2469 }
2470 
2471 static void gve_teardown_priv_resources(struct gve_priv *priv)
2472 {
2473 	gve_teardown_device_resources(priv);
2474 	gve_adminq_free(&priv->pdev->dev, priv);
2475 	bitmap_free(priv->xsk_pools);
2476 	priv->xsk_pools = NULL;
2477 }
2478 
2479 static void gve_trigger_reset(struct gve_priv *priv)
2480 {
2481 	/* Reset the device by releasing the AQ */
2482 	gve_adminq_release(priv);
2483 }
2484 
2485 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
2486 {
2487 	gve_trigger_reset(priv);
2488 	/* With the reset having already happened, close cannot fail */
2489 	if (was_up)
2490 		gve_close(priv->dev);
2491 	gve_teardown_priv_resources(priv);
2492 }
2493 
2494 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
2495 {
2496 	int err;
2497 
2498 	err = gve_init_priv(priv, true);
2499 	if (err)
2500 		goto err;
2501 	if (was_up) {
2502 		err = gve_open(priv->dev);
2503 		if (err)
2504 			goto err;
2505 	}
2506 	return 0;
2507 err:
2508 	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
2509 	gve_turndown(priv);
2510 	return err;
2511 }
2512 
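/* Full device reset. With attempt_teardown we first try an orderly close;
 * without it (or if close fails) the admin queue is released, which
 * force-resets the device before resources are freed. Recovery then
 * re-runs init and, if the interface was up, reopens it.
 */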
2513 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
2514 {
2515 	bool was_up = netif_running(priv->dev);
2516 	int err;
2517 
2518 	dev_info(&priv->pdev->dev, "Performing reset\n");
2519 	gve_clear_do_reset(priv);
2520 	gve_set_reset_in_progress(priv);
2521 	/* If we aren't attempting an orderly teardown, just turn the device
2522 	 * down and reset right away.
2523 	 */
2524 	if (!attempt_teardown) {
2525 		gve_turndown(priv);
2526 		gve_reset_and_teardown(priv, was_up);
2527 	} else {
2528 		/* Otherwise attempt to close normally */
2529 		if (was_up) {
2530 			err = gve_close(priv->dev);
2531 			/* If that fails, reset as we did above */
2532 			if (err)
2533 				gve_reset_and_teardown(priv, was_up);
2534 		}
2535 		/* Clean up any remaining resources */
2536 		gve_teardown_priv_resources(priv);
2537 	}
2538 
2539 	/* Set it all back up */
2540 	err = gve_reset_recovery(priv, was_up);
2541 	gve_clear_reset_in_progress(priv);
2542 	priv->reset_cnt++;
2543 	priv->interface_up_cnt = 0;
2544 	priv->interface_down_cnt = 0;
2545 	priv->stats_report_trigger_cnt = 0;
2546 	return err;
2547 }
2548 
2549 static void gve_write_version(u8 __iomem *driver_version_register)
2550 {
2551 	const char *c = gve_version_prefix;
2552 
2553 	while (*c) {
2554 		writeb(*c, driver_version_register);
2555 		c++;
2556 	}
2557 
2558 	c = gve_version_str;
2559 	while (*c) {
2560 		writeb(*c, driver_version_register);
2561 		c++;
2562 	}
2563 	writeb('\n', driver_version_register);
2564 }
2565 
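/* Queue-management stop hook. A single RX queue cannot be quiesced on its
 * own, so the whole datapath is turned down, the one queue is destroyed
 * over the admin queue (and its QPL unregistered), its ring state is
 * snapshotted into per_q_mem for a later restart, and the remaining queues
 * are turned back up.
 */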
2566 static int gve_rx_queue_stop(struct net_device *dev, void *per_q_mem, int idx)
2567 {
2568 	struct gve_priv *priv = netdev_priv(dev);
2569 	struct gve_rx_ring *gve_per_q_mem;
2570 	int err;
2571 
2572 	if (!priv->rx)
2573 		return -EAGAIN;
2574 
2575 	/* Destroying queue 0 while other queues exist is not supported in DQO */
2576 	if (!gve_is_gqi(priv) && idx == 0)
2577 		return -ERANGE;
2578 
2579 	/* Single-queue destruction requires quiescence on all queues */
2580 	gve_turndown(priv);
2581 
2582 	/* This failure will trigger a reset - no need to clean up */
2583 	err = gve_adminq_destroy_single_rx_queue(priv, idx);
2584 	if (err)
2585 		return err;
2586 
2587 	if (gve_is_qpl(priv)) {
2588 		/* This failure will trigger a reset - no need to clean up */
2589 		err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx));
2590 		if (err)
2591 			return err;
2592 	}
2593 
2594 	gve_rx_stop_ring(priv, idx);
2595 
2596 	/* Turn the unstopped queues back up */
2597 	gve_turnup_and_check_status(priv);
2598 
2599 	gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2600 	*gve_per_q_mem = priv->rx[idx];
2601 	memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
2602 	return 0;
2603 }
2604 
2605 static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
2606 {
2607 	struct gve_priv *priv = netdev_priv(dev);
2608 	struct gve_rx_alloc_rings_cfg cfg = {0};
2609 	struct gve_rx_ring *gve_per_q_mem;
2610 
2611 	gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2612 	gve_rx_get_curr_alloc_cfg(priv, &cfg);
2613 
2614 	if (gve_is_gqi(priv))
2615 		gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg);
2616 	else
2617 		gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
2618 }
2619 
2620 static int gve_rx_queue_mem_alloc(struct net_device *dev,
2621 				  struct netdev_queue_config *qcfg,
2622 				  void *per_q_mem, int idx)
2623 {
2624 	struct gve_priv *priv = netdev_priv(dev);
2625 	struct gve_rx_alloc_rings_cfg cfg = {0};
2626 	struct gve_rx_ring *gve_per_q_mem;
2627 	int err;
2628 
2629 	if (!priv->rx)
2630 		return -EAGAIN;
2631 
2632 	gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2633 	gve_rx_get_curr_alloc_cfg(priv, &cfg);
2634 
2635 	if (gve_is_gqi(priv))
2636 		err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx);
2637 	else
2638 		err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx);
2639 
2640 	return err;
2641 }
2642 
2643 static int gve_rx_queue_start(struct net_device *dev,
2644 			      struct netdev_queue_config *qcfg,
2645 			      void *per_q_mem, int idx)
2646 {
2647 	struct gve_priv *priv = netdev_priv(dev);
2648 	struct gve_rx_ring *gve_per_q_mem;
2649 	int err;
2650 
2651 	if (!priv->rx)
2652 		return -EAGAIN;
2653 
2654 	gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2655 	priv->rx[idx] = *gve_per_q_mem;
2656 
2657 	/* Single-queue creation requires quiescence on all queues */
2658 	gve_turndown(priv);
2659 
2660 	gve_rx_start_ring(priv, idx);
2661 
2662 	if (gve_is_qpl(priv)) {
2663 		/* This failure will trigger a reset - no need to clean up */
2664 		err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx));
2665 		if (err)
2666 			goto abort;
2667 	}
2668 
2669 	/* This failure will trigger a reset - no need to clean up */
2670 	err = gve_adminq_create_single_rx_queue(priv, idx);
2671 	if (err)
2672 		goto abort;
2673 
2674 	if (gve_is_gqi(priv))
2675 		gve_rx_write_doorbell(priv, &priv->rx[idx]);
2676 	else
2677 		gve_rx_post_buffers_dqo(&priv->rx[idx]);
2678 
2679 	/* Turn the unstopped queues back up */
2680 	gve_turnup_and_check_status(priv);
2681 	return 0;
2682 
2683 abort:
2684 	gve_rx_stop_ring(priv, idx);
2685 
2686 	/* All failures in this func result in a reset, by clearing the struct
2687 	 * at idx, we prevent a double free when that reset runs. The reset,
2688 	 * which needs the rtnl lock, will not run till this func returns and
2689 	 * its caller gives up the lock.
2690 	 */
2691 	memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
2692 	return err;
2693 }
2694 
2695 static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
2696 	.ndo_queue_mem_size	=	sizeof(struct gve_rx_ring),
2697 	.ndo_queue_mem_alloc	=	gve_rx_queue_mem_alloc,
2698 	.ndo_queue_mem_free	=	gve_rx_queue_mem_free,
2699 	.ndo_queue_start	=	gve_rx_queue_start,
2700 	.ndo_queue_stop		=	gve_rx_queue_stop,
2701 };
2702 
2703 static void gve_get_rx_queue_stats(struct net_device *dev, int idx,
2704 				   struct netdev_queue_stats_rx *rx_stats)
2705 {
2706 	struct gve_priv *priv = netdev_priv(dev);
2707 	struct gve_rx_ring *rx = &priv->rx[idx];
2708 	unsigned int start;
2709 
2710 	do {
2711 		start = u64_stats_fetch_begin(&rx->statss);
2712 		rx_stats->packets = rx->rpackets;
2713 		rx_stats->bytes = rx->rbytes;
2714 		rx_stats->alloc_fail = rx->rx_skb_alloc_fail +
2715 				       rx->rx_buf_alloc_fail;
2716 	} while (u64_stats_fetch_retry(&rx->statss, start));
2717 }
2718 
2719 static void gve_get_tx_queue_stats(struct net_device *dev, int idx,
2720 				   struct netdev_queue_stats_tx *tx_stats)
2721 {
2722 	struct gve_priv *priv = netdev_priv(dev);
2723 	struct gve_tx_ring *tx = &priv->tx[idx];
2724 	unsigned int start;
2725 
2726 	do {
2727 		start = u64_stats_fetch_begin(&tx->statss);
2728 		tx_stats->packets = tx->pkt_done;
2729 		tx_stats->bytes = tx->bytes_done;
2730 	} while (u64_stats_fetch_retry(&tx->statss, start));
2731 }
2732 
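/* Base qstats are reported as zero: the per-queue callbacks above are
 * treated as covering all traffic, so the core's totals are just the sum
 * over the currently visible queues.
 */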
2733 static void gve_get_base_stats(struct net_device *dev,
2734 			       struct netdev_queue_stats_rx *rx,
2735 			       struct netdev_queue_stats_tx *tx)
2736 {
2737 	rx->packets = 0;
2738 	rx->bytes = 0;
2739 	rx->alloc_fail = 0;
2740 
2741 	tx->packets = 0;
2742 	tx->bytes = 0;
2743 }
2744 
2745 static const struct netdev_stat_ops gve_stat_ops = {
2746 	.get_queue_stats_rx	= gve_get_rx_queue_stats,
2747 	.get_queue_stats_tx	= gve_get_tx_queue_stats,
2748 	.get_base_stats		= gve_get_base_stats,
2749 };
2750 
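/* PCI probe: enable the device, map the register and doorbell BARs, write
 * the driver version for the device to read, size the netdev from the max
 * queue counts advertised in BAR0, then run gve_init_priv() and register
 * the netdev. The service task is kicked once at the end to handle any
 * reset the device requested while probe was in progress.
 */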
2751 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2752 {
2753 	int max_tx_queues, max_rx_queues;
2754 	struct net_device *dev;
2755 	__be32 __iomem *db_bar;
2756 	struct gve_registers __iomem *reg_bar;
2757 	struct gve_priv *priv;
2758 	int err;
2759 
2760 	err = pci_enable_device(pdev);
2761 	if (err)
2762 		return err;
2763 
2764 	err = pci_request_regions(pdev, gve_driver_name);
2765 	if (err)
2766 		goto abort_with_enabled;
2767 
2768 	pci_set_master(pdev);
2769 
2770 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2771 	if (err) {
2772 		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
2773 		goto abort_with_pci_region;
2774 	}
2775 
2776 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
2777 	if (!reg_bar) {
2778 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
2779 		err = -ENOMEM;
2780 		goto abort_with_pci_region;
2781 	}
2782 
2783 	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
2784 	if (!db_bar) {
2785 		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
2786 		err = -ENOMEM;
2787 		goto abort_with_reg_bar;
2788 	}
2789 
2790 	gve_write_version(&reg_bar->driver_version);
2791 	/* Get max queues to alloc etherdev */
2792 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
2793 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
2794 	/* Alloc and setup the netdev and priv */
2795 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
2796 	if (!dev) {
2797 		dev_err(&pdev->dev, "could not allocate netdev\n");
2798 		err = -ENOMEM;
2799 		goto abort_with_db_bar;
2800 	}
2801 	SET_NETDEV_DEV(dev, &pdev->dev);
2802 	pci_set_drvdata(pdev, dev);
2803 	dev->ethtool_ops = &gve_ethtool_ops;
2804 	dev->netdev_ops = &gve_netdev_ops;
2805 	dev->queue_mgmt_ops = &gve_queue_mgmt_ops;
2806 	dev->stat_ops = &gve_stat_ops;
2807 
2808 	/* Set default and supported features.
2809 	 *
2810 	 * Features might be set in other locations as well (such as
2811 	 * `gve_adminq_describe_device`).
2812 	 */
2813 	dev->hw_features = NETIF_F_HIGHDMA;
2814 	dev->hw_features |= NETIF_F_SG;
2815 	dev->hw_features |= NETIF_F_HW_CSUM;
2816 	dev->hw_features |= NETIF_F_TSO;
2817 	dev->hw_features |= NETIF_F_TSO6;
2818 	dev->hw_features |= NETIF_F_TSO_ECN;
2819 	dev->hw_features |= NETIF_F_RXCSUM;
2820 	dev->hw_features |= NETIF_F_RXHASH;
2821 	dev->features = dev->hw_features;
2822 	dev->watchdog_timeo = 5 * HZ;
2823 	dev->min_mtu = ETH_MIN_MTU;
2824 	netif_carrier_off(dev);
2825 
2826 	priv = netdev_priv(dev);
2827 	priv->dev = dev;
2828 	priv->pdev = pdev;
2829 	priv->msg_enable = DEFAULT_MSG_LEVEL;
2830 	priv->reg_bar0 = reg_bar;
2831 	priv->db_bar2 = db_bar;
2832 	priv->service_task_flags = 0x0;
2833 	priv->state_flags = 0x0;
2834 	priv->ethtool_flags = 0x0;
2835 	priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
2836 	priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
2837 
2838 	gve_set_probe_in_progress(priv);
2839 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
2840 	if (!priv->gve_wq) {
2841 		dev_err(&pdev->dev, "Could not allocate workqueue");
2842 		err = -ENOMEM;
2843 		goto abort_with_netdev;
2844 	}
2845 	INIT_WORK(&priv->service_task, gve_service_task);
2846 	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
2847 	priv->tx_cfg.max_queues = max_tx_queues;
2848 	priv->rx_cfg.max_queues = max_rx_queues;
2849 
2850 	err = gve_init_priv(priv, false);
2851 	if (err)
2852 		goto abort_with_wq;
2853 
2854 	if (!gve_is_gqi(priv) && !gve_is_qpl(priv))
2855 		dev->netmem_tx = true;
2856 
2857 	err = register_netdev(dev);
2858 	if (err)
2859 		goto abort_with_gve_init;
2860 
2861 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
2862 	dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
2863 	gve_clear_probe_in_progress(priv);
2864 	queue_work(priv->gve_wq, &priv->service_task);
2865 	return 0;
2866 
2867 abort_with_gve_init:
2868 	gve_teardown_priv_resources(priv);
2869 
2870 abort_with_wq:
2871 	destroy_workqueue(priv->gve_wq);
2872 
2873 abort_with_netdev:
2874 	free_netdev(dev);
2875 
2876 abort_with_db_bar:
2877 	pci_iounmap(pdev, db_bar);
2878 
2879 abort_with_reg_bar:
2880 	pci_iounmap(pdev, reg_bar);
2881 
2882 abort_with_pci_region:
2883 	pci_release_regions(pdev);
2884 
2885 abort_with_enabled:
2886 	pci_disable_device(pdev);
2887 	return err;
2888 }
2889 
2890 static void gve_remove(struct pci_dev *pdev)
2891 {
2892 	struct net_device *netdev = pci_get_drvdata(pdev);
2893 	struct gve_priv *priv = netdev_priv(netdev);
2894 	__be32 __iomem *db_bar = priv->db_bar2;
2895 	void __iomem *reg_bar = priv->reg_bar0;
2896 
2897 	unregister_netdev(netdev);
2898 	gve_teardown_priv_resources(priv);
2899 	destroy_workqueue(priv->gve_wq);
2900 	free_netdev(netdev);
2901 	pci_iounmap(pdev, db_bar);
2902 	pci_iounmap(pdev, reg_bar);
2903 	pci_release_regions(pdev);
2904 	pci_disable_device(pdev);
2905 }
2906 
2907 static void gve_shutdown(struct pci_dev *pdev)
2908 {
2909 	struct net_device *netdev = pci_get_drvdata(pdev);
2910 	struct gve_priv *priv = netdev_priv(netdev);
2911 	bool was_up = netif_running(priv->dev);
2912 
2913 	netif_device_detach(netdev);
2914 
2915 	rtnl_lock();
2916 	netdev_lock(netdev);
2917 	if (was_up && gve_close(priv->dev)) {
2918 		/* If the dev was up, attempt to close; if close fails, reset */
2919 		gve_reset_and_teardown(priv, was_up);
2920 	} else {
2921 		/* If the dev wasn't up or close worked, finish tearing down */
2922 		gve_teardown_priv_resources(priv);
2923 	}
2924 	netdev_unlock(netdev);
2925 	rtnl_unlock();
2926 }
2927 
2928 #ifdef CONFIG_PM
2929 static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
2930 {
2931 	struct net_device *netdev = pci_get_drvdata(pdev);
2932 	struct gve_priv *priv = netdev_priv(netdev);
2933 	bool was_up = netif_running(priv->dev);
2934 
2935 	priv->suspend_cnt++;
2936 	rtnl_lock();
2937 	netdev_lock(netdev);
2938 	if (was_up && gve_close(priv->dev)) {
2939 		/* If the dev was up, attempt to close; if close fails, reset */
2940 		gve_reset_and_teardown(priv, was_up);
2941 	} else {
2942 		/* If the dev wasn't up or close worked, finish tearing down */
2943 		gve_teardown_priv_resources(priv);
2944 	}
2945 	priv->up_before_suspend = was_up;
2946 	netdev_unlock(netdev);
2947 	rtnl_unlock();
2948 	return 0;
2949 }
2950 
2951 static int gve_resume(struct pci_dev *pdev)
2952 {
2953 	struct net_device *netdev = pci_get_drvdata(pdev);
2954 	struct gve_priv *priv = netdev_priv(netdev);
2955 	int err;
2956 
2957 	priv->resume_cnt++;
2958 	rtnl_lock();
2959 	netdev_lock(netdev);
2960 	err = gve_reset_recovery(priv, priv->up_before_suspend);
2961 	netdev_unlock(netdev);
2962 	rtnl_unlock();
2963 	return err;
2964 }
2965 #endif /* CONFIG_PM */
2966 
2967 static const struct pci_device_id gve_id_table[] = {
2968 	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
2969 	{ }
2970 };
2971 
2972 static struct pci_driver gve_driver = {
2973 	.name		= gve_driver_name,
2974 	.id_table	= gve_id_table,
2975 	.probe		= gve_probe,
2976 	.remove		= gve_remove,
2977 	.shutdown	= gve_shutdown,
2978 #ifdef CONFIG_PM
2979 	.suspend        = gve_suspend,
2980 	.resume         = gve_resume,
2981 #endif
2982 };
2983 
2984 module_pci_driver(gve_driver);
2985 
2986 MODULE_DEVICE_TABLE(pci, gve_id_table);
2987 MODULE_AUTHOR("Google, Inc.");
2988 MODULE_DESCRIPTION("Google Virtual NIC Driver");
2989 MODULE_LICENSE("Dual MIT/GPL");
2990 MODULE_VERSION(GVE_VERSION);
2991