xref: /linux/drivers/net/ethernet/google/gve/gve_main.c (revision ee9a43b7cfe2d8a3520335fea7d8ce71b8cabd9d)
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <net/netdev_queues.h>
#include <net/sch_generic.h>
#include <net/xdp_sock_drv.h>
#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_register.h"
#include "gve_utils.h"

#define GVE_DEFAULT_RX_COPYBREAK	(256)

#define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
#define GVE_VERSION		"1.0.0"
#define GVE_VERSION_PREFIX	"GVE-"

// Minimum amount of time between queue kicks in msec (10 seconds)
#define MIN_TX_TIMEOUT_GAP (1000 * 10)

char gve_driver_name[] = "gve";
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

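/* Tell the device which OS and driver it is talking to. The gve_driver_info
 * block is allocated DMA-coherent because the device reads it directly while
 * handling the admin-queue command; -EOPNOTSUPP (e.g. from older devices
 * that predate the command) is deliberately treated as success.
 */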
static int gve_verify_driver_compatibility(struct gve_priv *priv)
{
	int err;
	struct gve_driver_info *driver_info;
	dma_addr_t driver_info_bus;

	driver_info = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct gve_driver_info),
					 &driver_info_bus, GFP_KERNEL);
	if (!driver_info)
		return -ENOMEM;

	*driver_info = (struct gve_driver_info) {
		.os_type = 1, /* Linux */
		.os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR),
		.os_version_minor = cpu_to_be32(LINUX_VERSION_PATCHLEVEL),
		.os_version_sub = cpu_to_be32(LINUX_VERSION_SUBLEVEL),
		.driver_capability_flags = {
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
			cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
		},
	};
	strscpy(driver_info->os_version_str1, utsname()->release,
		sizeof(driver_info->os_version_str1));
	strscpy(driver_info->os_version_str2, utsname()->version,
		sizeof(driver_info->os_version_str2));

	err = gve_adminq_verify_driver_compatibility(priv,
						     sizeof(struct gve_driver_info),
						     driver_info_bus);

	/* It's ok if the device doesn't support this */
	if (err == -EOPNOTSUPP)
		err = 0;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct gve_driver_info),
			  driver_info, driver_info_bus);
	return err;
}

static netdev_features_t gve_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	struct gve_priv *priv = netdev_priv(dev);

	if (!gve_is_gqi(priv))
		return gve_features_check_dqo(skb, dev, features);

	return features;
}

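/* ndo_start_xmit dispatch: the driver supports two descriptor formats, GQI
 * and DQO, with separate TX datapaths; pick the routine matching the format
 * negotiated for this device.
 */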
static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);

	if (gve_is_gqi(priv))
		return gve_tx(skb, dev);
	else
		return gve_tx_dqo(skb, dev);
}

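/* Aggregate per-ring counters into rtnl_link_stats64. Each ring's counters
 * are updated on the datapath under a u64_stats seqcount, so every read
 * loops with u64_stats_fetch_begin()/u64_stats_fetch_retry() until it gets
 * a consistent snapshot.
 */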
static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	struct gve_priv *priv = netdev_priv(dev);
	unsigned int start;
	u64 packets, bytes;
	int num_tx_queues;
	int ring;

	num_tx_queues = gve_num_tx_queues(priv);
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				packets = priv->rx[ring].rpackets;
				bytes = priv->rx[ring].rbytes;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			s->rx_packets += packets;
			s->rx_bytes += bytes;
		}
	}
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				packets = priv->tx[ring].pkt_done;
				bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			s->tx_packets += packets;
			s->tx_bytes += bytes;
		}
	}
}

static int gve_alloc_flow_rule_caches(struct gve_priv *priv)
{
	struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
	int err = 0;

	if (!priv->max_flow_rules)
		return 0;

	flow_rules_cache->rules_cache =
		kvcalloc(GVE_FLOW_RULES_CACHE_SIZE, sizeof(*flow_rules_cache->rules_cache),
			 GFP_KERNEL);
	if (!flow_rules_cache->rules_cache) {
		dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n");
		return -ENOMEM;
	}

	flow_rules_cache->rule_ids_cache =
		kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache),
			 GFP_KERNEL);
	if (!flow_rules_cache->rule_ids_cache) {
		dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n");
		err = -ENOMEM;
		goto free_rules_cache;
	}

	return 0;

free_rules_cache:
	kvfree(flow_rules_cache->rules_cache);
	flow_rules_cache->rules_cache = NULL;
	return err;
}

static void gve_free_flow_rule_caches(struct gve_priv *priv)
{
	struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;

	kvfree(flow_rules_cache->rule_ids_cache);
	flow_rules_cache->rule_ids_cache = NULL;
	kvfree(flow_rules_cache->rules_cache);
	flow_rules_cache->rules_cache = NULL;
}

static int gve_alloc_counter_array(struct gve_priv *priv)
{
	priv->counter_array =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_event_counters *
				   sizeof(*priv->counter_array),
				   &priv->counter_array_bus, GFP_KERNEL);
	if (!priv->counter_array)
		return -ENOMEM;

	return 0;
}

static void gve_free_counter_array(struct gve_priv *priv)
{
	if (!priv->counter_array)
		return;

	dma_free_coherent(&priv->pdev->dev,
			  priv->num_event_counters *
			  sizeof(*priv->counter_array),
			  priv->counter_array, priv->counter_array_bus);
	priv->counter_array = NULL;
}

/* NIC requests to report stats */
static void gve_stats_report_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     stats_report_task);
	if (gve_get_do_report_stats(priv)) {
		gve_handle_report_stats(priv);
		gve_clear_do_report_stats(priv);
	}
}

static void gve_stats_report_schedule(struct gve_priv *priv)
{
	if (!gve_get_probe_in_progress(priv) &&
	    !gve_get_reset_in_progress(priv)) {
		gve_set_do_report_stats(priv);
		queue_work(priv->gve_wq, &priv->stats_report_task);
	}
}

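/* Self-rearming timer: re-queue ourselves one period out, then kick the
 * stats-report task so the report buffer is refreshed outside timer context.
 */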
static void gve_stats_report_timer(struct timer_list *t)
{
	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

	mod_timer(&priv->stats_report_timer,
		  round_jiffies(jiffies +
		  msecs_to_jiffies(priv->stats_report_timer_period)));
	gve_stats_report_schedule(priv);
}

static int gve_alloc_stats_report(struct gve_priv *priv)
{
	int tx_stats_num, rx_stats_num;

	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
		       gve_num_tx_queues(priv);
	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
		       priv->rx_cfg.num_queues;
	priv->stats_report_len = struct_size(priv->stats_report, stats,
					     size_add(tx_stats_num, rx_stats_num));
	priv->stats_report =
		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
				   &priv->stats_report_bus, GFP_KERNEL);
	if (!priv->stats_report)
		return -ENOMEM;
	/* Set up timer for the report-stats task */
	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
	return 0;
}

static void gve_free_stats_report(struct gve_priv *priv)
{
	if (!priv->stats_report)
		return;

	del_timer_sync(&priv->stats_report_timer);
	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
			  priv->stats_report, priv->stats_report_bus);
	priv->stats_report = NULL;
}

static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
	struct gve_priv *priv = arg;

	queue_work(priv->gve_wq, &priv->service_task);
	return IRQ_HANDLED;
}

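/* GQI queue interrupt handler: mask the interrupt via the block's doorbell
 * and defer the real work to NAPI; the doorbell is written again to unmask
 * once polling completes.
 */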
static irqreturn_t gve_intr(int irq, void *arg)
{
	struct gve_notify_block *block = arg;
	struct gve_priv *priv = block->priv;

	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}

static irqreturn_t gve_intr_dqo(int irq, void *arg)
{
	struct gve_notify_block *block = arg;

	/* Interrupts are automatically masked */
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}

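/* Returns true if the current CPU is in the irq's effective affinity mask,
 * i.e. NAPI is running on the CPU this queue's interrupt is homed on. If the
 * mask cannot be read, err on the side of claiming we are home.
 */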
static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
{
	int cpu_curr = smp_processor_id();
	const struct cpumask *aff_mask;

	aff_mask = irq_get_effective_affinity_mask(irq);
	if (unlikely(!aff_mask))
		return 1;

	return cpumask_test_cpu(cpu_curr, aff_mask);
}

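/* GQI NAPI poll. TX completions are always cleaned; with a zero budget
 * (netpoll) RX and XDP processing are skipped entirely. After
 * napi_complete_done() the irq is re-armed via the ACK|EVENT doorbell and
 * pending work is rechecked behind a full barrier, so a completion that
 * raced with the ACK is not lost: if work remains, NAPI is rescheduled and
 * the irq masked again.
 */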
int gve_napi_poll(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block;
	__be32 __iomem *irq_doorbell;
	bool reschedule = false;
	struct gve_priv *priv;
	int work_done = 0;

	block = container_of(napi, struct gve_notify_block, napi);
	priv = block->priv;

	if (block->tx) {
		if (block->tx->q_num < priv->tx_cfg.num_queues)
			reschedule |= gve_tx_poll(block, budget);
		else if (budget)
			reschedule |= gve_xdp_poll(block, budget);
	}

	if (!budget)
		return 0;

	if (block->rx) {
		work_done = gve_rx_poll(block, budget);
		reschedule |= work_done == budget;
	}

	if (reschedule)
		return budget;

	/* Complete processing - don't unmask irq if busy polling is enabled */
	if (likely(napi_complete_done(napi, work_done))) {
		irq_doorbell = gve_irq_doorbell(priv, block);
		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

		/* Ensure the IRQ ACK is visible before we check for pending
		 * work; any updates the queue issued before the ACK will be
		 * visible after this barrier.
		 */
		mb();

		if (block->tx)
			reschedule |= gve_tx_clean_pending(priv, block->tx);
		if (block->rx)
			reschedule |= gve_rx_work_pending(block->rx);

		if (reschedule && napi_schedule(napi))
			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
	}
	return work_done;
}

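/* DQO NAPI poll. Like the GQI path, but interrupts are masked automatically
 * by the hardware, and a reschedule is requested by returning the full
 * budget only when NAPI is already on its home CPU; otherwise the irq is
 * re-armed so its affinity eventually migrates NAPI back to the right CPU.
 */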
int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block =
		container_of(napi, struct gve_notify_block, napi);
	struct gve_priv *priv = block->priv;
	bool reschedule = false;
	int work_done = 0;

	if (block->tx)
		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);

	if (!budget)
		return 0;

	if (block->rx) {
		work_done = gve_rx_poll_dqo(block, budget);
		reschedule |= work_done == budget;
	}

	if (reschedule) {
		/* Reschedule by returning budget only if already on the
		 * correct cpu.
		 */
		if (likely(gve_is_napi_on_home_cpu(priv, block->irq)))
			return budget;

		/* If not on the cpu this queue's irq has affinity with, avoid
		 * rescheduling napi and arm the irq instead so that napi
		 * eventually gets rescheduled back onto the right cpu.
		 */
		if (work_done == budget)
			work_done--;
	}

	if (likely(napi_complete_done(napi, work_done))) {
		/* Enable interrupts again.
		 *
		 * We don't need to repoll afterwards because HW supports the
		 * PCI MSI-X PBA feature.
		 *
		 * Another interrupt would be triggered if a new event came in
		 * since the last one.
		 */
		gve_write_irq_doorbell_dqo(priv, block,
					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
	}

	return work_done;
}

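/* Allocate one MSI-X vector per notify block plus one management vector
 * (the last one). If fewer vectors are granted than requested, shrink the
 * notify-block count to an even number, split it between TX and RX, and
 * clamp the queue limits to match. Each block's irq affinity hint is spread
 * across the online CPUs.
 */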
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
	int num_vecs_requested = priv->num_ntfy_blks + 1;
	unsigned int active_cpus;
	int vecs_enabled;
	int i, j;
	int err;

	priv->msix_vectors = kvcalloc(num_vecs_requested,
				      sizeof(*priv->msix_vectors), GFP_KERNEL);
	if (!priv->msix_vectors)
		return -ENOMEM;
	for (i = 0; i < num_vecs_requested; i++)
		priv->msix_vectors[i].entry = i;
	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
					     GVE_MIN_MSIX, num_vecs_requested);
	if (vecs_enabled < 0) {
		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
			GVE_MIN_MSIX, vecs_enabled);
		err = vecs_enabled;
		goto abort_with_msix_vectors;
	}
	if (vecs_enabled != num_vecs_requested) {
		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
		int vecs_per_type = new_num_ntfy_blks / 2;
		int vecs_left = new_num_ntfy_blks % 2;

		priv->num_ntfy_blks = new_num_ntfy_blks;
		priv->mgmt_msix_idx = priv->num_ntfy_blks;
		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
						vecs_per_type);
		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
						vecs_per_type + vecs_left);
		dev_err(&priv->pdev->dev,
			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
			vecs_enabled, priv->tx_cfg.max_queues,
			priv->rx_cfg.max_queues);
		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	}
	/* Half the notification blocks go to TX and half to RX */
	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());

	/* Setup Management Vector - the last vector */
	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
		 pci_name(priv->pdev));
	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
	if (err) {
		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
		goto abort_with_msix_enabled;
	}
	priv->irq_db_indices =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_ntfy_blks *
				   sizeof(*priv->irq_db_indices),
				   &priv->irq_db_indices_bus, GFP_KERNEL);
	if (!priv->irq_db_indices) {
		err = -ENOMEM;
		goto abort_with_mgmt_vector;
	}

	priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
				     sizeof(*priv->ntfy_blocks), GFP_KERNEL);
	if (!priv->ntfy_blocks) {
		err = -ENOMEM;
		goto abort_with_irq_db_indices;
	}

	/* Setup the other blocks - the first n-1 vectors */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
			 i, pci_name(priv->pdev));
		block->priv = priv;
		err = request_irq(priv->msix_vectors[msix_idx].vector,
				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
				  0, block->name, block);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to receive msix vector %d\n", i);
			goto abort_with_some_ntfy_blocks;
		}
		block->irq = priv->msix_vectors[msix_idx].vector;
		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      get_cpu_mask(i % active_cpus));
		block->irq_db_index = &priv->irq_db_indices[i].index;
	}
	return 0;
abort_with_some_ntfy_blocks:
	for (j = 0; j < i; j++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[j];
		int msix_idx = j;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
		block->irq = 0;
	}
	kvfree(priv->ntfy_blocks);
	priv->ntfy_blocks = NULL;
abort_with_irq_db_indices:
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->irq_db_indices),
			  priv->irq_db_indices, priv->irq_db_indices_bus);
	priv->irq_db_indices = NULL;
abort_with_mgmt_vector:
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
	pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
	return err;
}

static void gve_free_notify_blocks(struct gve_priv *priv)
{
	int i;

	if (!priv->msix_vectors)
		return;

	/* Free the irqs */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
		block->irq = 0;
	}
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
	kvfree(priv->ntfy_blocks);
	priv->ntfy_blocks = NULL;
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->irq_db_indices),
			  priv->irq_db_indices, priv->irq_db_indices_bus);
	priv->irq_db_indices = NULL;
	pci_disable_msix(priv->pdev);
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
}

static int gve_setup_device_resources(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_flow_rule_caches(priv);
	if (err)
		return err;
	err = gve_alloc_counter_array(priv);
	if (err)
		goto abort_with_flow_rule_caches;
	err = gve_alloc_notify_blocks(priv);
	if (err)
		goto abort_with_counter;
	err = gve_alloc_stats_report(priv);
	if (err)
		goto abort_with_ntfy_blocks;
	err = gve_adminq_configure_device_resources(priv,
						    priv->counter_array_bus,
						    priv->num_event_counters,
						    priv->irq_db_indices_bus,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		dev_err(&priv->pdev->dev,
			"could not setup device_resources: err=%d\n", err);
		err = -ENXIO;
		goto abort_with_stats_report;
	}

	if (!gve_is_gqi(priv)) {
		priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
					       GFP_KERNEL);
		if (!priv->ptype_lut_dqo) {
			err = -ENOMEM;
			goto abort_with_stats_report;
		}
		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to get ptype map: err=%d\n", err);
			goto abort_with_ptype_lut;
		}
	}

	err = gve_adminq_report_stats(priv, priv->stats_report_len,
				      priv->stats_report_bus,
				      GVE_STATS_REPORT_TIMER_PERIOD);
	if (err)
		dev_err(&priv->pdev->dev,
			"Failed to report stats: err=%d\n", err);
	gve_set_device_resources_ok(priv);
	return 0;

abort_with_ptype_lut:
	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;
abort_with_stats_report:
	gve_free_stats_report(priv);
abort_with_ntfy_blocks:
	gve_free_notify_blocks(priv);
abort_with_counter:
	gve_free_counter_array(priv);
abort_with_flow_rule_caches:
	gve_free_flow_rule_caches(priv);

	return err;
}

static void gve_trigger_reset(struct gve_priv *priv);

static void gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		err = gve_flow_rules_reset(priv);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to reset flow rules: err=%d\n", err);
			gve_trigger_reset(priv);
		}
		/* detach the stats report */
		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to detach stats report: err=%d\n", err);
			gve_trigger_reset(priv);
		}
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Could not deconfigure device resources: err=%d\n",
				err);
			gve_trigger_reset(priv);
		}
	}

	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;

	gve_free_flow_rule_caches(priv);
	gve_free_counter_array(priv);
	gve_free_notify_blocks(priv);
	gve_free_stats_report(priv);
	gve_clear_device_resources_ok(priv);
}

static int gve_unregister_qpl(struct gve_priv *priv,
			      struct gve_queue_page_list *qpl)
{
	int err;

	if (!qpl)
		return 0;

	err = gve_adminq_unregister_page_list(priv, qpl->id);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "Failed to unregister queue page list %d\n",
			  qpl->id);
		return err;
	}

	priv->num_registered_pages -= qpl->num_entries;
	return 0;
}

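/* Register a queue page list with the device, first checking it against the
 * device-advertised max_registered_pages budget. A NULL qpl (as for
 * raw-addressing queues) is a no-op, mirroring gve_unregister_qpl() above.
 */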
static int gve_register_qpl(struct gve_priv *priv,
			    struct gve_queue_page_list *qpl)
{
	int pages;
	int err;

	if (!qpl)
		return 0;

	pages = qpl->num_entries;

	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
		netif_err(priv, drv, priv->dev,
			  "Reached max number of registered pages %llu > %llu\n",
			  pages + priv->num_registered_pages,
			  priv->max_registered_pages);
		return -EINVAL;
	}

	err = gve_adminq_register_page_list(priv, qpl);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to register queue page list %d\n",
			  qpl->id);
		return err;
	}

	priv->num_registered_pages += pages;
	return 0;
}

static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];

	if (gve_is_gqi(priv))
		return tx->tx_fifo.qpl;
	else
		return tx->dqo.qpl;
}

static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];

	if (gve_is_gqi(priv))
		return rx->data.qpl;
	else
		return rx->dqo.qpl;
}

static int gve_register_xdp_qpls(struct gve_priv *priv)
{
	int start_id;
	int err;
	int i;

	start_id = gve_xdp_tx_start_queue_id(priv);
	for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
		err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
		/* This failure will trigger a reset - no need to clean up */
		if (err)
			return err;
	}
	return 0;
}

static int gve_register_qpls(struct gve_priv *priv)
{
	int num_tx_qpls, num_rx_qpls;
	int err;
	int i;

	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
				      gve_is_qpl(priv));
	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));

	for (i = 0; i < num_tx_qpls; i++) {
		err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
		if (err)
			return err;
	}

	for (i = 0; i < num_rx_qpls; i++) {
		err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i));
		if (err)
			return err;
	}

	return 0;
}

static int gve_unregister_xdp_qpls(struct gve_priv *priv)
{
	int start_id;
	int err;
	int i;

	start_id = gve_xdp_tx_start_queue_id(priv);
	for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
		err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
		/* This failure will trigger a reset - no need to clean */
		if (err)
			return err;
	}
	return 0;
}

static int gve_unregister_qpls(struct gve_priv *priv)
{
	int num_tx_qpls, num_rx_qpls;
	int err;
	int i;

	num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
				      gve_is_qpl(priv));
	num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));

	for (i = 0; i < num_tx_qpls; i++) {
		err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
		/* This failure will trigger a reset - no need to clean */
		if (err)
			return err;
	}

	for (i = 0; i < num_rx_qpls; i++) {
		err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i));
		/* This failure will trigger a reset - no need to clean */
		if (err)
			return err;
	}
	return 0;
}

static int gve_create_xdp_rings(struct gve_priv *priv)
{
	int err;

	err = gve_adminq_create_tx_queues(priv,
					  gve_xdp_tx_start_queue_id(priv),
					  priv->num_xdp_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
			  priv->num_xdp_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
		  priv->num_xdp_queues);

	return 0;
}

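/* Create all TX then all RX queues on the device. For GQI the RX data rings
 * were prefilled at allocation time, so only the doorbell needs to be
 * written; DQO posts its RX buffers here instead. Any admin-queue failure
 * triggers a device reset, so no unwinding is done on error.
 */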
static int gve_create_rings(struct gve_priv *priv)
{
	int num_tx_queues = gve_num_tx_queues(priv);
	int err;
	int i;

	err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
			  num_tx_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
		  num_tx_queues);

	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
			  priv->rx_cfg.num_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
		  priv->rx_cfg.num_queues);

	if (gve_is_gqi(priv)) {
		/* Rx data ring has been prefilled with packet buffers at queue
		 * allocation time.
		 *
		 * Write the doorbell to provide descriptor slots and packet
		 * buffers to the NIC.
		 */
		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			gve_rx_write_doorbell(priv, &priv->rx[i]);
	} else {
		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			/* Post buffers and ring doorbell. */
			gve_rx_post_buffers_dqo(&priv->rx[i]);
		}
	}

	return 0;
}

static void init_xdp_sync_stats(struct gve_priv *priv)
{
	int start_id = gve_xdp_tx_start_queue_id(priv);
	int i;

	/* Init stats */
	for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);

		u64_stats_init(&priv->tx[i].statss);
		priv->tx[i].ntfy_id = ntfy_idx;
	}
}

static void gve_init_sync_stats(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++)
		u64_stats_init(&priv->tx[i].statss);

	/* Init stats for XDP TX queues */
	init_xdp_sync_stats(priv);

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		u64_stats_init(&priv->rx[i].statss);
}

static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
				      struct gve_tx_alloc_rings_cfg *cfg)
{
	cfg->qcfg = &priv->tx_cfg;
	cfg->raw_addressing = !gve_is_qpl(priv);
	cfg->ring_size = priv->tx_desc_cnt;
	cfg->start_idx = 0;
	cfg->num_rings = gve_num_tx_queues(priv);
	cfg->tx = priv->tx;
}

static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
{
	int i;

	if (!priv->tx)
		return;

	for (i = start_id; i < start_id + num_rings; i++) {
		if (gve_is_gqi(priv))
			gve_tx_stop_ring_gqi(priv, i);
		else
			gve_tx_stop_ring_dqo(priv, i);
	}
}

static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
			       int num_rings)
{
	int i;

	for (i = start_id; i < start_id + num_rings; i++) {
		if (gve_is_gqi(priv))
			gve_tx_start_ring_gqi(priv, i);
		else
			gve_tx_start_ring_dqo(priv, i);
	}
}

static int gve_alloc_xdp_rings(struct gve_priv *priv)
{
	struct gve_tx_alloc_rings_cfg cfg = {0};
	int err = 0;

	if (!priv->num_xdp_queues)
		return 0;

	gve_tx_get_curr_alloc_cfg(priv, &cfg);
	cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
	cfg.num_rings = priv->num_xdp_queues;

	err = gve_tx_alloc_rings_gqi(priv, &cfg);
	if (err)
		return err;

	gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
	init_xdp_sync_stats(priv);

	return 0;
}

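/* Allocate host memory for all TX and RX rings in the requested
 * configuration, using the GQI or DQO allocator to match the queue format.
 * On RX failure the already-allocated TX rings are freed again.
 */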
gve_queues_mem_alloc(struct gve_priv * priv,struct gve_tx_alloc_rings_cfg * tx_alloc_cfg,struct gve_rx_alloc_rings_cfg * rx_alloc_cfg)983ee24284eSShailend Chand static int gve_queues_mem_alloc(struct gve_priv *priv,
984f13697ccSShailend Chand 				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
985f13697ccSShailend Chand 				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
986f5cedc84SCatherine Sullivan {
987f5cedc84SCatherine Sullivan 	int err;
988f5cedc84SCatherine Sullivan 
989f13697ccSShailend Chand 	if (gve_is_gqi(priv))
990f13697ccSShailend Chand 		err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
991f13697ccSShailend Chand 	else
992f13697ccSShailend Chand 		err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
993f13697ccSShailend Chand 	if (err)
994f13697ccSShailend Chand 		return err;
9959c1a59a2SBailey Forrest 
9969c1a59a2SBailey Forrest 	if (gve_is_gqi(priv))
997f13697ccSShailend Chand 		err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
9989c1a59a2SBailey Forrest 	else
999f13697ccSShailend Chand 		err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
1000f5cedc84SCatherine Sullivan 	if (err)
1001f5cedc84SCatherine Sullivan 		goto free_tx;
10029c1a59a2SBailey Forrest 
1003f5cedc84SCatherine Sullivan 	return 0;
1004f5cedc84SCatherine Sullivan 
1005f5cedc84SCatherine Sullivan free_tx:
1006f13697ccSShailend Chand 	if (gve_is_gqi(priv))
1007f13697ccSShailend Chand 		gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
1008f13697ccSShailend Chand 	else
1009f13697ccSShailend Chand 		gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
1010f5cedc84SCatherine Sullivan 	return err;
1011f5cedc84SCatherine Sullivan }
1012f5cedc84SCatherine Sullivan 
101375eaae15SPraveen Kaligineedi static int gve_destroy_xdp_rings(struct gve_priv *priv)
101475eaae15SPraveen Kaligineedi {
101575eaae15SPraveen Kaligineedi 	int start_id;
101675eaae15SPraveen Kaligineedi 	int err;
101775eaae15SPraveen Kaligineedi 
101875eaae15SPraveen Kaligineedi 	start_id = gve_xdp_tx_start_queue_id(priv);
101975eaae15SPraveen Kaligineedi 	err = gve_adminq_destroy_tx_queues(priv,
102075eaae15SPraveen Kaligineedi 					   start_id,
102175eaae15SPraveen Kaligineedi 					   priv->num_xdp_queues);
102275eaae15SPraveen Kaligineedi 	if (err) {
102375eaae15SPraveen Kaligineedi 		netif_err(priv, drv, priv->dev,
102475eaae15SPraveen Kaligineedi 			  "failed to destroy XDP queues\n");
102575eaae15SPraveen Kaligineedi 		/* This failure will trigger a reset - no need to clean up */
102675eaae15SPraveen Kaligineedi 		return err;
102775eaae15SPraveen Kaligineedi 	}
102875eaae15SPraveen Kaligineedi 	netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
102975eaae15SPraveen Kaligineedi 
103075eaae15SPraveen Kaligineedi 	return 0;
103175eaae15SPraveen Kaligineedi }
103275eaae15SPraveen Kaligineedi 
1033f5cedc84SCatherine Sullivan static int gve_destroy_rings(struct gve_priv *priv)
1034f5cedc84SCatherine Sullivan {
10352e80aeaeSPraveen Kaligineedi 	int num_tx_queues = gve_num_tx_queues(priv);
1036f5cedc84SCatherine Sullivan 	int err;
1037f5cedc84SCatherine Sullivan 
10387fc2bf78SPraveen Kaligineedi 	err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
1039f5cedc84SCatherine Sullivan 	if (err) {
1040f5cedc84SCatherine Sullivan 		netif_err(priv, drv, priv->dev,
10415cdad90dSSagi Shahar 			  "failed to destroy tx queues\n");
10425cdad90dSSagi Shahar 		/* This failure will trigger a reset - no need to clean up */
1043f5cedc84SCatherine Sullivan 		return err;
1044f5cedc84SCatherine Sullivan 	}
10455cdad90dSSagi Shahar 	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
10465cdad90dSSagi Shahar 	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
1047f5cedc84SCatherine Sullivan 	if (err) {
1048f5cedc84SCatherine Sullivan 		netif_err(priv, drv, priv->dev,
10495cdad90dSSagi Shahar 			  "failed to destroy rx queues\n");
10505cdad90dSSagi Shahar 		/* This failure will trigger a reset - no need to clean up */
1051f5cedc84SCatherine Sullivan 		return err;
1052f5cedc84SCatherine Sullivan 	}
10535cdad90dSSagi Shahar 	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
1054f5cedc84SCatherine Sullivan 	return 0;
1055f5cedc84SCatherine Sullivan }
1056f5cedc84SCatherine Sullivan 
105775eaae15SPraveen Kaligineedi static void gve_free_xdp_rings(struct gve_priv *priv)
105875eaae15SPraveen Kaligineedi {
1059f13697ccSShailend Chand 	struct gve_tx_alloc_rings_cfg cfg = {0};
106075eaae15SPraveen Kaligineedi 
1061f13697ccSShailend Chand 	gve_tx_get_curr_alloc_cfg(priv, &cfg);
1062f13697ccSShailend Chand 	cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
1063f13697ccSShailend Chand 	cfg.num_rings = priv->num_xdp_queues;
1064f13697ccSShailend Chand 
106575eaae15SPraveen Kaligineedi 	if (priv->tx) {
1066f13697ccSShailend Chand 		gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
1067f13697ccSShailend Chand 		gve_tx_free_rings_gqi(priv, &cfg);
106875eaae15SPraveen Kaligineedi 	}
106975eaae15SPraveen Kaligineedi }
107075eaae15SPraveen Kaligineedi 
1071ee24284eSShailend Chand static void gve_queues_mem_free(struct gve_priv *priv,
1072f13697ccSShailend Chand 				struct gve_tx_alloc_rings_cfg *tx_cfg,
1073f13697ccSShailend Chand 				struct gve_rx_alloc_rings_cfg *rx_cfg)
1074f5cedc84SCatherine Sullivan {
1075f13697ccSShailend Chand 	if (gve_is_gqi(priv)) {
1076f13697ccSShailend Chand 		gve_tx_free_rings_gqi(priv, tx_cfg);
1077f13697ccSShailend Chand 		gve_rx_free_rings_gqi(priv, rx_cfg);
1078f13697ccSShailend Chand 	} else {
1079f13697ccSShailend Chand 		gve_tx_free_rings_dqo(priv, tx_cfg);
1080f13697ccSShailend Chand 		gve_rx_free_rings_dqo(priv, rx_cfg);
1081f5cedc84SCatherine Sullivan 	}
1082f5cedc84SCatherine Sullivan }
1083f5cedc84SCatherine Sullivan 
1084433e274bSKuo Zhao int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1085433e274bSKuo Zhao 		   struct page **page, dma_addr_t *dma,
1086a92f7a6fSCatherine Sullivan 		   enum dma_data_direction dir, gfp_t gfp_flags)
1087f5cedc84SCatherine Sullivan {
1088a92f7a6fSCatherine Sullivan 	*page = alloc_page(gfp_flags);
1089433e274bSKuo Zhao 	if (!*page) {
1090433e274bSKuo Zhao 		priv->page_alloc_fail++;
1091f5cedc84SCatherine Sullivan 		return -ENOMEM;
1092433e274bSKuo Zhao 	}
1093f5cedc84SCatherine Sullivan 	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
1094f5cedc84SCatherine Sullivan 	if (dma_mapping_error(dev, *dma)) {
1095433e274bSKuo Zhao 		priv->dma_mapping_error++;
1096f5cedc84SCatherine Sullivan 		put_page(*page);
1097f5cedc84SCatherine Sullivan 		return -ENOMEM;
1098f5cedc84SCatherine Sullivan 	}
1099f5cedc84SCatherine Sullivan 	return 0;
1100f5cedc84SCatherine Sullivan }
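
/* Editorial sketch (not part of the driver): gve_alloc_page() pairs with
 * gve_free_page() below; a caller needing a single DMA-mapped page would do
 * roughly:
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	if (gve_alloc_page(priv, &priv->pdev->dev, &page, &dma,
 *			   DMA_TO_DEVICE, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	gve_free_page(&priv->pdev->dev, page, dma, DMA_TO_DEVICE);
 */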
1101f5cedc84SCatherine Sullivan 
1102ee24284eSShailend Chand struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1103f13697ccSShailend Chand 						      u32 id, int pages)
1104f5cedc84SCatherine Sullivan {
1105ee24284eSShailend Chand 	struct gve_queue_page_list *qpl;
1106f5cedc84SCatherine Sullivan 	int err;
1107f5cedc84SCatherine Sullivan 	int i;
1108f5cedc84SCatherine Sullivan 
1109ee24284eSShailend Chand 	qpl = kvzalloc(sizeof(*qpl), GFP_KERNEL);
1110ee24284eSShailend Chand 	if (!qpl)
1111ee24284eSShailend Chand 		return NULL;
1112ee24284eSShailend Chand 
1113f5cedc84SCatherine Sullivan 	qpl->id = id;
1114a95069ecSJeroen de Borst 	qpl->num_entries = 0;
11157fec4d39SGustavo A. R. Silva 	qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
1116f5cedc84SCatherine Sullivan 	if (!qpl->pages)
1117ee24284eSShailend Chand 		goto abort;
1118ee24284eSShailend Chand 
11197fec4d39SGustavo A. R. Silva 	qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
1120f5cedc84SCatherine Sullivan 	if (!qpl->page_buses)
1121ee24284eSShailend Chand 		goto abort;
1122f5cedc84SCatherine Sullivan 
1123f5cedc84SCatherine Sullivan 	for (i = 0; i < pages; i++) {
1124433e274bSKuo Zhao 		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
1125f5cedc84SCatherine Sullivan 				     &qpl->page_buses[i],
1126a92f7a6fSCatherine Sullivan 				     gve_qpl_dma_dir(priv, id), GFP_KERNEL);
1127f5cedc84SCatherine Sullivan 		if (err)
1128ee24284eSShailend Chand 			goto abort;
1129a95069ecSJeroen de Borst 		qpl->num_entries++;
1130f5cedc84SCatherine Sullivan 	}
1131f5cedc84SCatherine Sullivan 
1132ee24284eSShailend Chand 	return qpl;
1133ee24284eSShailend Chand 
1134ee24284eSShailend Chand abort:
1135ee24284eSShailend Chand 	gve_free_queue_page_list(priv, qpl, id);
1136ee24284eSShailend Chand 	return NULL;
1137f5cedc84SCatherine Sullivan }
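
/* Note (editorial): qpl->num_entries is only advanced after a page has been
 * successfully allocated and DMA-mapped, so the abort path above frees
 * exactly the pages that were mapped and nothing more.
 */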
1138f5cedc84SCatherine Sullivan 
1139f5cedc84SCatherine Sullivan void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1140f5cedc84SCatherine Sullivan 		   enum dma_data_direction dir)
1141f5cedc84SCatherine Sullivan {
1142f5cedc84SCatherine Sullivan 	if (!dma_mapping_error(dev, dma))
1143f5cedc84SCatherine Sullivan 		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
1144f5cedc84SCatherine Sullivan 	if (page)
1145f5cedc84SCatherine Sullivan 		put_page(page);
1146f5cedc84SCatherine Sullivan }
1147f5cedc84SCatherine Sullivan 
1148ee24284eSShailend Chand void gve_free_queue_page_list(struct gve_priv *priv,
1149f13697ccSShailend Chand 			      struct gve_queue_page_list *qpl,
1150ee24284eSShailend Chand 			      u32 id)
1151f5cedc84SCatherine Sullivan {
1152f5cedc84SCatherine Sullivan 	int i;
1153f5cedc84SCatherine Sullivan 
1154ee24284eSShailend Chand 	if (!qpl)
1155f5cedc84SCatherine Sullivan 		return;
1156ee24284eSShailend Chand 	if (!qpl->pages)
1157ee24284eSShailend Chand 		goto free_qpl;
1158f5cedc84SCatherine Sullivan 	if (!qpl->page_buses)
1159f5cedc84SCatherine Sullivan 		goto free_pages;
1160f5cedc84SCatherine Sullivan 
1161f5cedc84SCatherine Sullivan 	for (i = 0; i < qpl->num_entries; i++)
1162f5cedc84SCatherine Sullivan 		gve_free_page(&priv->pdev->dev, qpl->pages[i],
1163f5cedc84SCatherine Sullivan 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
1164f5cedc84SCatherine Sullivan 
11658ec1e900SChuhong Yuan 	kvfree(qpl->page_buses);
11667fc2bf78SPraveen Kaligineedi 	qpl->page_buses = NULL;
1167f5cedc84SCatherine Sullivan free_pages:
11688ec1e900SChuhong Yuan 	kvfree(qpl->pages);
11697fc2bf78SPraveen Kaligineedi 	qpl->pages = NULL;
1170ee24284eSShailend Chand free_qpl:
1171ee24284eSShailend Chand 	kvfree(qpl);
1172f5cedc84SCatherine Sullivan }
1173f5cedc84SCatherine Sullivan 
11749e5f7d26SCatherine Sullivan /* Use this to schedule a reset when the device is capable of continuing
11759e5f7d26SCatherine Sullivan  * to handle other requests in its current state. If it is not, do a reset
11769e5f7d26SCatherine Sullivan  * in thread instead.
11779e5f7d26SCatherine Sullivan  */
11789e5f7d26SCatherine Sullivan void gve_schedule_reset(struct gve_priv *priv)
11799e5f7d26SCatherine Sullivan {
11809e5f7d26SCatherine Sullivan 	gve_set_do_reset(priv);
11819e5f7d26SCatherine Sullivan 	queue_work(priv->gve_wq, &priv->service_task);
11829e5f7d26SCatherine Sullivan }
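
/* Editorial sketch (not part of the driver): a datapath that detects a stuck
 * queue while the device can still service admin requests simply does
 *
 *	gve_schedule_reset(priv);
 *
 * and returns; the service task performs the actual teardown and bring-up.
 * gve_tx_timeout() below is one such caller.
 */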
11839e5f7d26SCatherine Sullivan 
11849e5f7d26SCatherine Sullivan static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
11859e5f7d26SCatherine Sullivan static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
1186f5cedc84SCatherine Sullivan static void gve_turndown(struct gve_priv *priv);
1187f5cedc84SCatherine Sullivan static void gve_turnup(struct gve_priv *priv);
1188f5cedc84SCatherine Sullivan 
118975eaae15SPraveen Kaligineedi static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
119075eaae15SPraveen Kaligineedi {
119175eaae15SPraveen Kaligineedi 	struct napi_struct *napi;
119275eaae15SPraveen Kaligineedi 	struct gve_rx_ring *rx;
119375eaae15SPraveen Kaligineedi 	int err = 0;
119475eaae15SPraveen Kaligineedi 	int i, j;
1195fd8e4032SPraveen Kaligineedi 	u32 tx_qid;
119675eaae15SPraveen Kaligineedi 
119775eaae15SPraveen Kaligineedi 	if (!priv->num_xdp_queues)
119875eaae15SPraveen Kaligineedi 		return 0;
119975eaae15SPraveen Kaligineedi 
120075eaae15SPraveen Kaligineedi 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
120175eaae15SPraveen Kaligineedi 		rx = &priv->rx[i];
120275eaae15SPraveen Kaligineedi 		napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
120375eaae15SPraveen Kaligineedi 
120475eaae15SPraveen Kaligineedi 		err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i,
120575eaae15SPraveen Kaligineedi 				       napi->napi_id);
120675eaae15SPraveen Kaligineedi 		if (err)
120775eaae15SPraveen Kaligineedi 			goto err;
120875eaae15SPraveen Kaligineedi 		err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
120975eaae15SPraveen Kaligineedi 						 MEM_TYPE_PAGE_SHARED, NULL);
121075eaae15SPraveen Kaligineedi 		if (err)
121175eaae15SPraveen Kaligineedi 			goto err;
1212fd8e4032SPraveen Kaligineedi 		rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
1213fd8e4032SPraveen Kaligineedi 		if (rx->xsk_pool) {
1214fd8e4032SPraveen Kaligineedi 			err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
1215fd8e4032SPraveen Kaligineedi 					       napi->napi_id);
1216fd8e4032SPraveen Kaligineedi 			if (err)
1217fd8e4032SPraveen Kaligineedi 				goto err;
1218fd8e4032SPraveen Kaligineedi 			err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
1219fd8e4032SPraveen Kaligineedi 							 MEM_TYPE_XSK_BUFF_POOL, NULL);
1220fd8e4032SPraveen Kaligineedi 			if (err)
1221fd8e4032SPraveen Kaligineedi 				goto err;
1222fd8e4032SPraveen Kaligineedi 			xsk_pool_set_rxq_info(rx->xsk_pool,
1223fd8e4032SPraveen Kaligineedi 					      &rx->xsk_rxq);
1224fd8e4032SPraveen Kaligineedi 		}
1225fd8e4032SPraveen Kaligineedi 	}
1226fd8e4032SPraveen Kaligineedi 
1227fd8e4032SPraveen Kaligineedi 	for (i = 0; i < priv->num_xdp_queues; i++) {
1228fd8e4032SPraveen Kaligineedi 		tx_qid = gve_xdp_tx_queue_id(priv, i);
1229fd8e4032SPraveen Kaligineedi 		priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
123075eaae15SPraveen Kaligineedi 	}
123175eaae15SPraveen Kaligineedi 	return 0;
123275eaae15SPraveen Kaligineedi 
123375eaae15SPraveen Kaligineedi err:
123475eaae15SPraveen Kaligineedi 	for (j = i; j >= 0; j--) {
123575eaae15SPraveen Kaligineedi 		rx = &priv->rx[j];
123675eaae15SPraveen Kaligineedi 		if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
123775eaae15SPraveen Kaligineedi 			xdp_rxq_info_unreg(&rx->xdp_rxq);
1238fd8e4032SPraveen Kaligineedi 		if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
1239fd8e4032SPraveen Kaligineedi 			xdp_rxq_info_unreg(&rx->xsk_rxq);
124075eaae15SPraveen Kaligineedi 	}
124175eaae15SPraveen Kaligineedi 	return err;
124275eaae15SPraveen Kaligineedi }
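
/* Note (editorial): the unwind loop above starts at the failing index i and
 * walks back to 0; the xdp_rxq_info_is_reg() checks make the partially
 * registered ring i safe to unregister alongside the fully registered ones.
 */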
124375eaae15SPraveen Kaligineedi 
124475eaae15SPraveen Kaligineedi static void gve_unreg_xdp_info(struct gve_priv *priv)
124575eaae15SPraveen Kaligineedi {
1246fd8e4032SPraveen Kaligineedi 	int i, tx_qid;
124775eaae15SPraveen Kaligineedi 
124875eaae15SPraveen Kaligineedi 	if (!priv->num_xdp_queues)
124975eaae15SPraveen Kaligineedi 		return;
125075eaae15SPraveen Kaligineedi 
125175eaae15SPraveen Kaligineedi 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
125275eaae15SPraveen Kaligineedi 		struct gve_rx_ring *rx = &priv->rx[i];
125375eaae15SPraveen Kaligineedi 
125475eaae15SPraveen Kaligineedi 		xdp_rxq_info_unreg(&rx->xdp_rxq);
1255fd8e4032SPraveen Kaligineedi 		if (rx->xsk_pool) {
1256fd8e4032SPraveen Kaligineedi 			xdp_rxq_info_unreg(&rx->xsk_rxq);
1257fd8e4032SPraveen Kaligineedi 			rx->xsk_pool = NULL;
1258fd8e4032SPraveen Kaligineedi 		}
1259fd8e4032SPraveen Kaligineedi 	}
1260fd8e4032SPraveen Kaligineedi 
1261fd8e4032SPraveen Kaligineedi 	for (i = 0; i < priv->num_xdp_queues; i++) {
1262fd8e4032SPraveen Kaligineedi 		tx_qid = gve_xdp_tx_queue_id(priv, i);
1263fd8e4032SPraveen Kaligineedi 		priv->tx[tx_qid].xsk_pool = NULL;
126475eaae15SPraveen Kaligineedi 	}
126575eaae15SPraveen Kaligineedi }
126675eaae15SPraveen Kaligineedi 
126739a7f4aaSPraveen Kaligineedi static void gve_drain_page_cache(struct gve_priv *priv)
126839a7f4aaSPraveen Kaligineedi {
126939a7f4aaSPraveen Kaligineedi 	int i;
127039a7f4aaSPraveen Kaligineedi 
1271a0727489SYunsheng Lin 	for (i = 0; i < priv->rx_cfg.num_queues; i++)
1272a0727489SYunsheng Lin 		page_frag_cache_drain(&priv->rx[i].page_cache);
127339a7f4aaSPraveen Kaligineedi }
127439a7f4aaSPraveen Kaligineedi 
1275f13697ccSShailend Chand static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
1276f13697ccSShailend Chand 				      struct gve_rx_alloc_rings_cfg *cfg)
1277f13697ccSShailend Chand {
1278f13697ccSShailend Chand 	cfg->qcfg = &priv->rx_cfg;
1279f13697ccSShailend Chand 	cfg->qcfg_tx = &priv->tx_cfg;
1280f13697ccSShailend Chand 	cfg->raw_addressing = !gve_is_qpl(priv);
12815e37d825SJeroen de Borst 	cfg->enable_header_split = priv->header_split_enabled;
1282f13697ccSShailend Chand 	cfg->ring_size = priv->rx_desc_cnt;
12835e37d825SJeroen de Borst 	cfg->packet_buffer_size = gve_is_gqi(priv) ?
12845e37d825SJeroen de Borst 				  GVE_DEFAULT_RX_BUFFER_SIZE :
12855e37d825SJeroen de Borst 				  priv->data_buffer_size_dqo;
1286f13697ccSShailend Chand 	cfg->rx = priv->rx;
1287f13697ccSShailend Chand }
1288f13697ccSShailend Chand 
1289834f9458SHarshitha Ramamurthy void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1290f13697ccSShailend Chand 			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1291f13697ccSShailend Chand 			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
1292f13697ccSShailend Chand {
1293f13697ccSShailend Chand 	gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
1294f13697ccSShailend Chand 	gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
1295f13697ccSShailend Chand }
1296f13697ccSShailend Chand 
1297c93462b9SShailend Chand static void gve_rx_start_ring(struct gve_priv *priv, int i)
1298f13697ccSShailend Chand {
1299f13697ccSShailend Chand 	if (gve_is_gqi(priv))
1300f13697ccSShailend Chand 		gve_rx_start_ring_gqi(priv, i);
1301f13697ccSShailend Chand 	else
1302f13697ccSShailend Chand 		gve_rx_start_ring_dqo(priv, i);
1303f13697ccSShailend Chand }
1304c93462b9SShailend Chand 
1305c93462b9SShailend Chand static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
1306c93462b9SShailend Chand {
1307c93462b9SShailend Chand 	int i;
1308c93462b9SShailend Chand 
1309c93462b9SShailend Chand 	for (i = 0; i < num_rings; i++)
1310c93462b9SShailend Chand 		gve_rx_start_ring(priv, i);
1311c93462b9SShailend Chand }
1312c93462b9SShailend Chand 
1313c93462b9SShailend Chand static void gve_rx_stop_ring(struct gve_priv *priv, int i)
1314c93462b9SShailend Chand {
1315c93462b9SShailend Chand 	if (gve_is_gqi(priv))
1316c93462b9SShailend Chand 		gve_rx_stop_ring_gqi(priv, i);
1317c93462b9SShailend Chand 	else
1318c93462b9SShailend Chand 		gve_rx_stop_ring_dqo(priv, i);
1319f13697ccSShailend Chand }
1320f13697ccSShailend Chand 
1321f13697ccSShailend Chand static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
1322f13697ccSShailend Chand {
1323f13697ccSShailend Chand 	int i;
1324f13697ccSShailend Chand 
1325f13697ccSShailend Chand 	if (!priv->rx)
1326f13697ccSShailend Chand 		return;
1327f13697ccSShailend Chand 
1328c93462b9SShailend Chand 	for (i = 0; i < num_rings; i++)
1329c93462b9SShailend Chand 		gve_rx_stop_ring(priv, i);
1330f13697ccSShailend Chand }
1331f13697ccSShailend Chand 
133292a6d7a4SShailend Chand static void gve_queues_mem_remove(struct gve_priv *priv)
1333f5cedc84SCatherine Sullivan {
1334f13697ccSShailend Chand 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1335f13697ccSShailend Chand 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
133692a6d7a4SShailend Chand 
1337ee24284eSShailend Chand 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1338ee24284eSShailend Chand 	gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg);
133992a6d7a4SShailend Chand 	priv->tx = NULL;
134092a6d7a4SShailend Chand 	priv->rx = NULL;
134192a6d7a4SShailend Chand }
134292a6d7a4SShailend Chand 
134392a6d7a4SShailend Chand /* The passed-in queue memory is stored into priv and the queues are made live.
134492a6d7a4SShailend Chand  * No memory is allocated. Passed-in memory is freed on errors.
134592a6d7a4SShailend Chand  */
134692a6d7a4SShailend Chand static int gve_queues_start(struct gve_priv *priv,
134792a6d7a4SShailend Chand 			    struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
134892a6d7a4SShailend Chand 			    struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
134992a6d7a4SShailend Chand {
135092a6d7a4SShailend Chand 	struct net_device *dev = priv->dev;
1351f5cedc84SCatherine Sullivan 	int err;
1352f5cedc84SCatherine Sullivan 
135392a6d7a4SShailend Chand 	/* Record new resources into priv */
135492a6d7a4SShailend Chand 	priv->tx = tx_alloc_cfg->tx;
135592a6d7a4SShailend Chand 	priv->rx = rx_alloc_cfg->rx;
135692a6d7a4SShailend Chand 
135792a6d7a4SShailend Chand 	/* Record new configs into priv */
135892a6d7a4SShailend Chand 	priv->tx_cfg = *tx_alloc_cfg->qcfg;
135992a6d7a4SShailend Chand 	priv->rx_cfg = *rx_alloc_cfg->qcfg;
136092a6d7a4SShailend Chand 	priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
136192a6d7a4SShailend Chand 	priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
136292a6d7a4SShailend Chand 
136375eaae15SPraveen Kaligineedi 	if (priv->xdp_prog)
136475eaae15SPraveen Kaligineedi 		priv->num_xdp_queues = priv->rx_cfg.num_queues;
136575eaae15SPraveen Kaligineedi 	else
136675eaae15SPraveen Kaligineedi 		priv->num_xdp_queues = 0;
136775eaae15SPraveen Kaligineedi 
136892a6d7a4SShailend Chand 	gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
136992a6d7a4SShailend Chand 	gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
1370f13697ccSShailend Chand 	gve_init_sync_stats(priv);
1371f13697ccSShailend Chand 
1372f5cedc84SCatherine Sullivan 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
1373f5cedc84SCatherine Sullivan 	if (err)
137492a6d7a4SShailend Chand 		goto stop_and_free_rings;
1375f5cedc84SCatherine Sullivan 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
1376f5cedc84SCatherine Sullivan 	if (err)
137792a6d7a4SShailend Chand 		goto stop_and_free_rings;
1378f5cedc84SCatherine Sullivan 
137975eaae15SPraveen Kaligineedi 	err = gve_reg_xdp_info(priv, dev);
138075eaae15SPraveen Kaligineedi 	if (err)
138192a6d7a4SShailend Chand 		goto stop_and_free_rings;
138275eaae15SPraveen Kaligineedi 
1383f5cedc84SCatherine Sullivan 	err = gve_register_qpls(priv);
1384f5cedc84SCatherine Sullivan 	if (err)
13859e5f7d26SCatherine Sullivan 		goto reset;
13865e8c5adfSBailey Forrest 
13875e37d825SJeroen de Borst 	priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
13885e37d825SJeroen de Borst 	priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
13895e37d825SJeroen de Borst 
1390f5cedc84SCatherine Sullivan 	err = gve_create_rings(priv);
1391f5cedc84SCatherine Sullivan 	if (err)
13929e5f7d26SCatherine Sullivan 		goto reset;
13935e8c5adfSBailey Forrest 
1394f5cedc84SCatherine Sullivan 	gve_set_device_rings_ok(priv);
1395f5cedc84SCatherine Sullivan 
139624aeb56fSKuo Zhao 	if (gve_get_report_stats(priv))
139724aeb56fSKuo Zhao 		mod_timer(&priv->stats_report_timer,
139824aeb56fSKuo Zhao 			  round_jiffies(jiffies +
139924aeb56fSKuo Zhao 				msecs_to_jiffies(priv->stats_report_timer_period)));
140024aeb56fSKuo Zhao 
1401f5cedc84SCatherine Sullivan 	gve_turnup(priv);
14023b7cc736SPatricio Noyola 	queue_work(priv->gve_wq, &priv->service_task);
1403433e274bSKuo Zhao 	priv->interface_up_cnt++;
1404f5cedc84SCatherine Sullivan 	return 0;
1405f5cedc84SCatherine Sullivan 
14069e5f7d26SCatherine Sullivan reset:
14079e5f7d26SCatherine Sullivan 	if (gve_get_reset_in_progress(priv))
140892a6d7a4SShailend Chand 		goto stop_and_free_rings;
14099e5f7d26SCatherine Sullivan 	gve_reset_and_teardown(priv, true);
14109e5f7d26SCatherine Sullivan 	/* if this fails there is nothing we can do so just ignore the return */
14119e5f7d26SCatherine Sullivan 	gve_reset_recovery(priv, false);
14129e5f7d26SCatherine Sullivan 	/* return the original error */
14139e5f7d26SCatherine Sullivan 	return err;
141492a6d7a4SShailend Chand stop_and_free_rings:
141592a6d7a4SShailend Chand 	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
141692a6d7a4SShailend Chand 	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
141792a6d7a4SShailend Chand 	gve_queues_mem_remove(priv);
141892a6d7a4SShailend Chand 	return err;
1419f5cedc84SCatherine Sullivan }
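
/* Note (editorial): gve_queues_start() has two distinct failure modes. Before
 * the device has been touched (netif_set_real_num_*, XDP registration) it can
 * simply stop the rings and free the passed-in memory (stop_and_free_rings);
 * once QPL registration or queue creation has begun, only a full
 * gve_reset_and_teardown()/gve_reset_recovery() cycle is safe (reset).
 */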
1420f5cedc84SCatherine Sullivan 
142192a6d7a4SShailend Chand static int gve_open(struct net_device *dev)
1422f5cedc84SCatherine Sullivan {
1423f13697ccSShailend Chand 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
1424f13697ccSShailend Chand 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1425f5cedc84SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(dev);
1426f5cedc84SCatherine Sullivan 	int err;
1427f5cedc84SCatherine Sullivan 
1428ee24284eSShailend Chand 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
142992a6d7a4SShailend Chand 
1430ee24284eSShailend Chand 	err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg);
143192a6d7a4SShailend Chand 	if (err)
143292a6d7a4SShailend Chand 		return err;
143392a6d7a4SShailend Chand 
143492a6d7a4SShailend Chand 	/* No need to free on error: ownership of resources is lost after
143592a6d7a4SShailend Chand 	 * calling gve_queues_start.
143692a6d7a4SShailend Chand 	 */
1437ee24284eSShailend Chand 	err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg);
143892a6d7a4SShailend Chand 	if (err)
143992a6d7a4SShailend Chand 		return err;
144092a6d7a4SShailend Chand 
144192a6d7a4SShailend Chand 	return 0;
144292a6d7a4SShailend Chand }
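
/* Editorial sketch (not part of the driver): ndo_open is a strict two-phase
 * operation,
 *
 *	gve_queues_mem_alloc()	// pure allocation, nothing live yet
 *	gve_queues_start()	// publish into priv, register, create, turn up
 *
 * with ownership of the cfg-held memory transferring on the second call,
 * which is why gve_open() itself needs no free path. gve_close() below
 * mirrors it with gve_queues_stop() followed by gve_queues_mem_remove().
 */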
144392a6d7a4SShailend Chand 
144492a6d7a4SShailend Chand static int gve_queues_stop(struct gve_priv *priv)
144592a6d7a4SShailend Chand {
144692a6d7a4SShailend Chand 	int err;
144792a6d7a4SShailend Chand 
144892a6d7a4SShailend Chand 	netif_carrier_off(priv->dev);
1449f5cedc84SCatherine Sullivan 	if (gve_get_device_rings_ok(priv)) {
1450f5cedc84SCatherine Sullivan 		gve_turndown(priv);
145139a7f4aaSPraveen Kaligineedi 		gve_drain_page_cache(priv);
1452f5cedc84SCatherine Sullivan 		err = gve_destroy_rings(priv);
1453f5cedc84SCatherine Sullivan 		if (err)
14549e5f7d26SCatherine Sullivan 			goto err;
1455f5cedc84SCatherine Sullivan 		err = gve_unregister_qpls(priv);
1456f5cedc84SCatherine Sullivan 		if (err)
14579e5f7d26SCatherine Sullivan 			goto err;
1458f5cedc84SCatherine Sullivan 		gve_clear_device_rings_ok(priv);
1459f5cedc84SCatherine Sullivan 	}
146024aeb56fSKuo Zhao 	del_timer_sync(&priv->stats_report_timer);
1461f5cedc84SCatherine Sullivan 
146275eaae15SPraveen Kaligineedi 	gve_unreg_xdp_info(priv);
1463f13697ccSShailend Chand 
146492a6d7a4SShailend Chand 	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
146592a6d7a4SShailend Chand 	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
1466f13697ccSShailend Chand 
1467433e274bSKuo Zhao 	priv->interface_down_cnt++;
1468f5cedc84SCatherine Sullivan 	return 0;
14699e5f7d26SCatherine Sullivan 
14709e5f7d26SCatherine Sullivan err:
14719e5f7d26SCatherine Sullivan 	/* This must have been called from a reset due to the rtnl lock
14729e5f7d26SCatherine Sullivan 	 * so just return at this point.
14739e5f7d26SCatherine Sullivan 	 */
14749e5f7d26SCatherine Sullivan 	if (gve_get_reset_in_progress(priv))
14759e5f7d26SCatherine Sullivan 		return err;
14769e5f7d26SCatherine Sullivan 	/* Otherwise reset before returning */
14779e5f7d26SCatherine Sullivan 	gve_reset_and_teardown(priv, true);
14789e5f7d26SCatherine Sullivan 	return gve_reset_recovery(priv, false);
1479f5cedc84SCatherine Sullivan }
1480f5cedc84SCatherine Sullivan 
148192a6d7a4SShailend Chand static int gve_close(struct net_device *dev)
148292a6d7a4SShailend Chand {
148392a6d7a4SShailend Chand 	struct gve_priv *priv = netdev_priv(dev);
148492a6d7a4SShailend Chand 	int err;
148592a6d7a4SShailend Chand 
148692a6d7a4SShailend Chand 	err = gve_queues_stop(priv);
148792a6d7a4SShailend Chand 	if (err)
148892a6d7a4SShailend Chand 		return err;
148992a6d7a4SShailend Chand 
149092a6d7a4SShailend Chand 	gve_queues_mem_remove(priv);
149192a6d7a4SShailend Chand 	return 0;
149292a6d7a4SShailend Chand }
149392a6d7a4SShailend Chand 
149475eaae15SPraveen Kaligineedi static int gve_remove_xdp_queues(struct gve_priv *priv)
149575eaae15SPraveen Kaligineedi {
149675eaae15SPraveen Kaligineedi 	int err;
149775eaae15SPraveen Kaligineedi 
149875eaae15SPraveen Kaligineedi 	err = gve_destroy_xdp_rings(priv);
149975eaae15SPraveen Kaligineedi 	if (err)
150075eaae15SPraveen Kaligineedi 		return err;
150175eaae15SPraveen Kaligineedi 
150275eaae15SPraveen Kaligineedi 	err = gve_unregister_xdp_qpls(priv);
150375eaae15SPraveen Kaligineedi 	if (err)
150475eaae15SPraveen Kaligineedi 		return err;
150575eaae15SPraveen Kaligineedi 
150675eaae15SPraveen Kaligineedi 	gve_unreg_xdp_info(priv);
150775eaae15SPraveen Kaligineedi 	gve_free_xdp_rings(priv);
1508f13697ccSShailend Chand 
150975eaae15SPraveen Kaligineedi 	priv->num_xdp_queues = 0;
151075eaae15SPraveen Kaligineedi 	return 0;
151175eaae15SPraveen Kaligineedi }
151275eaae15SPraveen Kaligineedi 
151375eaae15SPraveen Kaligineedi static int gve_add_xdp_queues(struct gve_priv *priv)
151475eaae15SPraveen Kaligineedi {
151575eaae15SPraveen Kaligineedi 	int err;
151675eaae15SPraveen Kaligineedi 
1517f13697ccSShailend Chand 	priv->num_xdp_queues = priv->rx_cfg.num_queues;
151875eaae15SPraveen Kaligineedi 
151975eaae15SPraveen Kaligineedi 	err = gve_alloc_xdp_rings(priv);
152075eaae15SPraveen Kaligineedi 	if (err)
1521ee24284eSShailend Chand 		goto err;
152275eaae15SPraveen Kaligineedi 
152375eaae15SPraveen Kaligineedi 	err = gve_reg_xdp_info(priv, priv->dev);
152475eaae15SPraveen Kaligineedi 	if (err)
152575eaae15SPraveen Kaligineedi 		goto free_xdp_rings;
152675eaae15SPraveen Kaligineedi 
152775eaae15SPraveen Kaligineedi 	err = gve_register_xdp_qpls(priv);
152875eaae15SPraveen Kaligineedi 	if (err)
152975eaae15SPraveen Kaligineedi 		goto free_xdp_rings;
153075eaae15SPraveen Kaligineedi 
153175eaae15SPraveen Kaligineedi 	err = gve_create_xdp_rings(priv);
153275eaae15SPraveen Kaligineedi 	if (err)
153375eaae15SPraveen Kaligineedi 		goto free_xdp_rings;
153475eaae15SPraveen Kaligineedi 
153575eaae15SPraveen Kaligineedi 	return 0;
153675eaae15SPraveen Kaligineedi 
153775eaae15SPraveen Kaligineedi free_xdp_rings:
153875eaae15SPraveen Kaligineedi 	gve_free_xdp_rings(priv);
153975eaae15SPraveen Kaligineedi err:
154075eaae15SPraveen Kaligineedi 	priv->num_xdp_queues = 0;
154175eaae15SPraveen Kaligineedi 	return err;
154275eaae15SPraveen Kaligineedi }
154375eaae15SPraveen Kaligineedi 
154475eaae15SPraveen Kaligineedi static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
154575eaae15SPraveen Kaligineedi {
154675eaae15SPraveen Kaligineedi 	if (!gve_get_napi_enabled(priv))
154775eaae15SPraveen Kaligineedi 		return;
154875eaae15SPraveen Kaligineedi 
154975eaae15SPraveen Kaligineedi 	if (link_status == netif_carrier_ok(priv->dev))
155075eaae15SPraveen Kaligineedi 		return;
155175eaae15SPraveen Kaligineedi 
155275eaae15SPraveen Kaligineedi 	if (link_status) {
155375eaae15SPraveen Kaligineedi 		netdev_info(priv->dev, "Device link is up.\n");
155475eaae15SPraveen Kaligineedi 		netif_carrier_on(priv->dev);
155575eaae15SPraveen Kaligineedi 	} else {
155675eaae15SPraveen Kaligineedi 		netdev_info(priv->dev, "Device link is down.\n");
155775eaae15SPraveen Kaligineedi 		netif_carrier_off(priv->dev);
155875eaae15SPraveen Kaligineedi 	}
155975eaae15SPraveen Kaligineedi }
156075eaae15SPraveen Kaligineedi 
156175eaae15SPraveen Kaligineedi static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
156275eaae15SPraveen Kaligineedi 		       struct netlink_ext_ack *extack)
156375eaae15SPraveen Kaligineedi {
156475eaae15SPraveen Kaligineedi 	struct bpf_prog *old_prog;
156575eaae15SPraveen Kaligineedi 	int err = 0;
156675eaae15SPraveen Kaligineedi 	u32 status;
156775eaae15SPraveen Kaligineedi 
156875eaae15SPraveen Kaligineedi 	old_prog = READ_ONCE(priv->xdp_prog);
1569*fba917b1SPraveen Kaligineedi 	if (!netif_running(priv->dev)) {
157075eaae15SPraveen Kaligineedi 		WRITE_ONCE(priv->xdp_prog, prog);
157175eaae15SPraveen Kaligineedi 		if (old_prog)
157275eaae15SPraveen Kaligineedi 			bpf_prog_put(old_prog);
157375eaae15SPraveen Kaligineedi 		return 0;
157475eaae15SPraveen Kaligineedi 	}
157575eaae15SPraveen Kaligineedi 
157675eaae15SPraveen Kaligineedi 	gve_turndown(priv);
157775eaae15SPraveen Kaligineedi 	if (!old_prog && prog) {
157875eaae15SPraveen Kaligineedi 		// Allocate XDP TX queues if an XDP program is
157975eaae15SPraveen Kaligineedi 		// being installed
158075eaae15SPraveen Kaligineedi 		err = gve_add_xdp_queues(priv);
158175eaae15SPraveen Kaligineedi 		if (err)
158275eaae15SPraveen Kaligineedi 			goto out;
158375eaae15SPraveen Kaligineedi 	} else if (old_prog && !prog) {
158475eaae15SPraveen Kaligineedi 		// Remove XDP TX queues if an XDP program is
158575eaae15SPraveen Kaligineedi 		// being uninstalled
158675eaae15SPraveen Kaligineedi 		err = gve_remove_xdp_queues(priv);
158775eaae15SPraveen Kaligineedi 		if (err)
158875eaae15SPraveen Kaligineedi 			goto out;
158975eaae15SPraveen Kaligineedi 	}
159075eaae15SPraveen Kaligineedi 	WRITE_ONCE(priv->xdp_prog, prog);
159175eaae15SPraveen Kaligineedi 	if (old_prog)
159275eaae15SPraveen Kaligineedi 		bpf_prog_put(old_prog);
159375eaae15SPraveen Kaligineedi 
159475eaae15SPraveen Kaligineedi out:
159575eaae15SPraveen Kaligineedi 	gve_turnup(priv);
159675eaae15SPraveen Kaligineedi 	status = ioread32be(&priv->reg_bar0->device_status);
159775eaae15SPraveen Kaligineedi 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
159875eaae15SPraveen Kaligineedi 	return err;
159975eaae15SPraveen Kaligineedi }
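
/* Editorial sketch (not part of the driver): installing a program on a
 * running device therefore follows
 *
 *	gve_turndown();			// quiesce napi and tx
 *	gve_add_xdp_queues();		// alloc/register/create XDP tx rings
 *	WRITE_ONCE(priv->xdp_prog, prog);
 *	gve_turnup();			// re-enable napi, re-arm interrupts
 *
 * and uninstalling runs the same sequence with gve_remove_xdp_queues().
 */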
160075eaae15SPraveen Kaligineedi 
1601fd8e4032SPraveen Kaligineedi static int gve_xsk_pool_enable(struct net_device *dev,
1602fd8e4032SPraveen Kaligineedi 			       struct xsk_buff_pool *pool,
1603fd8e4032SPraveen Kaligineedi 			       u16 qid)
1604fd8e4032SPraveen Kaligineedi {
1605fd8e4032SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
1606fd8e4032SPraveen Kaligineedi 	struct napi_struct *napi;
1607fd8e4032SPraveen Kaligineedi 	struct gve_rx_ring *rx;
1608fd8e4032SPraveen Kaligineedi 	int tx_qid;
1609fd8e4032SPraveen Kaligineedi 	int err;
1610fd8e4032SPraveen Kaligineedi 
1611fd8e4032SPraveen Kaligineedi 	if (qid >= priv->rx_cfg.num_queues) {
1612fd8e4032SPraveen Kaligineedi 		dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
1613fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1614fd8e4032SPraveen Kaligineedi 	}
1615fd8e4032SPraveen Kaligineedi 	if (xsk_pool_get_rx_frame_size(pool) <
1616fd8e4032SPraveen Kaligineedi 	     priv->dev->max_mtu + sizeof(struct ethhdr)) {
1617fd8e4032SPraveen Kaligineedi 		dev_err(&priv->pdev->dev, "xsk pool frame_len too small");
1618fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1619fd8e4032SPraveen Kaligineedi 	}
1620fd8e4032SPraveen Kaligineedi 
1621fd8e4032SPraveen Kaligineedi 	err = xsk_pool_dma_map(pool, &priv->pdev->dev,
1622fd8e4032SPraveen Kaligineedi 			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1623fd8e4032SPraveen Kaligineedi 	if (err)
1624fd8e4032SPraveen Kaligineedi 		return err;
1625fd8e4032SPraveen Kaligineedi 
1626fd8e4032SPraveen Kaligineedi 	/* If XDP prog is not installed, return */
1627fd8e4032SPraveen Kaligineedi 	if (!priv->xdp_prog)
1628fd8e4032SPraveen Kaligineedi 		return 0;
1629fd8e4032SPraveen Kaligineedi 
1630fd8e4032SPraveen Kaligineedi 	rx = &priv->rx[qid];
1631fd8e4032SPraveen Kaligineedi 	napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
1632fd8e4032SPraveen Kaligineedi 	err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
1633fd8e4032SPraveen Kaligineedi 	if (err)
1634fd8e4032SPraveen Kaligineedi 		goto err;
1635fd8e4032SPraveen Kaligineedi 
1636fd8e4032SPraveen Kaligineedi 	err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
1637fd8e4032SPraveen Kaligineedi 					 MEM_TYPE_XSK_BUFF_POOL, NULL);
1638fd8e4032SPraveen Kaligineedi 	if (err)
1639fd8e4032SPraveen Kaligineedi 		goto err;
1640fd8e4032SPraveen Kaligineedi 
1641fd8e4032SPraveen Kaligineedi 	xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
1642fd8e4032SPraveen Kaligineedi 	rx->xsk_pool = pool;
1643fd8e4032SPraveen Kaligineedi 
1644fd8e4032SPraveen Kaligineedi 	tx_qid = gve_xdp_tx_queue_id(priv, qid);
1645fd8e4032SPraveen Kaligineedi 	priv->tx[tx_qid].xsk_pool = pool;
1646fd8e4032SPraveen Kaligineedi 
1647fd8e4032SPraveen Kaligineedi 	return 0;
1648fd8e4032SPraveen Kaligineedi err:
1649fd8e4032SPraveen Kaligineedi 	if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
1650fd8e4032SPraveen Kaligineedi 		xdp_rxq_info_unreg(&rx->xsk_rxq);
1651fd8e4032SPraveen Kaligineedi 
1652fd8e4032SPraveen Kaligineedi 	xsk_pool_dma_unmap(pool,
1653fd8e4032SPraveen Kaligineedi 			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1654fd8e4032SPraveen Kaligineedi 	return err;
1655fd8e4032SPraveen Kaligineedi }
1656fd8e4032SPraveen Kaligineedi 
1657fd8e4032SPraveen Kaligineedi static int gve_xsk_pool_disable(struct net_device *dev,
1658fd8e4032SPraveen Kaligineedi 				u16 qid)
1659fd8e4032SPraveen Kaligineedi {
1660fd8e4032SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
1661fd8e4032SPraveen Kaligineedi 	struct napi_struct *napi_rx;
1662fd8e4032SPraveen Kaligineedi 	struct napi_struct *napi_tx;
1663fd8e4032SPraveen Kaligineedi 	struct xsk_buff_pool *pool;
1664fd8e4032SPraveen Kaligineedi 	int tx_qid;
1665fd8e4032SPraveen Kaligineedi 
1666fd8e4032SPraveen Kaligineedi 	pool = xsk_get_pool_from_qid(dev, qid);
1667fd8e4032SPraveen Kaligineedi 	if (!pool)
1668fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1669fd8e4032SPraveen Kaligineedi 	if (qid >= priv->rx_cfg.num_queues)
1670fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1671fd8e4032SPraveen Kaligineedi 
1672fd8e4032SPraveen Kaligineedi 	/* If XDP prog is not installed, unmap DMA and return */
1673fd8e4032SPraveen Kaligineedi 	if (!priv->xdp_prog)
1674fd8e4032SPraveen Kaligineedi 		goto done;
1675fd8e4032SPraveen Kaligineedi 
1676fd8e4032SPraveen Kaligineedi 	tx_qid = gve_xdp_tx_queue_id(priv, qid);
1677fd8e4032SPraveen Kaligineedi 	if (!netif_running(dev)) {
1678fd8e4032SPraveen Kaligineedi 		priv->rx[qid].xsk_pool = NULL;
1679fd8e4032SPraveen Kaligineedi 		xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
1680fd8e4032SPraveen Kaligineedi 		priv->tx[tx_qid].xsk_pool = NULL;
1681fd8e4032SPraveen Kaligineedi 		goto done;
1682fd8e4032SPraveen Kaligineedi 	}
1683fd8e4032SPraveen Kaligineedi 
1684fd8e4032SPraveen Kaligineedi 	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
1685fd8e4032SPraveen Kaligineedi 	napi_disable(napi_rx); /* make sure current rx poll is done */
1686fd8e4032SPraveen Kaligineedi 
1687fd8e4032SPraveen Kaligineedi 	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
1688fd8e4032SPraveen Kaligineedi 	napi_disable(napi_tx); /* make sure current tx poll is done */
1689fd8e4032SPraveen Kaligineedi 
1690fd8e4032SPraveen Kaligineedi 	priv->rx[qid].xsk_pool = NULL;
1691fd8e4032SPraveen Kaligineedi 	xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
1692fd8e4032SPraveen Kaligineedi 	priv->tx[tx_qid].xsk_pool = NULL;
1693fd8e4032SPraveen Kaligineedi 	smp_mb(); /* Make sure it is visible to the workers on datapath */
1694fd8e4032SPraveen Kaligineedi 
1695fd8e4032SPraveen Kaligineedi 	napi_enable(napi_rx);
1696fd8e4032SPraveen Kaligineedi 	if (gve_rx_work_pending(&priv->rx[qid]))
1697fd8e4032SPraveen Kaligineedi 		napi_schedule(napi_rx);
1698fd8e4032SPraveen Kaligineedi 
1699fd8e4032SPraveen Kaligineedi 	napi_enable(napi_tx);
1700fd8e4032SPraveen Kaligineedi 	if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
1701fd8e4032SPraveen Kaligineedi 		napi_schedule(napi_tx);
1702fd8e4032SPraveen Kaligineedi 
1703fd8e4032SPraveen Kaligineedi done:
1704fd8e4032SPraveen Kaligineedi 	xsk_pool_dma_unmap(pool,
1705fd8e4032SPraveen Kaligineedi 			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
1706fd8e4032SPraveen Kaligineedi 	return 0;
1707fd8e4032SPraveen Kaligineedi }
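
/* Note (editorial): on a running device the disable path parks both napi
 * instances before clearing the pool pointers, and the smp_mb() publishes
 * the NULLs to the datapath before napi is re-enabled, so a poll never sees
 * a half-torn-down pool.
 */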
1708fd8e4032SPraveen Kaligineedi 
1709fd8e4032SPraveen Kaligineedi static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
1710fd8e4032SPraveen Kaligineedi {
1711fd8e4032SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
1712fd8e4032SPraveen Kaligineedi 	int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
1713fd8e4032SPraveen Kaligineedi 
1714fd8e4032SPraveen Kaligineedi 	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
1715fd8e4032SPraveen Kaligineedi 		return -EINVAL;
1716fd8e4032SPraveen Kaligineedi 
1717fd8e4032SPraveen Kaligineedi 	if (flags & XDP_WAKEUP_TX) {
1718fd8e4032SPraveen Kaligineedi 		struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
1719fd8e4032SPraveen Kaligineedi 		struct napi_struct *napi =
1720fd8e4032SPraveen Kaligineedi 			&priv->ntfy_blocks[tx->ntfy_id].napi;
1721fd8e4032SPraveen Kaligineedi 
1722fd8e4032SPraveen Kaligineedi 		if (!napi_if_scheduled_mark_missed(napi)) {
1723fd8e4032SPraveen Kaligineedi 			/* Call local_bh_enable to trigger SoftIRQ processing */
1724fd8e4032SPraveen Kaligineedi 			local_bh_disable();
1725fd8e4032SPraveen Kaligineedi 			napi_schedule(napi);
1726fd8e4032SPraveen Kaligineedi 			local_bh_enable();
1727fd8e4032SPraveen Kaligineedi 		}
1728fd8e4032SPraveen Kaligineedi 
1729fd8e4032SPraveen Kaligineedi 		tx->xdp_xsk_wakeup++;
1730fd8e4032SPraveen Kaligineedi 	}
1731fd8e4032SPraveen Kaligineedi 
1732fd8e4032SPraveen Kaligineedi 	return 0;
1733fd8e4032SPraveen Kaligineedi }
1734fd8e4032SPraveen Kaligineedi 
173575eaae15SPraveen Kaligineedi static int verify_xdp_configuration(struct net_device *dev)
173675eaae15SPraveen Kaligineedi {
173775eaae15SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
173875eaae15SPraveen Kaligineedi 
173975eaae15SPraveen Kaligineedi 	if (dev->features & NETIF_F_LRO) {
174075eaae15SPraveen Kaligineedi 		netdev_warn(dev, "XDP is not supported when LRO is on.\n");
174175eaae15SPraveen Kaligineedi 		return -EOPNOTSUPP;
174275eaae15SPraveen Kaligineedi 	}
174375eaae15SPraveen Kaligineedi 
174475eaae15SPraveen Kaligineedi 	if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
174575eaae15SPraveen Kaligineedi 		netdev_warn(dev, "XDP is not supported in mode %d.\n",
174675eaae15SPraveen Kaligineedi 			    priv->queue_format);
174775eaae15SPraveen Kaligineedi 		return -EOPNOTSUPP;
174875eaae15SPraveen Kaligineedi 	}
174975eaae15SPraveen Kaligineedi 
1750da7d4b42SJohn Fraker 	if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
175175eaae15SPraveen Kaligineedi 		netdev_warn(dev, "XDP is not supported for mtu %d.\n",
175275eaae15SPraveen Kaligineedi 			    dev->mtu);
175375eaae15SPraveen Kaligineedi 		return -EOPNOTSUPP;
175475eaae15SPraveen Kaligineedi 	}
175575eaae15SPraveen Kaligineedi 
175675eaae15SPraveen Kaligineedi 	if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
175775eaae15SPraveen Kaligineedi 	    (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
175875eaae15SPraveen Kaligineedi 		netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
175975eaae15SPraveen Kaligineedi 			    priv->rx_cfg.num_queues,
176075eaae15SPraveen Kaligineedi 			    priv->tx_cfg.num_queues,
176175eaae15SPraveen Kaligineedi 			    priv->tx_cfg.max_queues);
176275eaae15SPraveen Kaligineedi 		return -EINVAL;
176375eaae15SPraveen Kaligineedi 	}
176475eaae15SPraveen Kaligineedi 	return 0;
176575eaae15SPraveen Kaligineedi }
176675eaae15SPraveen Kaligineedi 
176775eaae15SPraveen Kaligineedi static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
176875eaae15SPraveen Kaligineedi {
176975eaae15SPraveen Kaligineedi 	struct gve_priv *priv = netdev_priv(dev);
177075eaae15SPraveen Kaligineedi 	int err;
177175eaae15SPraveen Kaligineedi 
177275eaae15SPraveen Kaligineedi 	err = verify_xdp_configuration(dev);
177375eaae15SPraveen Kaligineedi 	if (err)
177475eaae15SPraveen Kaligineedi 		return err;
177575eaae15SPraveen Kaligineedi 	switch (xdp->command) {
177675eaae15SPraveen Kaligineedi 	case XDP_SETUP_PROG:
177775eaae15SPraveen Kaligineedi 		return gve_set_xdp(priv, xdp->prog, xdp->extack);
1778fd8e4032SPraveen Kaligineedi 	case XDP_SETUP_XSK_POOL:
1779fd8e4032SPraveen Kaligineedi 		if (xdp->xsk.pool)
1780fd8e4032SPraveen Kaligineedi 			return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
1781fd8e4032SPraveen Kaligineedi 		else
1782fd8e4032SPraveen Kaligineedi 			return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
178375eaae15SPraveen Kaligineedi 	default:
178475eaae15SPraveen Kaligineedi 		return -EINVAL;
178575eaae15SPraveen Kaligineedi 	}
178675eaae15SPraveen Kaligineedi }
178775eaae15SPraveen Kaligineedi 
17886f3bc487SJeroen de Borst int gve_flow_rules_reset(struct gve_priv *priv)
17896f3bc487SJeroen de Borst {
17906f3bc487SJeroen de Borst 	if (!priv->max_flow_rules)
17916f3bc487SJeroen de Borst 		return 0;
17926f3bc487SJeroen de Borst 
17936f3bc487SJeroen de Borst 	return gve_adminq_reset_flow_rules(priv);
17946f3bc487SJeroen de Borst }
17956f3bc487SJeroen de Borst 
1796834f9458SHarshitha Ramamurthy int gve_adjust_config(struct gve_priv *priv,
17975f08cd3dSShailend Chand 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
17985f08cd3dSShailend Chand 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
17995f08cd3dSShailend Chand {
18005f08cd3dSShailend Chand 	int err;
18015f08cd3dSShailend Chand 
18025f08cd3dSShailend Chand 	/* Allocate resources for the new configuration */
1803ee24284eSShailend Chand 	err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
18045f08cd3dSShailend Chand 	if (err) {
18055f08cd3dSShailend Chand 		netif_err(priv, drv, priv->dev,
18065f08cd3dSShailend Chand 			  "Adjust config failed to alloc new queues");
18075f08cd3dSShailend Chand 		return err;
18085f08cd3dSShailend Chand 	}
18095f08cd3dSShailend Chand 
18105f08cd3dSShailend Chand 	/* Teardown the device and free existing resources */
18115f08cd3dSShailend Chand 	err = gve_close(priv->dev);
18125f08cd3dSShailend Chand 	if (err) {
18135f08cd3dSShailend Chand 		netif_err(priv, drv, priv->dev,
18145f08cd3dSShailend Chand 			  "Adjust config failed to close old queues");
1815ee24284eSShailend Chand 		gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg);
18165f08cd3dSShailend Chand 		return err;
18175f08cd3dSShailend Chand 	}
18185f08cd3dSShailend Chand 
18195f08cd3dSShailend Chand 	/* Bring the device back up again with the new resources. */
1820ee24284eSShailend Chand 	err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg);
18215f08cd3dSShailend Chand 	if (err) {
18225f08cd3dSShailend Chand 		netif_err(priv, drv, priv->dev,
18235f08cd3dSShailend Chand 			  "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
18245f08cd3dSShailend Chand 		/* No need to free on error: ownership of resources is lost after
18255f08cd3dSShailend Chand 		 * calling gve_queues_start.
18265f08cd3dSShailend Chand 		 */
18275f08cd3dSShailend Chand 		gve_turndown(priv);
18285f08cd3dSShailend Chand 		return err;
18295f08cd3dSShailend Chand 	}
18305f08cd3dSShailend Chand 
18315f08cd3dSShailend Chand 	return 0;
18325f08cd3dSShailend Chand }
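
/* Editorial sketch (not part of the driver): a typical caller copies the live
 * configuration, tweaks the fields it cares about, and lets gve_adjust_config()
 * do the close/realloc/reopen dance, e.g. (new_ring_size is hypothetical):
 *
 *	struct gve_tx_alloc_rings_cfg tx_cfg = {0};
 *	struct gve_rx_alloc_rings_cfg rx_cfg = {0};
 *
 *	gve_get_curr_alloc_cfgs(priv, &tx_cfg, &rx_cfg);
 *	rx_cfg.ring_size = new_ring_size;
 *	if (netif_running(priv->dev))
 *		err = gve_adjust_config(priv, &tx_cfg, &rx_cfg);
 *
 * gve_adjust_queues() and gve_set_hsplit_config() below follow this pattern.
 */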
18335f08cd3dSShailend Chand 
1834e5b845dcSCatherine Sullivan int gve_adjust_queues(struct gve_priv *priv,
1835e5b845dcSCatherine Sullivan 		      struct gve_queue_config new_rx_config,
1836e5b845dcSCatherine Sullivan 		      struct gve_queue_config new_tx_config)
1837e5b845dcSCatherine Sullivan {
18385f08cd3dSShailend Chand 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
18395f08cd3dSShailend Chand 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
1840e5b845dcSCatherine Sullivan 	int err;
1841e5b845dcSCatherine Sullivan 
1842ee24284eSShailend Chand 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
18435f08cd3dSShailend Chand 
18445f08cd3dSShailend Chand 	/* Relay the new config from ethtool */
18455f08cd3dSShailend Chand 	tx_alloc_cfg.qcfg = &new_tx_config;
18465f08cd3dSShailend Chand 	rx_alloc_cfg.qcfg_tx = &new_tx_config;
18475f08cd3dSShailend Chand 	rx_alloc_cfg.qcfg = &new_rx_config;
18485f08cd3dSShailend Chand 	tx_alloc_cfg.num_rings = new_tx_config.num_queues;
18495f08cd3dSShailend Chand 
1850*fba917b1SPraveen Kaligineedi 	if (netif_running(priv->dev)) {
1851ee24284eSShailend Chand 		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
1852e5b845dcSCatherine Sullivan 		return err;
1853e5b845dcSCatherine Sullivan 	}
1854e5b845dcSCatherine Sullivan 	/* Set the config for the next up. */
1855e5b845dcSCatherine Sullivan 	priv->tx_cfg = new_tx_config;
1856e5b845dcSCatherine Sullivan 	priv->rx_cfg = new_rx_config;
1857e5b845dcSCatherine Sullivan 
1858e5b845dcSCatherine Sullivan 	return 0;
1859e5b845dcSCatherine Sullivan }
1860e5b845dcSCatherine Sullivan 
1861f5cedc84SCatherine Sullivan static void gve_turndown(struct gve_priv *priv)
1862f5cedc84SCatherine Sullivan {
1863f5cedc84SCatherine Sullivan 	int idx;
1864f5cedc84SCatherine Sullivan 
1865f5cedc84SCatherine Sullivan 	if (netif_carrier_ok(priv->dev))
1866f5cedc84SCatherine Sullivan 		netif_carrier_off(priv->dev);
1867f5cedc84SCatherine Sullivan 
1868f5cedc84SCatherine Sullivan 	if (!gve_get_napi_enabled(priv))
1869f5cedc84SCatherine Sullivan 		return;
1870f5cedc84SCatherine Sullivan 
1871f5cedc84SCatherine Sullivan 	/* Disable napi to prevent more work from coming in */
18722e80aeaeSPraveen Kaligineedi 	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1873f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1874f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1875f5cedc84SCatherine Sullivan 
18765abc37bdSShailend Chand 		if (!gve_tx_was_added_to_block(priv, idx))
18775abc37bdSShailend Chand 			continue;
1878f5cedc84SCatherine Sullivan 		napi_disable(&block->napi);
1879f5cedc84SCatherine Sullivan 	}
1880f5cedc84SCatherine Sullivan 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1881f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1882f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1883f5cedc84SCatherine Sullivan 
18845abc37bdSShailend Chand 		if (!gve_rx_was_added_to_block(priv, idx))
18855abc37bdSShailend Chand 			continue;
1886f5cedc84SCatherine Sullivan 		napi_disable(&block->napi);
1887f5cedc84SCatherine Sullivan 	}
1888f5cedc84SCatherine Sullivan 
1889f5cedc84SCatherine Sullivan 	/* Stop tx queues */
1890f5cedc84SCatherine Sullivan 	netif_tx_disable(priv->dev);
1891f5cedc84SCatherine Sullivan 
1892f5cedc84SCatherine Sullivan 	gve_clear_napi_enabled(priv);
189324aeb56fSKuo Zhao 	gve_clear_report_stats(priv);
1894f5cedc84SCatherine Sullivan }
1895f5cedc84SCatherine Sullivan 
1896f5cedc84SCatherine Sullivan static void gve_turnup(struct gve_priv *priv)
1897f5cedc84SCatherine Sullivan {
1898f5cedc84SCatherine Sullivan 	int idx;
1899f5cedc84SCatherine Sullivan 
1900f5cedc84SCatherine Sullivan 	/* Start the tx queues */
1901f5cedc84SCatherine Sullivan 	netif_tx_start_all_queues(priv->dev);
1902f5cedc84SCatherine Sullivan 
1903f5cedc84SCatherine Sullivan 	/* Enable napi and unmask interrupts for all queues */
19042e80aeaeSPraveen Kaligineedi 	for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
1905f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1906f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1907f5cedc84SCatherine Sullivan 
19085abc37bdSShailend Chand 		if (!gve_tx_was_added_to_block(priv, idx))
19095abc37bdSShailend Chand 			continue;
19105abc37bdSShailend Chand 
1911f5cedc84SCatherine Sullivan 		napi_enable(&block->napi);
19120dcc144aSBailey Forrest 		if (gve_is_gqi(priv)) {
1913f5cedc84SCatherine Sullivan 			iowrite32be(0, gve_irq_doorbell(priv, block));
19140dcc144aSBailey Forrest 		} else {
19156081ac20STao Liu 			gve_set_itr_coalesce_usecs_dqo(priv, block,
19166081ac20STao Liu 						       priv->tx_coalesce_usecs);
19170dcc144aSBailey Forrest 		}
1918864616d9SShailend Chand 
1919864616d9SShailend Chand 		/* Any descs written by the NIC before this barrier will be
1920864616d9SShailend Chand 		 * handled by the one-off napi schedule below. Whereas any
1921864616d9SShailend Chand 		 * descs after the barrier will generate interrupts.
1922864616d9SShailend Chand 		 */
1923864616d9SShailend Chand 		mb();
1924864616d9SShailend Chand 		napi_schedule(&block->napi);
1925f5cedc84SCatherine Sullivan 	}
1926f5cedc84SCatherine Sullivan 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1927f5cedc84SCatherine Sullivan 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1928f5cedc84SCatherine Sullivan 		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1929f5cedc84SCatherine Sullivan 
19305abc37bdSShailend Chand 		if (!gve_rx_was_added_to_block(priv, idx))
19315abc37bdSShailend Chand 			continue;
19325abc37bdSShailend Chand 
1933f5cedc84SCatherine Sullivan 		napi_enable(&block->napi);
19340dcc144aSBailey Forrest 		if (gve_is_gqi(priv)) {
1935f5cedc84SCatherine Sullivan 			iowrite32be(0, gve_irq_doorbell(priv, block));
19360dcc144aSBailey Forrest 		} else {
19376081ac20STao Liu 			gve_set_itr_coalesce_usecs_dqo(priv, block,
19386081ac20STao Liu 						       priv->rx_coalesce_usecs);
19390dcc144aSBailey Forrest 		}
1940864616d9SShailend Chand 
1941864616d9SShailend Chand 		/* Any descs written by the NIC before this barrier will be
1942864616d9SShailend Chand 		 * handled by the one-off napi schedule below. Whereas any
1943864616d9SShailend Chand 		 * descs after the barrier will generate interrupts.
1944864616d9SShailend Chand 		 */
1945864616d9SShailend Chand 		mb();
1946864616d9SShailend Chand 		napi_schedule(&block->napi);
1947f5cedc84SCatherine Sullivan 	}
1948f5cedc84SCatherine Sullivan 
1949f5cedc84SCatherine Sullivan 	gve_set_napi_enabled(priv);
1950f5cedc84SCatherine Sullivan }
1951f5cedc84SCatherine Sullivan 
1952c93462b9SShailend Chand static void gve_turnup_and_check_status(struct gve_priv *priv)
1953c93462b9SShailend Chand {
1954c93462b9SShailend Chand 	u32 status;
1955c93462b9SShailend Chand 
1956c93462b9SShailend Chand 	gve_turnup(priv);
1957c93462b9SShailend Chand 	status = ioread32be(&priv->reg_bar0->device_status);
1958c93462b9SShailend Chand 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1959c93462b9SShailend Chand }
1960c93462b9SShailend Chand 
19610290bd29SMichael S. Tsirkin static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
1962f5cedc84SCatherine Sullivan {
196387a7f321SJohn Fraker 	struct gve_notify_block *block;
196487a7f321SJohn Fraker 	struct gve_tx_ring *tx = NULL;
196587a7f321SJohn Fraker 	struct gve_priv *priv;
196687a7f321SJohn Fraker 	u32 last_nic_done;
196787a7f321SJohn Fraker 	u32 current_time;
196887a7f321SJohn Fraker 	u32 ntfy_idx;
1969f5cedc84SCatherine Sullivan 
197087a7f321SJohn Fraker 	netdev_info(dev, "Timeout on tx queue, %d", txqueue);
197187a7f321SJohn Fraker 	priv = netdev_priv(dev);
197287a7f321SJohn Fraker 	if (txqueue > priv->tx_cfg.num_queues)
197387a7f321SJohn Fraker 		goto reset;
197487a7f321SJohn Fraker 
197587a7f321SJohn Fraker 	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
19761c360cc1SDan Carpenter 	if (ntfy_idx >= priv->num_ntfy_blks)
197787a7f321SJohn Fraker 		goto reset;
197887a7f321SJohn Fraker 
197987a7f321SJohn Fraker 	block = &priv->ntfy_blocks[ntfy_idx];
198087a7f321SJohn Fraker 	tx = block->tx;
198187a7f321SJohn Fraker 
198287a7f321SJohn Fraker 	current_time = jiffies_to_msecs(jiffies);
198387a7f321SJohn Fraker 	if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
198487a7f321SJohn Fraker 		goto reset;
198587a7f321SJohn Fraker 
198687a7f321SJohn Fraker 	/* Check to see if there are missed completions, which will allow us to
198787a7f321SJohn Fraker 	 * kick the queue.
198887a7f321SJohn Fraker 	 */
198987a7f321SJohn Fraker 	last_nic_done = gve_tx_load_event_counter(priv, tx);
199087a7f321SJohn Fraker 	if (last_nic_done - tx->done) {
199187a7f321SJohn Fraker 		netdev_info(dev, "Kicking queue %d", txqueue);
199287a7f321SJohn Fraker 		iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
199387a7f321SJohn Fraker 		napi_schedule(&block->napi);
199487a7f321SJohn Fraker 		tx->last_kick_msec = current_time;
199587a7f321SJohn Fraker 		goto out;
199687a7f321SJohn Fraker 	} /* Else reset. */
199787a7f321SJohn Fraker 
199887a7f321SJohn Fraker reset:
19999e5f7d26SCatherine Sullivan 	gve_schedule_reset(priv);
200087a7f321SJohn Fraker 
200187a7f321SJohn Fraker out:
200287a7f321SJohn Fraker 	if (tx)
200387a7f321SJohn Fraker 		tx->queue_timeout++;
2004f5cedc84SCatherine Sullivan 	priv->tx_timeo_cnt++;
2005f5cedc84SCatherine Sullivan }
2006f5cedc84SCatherine Sullivan 
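/* Return the maximum rx packet buffer size when header-split is being
 * enabled and the device supports it, and the default size otherwise.
 */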
20075e37d825SJeroen de Borst u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
20085e37d825SJeroen de Borst {
20095e37d825SJeroen de Borst 	if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
20105e37d825SJeroen de Borst 		return GVE_MAX_RX_BUFFER_SIZE;
20125e37d825SJeroen de Borst 	return GVE_DEFAULT_RX_BUFFER_SIZE;
20135e37d825SJeroen de Borst }
20145e37d825SJeroen de Borst 
20155e37d825SJeroen de Borst /* Header-split is not yet supported on non-DQO_RDA queue formats, even if the device advertises it */
20165e37d825SJeroen de Borst bool gve_header_split_supported(const struct gve_priv *priv)
20175e37d825SJeroen de Borst {
20185e37d825SJeroen de Borst 	return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
20195e37d825SJeroen de Borst }
20205e37d825SJeroen de Borst 
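/* Apply an ethtool tcp-data-split request. This is a no-op if the request
 * is ETHTOOL_TCP_DATA_SPLIT_UNKNOWN or already matches the current setting;
 * otherwise, on a running interface, the rings are reallocated with the new
 * header-split setting and a matching packet buffer size.
 */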
20215e37d825SJeroen de Borst int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
20225e37d825SJeroen de Borst {
20235e37d825SJeroen de Borst 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
20245e37d825SJeroen de Borst 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
20255e37d825SJeroen de Borst 	bool enable_hdr_split;
20265e37d825SJeroen de Borst 	int err = 0;
20275e37d825SJeroen de Borst 
20285e37d825SJeroen de Borst 	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
20295e37d825SJeroen de Borst 		return 0;
20305e37d825SJeroen de Borst 
20315e37d825SJeroen de Borst 	if (!gve_header_split_supported(priv)) {
20325e37d825SJeroen de Borst 		dev_err(&priv->pdev->dev, "Header-split not supported\n");
20335e37d825SJeroen de Borst 		return -EOPNOTSUPP;
20345e37d825SJeroen de Borst 	}
20355e37d825SJeroen de Borst 
20365e37d825SJeroen de Borst 	enable_hdr_split = tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
20405e37d825SJeroen de Borst 
20415e37d825SJeroen de Borst 	if (enable_hdr_split == priv->header_split_enabled)
20425e37d825SJeroen de Borst 		return 0;
20435e37d825SJeroen de Borst 
2044ee24284eSShailend Chand 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
20455e37d825SJeroen de Borst 
20465e37d825SJeroen de Borst 	rx_alloc_cfg.enable_header_split = enable_hdr_split;
20475e37d825SJeroen de Borst 	rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
20485e37d825SJeroen de Borst 
20495e37d825SJeroen de Borst 	if (netif_running(priv->dev))
2050ee24284eSShailend Chand 		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
20515e37d825SJeroen de Borst 	return err;
20525e37d825SJeroen de Borst }
20535e37d825SJeroen de Borst 
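/* .ndo_set_features handler. Toggling LRO requires reallocating the rings
 * on a running interface, and clearing NTUPLE flushes any installed flow
 * rules; on failure the previous feature set is restored.
 */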
20545e8c5adfSBailey Forrest static int gve_set_features(struct net_device *netdev,
20555e8c5adfSBailey Forrest 			    netdev_features_t features)
20565e8c5adfSBailey Forrest {
20575e8c5adfSBailey Forrest 	const netdev_features_t orig_features = netdev->features;
2058f3753771SShailend Chand 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
2059f3753771SShailend Chand 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
20605e8c5adfSBailey Forrest 	struct gve_priv *priv = netdev_priv(netdev);
20615e8c5adfSBailey Forrest 	int err;
20625e8c5adfSBailey Forrest 
2063ee24284eSShailend Chand 	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
2064f3753771SShailend Chand 
20655e8c5adfSBailey Forrest 	if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
20665e8c5adfSBailey Forrest 		netdev->features ^= NETIF_F_LRO;
2067*fba917b1SPraveen Kaligineedi 		if (netif_running(netdev)) {
2068ee24284eSShailend Chand 			err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
20696f3bc487SJeroen de Borst 			if (err)
20706f3bc487SJeroen de Borst 				goto revert_features;
2071f3753771SShailend Chand 		}
20725e8c5adfSBailey Forrest 	}
20736f3bc487SJeroen de Borst 	if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) {
20746f3bc487SJeroen de Borst 		err = gve_flow_rules_reset(priv);
20756f3bc487SJeroen de Borst 		if (err)
20766f3bc487SJeroen de Borst 			goto revert_features;
20775e8c5adfSBailey Forrest 	}
20785e8c5adfSBailey Forrest 
20795e8c5adfSBailey Forrest 	return 0;
20806f3bc487SJeroen de Borst 
20816f3bc487SJeroen de Borst revert_features:
20826f3bc487SJeroen de Borst 	netdev->features = orig_features;
20836f3bc487SJeroen de Borst 	return err;
20845e8c5adfSBailey Forrest }
20855e8c5adfSBailey Forrest 
2086f5cedc84SCatherine Sullivan static const struct net_device_ops gve_netdev_ops = {
20875e8c5adfSBailey Forrest 	.ndo_start_xmit		=	gve_start_xmit,
208818de1e51SEric Dumazet 	.ndo_features_check	=	gve_features_check,
2089f5cedc84SCatherine Sullivan 	.ndo_open		=	gve_open,
2090f5cedc84SCatherine Sullivan 	.ndo_stop		=	gve_close,
2091f5cedc84SCatherine Sullivan 	.ndo_get_stats64	=	gve_get_stats,
2092f5cedc84SCatherine Sullivan 	.ndo_tx_timeout         =       gve_tx_timeout,
20935e8c5adfSBailey Forrest 	.ndo_set_features	=	gve_set_features,
209475eaae15SPraveen Kaligineedi 	.ndo_bpf		=	gve_xdp,
209539a7f4aaSPraveen Kaligineedi 	.ndo_xdp_xmit		=	gve_xdp_xmit,
2096fd8e4032SPraveen Kaligineedi 	.ndo_xsk_wakeup		=	gve_xsk_wakeup,
2097f5cedc84SCatherine Sullivan };
2098f5cedc84SCatherine Sullivan 
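/* Translate device_status register bits into work for the service task:
 * a reset request sets the do-reset flag, and a stats request bumps the
 * trigger count and sets the report-stats flag.
 */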
20999e5f7d26SCatherine Sullivan static void gve_handle_status(struct gve_priv *priv, u32 status)
21009e5f7d26SCatherine Sullivan {
21019e5f7d26SCatherine Sullivan 	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
21029e5f7d26SCatherine Sullivan 		dev_info(&priv->pdev->dev, "Device requested reset.\n");
21039e5f7d26SCatherine Sullivan 		gve_set_do_reset(priv);
21049e5f7d26SCatherine Sullivan 	}
210524aeb56fSKuo Zhao 	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
210624aeb56fSKuo Zhao 		priv->stats_report_trigger_cnt++;
210724aeb56fSKuo Zhao 		gve_set_do_report_stats(priv);
210824aeb56fSKuo Zhao 	}
21099e5f7d26SCatherine Sullivan }
21109e5f7d26SCatherine Sullivan 
21119e5f7d26SCatherine Sullivan static void gve_handle_reset(struct gve_priv *priv)
21129e5f7d26SCatherine Sullivan {
21139e5f7d26SCatherine Sullivan 	/* A service task will be scheduled at the end of probe to catch any
21149e5f7d26SCatherine Sullivan 	 * resets that need to happen, and we don't want to reset until
21159e5f7d26SCatherine Sullivan 	 * probe is done.
21169e5f7d26SCatherine Sullivan 	 */
21179e5f7d26SCatherine Sullivan 	if (gve_get_probe_in_progress(priv))
21189e5f7d26SCatherine Sullivan 		return;
21199e5f7d26SCatherine Sullivan 
21209e5f7d26SCatherine Sullivan 	if (gve_get_do_reset(priv)) {
21219e5f7d26SCatherine Sullivan 		rtnl_lock();
21229e5f7d26SCatherine Sullivan 		gve_reset(priv, false);
21239e5f7d26SCatherine Sullivan 		rtnl_unlock();
21249e5f7d26SCatherine Sullivan 	}
21259e5f7d26SCatherine Sullivan }
21269e5f7d26SCatherine Sullivan 
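/* Fill in the stats report shared with the device: per-queue tx counters
 * (byte counts snapshotted under u64_stats) and rx counters, each written
 * as a big-endian stat name/value/queue-id triple.
 */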
212724aeb56fSKuo Zhao void gve_handle_report_stats(struct gve_priv *priv)
212824aeb56fSKuo Zhao {
212924aeb56fSKuo Zhao 	struct stats *stats = priv->stats_report->stats;
213017c37d74SEric Dumazet 	int idx, stats_idx = 0;
213117c37d74SEric Dumazet 	unsigned int start = 0;
213217c37d74SEric Dumazet 	u64 tx_bytes;
213324aeb56fSKuo Zhao 
213424aeb56fSKuo Zhao 	if (!gve_get_report_stats(priv))
213524aeb56fSKuo Zhao 		return;
213624aeb56fSKuo Zhao 
213724aeb56fSKuo Zhao 	be64_add_cpu(&priv->stats_report->written_count, 1);
213824aeb56fSKuo Zhao 	/* tx stats */
213924aeb56fSKuo Zhao 	if (priv->tx) {
21402e80aeaeSPraveen Kaligineedi 		for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
21415e8c5adfSBailey Forrest 			u32 last_completion = 0;
21425e8c5adfSBailey Forrest 			u32 tx_frames = 0;
21435e8c5adfSBailey Forrest 
21445e8c5adfSBailey Forrest 			/* DQO doesn't currently support these metrics. */
21455e8c5adfSBailey Forrest 			if (gve_is_gqi(priv)) {
21465e8c5adfSBailey Forrest 				last_completion = priv->tx[idx].done;
21475e8c5adfSBailey Forrest 				tx_frames = priv->tx[idx].req;
21485e8c5adfSBailey Forrest 			}
21495e8c5adfSBailey Forrest 
215024aeb56fSKuo Zhao 			do {
2151068c38adSThomas Gleixner 				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
215224aeb56fSKuo Zhao 				tx_bytes = priv->tx[idx].bytes_done;
2153068c38adSThomas Gleixner 			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
215424aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
215524aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_WAKE_CNT),
215624aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->tx[idx].wake_queue),
215724aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
215824aeb56fSKuo Zhao 			};
215924aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
216024aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_STOP_CNT),
216124aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->tx[idx].stop_queue),
216224aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
216324aeb56fSKuo Zhao 			};
216424aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
216524aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
21665e8c5adfSBailey Forrest 				.value = cpu_to_be64(tx_frames),
216724aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
216824aeb56fSKuo Zhao 			};
216924aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
217024aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_BYTES_SENT),
217124aeb56fSKuo Zhao 				.value = cpu_to_be64(tx_bytes),
217224aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
217324aeb56fSKuo Zhao 			};
217424aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
217524aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
21765e8c5adfSBailey Forrest 				.value = cpu_to_be64(last_completion),
217724aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
217824aeb56fSKuo Zhao 			};
217987a7f321SJohn Fraker 			stats[stats_idx++] = (struct stats) {
218087a7f321SJohn Fraker 				.stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
218187a7f321SJohn Fraker 				.value = cpu_to_be64(priv->tx[idx].queue_timeout),
218287a7f321SJohn Fraker 				.queue_id = cpu_to_be32(idx),
218387a7f321SJohn Fraker 			};
218424aeb56fSKuo Zhao 		}
218524aeb56fSKuo Zhao 	}
218624aeb56fSKuo Zhao 	/* rx stats */
218724aeb56fSKuo Zhao 	if (priv->rx) {
218824aeb56fSKuo Zhao 		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
218924aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
219024aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
219124aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
219224aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
219324aeb56fSKuo Zhao 			};
219424aeb56fSKuo Zhao 			stats[stats_idx++] = (struct stats) {
219524aeb56fSKuo Zhao 				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
219624aeb56fSKuo Zhao 				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
219724aeb56fSKuo Zhao 				.queue_id = cpu_to_be32(idx),
219824aeb56fSKuo Zhao 			};
219924aeb56fSKuo Zhao 		}
220024aeb56fSKuo Zhao 	}
220124aeb56fSKuo Zhao }
220224aeb56fSKuo Zhao 
220324aeb56fSKuo Zhao /* Handle NIC status register changes, reset requests and report stats */
22049e5f7d26SCatherine Sullivan static void gve_service_task(struct work_struct *work)
22059e5f7d26SCatherine Sullivan {
22069e5f7d26SCatherine Sullivan 	struct gve_priv *priv = container_of(work, struct gve_priv,
22079e5f7d26SCatherine Sullivan 					     service_task);
22083b7cc736SPatricio Noyola 	u32 status = ioread32be(&priv->reg_bar0->device_status);
22099e5f7d26SCatherine Sullivan 
22103b7cc736SPatricio Noyola 	gve_handle_status(priv, status);
22119e5f7d26SCatherine Sullivan 
22129e5f7d26SCatherine Sullivan 	gve_handle_reset(priv);
22133b7cc736SPatricio Noyola 	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
22149e5f7d26SCatherine Sullivan }
22159e5f7d26SCatherine Sullivan 
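/* XDP features are only advertised for the GQI-QPL queue format: basic,
 * redirect, ndo_xmit and AF_XDP zero-copy. Other formats advertise none.
 */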
221675eaae15SPraveen Kaligineedi static void gve_set_netdev_xdp_features(struct gve_priv *priv)
221775eaae15SPraveen Kaligineedi {
221875eaae15SPraveen Kaligineedi 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
221975eaae15SPraveen Kaligineedi 		priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
222039a7f4aaSPraveen Kaligineedi 		priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
222139a7f4aaSPraveen Kaligineedi 		priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
2222fd8e4032SPraveen Kaligineedi 		priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
222375eaae15SPraveen Kaligineedi 	} else {
222475eaae15SPraveen Kaligineedi 		priv->dev->xdp_features = 0;
222575eaae15SPraveen Kaligineedi 	}
222675eaae15SPraveen Kaligineedi }
222775eaae15SPraveen Kaligineedi 
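/* Allocate the admin queue and, unless skip_describe_device, query the
 * device description to size the MTU, MSI-X vectors and tx/rx queue counts.
 * Called at probe time and again when recovering from a reset.
 */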
2228893ce44dSCatherine Sullivan static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
2229893ce44dSCatherine Sullivan {
2230893ce44dSCatherine Sullivan 	int num_ntfy;
2231893ce44dSCatherine Sullivan 	int err;
2232893ce44dSCatherine Sullivan 
2233893ce44dSCatherine Sullivan 	/* Set up the adminq */
2234893ce44dSCatherine Sullivan 	err = gve_adminq_alloc(&priv->pdev->dev, priv);
2235893ce44dSCatherine Sullivan 	if (err) {
2236893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev,
2237893ce44dSCatherine Sullivan 			"Failed to alloc admin queue: err=%d\n", err);
2238893ce44dSCatherine Sullivan 		return err;
2239893ce44dSCatherine Sullivan 	}
2240893ce44dSCatherine Sullivan 
2241c2a0c3edSJeroen de Borst 	err = gve_verify_driver_compatibility(priv);
2242c2a0c3edSJeroen de Borst 	if (err) {
2243c2a0c3edSJeroen de Borst 		dev_err(&priv->pdev->dev,
2244c2a0c3edSJeroen de Borst 			"Could not verify driver compatibility: err=%d\n", err);
2245c2a0c3edSJeroen de Borst 		goto err;
2246c2a0c3edSJeroen de Borst 	}
2247c2a0c3edSJeroen de Borst 
2248f13697ccSShailend Chand 	priv->num_registered_pages = 0;
2249f13697ccSShailend Chand 
2250893ce44dSCatherine Sullivan 	if (skip_describe_device)
2251893ce44dSCatherine Sullivan 		goto setup_device;
2252893ce44dSCatherine Sullivan 
2253a5886ef4SBailey Forrest 	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
2254893ce44dSCatherine Sullivan 	/* Get the initial information we need from the device */
2255893ce44dSCatherine Sullivan 	err = gve_adminq_describe_device(priv);
2256893ce44dSCatherine Sullivan 	if (err) {
2257893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev,
2258893ce44dSCatherine Sullivan 			"Could not get device information: err=%d\n", err);
2259893ce44dSCatherine Sullivan 		goto err;
2260893ce44dSCatherine Sullivan 	}
2261893ce44dSCatherine Sullivan 	priv->dev->mtu = priv->dev->max_mtu;
2262893ce44dSCatherine Sullivan 	num_ntfy = pci_msix_vec_count(priv->pdev);
2263893ce44dSCatherine Sullivan 	if (num_ntfy <= 0) {
2264893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev,
2265893ce44dSCatherine Sullivan 			"could not count MSI-x vectors: err=%d\n", num_ntfy);
2266893ce44dSCatherine Sullivan 		err = num_ntfy;
2267893ce44dSCatherine Sullivan 		goto err;
2268893ce44dSCatherine Sullivan 	} else if (num_ntfy < GVE_MIN_MSIX) {
2269893ce44dSCatherine Sullivan 		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
2270893ce44dSCatherine Sullivan 			GVE_MIN_MSIX, num_ntfy);
2271893ce44dSCatherine Sullivan 		err = -EINVAL;
2272893ce44dSCatherine Sullivan 		goto err;
2273893ce44dSCatherine Sullivan 	}
2274893ce44dSCatherine Sullivan 
2275a695641cSCoco Li 	/* Big TCP is only supported on DQ */
2276a695641cSCoco Li 	if (!gve_is_gqi(priv))
227766ce8e6bSRushil Gupta 		netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
2278a695641cSCoco Li 
2279f5cedc84SCatherine Sullivan 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
2280893ce44dSCatherine Sullivan 	/* gvnic has one Notification Block per MSI-x vector, except for the
2281893ce44dSCatherine Sullivan 	 * management vector
2282893ce44dSCatherine Sullivan 	 */
2283893ce44dSCatherine Sullivan 	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
2284893ce44dSCatherine Sullivan 	priv->mgmt_msix_idx = priv->num_ntfy_blks;
2285893ce44dSCatherine Sullivan 
2286f5cedc84SCatherine Sullivan 	priv->tx_cfg.max_queues =
2287f5cedc84SCatherine Sullivan 		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
2288f5cedc84SCatherine Sullivan 	priv->rx_cfg.max_queues =
2289f5cedc84SCatherine Sullivan 		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
2290f5cedc84SCatherine Sullivan 
2291f5cedc84SCatherine Sullivan 	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
2292f5cedc84SCatherine Sullivan 	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
2293f5cedc84SCatherine Sullivan 	if (priv->default_num_queues > 0) {
2294f5cedc84SCatherine Sullivan 		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
2295f5cedc84SCatherine Sullivan 						priv->tx_cfg.num_queues);
2296f5cedc84SCatherine Sullivan 		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
2297f5cedc84SCatherine Sullivan 						priv->rx_cfg.num_queues);
2298f5cedc84SCatherine Sullivan 	}
2299f5cedc84SCatherine Sullivan 
23000d5775d3SCatherine Sullivan 	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
2301f5cedc84SCatherine Sullivan 		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
23020d5775d3SCatherine Sullivan 	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
2303f5cedc84SCatherine Sullivan 		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
2304f5cedc84SCatherine Sullivan 
23056081ac20STao Liu 	if (!gve_is_gqi(priv)) {
23066081ac20STao Liu 		priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
23076081ac20STao Liu 		priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
23086081ac20STao Liu 	}
23096081ac20STao Liu 
2310893ce44dSCatherine Sullivan setup_device:
231175eaae15SPraveen Kaligineedi 	gve_set_netdev_xdp_features(priv);
2312893ce44dSCatherine Sullivan 	err = gve_setup_device_resources(priv);
2313893ce44dSCatherine Sullivan 	if (!err)
2314893ce44dSCatherine Sullivan 		return 0;
2315893ce44dSCatherine Sullivan err:
2316893ce44dSCatherine Sullivan 	gve_adminq_free(&priv->pdev->dev, priv);
2317893ce44dSCatherine Sullivan 	return err;
2318893ce44dSCatherine Sullivan }
2319893ce44dSCatherine Sullivan 
2320893ce44dSCatherine Sullivan static void gve_teardown_priv_resources(struct gve_priv *priv)
2321893ce44dSCatherine Sullivan {
2322893ce44dSCatherine Sullivan 	gve_teardown_device_resources(priv);
2323893ce44dSCatherine Sullivan 	gve_adminq_free(&priv->pdev->dev, priv);
2324893ce44dSCatherine Sullivan }
2325893ce44dSCatherine Sullivan 
23269e5f7d26SCatherine Sullivan static void gve_trigger_reset(struct gve_priv *priv)
23279e5f7d26SCatherine Sullivan {
23289e5f7d26SCatherine Sullivan 	/* Reset the device by releasing the AQ */
23299e5f7d26SCatherine Sullivan 	gve_adminq_release(priv);
23309e5f7d26SCatherine Sullivan }
23319e5f7d26SCatherine Sullivan 
23329e5f7d26SCatherine Sullivan static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
23339e5f7d26SCatherine Sullivan {
23349e5f7d26SCatherine Sullivan 	gve_trigger_reset(priv);
23359e5f7d26SCatherine Sullivan 	/* With the reset having already happened, close cannot fail */
23369e5f7d26SCatherine Sullivan 	if (was_up)
23379e5f7d26SCatherine Sullivan 		gve_close(priv->dev);
23389e5f7d26SCatherine Sullivan 	gve_teardown_priv_resources(priv);
23399e5f7d26SCatherine Sullivan }
23409e5f7d26SCatherine Sullivan 
23419e5f7d26SCatherine Sullivan static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
23429e5f7d26SCatherine Sullivan {
23439e5f7d26SCatherine Sullivan 	int err;
23449e5f7d26SCatherine Sullivan 
23459e5f7d26SCatherine Sullivan 	err = gve_init_priv(priv, true);
23469e5f7d26SCatherine Sullivan 	if (err)
23479e5f7d26SCatherine Sullivan 		goto err;
23489e5f7d26SCatherine Sullivan 	if (was_up) {
23499e5f7d26SCatherine Sullivan 		err = gve_open(priv->dev);
23509e5f7d26SCatherine Sullivan 		if (err)
23519e5f7d26SCatherine Sullivan 			goto err;
23529e5f7d26SCatherine Sullivan 	}
23539e5f7d26SCatherine Sullivan 	return 0;
23549e5f7d26SCatherine Sullivan err:
23559e5f7d26SCatherine Sullivan 	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
23569e5f7d26SCatherine Sullivan 	gve_turndown(priv);
23579e5f7d26SCatherine Sullivan 	return err;
23589e5f7d26SCatherine Sullivan }
23599e5f7d26SCatherine Sullivan 
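/* Perform a full device reset. With attempt_teardown, try a normal close
 * first and fall back to releasing the admin queue if that fails; without
 * it, turn the queues down and reset immediately. Either way, the device
 * is rebuilt afterwards via gve_reset_recovery().
 */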
23609e5f7d26SCatherine Sullivan int gve_reset(struct gve_priv *priv, bool attempt_teardown)
23619e5f7d26SCatherine Sullivan {
2362*fba917b1SPraveen Kaligineedi 	bool was_up = netif_running(priv->dev);
23639e5f7d26SCatherine Sullivan 	int err;
23649e5f7d26SCatherine Sullivan 
23659e5f7d26SCatherine Sullivan 	dev_info(&priv->pdev->dev, "Performing reset\n");
23669e5f7d26SCatherine Sullivan 	gve_clear_do_reset(priv);
23679e5f7d26SCatherine Sullivan 	gve_set_reset_in_progress(priv);
23689e5f7d26SCatherine Sullivan 	/* If we aren't attempting to tear down normally, just turn down and
23699e5f7d26SCatherine Sullivan 	 * reset right away.
23709e5f7d26SCatherine Sullivan 	 */
23719e5f7d26SCatherine Sullivan 	if (!attempt_teardown) {
23729e5f7d26SCatherine Sullivan 		gve_turndown(priv);
23739e5f7d26SCatherine Sullivan 		gve_reset_and_teardown(priv, was_up);
23749e5f7d26SCatherine Sullivan 	} else {
23759e5f7d26SCatherine Sullivan 		/* Otherwise attempt to close normally */
23769e5f7d26SCatherine Sullivan 		if (was_up) {
23779e5f7d26SCatherine Sullivan 			err = gve_close(priv->dev);
23789e5f7d26SCatherine Sullivan 			/* If that fails reset as we did above */
23799e5f7d26SCatherine Sullivan 			if (err)
23809e5f7d26SCatherine Sullivan 				gve_reset_and_teardown(priv, was_up);
23819e5f7d26SCatherine Sullivan 		}
23829e5f7d26SCatherine Sullivan 		/* Clean up any remaining resources */
23839e5f7d26SCatherine Sullivan 		gve_teardown_priv_resources(priv);
23849e5f7d26SCatherine Sullivan 	}
23859e5f7d26SCatherine Sullivan 
23869e5f7d26SCatherine Sullivan 	/* Set it all back up */
23879e5f7d26SCatherine Sullivan 	err = gve_reset_recovery(priv, was_up);
23889e5f7d26SCatherine Sullivan 	gve_clear_reset_in_progress(priv);
2389433e274bSKuo Zhao 	priv->reset_cnt++;
2390433e274bSKuo Zhao 	priv->interface_up_cnt = 0;
2391433e274bSKuo Zhao 	priv->interface_down_cnt = 0;
239224aeb56fSKuo Zhao 	priv->stats_report_trigger_cnt = 0;
23939e5f7d26SCatherine Sullivan 	return err;
23949e5f7d26SCatherine Sullivan }
23959e5f7d26SCatherine Sullivan 
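/* Report the driver version to the device by streaming the version prefix,
 * the version string and a terminating newline through the byte-wide
 * driver_version register.
 */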
2396893ce44dSCatherine Sullivan static void gve_write_version(u8 __iomem *driver_version_register)
2397893ce44dSCatherine Sullivan {
2398893ce44dSCatherine Sullivan 	const char *c = gve_version_prefix;
2399893ce44dSCatherine Sullivan 
2400893ce44dSCatherine Sullivan 	while (*c) {
2401893ce44dSCatherine Sullivan 		writeb(*c, driver_version_register);
2402893ce44dSCatherine Sullivan 		c++;
2403893ce44dSCatherine Sullivan 	}
2404893ce44dSCatherine Sullivan 
2405893ce44dSCatherine Sullivan 	c = gve_version_str;
2406893ce44dSCatherine Sullivan 	while (*c) {
2407893ce44dSCatherine Sullivan 		writeb(*c, driver_version_register);
2408893ce44dSCatherine Sullivan 		c++;
2409893ce44dSCatherine Sullivan 	}
2410893ce44dSCatherine Sullivan 	writeb('\n', driver_version_register);
2411893ce44dSCatherine Sullivan }
2412893ce44dSCatherine Sullivan 
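/* Queue-management stop: quiesce all queues, destroy rx queue @idx on the
 * device (unregistering its QPL if one is in use), then move the ring state
 * into @per_q_mem for the core to free later.
 */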
2413c93462b9SShailend Chand static int gve_rx_queue_stop(struct net_device *dev, void *per_q_mem, int idx)
2414c93462b9SShailend Chand {
2415c93462b9SShailend Chand 	struct gve_priv *priv = netdev_priv(dev);
2416c93462b9SShailend Chand 	struct gve_rx_ring *gve_per_q_mem;
2417c93462b9SShailend Chand 	int err;
2418c93462b9SShailend Chand 
2419c93462b9SShailend Chand 	if (!priv->rx)
2420c93462b9SShailend Chand 		return -EAGAIN;
2421c93462b9SShailend Chand 
2422c93462b9SShailend Chand 	/* Destroying queue 0 while other queues exist is not supported in DQO */
2423c93462b9SShailend Chand 	if (!gve_is_gqi(priv) && idx == 0)
2424c93462b9SShailend Chand 		return -ERANGE;
2425c93462b9SShailend Chand 
2426c93462b9SShailend Chand 	/* Single-queue destruction requires quiescence on all queues */
2427c93462b9SShailend Chand 	gve_turndown(priv);
2428c93462b9SShailend Chand 
2429c93462b9SShailend Chand 	/* This failure will trigger a reset - no need to clean up */
2430c93462b9SShailend Chand 	err = gve_adminq_destroy_single_rx_queue(priv, idx);
2431c93462b9SShailend Chand 	if (err)
2432c93462b9SShailend Chand 		return err;
2433c93462b9SShailend Chand 
2434c93462b9SShailend Chand 	if (gve_is_qpl(priv)) {
2435c93462b9SShailend Chand 		/* This failure will trigger a reset - no need to clean up */
2436c93462b9SShailend Chand 		err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx));
2437c93462b9SShailend Chand 		if (err)
2438c93462b9SShailend Chand 			return err;
2439c93462b9SShailend Chand 	}
2440c93462b9SShailend Chand 
2441c93462b9SShailend Chand 	gve_rx_stop_ring(priv, idx);
2442c93462b9SShailend Chand 
2443c93462b9SShailend Chand 	/* Turn the unstopped queues back up */
2444c93462b9SShailend Chand 	gve_turnup_and_check_status(priv);
2445c93462b9SShailend Chand 
2446c93462b9SShailend Chand 	gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2447c93462b9SShailend Chand 	*gve_per_q_mem = priv->rx[idx];
2448c93462b9SShailend Chand 	memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
2449c93462b9SShailend Chand 	return 0;
2450c93462b9SShailend Chand }
2451c93462b9SShailend Chand 
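/* Queue-management mem_free: free the detached ring state held in
 * @per_q_mem, using the free routine matching the queue format.
 */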
2452c93462b9SShailend Chand static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
2453c93462b9SShailend Chand {
2454c93462b9SShailend Chand 	struct gve_priv *priv = netdev_priv(dev);
2455c93462b9SShailend Chand 	struct gve_rx_alloc_rings_cfg cfg = {0};
2456c93462b9SShailend Chand 	struct gve_rx_ring *gve_per_q_mem;
2457c93462b9SShailend Chand 
2458c93462b9SShailend Chand 	gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2459c93462b9SShailend Chand 	gve_rx_get_curr_alloc_cfg(priv, &cfg);
2460c93462b9SShailend Chand 
2461c93462b9SShailend Chand 	if (gve_is_gqi(priv))
2462c93462b9SShailend Chand 		gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg);
2463c93462b9SShailend Chand 	else
2464c93462b9SShailend Chand 		gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
2465c93462b9SShailend Chand }
2466c93462b9SShailend Chand 
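/* Queue-management mem_alloc: allocate ring state for rx queue @idx into
 * @per_q_mem under the current ring config, without touching the device.
 */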
2467c93462b9SShailend Chand static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
2468c93462b9SShailend Chand 				  int idx)
2469c93462b9SShailend Chand {
2470c93462b9SShailend Chand 	struct gve_priv *priv = netdev_priv(dev);
2471c93462b9SShailend Chand 	struct gve_rx_alloc_rings_cfg cfg = {0};
2472c93462b9SShailend Chand 	struct gve_rx_ring *gve_per_q_mem;
2473c93462b9SShailend Chand 	int err;
2474c93462b9SShailend Chand 
2475c93462b9SShailend Chand 	if (!priv->rx)
2476c93462b9SShailend Chand 		return -EAGAIN;
2477c93462b9SShailend Chand 
2478c93462b9SShailend Chand 	gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2479c93462b9SShailend Chand 	gve_rx_get_curr_alloc_cfg(priv, &cfg);
2480c93462b9SShailend Chand 
2481c93462b9SShailend Chand 	if (gve_is_gqi(priv))
2482c93462b9SShailend Chand 		err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx);
2483c93462b9SShailend Chand 	else
2484c93462b9SShailend Chand 		err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx);
2485c93462b9SShailend Chand 
2486c93462b9SShailend Chand 	return err;
2487c93462b9SShailend Chand }
2488c93462b9SShailend Chand 
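/* Queue-management start: adopt the ring state from @per_q_mem, quiesce all
 * queues, register the QPL (if one is in use) and create the queue on the
 * device, then ring the rx doorbell (GQI) or post rx buffers (DQO) and turn
 * the queues back up.
 */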
2489c93462b9SShailend Chand static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx)
2490c93462b9SShailend Chand {
2491c93462b9SShailend Chand 	struct gve_priv *priv = netdev_priv(dev);
2492c93462b9SShailend Chand 	struct gve_rx_ring *gve_per_q_mem;
2493c93462b9SShailend Chand 	int err;
2494c93462b9SShailend Chand 
2495c93462b9SShailend Chand 	if (!priv->rx)
2496c93462b9SShailend Chand 		return -EAGAIN;
2497c93462b9SShailend Chand 
2498c93462b9SShailend Chand 	gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
2499c93462b9SShailend Chand 	priv->rx[idx] = *gve_per_q_mem;
2500c93462b9SShailend Chand 
2501c93462b9SShailend Chand 	/* Single-queue creation requires quiescence on all queues */
2502c93462b9SShailend Chand 	gve_turndown(priv);
2503c93462b9SShailend Chand 
2504c93462b9SShailend Chand 	gve_rx_start_ring(priv, idx);
2505c93462b9SShailend Chand 
2506c93462b9SShailend Chand 	if (gve_is_qpl(priv)) {
2507c93462b9SShailend Chand 		/* This failure will trigger a reset - no need to clean up */
2508c93462b9SShailend Chand 		err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx));
2509c93462b9SShailend Chand 		if (err)
2510c93462b9SShailend Chand 			goto abort;
2511c93462b9SShailend Chand 	}
2512c93462b9SShailend Chand 
2513c93462b9SShailend Chand 	/* This failure will trigger a reset - no need to clean up */
2514c93462b9SShailend Chand 	err = gve_adminq_create_single_rx_queue(priv, idx);
2515c93462b9SShailend Chand 	if (err)
2516c93462b9SShailend Chand 		goto abort;
2517c93462b9SShailend Chand 
2518c93462b9SShailend Chand 	if (gve_is_gqi(priv))
2519c93462b9SShailend Chand 		gve_rx_write_doorbell(priv, &priv->rx[idx]);
2520c93462b9SShailend Chand 	else
2521c93462b9SShailend Chand 		gve_rx_post_buffers_dqo(&priv->rx[idx]);
2522c93462b9SShailend Chand 
2523c93462b9SShailend Chand 	/* Turn the unstopped queues back up */
2524c93462b9SShailend Chand 	gve_turnup_and_check_status(priv);
2525c93462b9SShailend Chand 	return 0;
2526c93462b9SShailend Chand 
2527c93462b9SShailend Chand abort:
2528c93462b9SShailend Chand 	gve_rx_stop_ring(priv, idx);
2529c93462b9SShailend Chand 
2530c93462b9SShailend Chand 	/* All failures in this func result in a reset; by clearing the struct
2531c93462b9SShailend Chand 	 * at idx, we prevent a double free when that reset runs. The reset,
2532c93462b9SShailend Chand 	 * which needs the rtnl lock, will not run till this func returns and
2533c93462b9SShailend Chand 	 * its caller gives up the lock.
2534c93462b9SShailend Chand 	 */
2535c93462b9SShailend Chand 	memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
2536c93462b9SShailend Chand 	return err;
2537c93462b9SShailend Chand }
2538c93462b9SShailend Chand 
2539c93462b9SShailend Chand static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
2540c93462b9SShailend Chand 	.ndo_queue_mem_size	=	sizeof(struct gve_rx_ring),
2541c93462b9SShailend Chand 	.ndo_queue_mem_alloc	=	gve_rx_queue_mem_alloc,
2542c93462b9SShailend Chand 	.ndo_queue_mem_free	=	gve_rx_queue_mem_free,
2543c93462b9SShailend Chand 	.ndo_queue_start	=	gve_rx_queue_start,
2544c93462b9SShailend Chand 	.ndo_queue_stop		=	gve_rx_queue_stop,
2545c93462b9SShailend Chand };
2546c93462b9SShailend Chand 
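/* PCI probe: map the register and doorbell BARs, size the netdev from the
 * device-advertised queue maxima, set up the service workqueue, then
 * initialize the device and register the netdev.
 */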
2547893ce44dSCatherine Sullivan static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2548893ce44dSCatherine Sullivan {
2549893ce44dSCatherine Sullivan 	int max_tx_queues, max_rx_queues;
2550893ce44dSCatherine Sullivan 	struct net_device *dev;
2551893ce44dSCatherine Sullivan 	__be32 __iomem *db_bar;
2552893ce44dSCatherine Sullivan 	struct gve_registers __iomem *reg_bar;
2553893ce44dSCatherine Sullivan 	struct gve_priv *priv;
2554893ce44dSCatherine Sullivan 	int err;
2555893ce44dSCatherine Sullivan 
2556893ce44dSCatherine Sullivan 	err = pci_enable_device(pdev);
2557893ce44dSCatherine Sullivan 	if (err)
25586dce38b4SChristophe JAILLET 		return err;
2559893ce44dSCatherine Sullivan 
25609d0aba98SJunfeng Guo 	err = pci_request_regions(pdev, gve_driver_name);
2561893ce44dSCatherine Sullivan 	if (err)
2562893ce44dSCatherine Sullivan 		goto abort_with_enabled;
2563893ce44dSCatherine Sullivan 
2564893ce44dSCatherine Sullivan 	pci_set_master(pdev);
2565893ce44dSCatherine Sullivan 
2566bde3c8ffSChristophe JAILLET 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2567893ce44dSCatherine Sullivan 	if (err) {
2568893ce44dSCatherine Sullivan 		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
2569893ce44dSCatherine Sullivan 		goto abort_with_pci_region;
2570893ce44dSCatherine Sullivan 	}
2571893ce44dSCatherine Sullivan 
2572893ce44dSCatherine Sullivan 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
2573893ce44dSCatherine Sullivan 	if (!reg_bar) {
2574f5cedc84SCatherine Sullivan 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
2575893ce44dSCatherine Sullivan 		err = -ENOMEM;
2576893ce44dSCatherine Sullivan 		goto abort_with_pci_region;
2577893ce44dSCatherine Sullivan 	}
2578893ce44dSCatherine Sullivan 
2579893ce44dSCatherine Sullivan 	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
2580893ce44dSCatherine Sullivan 	if (!db_bar) {
2581893ce44dSCatherine Sullivan 		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
2582893ce44dSCatherine Sullivan 		err = -ENOMEM;
2583893ce44dSCatherine Sullivan 		goto abort_with_reg_bar;
2584893ce44dSCatherine Sullivan 	}
2585893ce44dSCatherine Sullivan 
2586893ce44dSCatherine Sullivan 	gve_write_version(&reg_bar->driver_version);
2587893ce44dSCatherine Sullivan 	/* Get max queues to alloc etherdev */
25881db1a862SBailey Forrest 	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
25891db1a862SBailey Forrest 	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
2590893ce44dSCatherine Sullivan 	/* Alloc and setup the netdev and priv */
2591893ce44dSCatherine Sullivan 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
2592893ce44dSCatherine Sullivan 	if (!dev) {
2593893ce44dSCatherine Sullivan 		dev_err(&pdev->dev, "could not allocate netdev\n");
25946dce38b4SChristophe JAILLET 		err = -ENOMEM;
2595893ce44dSCatherine Sullivan 		goto abort_with_db_bar;
2596893ce44dSCatherine Sullivan 	}
2597893ce44dSCatherine Sullivan 	SET_NETDEV_DEV(dev, &pdev->dev);
2598893ce44dSCatherine Sullivan 	pci_set_drvdata(pdev, dev);
2599e5b845dcSCatherine Sullivan 	dev->ethtool_ops = &gve_ethtool_ops;
2600f5cedc84SCatherine Sullivan 	dev->netdev_ops = &gve_netdev_ops;
2601c93462b9SShailend Chand 	dev->queue_mgmt_ops = &gve_queue_mgmt_ops;
26025e8c5adfSBailey Forrest 
26035e8c5adfSBailey Forrest 	/* Set default and supported features.
26045e8c5adfSBailey Forrest 	 *
26055e8c5adfSBailey Forrest 	 * Features might be set in other locations as well (such as
26065e8c5adfSBailey Forrest 	 * `gve_adminq_describe_device`).
26075e8c5adfSBailey Forrest 	 */
2608893ce44dSCatherine Sullivan 	dev->hw_features = NETIF_F_HIGHDMA;
2609893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_SG;
2610893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_HW_CSUM;
2611893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_TSO;
2612893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_TSO6;
2613893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_TSO_ECN;
2614893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_RXCSUM;
2615893ce44dSCatherine Sullivan 	dev->hw_features |= NETIF_F_RXHASH;
2616893ce44dSCatherine Sullivan 	dev->features = dev->hw_features;
2617f5cedc84SCatherine Sullivan 	dev->watchdog_timeo = 5 * HZ;
2618893ce44dSCatherine Sullivan 	dev->min_mtu = ETH_MIN_MTU;
2619893ce44dSCatherine Sullivan 	netif_carrier_off(dev);
2620893ce44dSCatherine Sullivan 
2621893ce44dSCatherine Sullivan 	priv = netdev_priv(dev);
2622893ce44dSCatherine Sullivan 	priv->dev = dev;
2623893ce44dSCatherine Sullivan 	priv->pdev = pdev;
2624893ce44dSCatherine Sullivan 	priv->msg_enable = DEFAULT_MSG_LEVEL;
2625893ce44dSCatherine Sullivan 	priv->reg_bar0 = reg_bar;
2626893ce44dSCatherine Sullivan 	priv->db_bar2 = db_bar;
26279e5f7d26SCatherine Sullivan 	priv->service_task_flags = 0x0;
2628893ce44dSCatherine Sullivan 	priv->state_flags = 0x0;
262924aeb56fSKuo Zhao 	priv->ethtool_flags = 0x0;
26300b43cf52SJeroen de Borst 	priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
26310b43cf52SJeroen de Borst 	priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
26329e5f7d26SCatherine Sullivan 
26339e5f7d26SCatherine Sullivan 	gve_set_probe_in_progress(priv);
26349e5f7d26SCatherine Sullivan 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
26359e5f7d26SCatherine Sullivan 	if (!priv->gve_wq) {
26369e5f7d26SCatherine Sullivan 		dev_err(&pdev->dev, "Could not allocate workqueue\n");
26379e5f7d26SCatherine Sullivan 		err = -ENOMEM;
26389e5f7d26SCatherine Sullivan 		goto abort_with_netdev;
26399e5f7d26SCatherine Sullivan 	}
26409e5f7d26SCatherine Sullivan 	INIT_WORK(&priv->service_task, gve_service_task);
264124aeb56fSKuo Zhao 	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
2642f5cedc84SCatherine Sullivan 	priv->tx_cfg.max_queues = max_tx_queues;
2643f5cedc84SCatherine Sullivan 	priv->rx_cfg.max_queues = max_rx_queues;
2644893ce44dSCatherine Sullivan 
2645893ce44dSCatherine Sullivan 	err = gve_init_priv(priv, false);
2646893ce44dSCatherine Sullivan 	if (err)
26479e5f7d26SCatherine Sullivan 		goto abort_with_wq;
2648893ce44dSCatherine Sullivan 
2649893ce44dSCatherine Sullivan 	err = register_netdev(dev);
2650893ce44dSCatherine Sullivan 	if (err)
26512342ae10SChristophe JAILLET 		goto abort_with_gve_init;
2652893ce44dSCatherine Sullivan 
2653893ce44dSCatherine Sullivan 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
2654a5886ef4SBailey Forrest 	dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
26559e5f7d26SCatherine Sullivan 	gve_clear_probe_in_progress(priv);
26569e5f7d26SCatherine Sullivan 	queue_work(priv->gve_wq, &priv->service_task);
2657893ce44dSCatherine Sullivan 	return 0;
2658893ce44dSCatherine Sullivan 
26592342ae10SChristophe JAILLET abort_with_gve_init:
26602342ae10SChristophe JAILLET 	gve_teardown_priv_resources(priv);
26612342ae10SChristophe JAILLET 
26629e5f7d26SCatherine Sullivan abort_with_wq:
26639e5f7d26SCatherine Sullivan 	destroy_workqueue(priv->gve_wq);
26649e5f7d26SCatherine Sullivan 
2665893ce44dSCatherine Sullivan abort_with_netdev:
2666893ce44dSCatherine Sullivan 	free_netdev(dev);
2667893ce44dSCatherine Sullivan 
2668893ce44dSCatherine Sullivan abort_with_db_bar:
2669893ce44dSCatherine Sullivan 	pci_iounmap(pdev, db_bar);
2670893ce44dSCatherine Sullivan 
2671893ce44dSCatherine Sullivan abort_with_reg_bar:
2672893ce44dSCatherine Sullivan 	pci_iounmap(pdev, reg_bar);
2673893ce44dSCatherine Sullivan 
2674893ce44dSCatherine Sullivan abort_with_pci_region:
2675893ce44dSCatherine Sullivan 	pci_release_regions(pdev);
2676893ce44dSCatherine Sullivan 
2677893ce44dSCatherine Sullivan abort_with_enabled:
2678893ce44dSCatherine Sullivan 	pci_disable_device(pdev);
26796dce38b4SChristophe JAILLET 	return err;
2680893ce44dSCatherine Sullivan }
2681893ce44dSCatherine Sullivan 
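/* PCI remove: unregister the netdev and release everything acquired in
 * gve_probe(), in reverse order.
 */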
2682893ce44dSCatherine Sullivan static void gve_remove(struct pci_dev *pdev)
2683893ce44dSCatherine Sullivan {
2684893ce44dSCatherine Sullivan 	struct net_device *netdev = pci_get_drvdata(pdev);
2685893ce44dSCatherine Sullivan 	struct gve_priv *priv = netdev_priv(netdev);
2686893ce44dSCatherine Sullivan 	__be32 __iomem *db_bar = priv->db_bar2;
2687893ce44dSCatherine Sullivan 	void __iomem *reg_bar = priv->reg_bar0;
2688893ce44dSCatherine Sullivan 
2689893ce44dSCatherine Sullivan 	unregister_netdev(netdev);
2690893ce44dSCatherine Sullivan 	gve_teardown_priv_resources(priv);
26919e5f7d26SCatherine Sullivan 	destroy_workqueue(priv->gve_wq);
2692893ce44dSCatherine Sullivan 	free_netdev(netdev);
2693893ce44dSCatherine Sullivan 	pci_iounmap(pdev, db_bar);
2694893ce44dSCatherine Sullivan 	pci_iounmap(pdev, reg_bar);
2695893ce44dSCatherine Sullivan 	pci_release_regions(pdev);
2696893ce44dSCatherine Sullivan 	pci_disable_device(pdev);
2697893ce44dSCatherine Sullivan }
2698893ce44dSCatherine Sullivan 
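/* PCI shutdown: under the rtnl lock, close the interface if it was up
 * (resetting the device if close fails) and tear down device resources.
 */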
2699974365e5SCatherine Sullivan static void gve_shutdown(struct pci_dev *pdev)
2700974365e5SCatherine Sullivan {
2701974365e5SCatherine Sullivan 	struct net_device *netdev = pci_get_drvdata(pdev);
2702974365e5SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(netdev);
2703*fba917b1SPraveen Kaligineedi 	bool was_up = netif_running(priv->dev);
2704974365e5SCatherine Sullivan 
2705974365e5SCatherine Sullivan 	rtnl_lock();
2706974365e5SCatherine Sullivan 	if (was_up && gve_close(priv->dev)) {
2707974365e5SCatherine Sullivan 		/* If the dev was up, attempt to close; if close fails, reset */
2708974365e5SCatherine Sullivan 		gve_reset_and_teardown(priv, was_up);
2709974365e5SCatherine Sullivan 	} else {
2710974365e5SCatherine Sullivan 		/* If the dev wasn't up or close worked, finish tearing down */
2711974365e5SCatherine Sullivan 		gve_teardown_priv_resources(priv);
2712974365e5SCatherine Sullivan 	}
2713974365e5SCatherine Sullivan 	rtnl_unlock();
2714974365e5SCatherine Sullivan }
2715974365e5SCatherine Sullivan 
2716974365e5SCatherine Sullivan #ifdef CONFIG_PM
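/* PM suspend: close the interface if it was up and tear down device
 * resources, remembering the up/down state for gve_resume() to restore.
 */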
2717974365e5SCatherine Sullivan static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
2718974365e5SCatherine Sullivan {
2719974365e5SCatherine Sullivan 	struct net_device *netdev = pci_get_drvdata(pdev);
2720974365e5SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(netdev);
2721*fba917b1SPraveen Kaligineedi 	bool was_up = netif_running(priv->dev);
2722974365e5SCatherine Sullivan 
2723974365e5SCatherine Sullivan 	priv->suspend_cnt++;
2724974365e5SCatherine Sullivan 	rtnl_lock();
2725974365e5SCatherine Sullivan 	if (was_up && gve_close(priv->dev)) {
2726974365e5SCatherine Sullivan 		/* If the dev was up, attempt to close; if close fails, reset */
2727974365e5SCatherine Sullivan 		gve_reset_and_teardown(priv, was_up);
2728974365e5SCatherine Sullivan 	} else {
2729974365e5SCatherine Sullivan 		/* If the dev wasn't up or close worked, finish tearing down */
2730974365e5SCatherine Sullivan 		gve_teardown_priv_resources(priv);
2731974365e5SCatherine Sullivan 	}
2732974365e5SCatherine Sullivan 	priv->up_before_suspend = was_up;
2733974365e5SCatherine Sullivan 	rtnl_unlock();
2734974365e5SCatherine Sullivan 	return 0;
2735974365e5SCatherine Sullivan }
2736974365e5SCatherine Sullivan 
2737974365e5SCatherine Sullivan static int gve_resume(struct pci_dev *pdev)
2738974365e5SCatherine Sullivan {
2739974365e5SCatherine Sullivan 	struct net_device *netdev = pci_get_drvdata(pdev);
2740974365e5SCatherine Sullivan 	struct gve_priv *priv = netdev_priv(netdev);
2741974365e5SCatherine Sullivan 	int err;
2742974365e5SCatherine Sullivan 
2743974365e5SCatherine Sullivan 	priv->resume_cnt++;
2744974365e5SCatherine Sullivan 	rtnl_lock();
2745974365e5SCatherine Sullivan 	err = gve_reset_recovery(priv, priv->up_before_suspend);
2746974365e5SCatherine Sullivan 	rtnl_unlock();
2747974365e5SCatherine Sullivan 	return err;
2748974365e5SCatherine Sullivan }
2749974365e5SCatherine Sullivan #endif /* CONFIG_PM */
2750974365e5SCatherine Sullivan 
2751893ce44dSCatherine Sullivan static const struct pci_device_id gve_id_table[] = {
2752893ce44dSCatherine Sullivan 	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
2753893ce44dSCatherine Sullivan 	{ }
2754893ce44dSCatherine Sullivan };
2755893ce44dSCatherine Sullivan 
27569d0aba98SJunfeng Guo static struct pci_driver gve_driver = {
27579d0aba98SJunfeng Guo 	.name		= gve_driver_name,
2758893ce44dSCatherine Sullivan 	.id_table	= gve_id_table,
2759893ce44dSCatherine Sullivan 	.probe		= gve_probe,
2760893ce44dSCatherine Sullivan 	.remove		= gve_remove,
2761974365e5SCatherine Sullivan 	.shutdown	= gve_shutdown,
2762974365e5SCatherine Sullivan #ifdef CONFIG_PM
2763974365e5SCatherine Sullivan 	.suspend        = gve_suspend,
2764974365e5SCatherine Sullivan 	.resume         = gve_resume,
2765974365e5SCatherine Sullivan #endif
2766893ce44dSCatherine Sullivan };
2767893ce44dSCatherine Sullivan 
27689d0aba98SJunfeng Guo module_pci_driver(gve_driver);
2769893ce44dSCatherine Sullivan 
2770893ce44dSCatherine Sullivan MODULE_DEVICE_TABLE(pci, gve_id_table);
2771893ce44dSCatherine Sullivan MODULE_AUTHOR("Google, Inc.");
27729d0aba98SJunfeng Guo MODULE_DESCRIPTION("Google Virtual NIC Driver");
2773893ce44dSCatherine Sullivan MODULE_LICENSE("Dual MIT/GPL");
2774893ce44dSCatherine Sullivan MODULE_VERSION(GVE_VERSION);
2775