xref: /freebsd/sys/dev/gve/gve_main.c (revision 40097cd67c0d52e2b288e8555b12faf02768d89c)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2023-2024 Google LLC
5  *
6  * Redistribution and use in source and binary forms, with or without modification,
7  * are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice, this
10  *    list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the copyright holder nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
22  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
24  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
27  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 #include "gve.h"
32 #include "gve_adminq.h"
33 #include "gve_dqo.h"
34 
35 #define GVE_DRIVER_VERSION "GVE-FBSD-1.3.1\n"
36 #define GVE_VERSION_MAJOR 1
37 #define GVE_VERSION_MINOR 3
38 #define GVE_VERSION_SUB 1
39 
40 #define GVE_DEFAULT_RX_COPYBREAK 256
41 
42 /* Devices supported by this driver. */
43 static struct gve_dev {
44         uint16_t vendor_id;
45         uint16_t device_id;
46         const char *name;
47 } gve_devs[] = {
48 	{ PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC, "gVNIC" }
49 };
50 
51 struct sx gve_global_lock;
52 
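/*
 * Report the driver and OS version plus capability flags to the device over
 * the admin queue. A device that does not support this command (EOPNOTSUPP)
 * is not treated as an error.
 */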
53 static int
54 gve_verify_driver_compatibility(struct gve_priv *priv)
55 {
56 	int err;
57 	struct gve_driver_info *driver_info;
58 	struct gve_dma_handle driver_info_mem;
59 
60 	err = gve_dma_alloc_coherent(priv, sizeof(struct gve_driver_info),
61 	    PAGE_SIZE, &driver_info_mem);
62 
63 	if (err != 0)
64 		return (ENOMEM);
65 
66 	driver_info = driver_info_mem.cpu_addr;
67 
68 	*driver_info = (struct gve_driver_info) {
69 		.os_type = 3, /* FreeBSD */
70 		.driver_major = GVE_VERSION_MAJOR,
71 		.driver_minor = GVE_VERSION_MINOR,
72 		.driver_sub = GVE_VERSION_SUB,
73 		.os_version_major = htobe32(FBSD_VERSION_MAJOR),
74 		.os_version_minor = htobe32(FBSD_VERSION_MINOR),
75 		.os_version_sub = htobe32(FBSD_VERSION_PATCH),
76 		.driver_capability_flags = {
77 			htobe64(GVE_DRIVER_CAPABILITY_FLAGS1),
78 			htobe64(GVE_DRIVER_CAPABILITY_FLAGS2),
79 			htobe64(GVE_DRIVER_CAPABILITY_FLAGS3),
80 			htobe64(GVE_DRIVER_CAPABILITY_FLAGS4),
81 		},
82 	};
83 
84 	snprintf(driver_info->os_version_str1, sizeof(driver_info->os_version_str1),
85 	    "FreeBSD %u", __FreeBSD_version);
86 
87 	bus_dmamap_sync(driver_info_mem.tag, driver_info_mem.map,
88 	    BUS_DMASYNC_PREREAD);
89 
90 	err = gve_adminq_verify_driver_compatibility(priv,
91 	    sizeof(struct gve_driver_info), driver_info_mem.bus_addr);
92 
93 	/* It's ok if the device doesn't support this */
94 	if (err == EOPNOTSUPP)
95 		err = 0;
96 
97 	gve_dma_free_coherent(&driver_info_mem);
98 
99 	return (err);
100 }
101 
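/*
 * Bring the interface up: program the hardware-assist bits from the enabled
 * capabilities, register QPLs when in QPL mode, create the rx and tx rings on
 * the device, mark the ifnet running and unmask all queue interrupts. Any
 * failure schedules a device reset.
 */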
102 static int
103 gve_up(struct gve_priv *priv)
104 {
105 	if_t ifp = priv->ifp;
106 	int err;
107 
108 	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
109 
110 	if (device_is_attached(priv->dev) == 0) {
111 		device_printf(priv->dev, "Cannot bring the iface up when detached\n");
112 		return (ENXIO);
113 	}
114 
115 	if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
116 		return (0);
117 
118 	if_clearhwassist(ifp);
119 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
120 		if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
121 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
122 		if_sethwassistbits(ifp, CSUM_IP6_TCP | CSUM_IP6_UDP, 0);
123 	if (if_getcapenable(ifp) & IFCAP_TSO4)
124 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
125 	if (if_getcapenable(ifp) & IFCAP_TSO6)
126 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
127 
128 	if (gve_is_qpl(priv)) {
129 		err = gve_register_qpls(priv);
130 		if (err != 0)
131 			goto reset;
132 	}
133 
134 	err = gve_create_rx_rings(priv);
135 	if (err != 0)
136 		goto reset;
137 
138 	err = gve_create_tx_rings(priv);
139 	if (err != 0)
140 		goto reset;
141 
142 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
143 
144 	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
145 		if_link_state_change(ifp, LINK_STATE_UP);
146 		gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
147 	}
148 
149 	gve_unmask_all_queue_irqs(priv);
150 	gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
151 	priv->interface_up_cnt++;
152 	return (0);
153 
154 reset:
155 	gve_schedule_reset(priv);
156 	return (err);
157 }
158 
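/*
 * Tear down what gve_up() created: destroy the rx and tx rings, unregister
 * QPLs when in QPL mode and mark the link and ifnet down. Any failure
 * schedules a device reset.
 */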
159 static void
160 gve_down(struct gve_priv *priv)
161 {
162 	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
163 
164 	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
165 		return;
166 
167 	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
168 		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
169 		gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
170 	}
171 
172 	if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
173 
174 	if (gve_destroy_rx_rings(priv) != 0)
175 		goto reset;
176 
177 	if (gve_destroy_tx_rings(priv) != 0)
178 		goto reset;
179 
180 	if (gve_is_qpl(priv)) {
181 		if (gve_unregister_qpls(priv) != 0)
182 			goto reset;
183 	}
184 
185 	if (gve_is_gqi(priv))
186 		gve_mask_all_queue_irqs(priv);
187 	gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
188 	priv->interface_down_cnt++;
189 	return;
190 
191 reset:
192 	gve_schedule_reset(priv);
193 }
194 
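/*
 * Validate the requested MTU against the device limits and, if the admin
 * queue accepts it, apply it to the ifnet.
 */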
195 static int
196 gve_set_mtu(if_t ifp, uint32_t new_mtu)
197 {
198 	struct gve_priv *priv = if_getsoftc(ifp);
199 	int err;
200 
201 	if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) {
202 		device_printf(priv->dev, "Invalid new MTU setting. new mtu: %d max mtu: %d min mtu: %d\n",
203 		    new_mtu, priv->max_mtu, ETHERMIN);
204 		return (EINVAL);
205 	}
206 
207 	err = gve_adminq_set_mtu(priv, new_mtu);
208 	if (err == 0) {
209 		if (bootverbose)
210 			device_printf(priv->dev, "MTU set to %d\n", new_mtu);
211 		if_setmtu(ifp, new_mtu);
212 	} else {
213 		device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu);
214 	}
215 
216 	return (err);
217 }
218 
219 static void
220 gve_init(void *arg)
221 {
222 	struct gve_priv *priv = (struct gve_priv *)arg;
223 
224 	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP)) {
225 		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
226 		gve_up(priv);
227 		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
228 	}
229 }
230 
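/*
 * Ifnet ioctl handler. MTU and capability changes require a full down/up
 * cycle under the interface lock.
 */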
231 static int
232 gve_ioctl(if_t ifp, u_long command, caddr_t data)
233 {
234 	struct gve_priv *priv;
235 	struct ifreq *ifr;
236 	int rc = 0;
237 
238 	priv = if_getsoftc(ifp);
239 	ifr = (struct ifreq *)data;
240 
241 	switch (command) {
242 	case SIOCSIFMTU:
243 		if (if_getmtu(ifp) == ifr->ifr_mtu)
244 			break;
245 		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
246 		gve_down(priv);
247 		gve_set_mtu(ifp, ifr->ifr_mtu);
248 		rc = gve_up(priv);
249 		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
250 		break;
251 
252 	case SIOCSIFFLAGS:
253 		if ((if_getflags(ifp) & IFF_UP) != 0) {
254 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
255 				GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
256 				rc = gve_up(priv);
257 				GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
258 			}
259 		} else {
260 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
261 				GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
262 				gve_down(priv);
263 				GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
264 			}
265 		}
266 		break;
267 
268 	case SIOCSIFCAP:
269 		if (ifr->ifr_reqcap == if_getcapenable(ifp))
270 			break;
271 		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
272 		gve_down(priv);
273 		if_setcapenable(ifp, ifr->ifr_reqcap);
274 		rc = gve_up(priv);
275 		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
276 		break;
277 
278 	case SIOCSIFMEDIA:
279 		/* FALLTHROUGH */
280 	case SIOCGIFMEDIA:
281 		rc = ifmedia_ioctl(ifp, ifr, &priv->media, command);
282 		break;
283 
284 	default:
285 		rc = ether_ioctl(ifp, command, data);
286 		break;
287 	}
288 
289 	return (rc);
290 }
291 
292 static int
293 gve_media_change(if_t ifp)
294 {
295 	struct gve_priv *priv = if_getsoftc(ifp);
296 
297 	device_printf(priv->dev, "Media change not supported\n");
298 	return (0);
299 }
300 
301 static void
302 gve_media_status(if_t ifp, struct ifmediareq *ifmr)
303 {
304 	struct gve_priv *priv = if_getsoftc(ifp);
305 
306 	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
307 
308 	ifmr->ifm_status = IFM_AVALID;
309 	ifmr->ifm_active = IFM_ETHER;
310 
311 	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
312 		ifmr->ifm_status |= IFM_ACTIVE;
313 		ifmr->ifm_active |= IFM_AUTO;
314 	} else {
315 		ifmr->ifm_active |= IFM_NONE;
316 	}
317 
318 	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
319 }
320 
321 static uint64_t
322 gve_get_counter(if_t ifp, ift_counter cnt)
323 {
324 	struct gve_priv *priv;
325 	uint64_t rpackets = 0;
326 	uint64_t tpackets = 0;
327 	uint64_t rbytes = 0;
328 	uint64_t tbytes = 0;
329 	uint64_t rx_dropped_pkt = 0;
330 	uint64_t tx_dropped_pkt = 0;
331 
332 	priv = if_getsoftc(ifp);
333 
334 	gve_accum_stats(priv, &rpackets, &rbytes, &rx_dropped_pkt, &tpackets,
335 	    &tbytes, &tx_dropped_pkt);
336 
337 	switch (cnt) {
338 	case IFCOUNTER_IPACKETS:
339 		return (rpackets);
340 
341 	case IFCOUNTER_OPACKETS:
342 		return (tpackets);
343 
344 	case IFCOUNTER_IBYTES:
345 		return (rbytes);
346 
347 	case IFCOUNTER_OBYTES:
348 		return (tbytes);
349 
350 	case IFCOUNTER_IQDROPS:
351 		return (rx_dropped_pkt);
352 
353 	case IFCOUNTER_OQDROPS:
354 		return (tx_dropped_pkt);
355 
356 	default:
357 		return (if_get_counter_default(ifp, cnt));
358 	}
359 }
360 
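/*
 * Allocate and configure the ifnet: install the driver entry points,
 * configure TSO limits, advertise checksum/TSO/LRO capabilities and attach a
 * single autoselect media entry.
 */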
361 static void
362 gve_setup_ifnet(device_t dev, struct gve_priv *priv)
363 {
364 	int caps = 0;
365 	if_t ifp;
366 
367 	ifp = priv->ifp = if_alloc(IFT_ETHER);
368 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
369 	if_setsoftc(ifp, priv);
370 	if_setdev(ifp, dev);
371 	if_setinitfn(ifp, gve_init);
372 	if_setioctlfn(ifp, gve_ioctl);
373 	if_settransmitfn(ifp, gve_xmit_ifp);
374 	if_setqflushfn(ifp, gve_qflush);
375 
376 	/*
377 	 * Set TSO limits; these must match the arguments to bus_dma_tag_create
378 	 * when creating tx->dqo.buf_dmatag. Only applies to the RDA mode
379 	 * because in QPL we copy the entire packet into the bounce buffer
380 	 * and thus it does not matter how fragmented the mbuf is.
381 	 */
382 	if (!gve_is_gqi(priv) && !gve_is_qpl(priv)) {
383 		if_sethwtsomaxsegcount(ifp, GVE_TX_MAX_DATA_DESCS_DQO);
384 		if_sethwtsomaxsegsize(ifp, GVE_TX_MAX_BUF_SIZE_DQO);
385 	}
386 	if_sethwtsomax(ifp, GVE_TSO_MAXSIZE_DQO);
387 
388 #if __FreeBSD_version >= 1400086
389 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
390 #else
391 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_KNOWSEPOCH);
392 #endif
393 
394 	ifmedia_init(&priv->media, IFM_IMASK, gve_media_change, gve_media_status);
395 	if_setgetcounterfn(ifp, gve_get_counter);
396 
397 	caps = IFCAP_RXCSUM |
398 	       IFCAP_TXCSUM |
399 	       IFCAP_TXCSUM_IPV6 |
400 	       IFCAP_TSO |
401 	       IFCAP_LRO;
402 
403 	if ((priv->supported_features & GVE_SUP_JUMBO_FRAMES_MASK) != 0)
404 		caps |= IFCAP_JUMBO_MTU;
405 
406 	if_setcapabilities(ifp, caps);
407 	if_setcapenable(ifp, caps);
408 
409 	if (bootverbose)
410 		device_printf(priv->dev, "Setting initial MTU to %d\n", priv->max_mtu);
411 	if_setmtu(ifp, priv->max_mtu);
412 
413 	ether_ifattach(ifp, priv->mac);
414 
415 	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
416 	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
417 }
418 
419 static int
420 gve_alloc_counter_array(struct gve_priv *priv)
421 {
422 	int err;
423 
424 	err = gve_dma_alloc_coherent(priv, sizeof(uint32_t) * priv->num_event_counters,
425 	    PAGE_SIZE, &priv->counter_array_mem);
426 	if (err != 0)
427 		return (err);
428 
429 	priv->counters = priv->counter_array_mem.cpu_addr;
430 	return (0);
431 }
432 
433 static void
434 gve_free_counter_array(struct gve_priv *priv)
435 {
436 	if (priv->counters != NULL)
437 		gve_dma_free_coherent(&priv->counter_array_mem);
438 	priv->counter_array_mem = (struct gve_dma_handle){};
439 }
440 
441 static int
442 gve_alloc_irq_db_array(struct gve_priv *priv)
443 {
444 	int err;
445 
446 	err = gve_dma_alloc_coherent(priv,
447 	    sizeof(struct gve_irq_db) * (priv->num_queues), PAGE_SIZE,
448 	    &priv->irqs_db_mem);
449 	if (err != 0)
450 		return (err);
451 
452 	priv->irq_db_indices = priv->irqs_db_mem.cpu_addr;
453 	return (0);
454 }
455 
456 static void
457 gve_free_irq_db_array(struct gve_priv *priv)
458 {
459 	if (priv->irq_db_indices != NULL)
460 		gve_dma_free_coherent(&priv->irqs_db_mem);
461 	priv->irqs_db_mem = (struct gve_dma_handle){};
462 }
463 
464 static void
465 gve_free_rings(struct gve_priv *priv)
466 {
467 	gve_free_irqs(priv);
468 	gve_free_tx_rings(priv);
469 	gve_free_rx_rings(priv);
470 	if (gve_is_qpl(priv))
471 		gve_free_qpls(priv);
472 }
473 
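/*
 * Allocate host-side state for the queues: QPLs (in QPL mode), rx rings, tx
 * rings and their interrupts. On failure everything allocated so far is
 * freed.
 */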
474 static int
475 gve_alloc_rings(struct gve_priv *priv)
476 {
477 	int err;
478 
479 	if (gve_is_qpl(priv)) {
480 		err = gve_alloc_qpls(priv);
481 		if (err != 0)
482 			goto abort;
483 	}
484 
485 	err = gve_alloc_rx_rings(priv);
486 	if (err != 0)
487 		goto abort;
488 
489 	err = gve_alloc_tx_rings(priv);
490 	if (err != 0)
491 		goto abort;
492 
493 	err = gve_alloc_irqs(priv);
494 	if (err != 0)
495 		goto abort;
496 
497 	return (0);
498 
499 abort:
500 	gve_free_rings(priv);
501 	return (err);
502 }
503 
504 static void
505 gve_deconfigure_resources(struct gve_priv *priv)
506 {
507 	int err;
508 
509 	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK)) {
510 		err = gve_adminq_deconfigure_device_resources(priv);
511 		if (err != 0) {
512 			device_printf(priv->dev, "Failed to deconfigure device resources: err=%d\n",
513 			    err);
514 			return;
515 		}
516 		if (bootverbose)
517 			device_printf(priv->dev, "Deconfigured device resources\n");
518 		gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
519 	}
520 
521 	gve_free_irq_db_array(priv);
522 	gve_free_counter_array(priv);
523 
524 	if (priv->ptype_lut_dqo) {
525 		free(priv->ptype_lut_dqo, M_GVE);
526 		priv->ptype_lut_dqo = NULL;
527 	}
528 }
529 
530 static int
531 gve_configure_resources(struct gve_priv *priv)
532 {
533 	int err;
534 
535 	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK))
536 		return (0);
537 
538 	err = gve_alloc_counter_array(priv);
539 	if (err != 0)
540 		return (err);
541 
542 	err = gve_alloc_irq_db_array(priv);
543 	if (err != 0)
544 		goto abort;
545 
546 	err = gve_adminq_configure_device_resources(priv);
547 	if (err != 0) {
548 		device_printf(priv->dev, "Failed to configure device resources: err=%d\n",
549 			      err);
550 		err = (ENXIO);
551 		goto abort;
552 	}
553 
554 	if (!gve_is_gqi(priv)) {
555 		priv->ptype_lut_dqo = malloc(sizeof(*priv->ptype_lut_dqo), M_GVE,
556 		    M_WAITOK | M_ZERO);
557 
558 		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
559 		if (err != 0) {
560 			device_printf(priv->dev, "Failed to configure ptype lut: err=%d\n",
561 			    err);
562 			goto abort;
563 		}
564 	}
565 
566 	gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
567 	if (bootverbose)
568 		device_printf(priv->dev, "Configured device resources\n");
569 	return (0);
570 
571 abort:
572 	gve_deconfigure_resources(priv);
573 	return (err);
574 }
575 
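/*
 * Read the maximum queue counts from the register BAR, clamp them to
 * default_num_queues when that is set, and derive the total queue count and
 * the management MSI-X vector index.
 */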
576 static void
577 gve_set_queue_cnts(struct gve_priv *priv)
578 {
579 	priv->tx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_TX_QUEUES);
580 	priv->rx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_RX_QUEUES);
581 	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
582 	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
583 
584 	if (priv->default_num_queues > 0) {
585 		priv->tx_cfg.num_queues = MIN(priv->default_num_queues,
586 		    priv->tx_cfg.num_queues);
587 		priv->rx_cfg.num_queues = MIN(priv->default_num_queues,
588 		    priv->rx_cfg.num_queues);
589 	}
590 
591 	priv->num_queues = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues;
592 	priv->mgmt_msix_idx = priv->num_queues;
593 }
594 
595 static int
596 gve_alloc_adminq_and_describe_device(struct gve_priv *priv)
597 {
598 	int err;
599 
600 	if ((err = gve_adminq_alloc(priv)) != 0)
601 		return (err);
602 
603 	if ((err = gve_verify_driver_compatibility(priv)) != 0) {
604 		device_printf(priv->dev,
605 		    "Failed to verify driver compatibility: err=%d\n", err);
606 		goto abort;
607 	}
608 
609 	if ((err = gve_adminq_describe_device(priv)) != 0)
610 		goto abort;
611 
612 	gve_set_queue_cnts(priv);
613 
614 	priv->num_registered_pages = 0;
615 	return (0);
616 
617 abort:
618 	gve_release_adminq(priv);
619 	return (err);
620 }
621 
622 void
623 gve_schedule_reset(struct gve_priv *priv)
624 {
625 	if (gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET))
626 		return;
627 
628 	device_printf(priv->dev, "Scheduling reset task!\n");
629 	gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
630 	taskqueue_enqueue(priv->service_tq, &priv->service_task);
631 }
632 
633 static void
634 gve_destroy(struct gve_priv *priv)
635 {
636 	gve_down(priv);
637 	gve_deconfigure_resources(priv);
638 	gve_release_adminq(priv);
639 }
640 
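/*
 * Bring the device back after a reset: re-allocate the admin queue,
 * reconfigure device resources and bring the queues up again.
 */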
641 static void
642 gve_restore(struct gve_priv *priv)
643 {
644 	int err;
645 
646 	err = gve_adminq_alloc(priv);
647 	if (err != 0)
648 		goto abort;
649 
650 	err = gve_configure_resources(priv);
651 	if (err != 0)
652 		goto abort;
653 
654 	err = gve_up(priv);
655 	if (err != 0)
656 		goto abort;
657 
658 	return;
659 
660 abort:
661 	device_printf(priv->dev, "Restore failed!\n");
662 	return;
663 }
664 
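/*
 * Service-task half of a reset: quiesce the interface, release the admin
 * queue so the NIC drops its registered resources, refresh the driver rings
 * via gve_down() and rebuild everything with gve_restore().
 */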
665 static void
666 gve_handle_reset(struct gve_priv *priv)
667 {
668 	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET))
669 		return;
670 
671 	gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
672 	gve_set_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
673 
674 	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
675 
676 	if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
677 	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
678 	gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
679 
680 	/*
681 	 * Releasing the adminq causes the NIC to destroy all resources
682 	 * registered with it, so clearing the state flags below keeps the
683 	 * subsequent gve_down call from asking the NIC to destroy those
684 	 * resources a second time.
685 	 *
686 	 * The call to gve_down is needed in the first place to refresh
687 	 * the state and the DMA-able memory within each driver ring.
688 	 */
689 	gve_release_adminq(priv);
690 	gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
691 	gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
692 	gve_clear_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK);
693 	gve_clear_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);
694 
695 	gve_down(priv);
696 	gve_restore(priv);
697 
698 	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
699 
700 	priv->reset_cnt++;
701 	gve_clear_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
702 }
703 
704 static void
705 gve_handle_link_status(struct gve_priv *priv)
706 {
707 	uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
708 	bool link_up = status & GVE_DEVICE_STATUS_LINK_STATUS;
709 
710 	if (link_up == gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
711 		return;
712 
713 	if (link_up) {
714 		if (bootverbose)
715 			device_printf(priv->dev, "Device link is up.\n");
716 		if_link_state_change(priv->ifp, LINK_STATE_UP);
717 		gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
718 	} else {
719 		device_printf(priv->dev, "Device link is down.\n");
720 		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
721 		gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
722 	}
723 }
724 
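/*
 * Service taskqueue handler: picks up device-requested resets from the
 * status register, runs any pending reset and refreshes the link state.
 */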
725 static void
726 gve_service_task(void *arg, int pending)
727 {
728 	struct gve_priv *priv = (struct gve_priv *)arg;
729 	uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
730 
731 	if (((GVE_DEVICE_STATUS_RESET_MASK & status) != 0) &&
732 	    !gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) {
733 		device_printf(priv->dev, "Device requested reset\n");
734 		gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
735 	}
736 
737 	gve_handle_reset(priv);
738 	gve_handle_link_status(priv);
739 }
740 
741 static int
742 gve_probe(device_t dev)
743 {
744 	uint16_t deviceid, vendorid;
745 	int i;
746 
747 	vendorid = pci_get_vendor(dev);
748 	deviceid = pci_get_device(dev);
749 
750 	for (i = 0; i < nitems(gve_devs); i++) {
751 		if (vendorid == gve_devs[i].vendor_id &&
752 		    deviceid == gve_devs[i].device_id) {
753 			device_set_desc(dev, gve_devs[i].name);
754 			return (BUS_PROBE_DEFAULT);
755 		}
756 	}
757 	return (ENXIO);
758 }
759 
760 static void
761 gve_free_sys_res_mem(struct gve_priv *priv)
762 {
763 	if (priv->msix_table != NULL)
764 		bus_release_resource(priv->dev, SYS_RES_MEMORY,
765 		    rman_get_rid(priv->msix_table), priv->msix_table);
766 
767 	if (priv->db_bar != NULL)
768 		bus_release_resource(priv->dev, SYS_RES_MEMORY,
769 		    rman_get_rid(priv->db_bar), priv->db_bar);
770 
771 	if (priv->reg_bar != NULL)
772 		bus_release_resource(priv->dev, SYS_RES_MEMORY,
773 		    rman_get_rid(priv->reg_bar), priv->reg_bar);
774 }
775 
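/*
 * Device attach: map the register, doorbell and MSI-X BARs, set up the admin
 * queue, device resources, rings, ifnet, service taskqueue and sysctl nodes.
 */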
776 static int
777 gve_attach(device_t dev)
778 {
779 	struct gve_priv *priv;
780 	int rid;
781 	int err;
782 
783 	snprintf(gve_version, sizeof(gve_version), "%d.%d.%d",
784 	    GVE_VERSION_MAJOR, GVE_VERSION_MINOR, GVE_VERSION_SUB);
785 
786 	priv = device_get_softc(dev);
787 	priv->dev = dev;
788 	GVE_IFACE_LOCK_INIT(priv->gve_iface_lock);
789 
790 	pci_enable_busmaster(dev);
791 
792 	rid = PCIR_BAR(GVE_REGISTER_BAR);
793 	priv->reg_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
794 	    &rid, RF_ACTIVE);
795 	if (priv->reg_bar == NULL) {
796 		device_printf(dev, "Failed to allocate BAR0\n");
797 		err = ENXIO;
798 		goto abort;
799 	}
800 
801 	rid = PCIR_BAR(GVE_DOORBELL_BAR);
802 	priv->db_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
803 	    &rid, RF_ACTIVE);
804 	if (priv->db_bar == NULL) {
805 		device_printf(dev, "Failed to allocate BAR2\n");
806 		err = ENXIO;
807 		goto abort;
808 	}
809 
810 	rid = pci_msix_table_bar(priv->dev);
811 	priv->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
812 	    &rid, RF_ACTIVE);
813 	if (priv->msix_table == NULL) {
814 		device_printf(dev, "Failed to allocate msix table\n");
815 		err = ENXIO;
816 		goto abort;
817 	}
818 
819 	err = gve_alloc_adminq_and_describe_device(priv);
820 	if (err != 0)
821 		goto abort;
822 
823 	err = gve_configure_resources(priv);
824 	if (err != 0)
825 		goto abort;
826 
827 	err = gve_alloc_rings(priv);
828 	if (err != 0)
829 		goto abort;
830 
831 	gve_setup_ifnet(dev, priv);
832 
833 	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
834 
835 	bus_write_multi_1(priv->reg_bar, DRIVER_VERSION, GVE_DRIVER_VERSION,
836 	    sizeof(GVE_DRIVER_VERSION) - 1);
837 
838 	TASK_INIT(&priv->service_task, 0, gve_service_task, priv);
839 	priv->service_tq = taskqueue_create("gve service", M_WAITOK | M_ZERO,
840 	    taskqueue_thread_enqueue, &priv->service_tq);
841 	taskqueue_start_threads(&priv->service_tq, 1, PI_NET, "%s service tq",
842 	    device_get_nameunit(priv->dev));
843 
844 	gve_setup_sysctl(priv);
845 
846 	if (bootverbose)
847 		device_printf(priv->dev, "Successfully attached %s", GVE_DRIVER_VERSION);
848 	return (0);
849 
850 abort:
851 	gve_free_rings(priv);
852 	gve_deconfigure_resources(priv);
853 	gve_release_adminq(priv);
854 	gve_free_sys_res_mem(priv);
855 	GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
856 	return (err);
857 }
858 
859 static int
860 gve_detach(device_t dev)
861 {
862 	struct gve_priv *priv = device_get_softc(dev);
863 	if_t ifp = priv->ifp;
864 	int error;
865 
866 	error = bus_generic_detach(dev);
867 	if (error != 0)
868 		return (error);
869 
870 	ether_ifdetach(ifp);
871 
872 	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
873 	gve_destroy(priv);
874 	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
875 
876 	gve_free_rings(priv);
877 	gve_free_sys_res_mem(priv);
878 	GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
879 
880 	while (taskqueue_cancel(priv->service_tq, &priv->service_task, NULL))
881 		taskqueue_drain(priv->service_tq, &priv->service_task);
882 	taskqueue_free(priv->service_tq);
883 
884 	if_free(ifp);
885 	return (0);
886 }
887 
888 static device_method_t gve_methods[] = {
889 	DEVMETHOD(device_probe, gve_probe),
890 	DEVMETHOD(device_attach, gve_attach),
891 	DEVMETHOD(device_detach, gve_detach),
892 	DEVMETHOD_END
893 };
894 
895 static driver_t gve_driver = {
896 	"gve",
897 	gve_methods,
898 	sizeof(struct gve_priv)
899 };
900 
901 #if __FreeBSD_version < 1301503
902 static devclass_t gve_devclass;
903 
904 DRIVER_MODULE(gve, pci, gve_driver, gve_devclass, 0, 0);
905 #else
906 DRIVER_MODULE(gve, pci, gve_driver, 0, 0);
907 #endif
908 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, gve, gve_devs,
909     nitems(gve_devs));
910