/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "gve.h"
#include "gve_adminq.h"

#define GVE_DRIVER_VERSION "GVE-FBSD-1.0.0\n"
#define GVE_VERSION_MAJOR 0
#define GVE_VERSION_MINOR 9
#define GVE_VERSION_SUB 0

#define GVE_DEFAULT_RX_COPYBREAK 256

/* Devices supported by this driver. */
static struct gve_dev {
	uint16_t vendor_id;
	uint16_t device_id;
	const char *name;
} gve_devs[] = {
	{ PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC, "gVNIC" }
};
#define GVE_DEVS_COUNT nitems(gve_devs)

struct sx gve_global_lock;

static int
gve_verify_driver_compatibility(struct gve_priv *priv)
{
	int err;
	struct gve_driver_info *driver_info;
	struct gve_dma_handle driver_info_mem;

	err = gve_dma_alloc_coherent(priv, sizeof(struct gve_driver_info),
	    PAGE_SIZE, &driver_info_mem);

	if (err != 0)
		return (ENOMEM);

	driver_info = driver_info_mem.cpu_addr;

	*driver_info = (struct gve_driver_info) {
		.os_type = 3, /* FreeBSD */
		.driver_major = GVE_VERSION_MAJOR,
		.driver_minor = GVE_VERSION_MINOR,
		.driver_sub = GVE_VERSION_SUB,
		.os_version_major = htobe32(FBSD_VERSION_MAJOR),
		.os_version_minor = htobe32(FBSD_VERSION_MINOR),
		.os_version_sub = htobe32(FBSD_VERSION_PATCH),
		.driver_capability_flags = {
			htobe64(GVE_DRIVER_CAPABILITY_FLAGS1),
			htobe64(GVE_DRIVER_CAPABILITY_FLAGS2),
			htobe64(GVE_DRIVER_CAPABILITY_FLAGS3),
			htobe64(GVE_DRIVER_CAPABILITY_FLAGS4),
		},
	};

	snprintf(driver_info->os_version_str1, sizeof(driver_info->os_version_str1),
	    "FreeBSD %u", __FreeBSD_version);

	bus_dmamap_sync(driver_info_mem.tag, driver_info_mem.map,
	    BUS_DMASYNC_PREREAD);

	err = gve_adminq_verify_driver_compatibility(priv,
	    sizeof(struct gve_driver_info), driver_info_mem.bus_addr);

	/* It's ok if the device doesn't support this */
	if (err == EOPNOTSUPP)
		err = 0;

	gve_dma_free_coherent(&driver_info_mem);

	return (err);
}
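
/*
 * Bring the queues up on the device: program the checksum/TSO assist bits
 * from the enabled capabilities, register the queue page lists, and create
 * the rx and tx rings. Called with the iface lock held; any failure
 * schedules a device reset.
 */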
static int
gve_up(struct gve_priv *priv)
{
	if_t ifp = priv->ifp;
	int err;

	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);

	if (device_is_attached(priv->dev) == 0) {
		device_printf(priv->dev, "Cannot bring the iface up when detached\n");
		return (ENXIO);
	}

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
		return (0);

	if_clearhwassist(ifp);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, CSUM_IP6_TCP | CSUM_IP6_UDP, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	err = gve_register_qpls(priv);
	if (err != 0)
		goto reset;

	err = gve_create_rx_rings(priv);
	if (err != 0)
		goto reset;

	err = gve_create_tx_rings(priv);
	if (err != 0)
		goto reset;

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
		if_link_state_change(ifp, LINK_STATE_UP);
		gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	}

	gve_unmask_all_queue_irqs(priv);
	gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
	priv->interface_up_cnt++;
	return (0);

reset:
	gve_schedule_reset(priv);
	return (err);
}

static void
gve_down(struct gve_priv *priv)
{
	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
		return;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	}

	if_setdrvflagbits(priv->ifp,
	    IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	if (gve_destroy_rx_rings(priv) != 0)
		goto reset;

	if (gve_destroy_tx_rings(priv) != 0)
		goto reset;

	if (gve_unregister_qpls(priv) != 0)
		goto reset;

	gve_mask_all_queue_irqs(priv);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
	priv->interface_down_cnt++;
	return;

reset:
	gve_schedule_reset(priv);
}

static int
gve_set_mtu(if_t ifp, uint32_t new_mtu)
{
	struct gve_priv *priv = if_getsoftc(ifp);
	int err;

	if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) {
		device_printf(priv->dev, "Invalid new MTU setting. new mtu: %d max mtu: %d min mtu: %d\n",
		    new_mtu, priv->max_mtu, ETHERMIN);
		return (EINVAL);
	}

	err = gve_adminq_set_mtu(priv, new_mtu);
	if (err == 0) {
		if (bootverbose)
			device_printf(priv->dev, "MTU set to %d\n", new_mtu);
		if_setmtu(ifp, new_mtu);
	} else {
		device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu);
	}

	return (err);
}

static void
gve_init(void *arg)
{
	struct gve_priv *priv = (struct gve_priv *)arg;

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP)) {
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		gve_up(priv);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
	}
}

static int
gve_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct gve_priv *priv;
	struct ifreq *ifr;
	int rc = 0;

	priv = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFMTU:
		if (if_getmtu(ifp) == ifr->ifr_mtu)
			break;
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		gve_down(priv);
		gve_set_mtu(ifp, ifr->ifr_mtu);
		rc = gve_up(priv);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
		break;

	case SIOCSIFFLAGS:
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
				GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
				rc = gve_up(priv);
				GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
				gve_down(priv);
				GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
			}
		}
		break;

	case SIOCSIFCAP:
		if (ifr->ifr_reqcap == if_getcapenable(ifp))
			break;
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		gve_down(priv);
		if_setcapenable(ifp, ifr->ifr_reqcap);
		rc = gve_up(priv);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
		break;

	case SIOCSIFMEDIA:
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &priv->media, command);
		break;

	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return (rc);
}

static int
gve_media_change(if_t ifp)
{
	struct gve_priv *priv = if_getsoftc(ifp);

	device_printf(priv->dev, "Media change not supported\n");
	return (0);
}

static void
gve_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gve_priv *priv = if_getsoftc(ifp);

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_AUTO;
	} else {
		ifmr->ifm_active |= IFM_NONE;
	}

	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
}
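
/*
 * if_get_counter handler: accumulate the per-ring packet, byte, and drop
 * counters and map them onto the generic ifnet counters.
 */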
static uint64_t
gve_get_counter(if_t ifp, ift_counter cnt)
{
	struct gve_priv *priv;
	uint64_t rpackets = 0;
	uint64_t tpackets = 0;
	uint64_t rbytes = 0;
	uint64_t tbytes = 0;
	uint64_t rx_dropped_pkt = 0;
	uint64_t tx_dropped_pkt = 0;

	priv = if_getsoftc(ifp);

	gve_accum_stats(priv, &rpackets, &rbytes, &rx_dropped_pkt, &tpackets,
	    &tbytes, &tx_dropped_pkt);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (rpackets);

	case IFCOUNTER_OPACKETS:
		return (tpackets);

	case IFCOUNTER_IBYTES:
		return (rbytes);

	case IFCOUNTER_OBYTES:
		return (tbytes);

	case IFCOUNTER_IQDROPS:
		return (rx_dropped_pkt);

	case IFCOUNTER_OQDROPS:
		return (tx_dropped_pkt);

	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static int
gve_setup_ifnet(device_t dev, struct gve_priv *priv)
{
	int caps = 0;
	if_t ifp;

	ifp = priv->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(priv->dev, "Failed to allocate ifnet struct\n");
		return (ENXIO);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setsoftc(ifp, priv);
	if_setdev(ifp, dev);
	if_setinitfn(ifp, gve_init);
	if_setioctlfn(ifp, gve_ioctl);
	if_settransmitfn(ifp, gve_xmit_ifp);
	if_setqflushfn(ifp, gve_qflush);

#if __FreeBSD_version >= 1400086
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
#else
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_KNOWSEPOCH);
#endif

	ifmedia_init(&priv->media, IFM_IMASK, gve_media_change, gve_media_status);
	if_setgetcounterfn(ifp, gve_get_counter);

	caps = IFCAP_RXCSUM |
	    IFCAP_TXCSUM |
	    IFCAP_TXCSUM_IPV6 |
	    IFCAP_TSO |
	    IFCAP_LRO;

	if ((priv->supported_features & GVE_SUP_JUMBO_FRAMES_MASK) != 0)
		caps |= IFCAP_JUMBO_MTU;

	if_setcapabilities(ifp, caps);
	if_setcapenable(ifp, caps);

	if (bootverbose)
		device_printf(priv->dev, "Setting initial MTU to %d\n", priv->max_mtu);
	if_setmtu(ifp, priv->max_mtu);

	ether_ifattach(ifp, priv->mac);

	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);

	return (0);
}

static int
gve_alloc_counter_array(struct gve_priv *priv)
{
	int err;

	err = gve_dma_alloc_coherent(priv, sizeof(uint32_t) * priv->num_event_counters,
	    PAGE_SIZE, &priv->counter_array_mem);
	if (err != 0)
		return (err);

	priv->counters = priv->counter_array_mem.cpu_addr;
	return (0);
}

static void
gve_free_counter_array(struct gve_priv *priv)
{
	if (priv->counters != NULL)
		gve_dma_free_coherent(&priv->counter_array_mem);
	priv->counter_array_mem = (struct gve_dma_handle){};
}

static int
gve_alloc_irq_db_array(struct gve_priv *priv)
{
	int err;

	err = gve_dma_alloc_coherent(priv,
	    sizeof(struct gve_irq_db) * (priv->num_queues), PAGE_SIZE,
	    &priv->irqs_db_mem);
	if (err != 0)
		return (err);

	priv->irq_db_indices = priv->irqs_db_mem.cpu_addr;
	return (0);
}

static void
gve_free_irq_db_array(struct gve_priv *priv)
{
	if (priv->irq_db_indices != NULL)
		gve_dma_free_coherent(&priv->irqs_db_mem);
	priv->irqs_db_mem = (struct gve_dma_handle){};
}

static void
gve_free_rings(struct gve_priv *priv)
{
	gve_free_irqs(priv);
	gve_free_tx_rings(priv);
	gve_free_rx_rings(priv);
	gve_free_qpls(priv);
}
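
/*
 * Allocate the queue page lists, the rx and tx rings, and their interrupts;
 * any partial allocation is undone through gve_free_rings on failure.
 */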
static int
gve_alloc_rings(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_qpls(priv);
	if (err != 0)
		goto abort;

	err = gve_alloc_rx_rings(priv);
	if (err != 0)
		goto abort;

	err = gve_alloc_tx_rings(priv);
	if (err != 0)
		goto abort;

	err = gve_alloc_irqs(priv);
	if (err != 0)
		goto abort;

	return (0);

abort:
	gve_free_rings(priv);
	return (err);
}

static void
gve_deconfigure_resources(struct gve_priv *priv)
{
	int err;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err != 0) {
			device_printf(priv->dev, "Failed to deconfigure device resources: err=%d\n",
			    err);
			return;
		}
		if (bootverbose)
			device_printf(priv->dev, "Deconfigured device resources\n");
		gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
	}

	gve_free_irq_db_array(priv);
	gve_free_counter_array(priv);
}

static int
gve_configure_resources(struct gve_priv *priv)
{
	int err;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK))
		return (0);

	err = gve_alloc_counter_array(priv);
	if (err != 0)
		return (err);

	err = gve_alloc_irq_db_array(priv);
	if (err != 0)
		goto abort;

	err = gve_adminq_configure_device_resources(priv);
	if (err != 0) {
		device_printf(priv->dev, "Failed to configure device resources: err=%d\n",
		    err);
		err = (ENXIO);
		goto abort;
	}

	gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
	if (bootverbose)
		device_printf(priv->dev, "Configured device resources\n");
	return (0);

abort:
	gve_deconfigure_resources(priv);
	return (err);
}

static void
gve_set_queue_cnts(struct gve_priv *priv)
{
	priv->tx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_TX_QUEUES);
	priv->rx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_RX_QUEUES);
	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;

	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = MIN(priv->default_num_queues,
		    priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = MIN(priv->default_num_queues,
		    priv->rx_cfg.num_queues);
	}

	priv->num_queues = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues;
	priv->mgmt_msix_idx = priv->num_queues;
}

static int
gve_alloc_adminq_and_describe_device(struct gve_priv *priv)
{
	int err;

	if ((err = gve_adminq_alloc(priv)) != 0)
		return (err);

	if ((err = gve_verify_driver_compatibility(priv)) != 0) {
		device_printf(priv->dev,
		    "Failed to verify driver compatibility: err=%d\n", err);
		goto abort;
	}

	if ((err = gve_adminq_describe_device(priv)) != 0)
		goto abort;

	gve_set_queue_cnts(priv);

	priv->num_registered_pages = 0;
	return (0);

abort:
	gve_release_adminq(priv);
	return (err);
}

void
gve_schedule_reset(struct gve_priv *priv)
{
	if (gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET))
		return;

	device_printf(priv->dev, "Scheduling reset task!\n");
	gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
	taskqueue_enqueue(priv->service_tq, &priv->service_task);
}

static void
gve_destroy(struct gve_priv *priv)
{
	gve_down(priv);
	gve_deconfigure_resources(priv);
	gve_release_adminq(priv);
}
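
/*
 * Rebuild device state after a reset: re-allocate the admin queue,
 * reconfigure device resources, and bring the queues back up.
 */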
static void
gve_restore(struct gve_priv *priv)
{
	int err;

	err = gve_adminq_alloc(priv);
	if (err != 0)
		goto abort;

	err = gve_configure_resources(priv);
	if (err != 0)
		goto abort;

	err = gve_up(priv);
	if (err != 0)
		goto abort;

	return;

abort:
	device_printf(priv->dev, "Restore failed!\n");
	return;
}

static void
gve_handle_reset(struct gve_priv *priv)
{
	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET))
		return;

	gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
	gve_set_state_flag(priv, GVE_STATE_FLAG_IN_RESET);

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);

	if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);

	/*
	 * Releasing the adminq causes the NIC to destroy all resources
	 * registered with it, so by clearing the flags beneath we cause
	 * the subsequent gve_down call below to not attempt to tell the
	 * NIC to destroy these resources again.
	 *
	 * The call to gve_down is needed in the first place to refresh
	 * the state and the DMA-able memory within each driver ring.
	 */
	gve_release_adminq(priv);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);

	gve_down(priv);
	gve_restore(priv);

	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);

	priv->reset_cnt++;
	gve_clear_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
}

static void
gve_handle_link_status(struct gve_priv *priv)
{
	uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
	bool link_up = status & GVE_DEVICE_STATUS_LINK_STATUS;

	if (link_up == gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
		return;

	if (link_up) {
		if (bootverbose)
			device_printf(priv->dev, "Device link is up.\n");
		if_link_state_change(priv->ifp, LINK_STATE_UP);
		gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	} else {
		device_printf(priv->dev, "Device link is down.\n");
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	}
}

static void
gve_service_task(void *arg, int pending)
{
	struct gve_priv *priv = (struct gve_priv *)arg;
	uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);

	if (((GVE_DEVICE_STATUS_RESET_MASK & status) != 0) &&
	    !gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) {
		device_printf(priv->dev, "Device requested reset\n");
		gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
	}

	gve_handle_reset(priv);
	gve_handle_link_status(priv);
}

static int
gve_probe(device_t dev)
{
	uint16_t deviceid, vendorid;
	int i;

	vendorid = pci_get_vendor(dev);
	deviceid = pci_get_device(dev);

	for (i = 0; i < GVE_DEVS_COUNT; i++) {
		if (vendorid == gve_devs[i].vendor_id &&
		    deviceid == gve_devs[i].device_id) {
			device_set_desc(dev, gve_devs[i].name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

static void
gve_free_sys_res_mem(struct gve_priv *priv)
{
	if (priv->msix_table != NULL)
		bus_release_resource(priv->dev, SYS_RES_MEMORY,
		    rman_get_rid(priv->msix_table), priv->msix_table);

	if (priv->db_bar != NULL)
		bus_release_resource(priv->dev, SYS_RES_MEMORY,
		    rman_get_rid(priv->db_bar), priv->db_bar);

	if (priv->reg_bar != NULL)
		bus_release_resource(priv->dev, SYS_RES_MEMORY,
		    rman_get_rid(priv->reg_bar), priv->reg_bar);
}
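
/*
 * Attach: map the register, doorbell, and MSI-X BARs, describe the device
 * over the admin queue, allocate the rings and the ifnet, and start the
 * service taskqueue that handles resets and link-state changes.
 */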
static int
gve_attach(device_t dev)
{
	struct gve_priv *priv;
	int rid;
	int err;

	priv = device_get_softc(dev);
	priv->dev = dev;
	GVE_IFACE_LOCK_INIT(priv->gve_iface_lock);

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(GVE_REGISTER_BAR);
	priv->reg_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (priv->reg_bar == NULL) {
		device_printf(dev, "Failed to allocate BAR0\n");
		err = ENXIO;
		goto abort;
	}

	rid = PCIR_BAR(GVE_DOORBELL_BAR);
	priv->db_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (priv->db_bar == NULL) {
		device_printf(dev, "Failed to allocate BAR2\n");
		err = ENXIO;
		goto abort;
	}

	rid = pci_msix_table_bar(priv->dev);
	priv->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (priv->msix_table == NULL) {
		device_printf(dev, "Failed to allocate msix table\n");
		err = ENXIO;
		goto abort;
	}

	err = gve_alloc_adminq_and_describe_device(priv);
	if (err != 0)
		goto abort;

	err = gve_configure_resources(priv);
	if (err != 0)
		goto abort;

	err = gve_alloc_rings(priv);
	if (err != 0)
		goto abort;

	err = gve_setup_ifnet(dev, priv);
	if (err != 0)
		goto abort;

	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;

	bus_write_multi_1(priv->reg_bar, DRIVER_VERSION, GVE_DRIVER_VERSION,
	    sizeof(GVE_DRIVER_VERSION) - 1);

	TASK_INIT(&priv->service_task, 0, gve_service_task, priv);
	priv->service_tq = taskqueue_create("gve service", M_WAITOK | M_ZERO,
	    taskqueue_thread_enqueue, &priv->service_tq);
	taskqueue_start_threads(&priv->service_tq, 1, PI_NET, "%s service tq",
	    device_get_nameunit(priv->dev));

	gve_setup_sysctl(priv);

	if (bootverbose)
		device_printf(priv->dev, "Successfully attached %s", GVE_DRIVER_VERSION);
	return (0);

abort:
	gve_free_rings(priv);
	gve_deconfigure_resources(priv);
	gve_release_adminq(priv);
	gve_free_sys_res_mem(priv);
	GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
	return (err);
}

static int
gve_detach(device_t dev)
{
	struct gve_priv *priv = device_get_softc(dev);
	if_t ifp = priv->ifp;

	ether_ifdetach(ifp);

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
	gve_destroy(priv);
	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);

	gve_free_rings(priv);
	gve_free_sys_res_mem(priv);
	GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);

	while (taskqueue_cancel(priv->service_tq, &priv->service_task, NULL))
		taskqueue_drain(priv->service_tq, &priv->service_task);
	taskqueue_free(priv->service_tq);

	if_free(ifp);
	return (bus_generic_detach(dev));
}

static device_method_t gve_methods[] = {
	DEVMETHOD(device_probe, gve_probe),
	DEVMETHOD(device_attach, gve_attach),
	DEVMETHOD(device_detach, gve_detach),
	DEVMETHOD_END
};

static driver_t gve_driver = {
	"gve",
	gve_methods,
	sizeof(struct gve_priv)
};

#if __FreeBSD_version < 1301503
static devclass_t gve_devclass;

DRIVER_MODULE(gve, pci, gve_driver, gve_devclass, 0, 0);
#else
DRIVER_MODULE(gve, pci, gve_driver, 0, 0);
#endif
MODULE_PNP_INFO("U16:vendor;U16:device", pci, gve, gve_devs,
    GVE_DEVS_COUNT);