/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "gve.h"
#include "gve_adminq.h"

#define GVE_DRIVER_VERSION "GVE-FBSD-1.0.1\n"
#define GVE_VERSION_MAJOR 1
#define GVE_VERSION_MINOR 0
#define GVE_VERSION_SUB 1

#define GVE_DEFAULT_RX_COPYBREAK 256

/* Devices supported by this driver. */
static struct gve_dev {
	uint16_t vendor_id;
	uint16_t device_id;
	const char *name;
} gve_devs[] = {
	{ PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC, "gVNIC" }
};

struct sx gve_global_lock;
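
/*
 * Tell the device which driver version and capability flags this driver
 * supports by filling a gve_driver_info block in DMA-able memory and
 * handing it to the device over the admin queue.
 */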
static int
gve_verify_driver_compatibility(struct gve_priv *priv)
{
	int err;
	struct gve_driver_info *driver_info;
	struct gve_dma_handle driver_info_mem;

	err = gve_dma_alloc_coherent(priv, sizeof(struct gve_driver_info),
	    PAGE_SIZE, &driver_info_mem);

	if (err != 0)
		return (ENOMEM);

	driver_info = driver_info_mem.cpu_addr;

	*driver_info = (struct gve_driver_info) {
		.os_type = 3, /* FreeBSD */
		.driver_major = GVE_VERSION_MAJOR,
		.driver_minor = GVE_VERSION_MINOR,
		.driver_sub = GVE_VERSION_SUB,
		.os_version_major = htobe32(FBSD_VERSION_MAJOR),
		.os_version_minor = htobe32(FBSD_VERSION_MINOR),
		.os_version_sub = htobe32(FBSD_VERSION_PATCH),
		.driver_capability_flags = {
			htobe64(GVE_DRIVER_CAPABILITY_FLAGS1),
			htobe64(GVE_DRIVER_CAPABILITY_FLAGS2),
			htobe64(GVE_DRIVER_CAPABILITY_FLAGS3),
			htobe64(GVE_DRIVER_CAPABILITY_FLAGS4),
		},
	};

	snprintf(driver_info->os_version_str1, sizeof(driver_info->os_version_str1),
	    "FreeBSD %u", __FreeBSD_version);

	bus_dmamap_sync(driver_info_mem.tag, driver_info_mem.map,
	    BUS_DMASYNC_PREREAD);

	err = gve_adminq_verify_driver_compatibility(priv,
	    sizeof(struct gve_driver_info), driver_info_mem.bus_addr);

	/* It's ok if the device doesn't support this */
	if (err == EOPNOTSUPP)
		err = 0;

	gve_dma_free_coherent(&driver_info_mem);

	return (err);
}

static int
gve_up(struct gve_priv *priv)
{
	if_t ifp = priv->ifp;
	int err;

	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);

	if (device_is_attached(priv->dev) == 0) {
		device_printf(priv->dev, "Cannot bring the iface up when detached\n");
		return (ENXIO);
	}

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
		return (0);

	if_clearhwassist(ifp);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, CSUM_IP6_TCP | CSUM_IP6_UDP, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	err = gve_register_qpls(priv);
	if (err != 0)
		goto reset;

	err = gve_create_rx_rings(priv);
	if (err != 0)
		goto reset;

	err = gve_create_tx_rings(priv);
	if (err != 0)
		goto reset;

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
		if_link_state_change(ifp, LINK_STATE_UP);
		gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	}

	gve_unmask_all_queue_irqs(priv);
	gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
	priv->interface_up_cnt++;
	return (0);

reset:
	gve_schedule_reset(priv);
	return (err);
}
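
/*
 * Tear down the data path: destroy the rx and tx rings, unregister the
 * queue page lists, mask the queue interrupts, and mark the link down.
 * Any failure along the way schedules a device reset.
 */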
static void
gve_down(struct gve_priv *priv)
{
	GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
		return;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	}

	if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	if (gve_destroy_rx_rings(priv) != 0)
		goto reset;

	if (gve_destroy_tx_rings(priv) != 0)
		goto reset;

	if (gve_unregister_qpls(priv) != 0)
		goto reset;

	gve_mask_all_queue_irqs(priv);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
	priv->interface_down_cnt++;
	return;

reset:
	gve_schedule_reset(priv);
}

static int
gve_set_mtu(if_t ifp, uint32_t new_mtu)
{
	struct gve_priv *priv = if_getsoftc(ifp);
	int err;

	if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) {
		device_printf(priv->dev, "Invalid new MTU setting. new mtu: %d max mtu: %d min mtu: %d\n",
		    new_mtu, priv->max_mtu, ETHERMIN);
		return (EINVAL);
	}

	err = gve_adminq_set_mtu(priv, new_mtu);
	if (err == 0) {
		if (bootverbose)
			device_printf(priv->dev, "MTU set to %d\n", new_mtu);
		if_setmtu(ifp, new_mtu);
	} else {
		device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu);
	}

	return (err);
}

static void
gve_init(void *arg)
{
	struct gve_priv *priv = (struct gve_priv *)arg;

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP)) {
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		gve_up(priv);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
	}
}

static int
gve_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct gve_priv *priv;
	struct ifreq *ifr;
	int rc = 0;

	priv = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFMTU:
		if (if_getmtu(ifp) == ifr->ifr_mtu)
			break;
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		gve_down(priv);
		gve_set_mtu(ifp, ifr->ifr_mtu);
		rc = gve_up(priv);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
		break;

	case SIOCSIFFLAGS:
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
				GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
				rc = gve_up(priv);
				GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
				gve_down(priv);
				GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
			}
		}
		break;

	case SIOCSIFCAP:
		if (ifr->ifr_reqcap == if_getcapenable(ifp))
			break;
		GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
		gve_down(priv);
		if_setcapenable(ifp, ifr->ifr_reqcap);
		rc = gve_up(priv);
		GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
		break;

	case SIOCSIFMEDIA:
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &priv->media, command);
		break;

	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return (rc);
}

static int
gve_media_change(if_t ifp)
{
	struct gve_priv *priv = if_getsoftc(ifp);

	device_printf(priv->dev, "Media change not supported\n");
	return (0);
}

static void
gve_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gve_priv *priv = if_getsoftc(ifp);

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_AUTO;
	} else {
		ifmr->ifm_active |= IFM_NONE;
	}

	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
}
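
/*
 * ifnet get_counter callback: returns the packet, byte, and drop totals
 * gathered by gve_accum_stats() for each direction.
 */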
static uint64_t
gve_get_counter(if_t ifp, ift_counter cnt)
{
	struct gve_priv *priv;
	uint64_t rpackets = 0;
	uint64_t tpackets = 0;
	uint64_t rbytes = 0;
	uint64_t tbytes = 0;
	uint64_t rx_dropped_pkt = 0;
	uint64_t tx_dropped_pkt = 0;

	priv = if_getsoftc(ifp);

	gve_accum_stats(priv, &rpackets, &rbytes, &rx_dropped_pkt, &tpackets,
	    &tbytes, &tx_dropped_pkt);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (rpackets);

	case IFCOUNTER_OPACKETS:
		return (tpackets);

	case IFCOUNTER_IBYTES:
		return (rbytes);

	case IFCOUNTER_OBYTES:
		return (tbytes);

	case IFCOUNTER_IQDROPS:
		return (rx_dropped_pkt);

	case IFCOUNTER_OQDROPS:
		return (tx_dropped_pkt);

	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
gve_setup_ifnet(device_t dev, struct gve_priv *priv)
{
	int caps = 0;
	if_t ifp;

	ifp = priv->ifp = if_alloc(IFT_ETHER);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setsoftc(ifp, priv);
	if_setdev(ifp, dev);
	if_setinitfn(ifp, gve_init);
	if_setioctlfn(ifp, gve_ioctl);
	if_settransmitfn(ifp, gve_xmit_ifp);
	if_setqflushfn(ifp, gve_qflush);

#if __FreeBSD_version >= 1400086
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
#else
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_KNOWSEPOCH);
#endif

	ifmedia_init(&priv->media, IFM_IMASK, gve_media_change, gve_media_status);
	if_setgetcounterfn(ifp, gve_get_counter);

	caps = IFCAP_RXCSUM |
	    IFCAP_TXCSUM |
	    IFCAP_TXCSUM_IPV6 |
	    IFCAP_TSO |
	    IFCAP_LRO;

	if ((priv->supported_features & GVE_SUP_JUMBO_FRAMES_MASK) != 0)
		caps |= IFCAP_JUMBO_MTU;

	if_setcapabilities(ifp, caps);
	if_setcapenable(ifp, caps);

	if (bootverbose)
		device_printf(priv->dev, "Setting initial MTU to %d\n", priv->max_mtu);
	if_setmtu(ifp, priv->max_mtu);

	ether_ifattach(ifp, priv->mac);

	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
}

static int
gve_alloc_counter_array(struct gve_priv *priv)
{
	int err;

	err = gve_dma_alloc_coherent(priv, sizeof(uint32_t) * priv->num_event_counters,
	    PAGE_SIZE, &priv->counter_array_mem);
	if (err != 0)
		return (err);

	priv->counters = priv->counter_array_mem.cpu_addr;
	return (0);
}

static void
gve_free_counter_array(struct gve_priv *priv)
{
	if (priv->counters != NULL)
		gve_dma_free_coherent(&priv->counter_array_mem);
	priv->counter_array_mem = (struct gve_dma_handle){};
}

static int
gve_alloc_irq_db_array(struct gve_priv *priv)
{
	int err;

	err = gve_dma_alloc_coherent(priv,
	    sizeof(struct gve_irq_db) * (priv->num_queues), PAGE_SIZE,
	    &priv->irqs_db_mem);
	if (err != 0)
		return (err);

	priv->irq_db_indices = priv->irqs_db_mem.cpu_addr;
	return (0);
}

static void
gve_free_irq_db_array(struct gve_priv *priv)
{
	if (priv->irq_db_indices != NULL)
		gve_dma_free_coherent(&priv->irqs_db_mem);
	priv->irqs_db_mem = (struct gve_dma_handle){};
}

static void
gve_free_rings(struct gve_priv *priv)
{
	gve_free_irqs(priv);
	gve_free_tx_rings(priv);
	gve_free_rx_rings(priv);
	gve_free_qpls(priv);
}
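
/*
 * Allocate the queue page lists, rx and tx rings, and their IRQs.
 * On any failure, everything allocated so far is freed again.
 */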
static int
gve_alloc_rings(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_qpls(priv);
	if (err != 0)
		goto abort;

	err = gve_alloc_rx_rings(priv);
	if (err != 0)
		goto abort;

	err = gve_alloc_tx_rings(priv);
	if (err != 0)
		goto abort;

	err = gve_alloc_irqs(priv);
	if (err != 0)
		goto abort;

	return (0);

abort:
	gve_free_rings(priv);
	return (err);
}

static void
gve_deconfigure_resources(struct gve_priv *priv)
{
	int err;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK)) {
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err != 0) {
			device_printf(priv->dev, "Failed to deconfigure device resources: err=%d\n",
			    err);
			return;
		}
		if (bootverbose)
			device_printf(priv->dev, "Deconfigured device resources\n");
		gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
	}

	gve_free_irq_db_array(priv);
	gve_free_counter_array(priv);
}

static int
gve_configure_resources(struct gve_priv *priv)
{
	int err;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK))
		return (0);

	err = gve_alloc_counter_array(priv);
	if (err != 0)
		return (err);

	err = gve_alloc_irq_db_array(priv);
	if (err != 0)
		goto abort;

	err = gve_adminq_configure_device_resources(priv);
	if (err != 0) {
		device_printf(priv->dev, "Failed to configure device resources: err=%d\n",
		    err);
		err = (ENXIO);
		goto abort;
	}

	gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
	if (bootverbose)
		device_printf(priv->dev, "Configured device resources\n");
	return (0);

abort:
	gve_deconfigure_resources(priv);
	return (err);
}

static void
gve_set_queue_cnts(struct gve_priv *priv)
{
	priv->tx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_TX_QUEUES);
	priv->rx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_RX_QUEUES);
	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;

	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = MIN(priv->default_num_queues,
		    priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = MIN(priv->default_num_queues,
		    priv->rx_cfg.num_queues);
	}

	priv->num_queues = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues;
	priv->mgmt_msix_idx = priv->num_queues;
}

static int
gve_alloc_adminq_and_describe_device(struct gve_priv *priv)
{
	int err;

	if ((err = gve_adminq_alloc(priv)) != 0)
		return (err);

	if ((err = gve_verify_driver_compatibility(priv)) != 0) {
		device_printf(priv->dev,
		    "Failed to verify driver compatibility: err=%d\n", err);
		goto abort;
	}

	if ((err = gve_adminq_describe_device(priv)) != 0)
		goto abort;

	gve_set_queue_cnts(priv);

	priv->num_registered_pages = 0;
	return (0);

abort:
	gve_release_adminq(priv);
	return (err);
}

void
gve_schedule_reset(struct gve_priv *priv)
{
	if (gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET))
		return;

	device_printf(priv->dev, "Scheduling reset task!\n");
	gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
	taskqueue_enqueue(priv->service_tq, &priv->service_task);
}

static void
gve_destroy(struct gve_priv *priv)
{
	gve_down(priv);
	gve_deconfigure_resources(priv);
	gve_release_adminq(priv);
}
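
/*
 * Bring the device back after a reset: re-allocate the admin queue,
 * reconfigure device resources, and bring the interface back up.
 */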
static void
gve_restore(struct gve_priv *priv)
{
	int err;

	err = gve_adminq_alloc(priv);
	if (err != 0)
		goto abort;

	err = gve_configure_resources(priv);
	if (err != 0)
		goto abort;

	err = gve_up(priv);
	if (err != 0)
		goto abort;

	return;

abort:
	device_printf(priv->dev, "Restore failed!\n");
	return;
}

static void
gve_handle_reset(struct gve_priv *priv)
{
	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET))
		return;

	gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
	gve_set_state_flag(priv, GVE_STATE_FLAG_IN_RESET);

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);

	if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);

	/*
	 * Releasing the adminq causes the NIC to destroy all resources
	 * registered with it, so by clearing the flags beneath we cause
	 * the subsequent gve_down call below to not attempt to tell the
	 * NIC to destroy these resources again.
	 *
	 * The call to gve_down is needed in the first place to refresh
	 * the state and the DMA-able memory within each driver ring.
	 */
	gve_release_adminq(priv);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK);
	gve_clear_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);

	gve_down(priv);
	gve_restore(priv);

	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);

	priv->reset_cnt++;
	gve_clear_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
}

static void
gve_handle_link_status(struct gve_priv *priv)
{
	uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
	bool link_up = status & GVE_DEVICE_STATUS_LINK_STATUS;

	if (link_up == gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
		return;

	if (link_up) {
		if (bootverbose)
			device_printf(priv->dev, "Device link is up.\n");
		if_link_state_change(priv->ifp, LINK_STATE_UP);
		gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	} else {
		device_printf(priv->dev, "Device link is down.\n");
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
	}
}

static void
gve_service_task(void *arg, int pending)
{
	struct gve_priv *priv = (struct gve_priv *)arg;
	uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);

	if (((GVE_DEVICE_STATUS_RESET_MASK & status) != 0) &&
	    !gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) {
		device_printf(priv->dev, "Device requested reset\n");
		gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
	}

	gve_handle_reset(priv);
	gve_handle_link_status(priv);
}

static int
gve_probe(device_t dev)
{
	uint16_t deviceid, vendorid;
	int i;

	vendorid = pci_get_vendor(dev);
	deviceid = pci_get_device(dev);

	for (i = 0; i < nitems(gve_devs); i++) {
		if (vendorid == gve_devs[i].vendor_id &&
		    deviceid == gve_devs[i].device_id) {
			device_set_desc(dev, gve_devs[i].name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
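
/*
 * Release the MSI-X table, doorbell BAR, and register BAR bus resources,
 * if they were allocated.
 */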
static void
gve_free_sys_res_mem(struct gve_priv *priv)
{
	if (priv->msix_table != NULL)
		bus_release_resource(priv->dev, SYS_RES_MEMORY,
		    rman_get_rid(priv->msix_table), priv->msix_table);

	if (priv->db_bar != NULL)
		bus_release_resource(priv->dev, SYS_RES_MEMORY,
		    rman_get_rid(priv->db_bar), priv->db_bar);

	if (priv->reg_bar != NULL)
		bus_release_resource(priv->dev, SYS_RES_MEMORY,
		    rman_get_rid(priv->reg_bar), priv->reg_bar);
}

static int
gve_attach(device_t dev)
{
	struct gve_priv *priv;
	int rid;
	int err;

	priv = device_get_softc(dev);
	priv->dev = dev;
	GVE_IFACE_LOCK_INIT(priv->gve_iface_lock);

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(GVE_REGISTER_BAR);
	priv->reg_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (priv->reg_bar == NULL) {
		device_printf(dev, "Failed to allocate BAR0\n");
		err = ENXIO;
		goto abort;
	}

	rid = PCIR_BAR(GVE_DOORBELL_BAR);
	priv->db_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (priv->db_bar == NULL) {
		device_printf(dev, "Failed to allocate BAR2\n");
		err = ENXIO;
		goto abort;
	}

	rid = pci_msix_table_bar(priv->dev);
	priv->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (priv->msix_table == NULL) {
		device_printf(dev, "Failed to allocate msix table\n");
		err = ENXIO;
		goto abort;
	}

	err = gve_alloc_adminq_and_describe_device(priv);
	if (err != 0)
		goto abort;

	err = gve_configure_resources(priv);
	if (err != 0)
		goto abort;

	err = gve_alloc_rings(priv);
	if (err != 0)
		goto abort;

	gve_setup_ifnet(dev, priv);

	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;

	bus_write_multi_1(priv->reg_bar, DRIVER_VERSION, GVE_DRIVER_VERSION,
	    sizeof(GVE_DRIVER_VERSION) - 1);

	TASK_INIT(&priv->service_task, 0, gve_service_task, priv);
	priv->service_tq = taskqueue_create("gve service", M_WAITOK | M_ZERO,
	    taskqueue_thread_enqueue, &priv->service_tq);
	taskqueue_start_threads(&priv->service_tq, 1, PI_NET, "%s service tq",
	    device_get_nameunit(priv->dev));

	gve_setup_sysctl(priv);

	if (bootverbose)
		device_printf(priv->dev, "Successfully attached %s", GVE_DRIVER_VERSION);
	return (0);

abort:
	gve_free_rings(priv);
	gve_deconfigure_resources(priv);
	gve_release_adminq(priv);
	gve_free_sys_res_mem(priv);
	GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
	return (err);
}

static int
gve_detach(device_t dev)
{
	struct gve_priv *priv = device_get_softc(dev);
	if_t ifp = priv->ifp;

	ether_ifdetach(ifp);

	GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
	gve_destroy(priv);
	GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);

	gve_free_rings(priv);
	gve_free_sys_res_mem(priv);
	GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);

	while (taskqueue_cancel(priv->service_tq, &priv->service_task, NULL))
		taskqueue_drain(priv->service_tq, &priv->service_task);
	taskqueue_free(priv->service_tq);

	if_free(ifp);
	return (bus_generic_detach(dev));
}

static device_method_t gve_methods[] = {
	DEVMETHOD(device_probe, gve_probe),
	DEVMETHOD(device_attach, gve_attach),
	DEVMETHOD(device_detach, gve_detach),
	DEVMETHOD_END
};

static driver_t gve_driver = {
	"gve",
	gve_methods,
	sizeof(struct gve_priv)
};

#if __FreeBSD_version < 1301503
static devclass_t gve_devclass;

DRIVER_MODULE(gve, pci, gve_driver, gve_devclass, 0, 0);
#else
DRIVER_MODULE(gve, pci, gve_driver, 0, 0);
#endif
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, gve, gve_devs,
    nitems(gve_devs));