1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/bus.h> 36 #include <sys/endian.h> 37 #include <sys/kernel.h> 38 #include <sys/kthread.h> 39 #include <sys/malloc.h> 40 #include <sys/mbuf.h> 41 #include <sys/module.h> 42 #include <sys/rman.h> 43 #include <sys/smp.h> 44 #include <sys/socket.h> 45 #include <sys/sockio.h> 46 #include <sys/sysctl.h> 47 #include <sys/taskqueue.h> 48 #include <sys/time.h> 49 #include <sys/eventhandler.h> 50 51 #include <machine/bus.h> 52 #include <machine/resource.h> 53 #include <machine/in_cksum.h> 54 55 #include <net/bpf.h> 56 #include <net/ethernet.h> 57 #include <net/if.h> 58 #include <net/if_var.h> 59 #include <net/if_arp.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_types.h> 63 #include <net/if_vlan_var.h> 64 65 #include <netinet/in_systm.h> 66 #include <netinet/in.h> 67 #include <netinet/if_ether.h> 68 #include <netinet/ip.h> 69 #include <netinet/ip6.h> 70 #include <netinet/tcp.h> 71 #include <netinet/udp.h> 72 73 #include <dev/pci/pcivar.h> 74 #include <dev/pci/pcireg.h> 75 76 #include <vm/vm.h> 77 #include <vm/pmap.h> 78 79 #include "ena_datapath.h" 80 #include "ena.h" 81 #include "ena_sysctl.h" 82 83 #ifdef DEV_NETMAP 84 #include "ena_netmap.h" 85 #endif /* DEV_NETMAP */ 86 87 /********************************************************* 88 * Function prototypes 89 *********************************************************/ 90 static int ena_probe(device_t); 91 static void ena_intr_msix_mgmnt(void *); 92 static void ena_free_pci_resources(struct ena_adapter *); 93 static int ena_change_mtu(if_t, int); 94 static inline void ena_alloc_counters(counter_u64_t *, int); 95 static inline void ena_free_counters(counter_u64_t *, int); 96 static inline void ena_reset_counters(counter_u64_t *, int); 97 static void ena_init_io_rings_common(struct ena_adapter *, 98 struct ena_ring *, uint16_t); 99 static void ena_init_io_rings(struct ena_adapter *); 100 static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int); 101 static void ena_free_all_io_rings_resources(struct ena_adapter *); 102 static int ena_setup_tx_dma_tag(struct ena_adapter *); 103 static int ena_free_tx_dma_tag(struct ena_adapter *); 104 static int ena_setup_rx_dma_tag(struct ena_adapter *); 105 static int ena_free_rx_dma_tag(struct ena_adapter *); 106 static void ena_release_all_tx_dmamap(struct ena_ring *); 107 static int ena_setup_tx_resources(struct ena_adapter *, int); 108 static void ena_free_tx_resources(struct ena_adapter *, int); 109 static int ena_setup_all_tx_resources(struct ena_adapter *); 110 static void ena_free_all_tx_resources(struct ena_adapter *); 111 static int ena_setup_rx_resources(struct ena_adapter *, unsigned int); 112 static void ena_free_rx_resources(struct ena_adapter *, unsigned int); 113 static int ena_setup_all_rx_resources(struct ena_adapter *); 114 static void ena_free_all_rx_resources(struct ena_adapter *); 115 static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *, 116 struct ena_rx_buffer *); 117 static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *, 118 struct ena_rx_buffer *); 119 static void ena_free_rx_bufs(struct ena_adapter *, unsigned int); 120 static void ena_refill_all_rx_bufs(struct ena_adapter *); 121 static void ena_free_all_rx_bufs(struct ena_adapter *); 122 static void ena_free_tx_bufs(struct ena_adapter *, unsigned int); 123 static void ena_free_all_tx_bufs(struct ena_adapter *); 124 static void ena_destroy_all_tx_queues(struct ena_adapter *); 125 static void ena_destroy_all_rx_queues(struct ena_adapter *); 126 static void ena_destroy_all_io_queues(struct ena_adapter *); 127 static int ena_create_io_queues(struct ena_adapter *); 128 static int ena_handle_msix(void *); 129 static int ena_enable_msix(struct ena_adapter *); 130 static void ena_setup_mgmnt_intr(struct ena_adapter *); 131 static int ena_setup_io_intr(struct ena_adapter *); 132 static int ena_request_mgmnt_irq(struct ena_adapter *); 133 static int ena_request_io_irq(struct ena_adapter *); 134 static void ena_free_mgmnt_irq(struct ena_adapter *); 135 static void ena_free_io_irq(struct ena_adapter *); 136 static void ena_free_irqs(struct ena_adapter*); 137 static void ena_disable_msix(struct ena_adapter *); 138 static void ena_unmask_all_io_irqs(struct ena_adapter *); 139 static int ena_rss_configure(struct ena_adapter *); 140 static int ena_up_complete(struct ena_adapter *); 141 static uint64_t ena_get_counter(if_t, ift_counter); 142 static int ena_media_change(if_t); 143 static void ena_media_status(if_t, struct ifmediareq *); 144 static void ena_init(void *); 145 static int ena_ioctl(if_t, u_long, caddr_t); 146 static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *); 147 static void ena_update_host_info(struct ena_admin_host_info *, if_t); 148 static void ena_update_hwassist(struct ena_adapter *); 149 static int ena_setup_ifnet(device_t, struct ena_adapter *, 150 struct ena_com_dev_get_features_ctx *); 151 static int ena_enable_wc(struct resource *); 152 static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *, 153 struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *); 154 static int ena_calc_io_queue_num(struct ena_adapter *, 155 struct ena_com_dev_get_features_ctx *); 156 static int ena_calc_queue_size(struct ena_adapter *, 157 struct ena_calc_queue_size_ctx *); 158 static int ena_handle_updated_queues(struct ena_adapter *, 159 struct ena_com_dev_get_features_ctx *); 160 static int ena_rss_init_default(struct ena_adapter *); 161 static void ena_rss_init_default_deferred(void *); 162 static void ena_config_host_info(struct ena_com_dev *, device_t); 163 static int ena_attach(device_t); 164 static int ena_detach(device_t); 165 static int ena_device_init(struct ena_adapter *, device_t, 166 struct ena_com_dev_get_features_ctx *, int *); 167 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *, 168 int); 169 static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *); 170 static void unimplemented_aenq_handler(void *, 171 struct ena_admin_aenq_entry *); 172 static void ena_timer_service(void *); 173 174 static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION; 175 176 static ena_vendor_info_t ena_vendor_info_array[] = { 177 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0}, 178 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0}, 179 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0}, 180 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0}, 181 /* Last entry */ 182 { 0, 0, 0 } 183 }; 184 185 /* 186 * Contains pointers to event handlers, e.g. link state chage. 187 */ 188 static struct ena_aenq_handlers aenq_handlers; 189 190 void 191 ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 192 { 193 if (error != 0) 194 return; 195 *(bus_addr_t *) arg = segs[0].ds_addr; 196 } 197 198 int 199 ena_dma_alloc(device_t dmadev, bus_size_t size, 200 ena_mem_handle_t *dma , int mapflags) 201 { 202 struct ena_adapter* adapter = device_get_softc(dmadev); 203 uint32_t maxsize; 204 uint64_t dma_space_addr; 205 int error; 206 207 maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; 208 209 dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width); 210 if (unlikely(dma_space_addr == 0)) 211 dma_space_addr = BUS_SPACE_MAXADDR; 212 213 error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */ 214 8, 0, /* alignment, bounds */ 215 dma_space_addr, /* lowaddr of exclusion window */ 216 BUS_SPACE_MAXADDR,/* highaddr of exclusion window */ 217 NULL, NULL, /* filter, filterarg */ 218 maxsize, /* maxsize */ 219 1, /* nsegments */ 220 maxsize, /* maxsegsize */ 221 BUS_DMA_ALLOCNOW, /* flags */ 222 NULL, /* lockfunc */ 223 NULL, /* lockarg */ 224 &dma->tag); 225 if (unlikely(error != 0)) { 226 ena_trace(ENA_ALERT, "bus_dma_tag_create failed: %d\n", error); 227 goto fail_tag; 228 } 229 230 error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr, 231 BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); 232 if (unlikely(error != 0)) { 233 ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n", 234 (uintmax_t)size, error); 235 goto fail_map_create; 236 } 237 238 dma->paddr = 0; 239 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, 240 size, ena_dmamap_callback, &dma->paddr, mapflags); 241 if (unlikely((error != 0) || (dma->paddr == 0))) { 242 ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error); 243 goto fail_map_load; 244 } 245 246 bus_dmamap_sync(dma->tag, dma->map, 247 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 248 249 return (0); 250 251 fail_map_load: 252 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 253 fail_map_create: 254 bus_dma_tag_destroy(dma->tag); 255 fail_tag: 256 dma->tag = NULL; 257 dma->vaddr = NULL; 258 dma->paddr = 0; 259 260 return (error); 261 } 262 263 static void 264 ena_free_pci_resources(struct ena_adapter *adapter) 265 { 266 device_t pdev = adapter->pdev; 267 268 if (adapter->memory != NULL) { 269 bus_release_resource(pdev, SYS_RES_MEMORY, 270 PCIR_BAR(ENA_MEM_BAR), adapter->memory); 271 } 272 273 if (adapter->registers != NULL) { 274 bus_release_resource(pdev, SYS_RES_MEMORY, 275 PCIR_BAR(ENA_REG_BAR), adapter->registers); 276 } 277 } 278 279 static int 280 ena_probe(device_t dev) 281 { 282 ena_vendor_info_t *ent; 283 char adapter_name[60]; 284 uint16_t pci_vendor_id = 0; 285 uint16_t pci_device_id = 0; 286 287 pci_vendor_id = pci_get_vendor(dev); 288 pci_device_id = pci_get_device(dev); 289 290 ent = ena_vendor_info_array; 291 while (ent->vendor_id != 0) { 292 if ((pci_vendor_id == ent->vendor_id) && 293 (pci_device_id == ent->device_id)) { 294 ena_trace(ENA_DBG, "vendor=%x device=%x\n", 295 pci_vendor_id, pci_device_id); 296 297 sprintf(adapter_name, DEVICE_DESC); 298 device_set_desc_copy(dev, adapter_name); 299 return (BUS_PROBE_DEFAULT); 300 } 301 302 ent++; 303 304 } 305 306 return (ENXIO); 307 } 308 309 static int 310 ena_change_mtu(if_t ifp, int new_mtu) 311 { 312 struct ena_adapter *adapter = if_getsoftc(ifp); 313 int rc; 314 315 if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) { 316 device_printf(adapter->pdev, "Invalid MTU setting. " 317 "new_mtu: %d max mtu: %d min mtu: %d\n", 318 new_mtu, adapter->max_mtu, ENA_MIN_MTU); 319 return (EINVAL); 320 } 321 322 rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); 323 if (likely(rc == 0)) { 324 ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu); 325 if_setmtu(ifp, new_mtu); 326 } else { 327 device_printf(adapter->pdev, "Failed to set MTU to %d\n", 328 new_mtu); 329 } 330 331 return (rc); 332 } 333 334 static inline void 335 ena_alloc_counters(counter_u64_t *begin, int size) 336 { 337 counter_u64_t *end = (counter_u64_t *)((char *)begin + size); 338 339 for (; begin < end; ++begin) 340 *begin = counter_u64_alloc(M_WAITOK); 341 } 342 343 static inline void 344 ena_free_counters(counter_u64_t *begin, int size) 345 { 346 counter_u64_t *end = (counter_u64_t *)((char *)begin + size); 347 348 for (; begin < end; ++begin) 349 counter_u64_free(*begin); 350 } 351 352 static inline void 353 ena_reset_counters(counter_u64_t *begin, int size) 354 { 355 counter_u64_t *end = (counter_u64_t *)((char *)begin + size); 356 357 for (; begin < end; ++begin) 358 counter_u64_zero(*begin); 359 } 360 361 static void 362 ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, 363 uint16_t qid) 364 { 365 366 ring->qid = qid; 367 ring->adapter = adapter; 368 ring->ena_dev = adapter->ena_dev; 369 ring->first_interrupt = false; 370 ring->no_interrupt_event_cnt = 0; 371 ring->rx_mbuf_sz = ena_mbuf_sz; 372 } 373 374 static void 375 ena_init_io_rings(struct ena_adapter *adapter) 376 { 377 struct ena_com_dev *ena_dev; 378 struct ena_ring *txr, *rxr; 379 struct ena_que *que; 380 int i; 381 382 ena_dev = adapter->ena_dev; 383 384 for (i = 0; i < adapter->num_queues; i++) { 385 txr = &adapter->tx_ring[i]; 386 rxr = &adapter->rx_ring[i]; 387 388 /* TX/RX common ring state */ 389 ena_init_io_rings_common(adapter, txr, i); 390 ena_init_io_rings_common(adapter, rxr, i); 391 392 /* TX specific ring state */ 393 txr->ring_size = adapter->tx_ring_size; 394 txr->tx_max_header_size = ena_dev->tx_max_header_size; 395 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; 396 txr->smoothed_interval = 397 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); 398 399 /* Allocate a buf ring */ 400 txr->buf_ring_size = adapter->buf_ring_size; 401 txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, 402 M_WAITOK, &txr->ring_mtx); 403 404 /* Alloc TX statistics. */ 405 ena_alloc_counters((counter_u64_t *)&txr->tx_stats, 406 sizeof(txr->tx_stats)); 407 408 /* RX specific ring state */ 409 rxr->ring_size = adapter->rx_ring_size; 410 rxr->smoothed_interval = 411 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); 412 413 /* Alloc RX statistics. */ 414 ena_alloc_counters((counter_u64_t *)&rxr->rx_stats, 415 sizeof(rxr->rx_stats)); 416 417 /* Initialize locks */ 418 snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)", 419 device_get_nameunit(adapter->pdev), i); 420 snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)", 421 device_get_nameunit(adapter->pdev), i); 422 423 mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF); 424 425 que = &adapter->que[i]; 426 que->adapter = adapter; 427 que->id = i; 428 que->tx_ring = txr; 429 que->rx_ring = rxr; 430 431 txr->que = que; 432 rxr->que = que; 433 434 rxr->empty_rx_queue = 0; 435 } 436 } 437 438 static void 439 ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid) 440 { 441 struct ena_ring *txr = &adapter->tx_ring[qid]; 442 struct ena_ring *rxr = &adapter->rx_ring[qid]; 443 444 ena_free_counters((counter_u64_t *)&txr->tx_stats, 445 sizeof(txr->tx_stats)); 446 ena_free_counters((counter_u64_t *)&rxr->rx_stats, 447 sizeof(rxr->rx_stats)); 448 449 ENA_RING_MTX_LOCK(txr); 450 drbr_free(txr->br, M_DEVBUF); 451 ENA_RING_MTX_UNLOCK(txr); 452 453 mtx_destroy(&txr->ring_mtx); 454 } 455 456 static void 457 ena_free_all_io_rings_resources(struct ena_adapter *adapter) 458 { 459 int i; 460 461 for (i = 0; i < adapter->num_queues; i++) 462 ena_free_io_ring_resources(adapter, i); 463 464 } 465 466 static int 467 ena_setup_tx_dma_tag(struct ena_adapter *adapter) 468 { 469 int ret; 470 471 /* Create DMA tag for Tx buffers */ 472 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), 473 1, 0, /* alignment, bounds */ 474 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ 475 BUS_SPACE_MAXADDR, /* highaddr of excl window */ 476 NULL, NULL, /* filter, filterarg */ 477 ENA_TSO_MAXSIZE, /* maxsize */ 478 adapter->max_tx_sgl_size - 1, /* nsegments */ 479 ENA_TSO_MAXSIZE, /* maxsegsize */ 480 0, /* flags */ 481 NULL, /* lockfunc */ 482 NULL, /* lockfuncarg */ 483 &adapter->tx_buf_tag); 484 485 return (ret); 486 } 487 488 static int 489 ena_free_tx_dma_tag(struct ena_adapter *adapter) 490 { 491 int ret; 492 493 ret = bus_dma_tag_destroy(adapter->tx_buf_tag); 494 495 if (likely(ret == 0)) 496 adapter->tx_buf_tag = NULL; 497 498 return (ret); 499 } 500 501 static int 502 ena_setup_rx_dma_tag(struct ena_adapter *adapter) 503 { 504 int ret; 505 506 /* Create DMA tag for Rx buffers*/ 507 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */ 508 1, 0, /* alignment, bounds */ 509 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ 510 BUS_SPACE_MAXADDR, /* highaddr of excl window */ 511 NULL, NULL, /* filter, filterarg */ 512 ena_mbuf_sz, /* maxsize */ 513 adapter->max_rx_sgl_size, /* nsegments */ 514 ena_mbuf_sz, /* maxsegsize */ 515 0, /* flags */ 516 NULL, /* lockfunc */ 517 NULL, /* lockarg */ 518 &adapter->rx_buf_tag); 519 520 return (ret); 521 } 522 523 static int 524 ena_free_rx_dma_tag(struct ena_adapter *adapter) 525 { 526 int ret; 527 528 ret = bus_dma_tag_destroy(adapter->rx_buf_tag); 529 530 if (likely(ret == 0)) 531 adapter->rx_buf_tag = NULL; 532 533 return (ret); 534 } 535 536 static void 537 ena_release_all_tx_dmamap(struct ena_ring *tx_ring) 538 { 539 struct ena_adapter *adapter = tx_ring->adapter; 540 struct ena_tx_buffer *tx_info; 541 bus_dma_tag_t tx_tag = adapter->tx_buf_tag;; 542 int i; 543 #ifdef DEV_NETMAP 544 struct ena_netmap_tx_info *nm_info; 545 int j; 546 #endif /* DEV_NETMAP */ 547 548 for (i = 0; i < tx_ring->ring_size; ++i) { 549 tx_info = &tx_ring->tx_buffer_info[i]; 550 #ifdef DEV_NETMAP 551 if (adapter->ifp->if_capenable & IFCAP_NETMAP) { 552 nm_info = &tx_info->nm_info; 553 for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) { 554 if (nm_info->map_seg[j] != NULL) { 555 bus_dmamap_destroy(tx_tag, 556 nm_info->map_seg[j]); 557 nm_info->map_seg[j] = NULL; 558 } 559 } 560 } 561 #endif /* DEV_NETMAP */ 562 if (tx_info->dmamap != NULL) { 563 bus_dmamap_destroy(tx_tag, tx_info->dmamap); 564 tx_info->dmamap = NULL; 565 } 566 } 567 } 568 569 /** 570 * ena_setup_tx_resources - allocate Tx resources (Descriptors) 571 * @adapter: network interface device structure 572 * @qid: queue index 573 * 574 * Returns 0 on success, otherwise on failure. 575 **/ 576 static int 577 ena_setup_tx_resources(struct ena_adapter *adapter, int qid) 578 { 579 struct ena_que *que = &adapter->que[qid]; 580 struct ena_ring *tx_ring = que->tx_ring; 581 int size, i, err; 582 #ifdef DEV_NETMAP 583 bus_dmamap_t *map; 584 int j; 585 586 ena_netmap_reset_tx_ring(adapter, qid); 587 #endif /* DEV_NETMAP */ 588 589 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; 590 591 tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 592 if (unlikely(tx_ring->tx_buffer_info == NULL)) 593 return (ENOMEM); 594 595 size = sizeof(uint16_t) * tx_ring->ring_size; 596 tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 597 if (unlikely(tx_ring->free_tx_ids == NULL)) 598 goto err_buf_info_free; 599 600 size = tx_ring->tx_max_header_size; 601 tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF, 602 M_NOWAIT | M_ZERO); 603 if (unlikely(tx_ring->push_buf_intermediate_buf == NULL)) 604 goto err_tx_ids_free; 605 606 /* Req id stack for TX OOO completions */ 607 for (i = 0; i < tx_ring->ring_size; i++) 608 tx_ring->free_tx_ids[i] = i; 609 610 /* Reset TX statistics. */ 611 ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats, 612 sizeof(tx_ring->tx_stats)); 613 614 tx_ring->next_to_use = 0; 615 tx_ring->next_to_clean = 0; 616 tx_ring->acum_pkts = 0; 617 618 /* Make sure that drbr is empty */ 619 ENA_RING_MTX_LOCK(tx_ring); 620 drbr_flush(adapter->ifp, tx_ring->br); 621 ENA_RING_MTX_UNLOCK(tx_ring); 622 623 /* ... and create the buffer DMA maps */ 624 for (i = 0; i < tx_ring->ring_size; i++) { 625 err = bus_dmamap_create(adapter->tx_buf_tag, 0, 626 &tx_ring->tx_buffer_info[i].dmamap); 627 if (unlikely(err != 0)) { 628 ena_trace(ENA_ALERT, 629 "Unable to create Tx DMA map for buffer %d\n", 630 i); 631 goto err_map_release; 632 } 633 634 #ifdef DEV_NETMAP 635 if (adapter->ifp->if_capenable & IFCAP_NETMAP) { 636 map = tx_ring->tx_buffer_info[i].nm_info.map_seg; 637 for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { 638 err = bus_dmamap_create(adapter->tx_buf_tag, 0, 639 &map[j]); 640 if (unlikely(err != 0)) { 641 ena_trace(ENA_ALERT, "Unable to create " 642 "Tx DMA for buffer %d %d\n", i, j); 643 goto err_map_release; 644 } 645 } 646 } 647 #endif /* DEV_NETMAP */ 648 } 649 650 /* Allocate taskqueues */ 651 TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring); 652 tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT, 653 taskqueue_thread_enqueue, &tx_ring->enqueue_tq); 654 if (unlikely(tx_ring->enqueue_tq == NULL)) { 655 ena_trace(ENA_ALERT, 656 "Unable to create taskqueue for enqueue task\n"); 657 i = tx_ring->ring_size; 658 goto err_map_release; 659 } 660 661 tx_ring->running = true; 662 663 taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, 664 "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu); 665 666 return (0); 667 668 err_map_release: 669 ena_release_all_tx_dmamap(tx_ring); 670 err_tx_ids_free: 671 free(tx_ring->free_tx_ids, M_DEVBUF); 672 tx_ring->free_tx_ids = NULL; 673 err_buf_info_free: 674 free(tx_ring->tx_buffer_info, M_DEVBUF); 675 tx_ring->tx_buffer_info = NULL; 676 677 return (ENOMEM); 678 } 679 680 /** 681 * ena_free_tx_resources - Free Tx Resources per Queue 682 * @adapter: network interface device structure 683 * @qid: queue index 684 * 685 * Free all transmit software resources 686 **/ 687 static void 688 ena_free_tx_resources(struct ena_adapter *adapter, int qid) 689 { 690 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 691 #ifdef DEV_NETMAP 692 struct ena_netmap_tx_info *nm_info; 693 int j; 694 #endif /* DEV_NETMAP */ 695 696 while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, 697 NULL)) 698 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 699 700 taskqueue_free(tx_ring->enqueue_tq); 701 702 ENA_RING_MTX_LOCK(tx_ring); 703 /* Flush buffer ring, */ 704 drbr_flush(adapter->ifp, tx_ring->br); 705 706 /* Free buffer DMA maps, */ 707 for (int i = 0; i < tx_ring->ring_size; i++) { 708 bus_dmamap_sync(adapter->tx_buf_tag, 709 tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE); 710 bus_dmamap_unload(adapter->tx_buf_tag, 711 tx_ring->tx_buffer_info[i].dmamap); 712 bus_dmamap_destroy(adapter->tx_buf_tag, 713 tx_ring->tx_buffer_info[i].dmamap); 714 715 #ifdef DEV_NETMAP 716 if (adapter->ifp->if_capenable & IFCAP_NETMAP) { 717 nm_info = &tx_ring->tx_buffer_info[i].nm_info; 718 for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { 719 if (nm_info->socket_buf_idx[j] != 0) { 720 bus_dmamap_sync(adapter->tx_buf_tag, 721 nm_info->map_seg[j], 722 BUS_DMASYNC_POSTWRITE); 723 ena_netmap_unload(adapter, 724 nm_info->map_seg[j]); 725 } 726 bus_dmamap_destroy(adapter->tx_buf_tag, 727 nm_info->map_seg[j]); 728 nm_info->socket_buf_idx[j] = 0; 729 } 730 } 731 #endif /* DEV_NETMAP */ 732 733 m_freem(tx_ring->tx_buffer_info[i].mbuf); 734 tx_ring->tx_buffer_info[i].mbuf = NULL; 735 } 736 ENA_RING_MTX_UNLOCK(tx_ring); 737 738 /* And free allocated memory. */ 739 free(tx_ring->tx_buffer_info, M_DEVBUF); 740 tx_ring->tx_buffer_info = NULL; 741 742 free(tx_ring->free_tx_ids, M_DEVBUF); 743 tx_ring->free_tx_ids = NULL; 744 745 ENA_MEM_FREE(adapter->ena_dev->dmadev, 746 tx_ring->push_buf_intermediate_buf); 747 tx_ring->push_buf_intermediate_buf = NULL; 748 } 749 750 /** 751 * ena_setup_all_tx_resources - allocate all queues Tx resources 752 * @adapter: network interface device structure 753 * 754 * Returns 0 on success, otherwise on failure. 755 **/ 756 static int 757 ena_setup_all_tx_resources(struct ena_adapter *adapter) 758 { 759 int i, rc; 760 761 for (i = 0; i < adapter->num_queues; i++) { 762 rc = ena_setup_tx_resources(adapter, i); 763 if (rc != 0) { 764 device_printf(adapter->pdev, 765 "Allocation for Tx Queue %u failed\n", i); 766 goto err_setup_tx; 767 } 768 } 769 770 return (0); 771 772 err_setup_tx: 773 /* Rewind the index freeing the rings as we go */ 774 while (i--) 775 ena_free_tx_resources(adapter, i); 776 return (rc); 777 } 778 779 /** 780 * ena_free_all_tx_resources - Free Tx Resources for All Queues 781 * @adapter: network interface device structure 782 * 783 * Free all transmit software resources 784 **/ 785 static void 786 ena_free_all_tx_resources(struct ena_adapter *adapter) 787 { 788 int i; 789 790 for (i = 0; i < adapter->num_queues; i++) 791 ena_free_tx_resources(adapter, i); 792 } 793 794 /** 795 * ena_setup_rx_resources - allocate Rx resources (Descriptors) 796 * @adapter: network interface device structure 797 * @qid: queue index 798 * 799 * Returns 0 on success, otherwise on failure. 800 **/ 801 static int 802 ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid) 803 { 804 struct ena_que *que = &adapter->que[qid]; 805 struct ena_ring *rx_ring = que->rx_ring; 806 int size, err, i; 807 808 size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size; 809 810 #ifdef DEV_NETMAP 811 ena_netmap_reset_rx_ring(adapter, qid); 812 rx_ring->initialized = false; 813 #endif /* DEV_NETMAP */ 814 815 /* 816 * Alloc extra element so in rx path 817 * we can always prefetch rx_info + 1 818 */ 819 size += sizeof(struct ena_rx_buffer); 820 821 rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO); 822 823 size = sizeof(uint16_t) * rx_ring->ring_size; 824 rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK); 825 826 for (i = 0; i < rx_ring->ring_size; i++) 827 rx_ring->free_rx_ids[i] = i; 828 829 /* Reset RX statistics. */ 830 ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats, 831 sizeof(rx_ring->rx_stats)); 832 833 rx_ring->next_to_clean = 0; 834 rx_ring->next_to_use = 0; 835 836 /* ... and create the buffer DMA maps */ 837 for (i = 0; i < rx_ring->ring_size; i++) { 838 err = bus_dmamap_create(adapter->rx_buf_tag, 0, 839 &(rx_ring->rx_buffer_info[i].map)); 840 if (err != 0) { 841 ena_trace(ENA_ALERT, 842 "Unable to create Rx DMA map for buffer %d\n", i); 843 goto err_buf_info_unmap; 844 } 845 } 846 847 /* Create LRO for the ring */ 848 if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) { 849 int err = tcp_lro_init(&rx_ring->lro); 850 if (err != 0) { 851 device_printf(adapter->pdev, 852 "LRO[%d] Initialization failed!\n", qid); 853 } else { 854 ena_trace(ENA_INFO, 855 "RX Soft LRO[%d] Initialized\n", qid); 856 rx_ring->lro.ifp = adapter->ifp; 857 } 858 } 859 860 return (0); 861 862 err_buf_info_unmap: 863 while (i--) { 864 bus_dmamap_destroy(adapter->rx_buf_tag, 865 rx_ring->rx_buffer_info[i].map); 866 } 867 868 free(rx_ring->free_rx_ids, M_DEVBUF); 869 rx_ring->free_rx_ids = NULL; 870 free(rx_ring->rx_buffer_info, M_DEVBUF); 871 rx_ring->rx_buffer_info = NULL; 872 return (ENOMEM); 873 } 874 875 /** 876 * ena_free_rx_resources - Free Rx Resources 877 * @adapter: network interface device structure 878 * @qid: queue index 879 * 880 * Free all receive software resources 881 **/ 882 static void 883 ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid) 884 { 885 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 886 887 /* Free buffer DMA maps, */ 888 for (int i = 0; i < rx_ring->ring_size; i++) { 889 bus_dmamap_sync(adapter->rx_buf_tag, 890 rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD); 891 m_freem(rx_ring->rx_buffer_info[i].mbuf); 892 rx_ring->rx_buffer_info[i].mbuf = NULL; 893 bus_dmamap_unload(adapter->rx_buf_tag, 894 rx_ring->rx_buffer_info[i].map); 895 bus_dmamap_destroy(adapter->rx_buf_tag, 896 rx_ring->rx_buffer_info[i].map); 897 } 898 899 /* free LRO resources, */ 900 tcp_lro_free(&rx_ring->lro); 901 902 /* free allocated memory */ 903 free(rx_ring->rx_buffer_info, M_DEVBUF); 904 rx_ring->rx_buffer_info = NULL; 905 906 free(rx_ring->free_rx_ids, M_DEVBUF); 907 rx_ring->free_rx_ids = NULL; 908 } 909 910 /** 911 * ena_setup_all_rx_resources - allocate all queues Rx resources 912 * @adapter: network interface device structure 913 * 914 * Returns 0 on success, otherwise on failure. 915 **/ 916 static int 917 ena_setup_all_rx_resources(struct ena_adapter *adapter) 918 { 919 int i, rc = 0; 920 921 for (i = 0; i < adapter->num_queues; i++) { 922 rc = ena_setup_rx_resources(adapter, i); 923 if (rc != 0) { 924 device_printf(adapter->pdev, 925 "Allocation for Rx Queue %u failed\n", i); 926 goto err_setup_rx; 927 } 928 } 929 return (0); 930 931 err_setup_rx: 932 /* rewind the index freeing the rings as we go */ 933 while (i--) 934 ena_free_rx_resources(adapter, i); 935 return (rc); 936 } 937 938 /** 939 * ena_free_all_rx_resources - Free Rx resources for all queues 940 * @adapter: network interface device structure 941 * 942 * Free all receive software resources 943 **/ 944 static void 945 ena_free_all_rx_resources(struct ena_adapter *adapter) 946 { 947 int i; 948 949 for (i = 0; i < adapter->num_queues; i++) 950 ena_free_rx_resources(adapter, i); 951 } 952 953 static inline int 954 ena_alloc_rx_mbuf(struct ena_adapter *adapter, 955 struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) 956 { 957 struct ena_com_buf *ena_buf; 958 bus_dma_segment_t segs[1]; 959 int nsegs, error; 960 int mlen; 961 962 /* if previous allocated frag is not used */ 963 if (unlikely(rx_info->mbuf != NULL)) 964 return (0); 965 966 /* Get mbuf using UMA allocator */ 967 rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 968 rx_ring->rx_mbuf_sz); 969 970 if (unlikely(rx_info->mbuf == NULL)) { 971 counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1); 972 rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 973 if (unlikely(rx_info->mbuf == NULL)) { 974 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); 975 return (ENOMEM); 976 } 977 mlen = MCLBYTES; 978 } else { 979 mlen = rx_ring->rx_mbuf_sz; 980 } 981 /* Set mbuf length*/ 982 rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen; 983 984 /* Map packets for DMA */ 985 ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, 986 "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n", 987 adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len); 988 error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, 989 rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 990 if (unlikely((error != 0) || (nsegs != 1))) { 991 ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, " 992 "nsegs: %d\n", error, nsegs); 993 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); 994 goto exit; 995 996 } 997 998 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); 999 1000 ena_buf = &rx_info->ena_buf; 1001 ena_buf->paddr = segs[0].ds_addr; 1002 ena_buf->len = mlen; 1003 1004 ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, 1005 "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n", 1006 rx_info->mbuf, rx_info,ena_buf->len, (uintmax_t)ena_buf->paddr); 1007 1008 return (0); 1009 1010 exit: 1011 m_freem(rx_info->mbuf); 1012 rx_info->mbuf = NULL; 1013 return (EFAULT); 1014 } 1015 1016 static void 1017 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, 1018 struct ena_rx_buffer *rx_info) 1019 { 1020 1021 if (rx_info->mbuf == NULL) { 1022 ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n"); 1023 return; 1024 } 1025 1026 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 1027 BUS_DMASYNC_POSTREAD); 1028 bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map); 1029 m_freem(rx_info->mbuf); 1030 rx_info->mbuf = NULL; 1031 } 1032 1033 /** 1034 * ena_refill_rx_bufs - Refills ring with descriptors 1035 * @rx_ring: the ring which we want to feed with free descriptors 1036 * @num: number of descriptors to refill 1037 * Refills the ring with newly allocated DMA-mapped mbufs for receiving 1038 **/ 1039 int 1040 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) 1041 { 1042 struct ena_adapter *adapter = rx_ring->adapter; 1043 uint16_t next_to_use, req_id; 1044 uint32_t i; 1045 int rc; 1046 1047 ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n", 1048 rx_ring->qid); 1049 1050 next_to_use = rx_ring->next_to_use; 1051 1052 for (i = 0; i < num; i++) { 1053 struct ena_rx_buffer *rx_info; 1054 1055 ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, 1056 "RX buffer - next to use: %d\n", next_to_use); 1057 1058 req_id = rx_ring->free_rx_ids[next_to_use]; 1059 rx_info = &rx_ring->rx_buffer_info[req_id]; 1060 #ifdef DEV_NETMAP 1061 if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) 1062 rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info); 1063 else 1064 #endif /* DEV_NETMAP */ 1065 rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); 1066 if (unlikely(rc != 0)) { 1067 ena_trace(ENA_WARNING, 1068 "failed to alloc buffer for rx queue %d\n", 1069 rx_ring->qid); 1070 break; 1071 } 1072 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, 1073 &rx_info->ena_buf, req_id); 1074 if (unlikely(rc != 0)) { 1075 ena_trace(ENA_WARNING, 1076 "failed to add buffer for rx queue %d\n", 1077 rx_ring->qid); 1078 break; 1079 } 1080 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 1081 rx_ring->ring_size); 1082 } 1083 1084 if (unlikely(i < num)) { 1085 counter_u64_add(rx_ring->rx_stats.refil_partial, 1); 1086 ena_trace(ENA_WARNING, 1087 "refilled rx qid %d with only %d mbufs (from %d)\n", 1088 rx_ring->qid, i, num); 1089 } 1090 1091 if (likely(i != 0)) { 1092 wmb(); 1093 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 1094 } 1095 rx_ring->next_to_use = next_to_use; 1096 return (i); 1097 } 1098 1099 static void 1100 ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid) 1101 { 1102 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 1103 unsigned int i; 1104 1105 for (i = 0; i < rx_ring->ring_size; i++) { 1106 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; 1107 1108 if (rx_info->mbuf != NULL) 1109 ena_free_rx_mbuf(adapter, rx_ring, rx_info); 1110 #ifdef DEV_NETMAP 1111 if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) && 1112 (adapter->ifp->if_capenable & IFCAP_NETMAP)) { 1113 if (rx_info->netmap_buf_idx != 0) 1114 ena_netmap_free_rx_slot(adapter, rx_ring, 1115 rx_info); 1116 } 1117 #endif /* DEV_NETMAP */ 1118 } 1119 } 1120 1121 /** 1122 * ena_refill_all_rx_bufs - allocate all queues Rx buffers 1123 * @adapter: network interface device structure 1124 * 1125 */ 1126 static void 1127 ena_refill_all_rx_bufs(struct ena_adapter *adapter) 1128 { 1129 struct ena_ring *rx_ring; 1130 int i, rc, bufs_num; 1131 1132 for (i = 0; i < adapter->num_queues; i++) { 1133 rx_ring = &adapter->rx_ring[i]; 1134 bufs_num = rx_ring->ring_size - 1; 1135 rc = ena_refill_rx_bufs(rx_ring, bufs_num); 1136 if (unlikely(rc != bufs_num)) 1137 ena_trace(ENA_WARNING, "refilling Queue %d failed. " 1138 "Allocated %d buffers from: %d\n", i, rc, bufs_num); 1139 #ifdef DEV_NETMAP 1140 rx_ring->initialized = true; 1141 #endif /* DEV_NETMAP */ 1142 } 1143 } 1144 1145 static void 1146 ena_free_all_rx_bufs(struct ena_adapter *adapter) 1147 { 1148 int i; 1149 1150 for (i = 0; i < adapter->num_queues; i++) 1151 ena_free_rx_bufs(adapter, i); 1152 } 1153 1154 /** 1155 * ena_free_tx_bufs - Free Tx Buffers per Queue 1156 * @adapter: network interface device structure 1157 * @qid: queue index 1158 **/ 1159 static void 1160 ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) 1161 { 1162 bool print_once = true; 1163 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 1164 1165 ENA_RING_MTX_LOCK(tx_ring); 1166 for (int i = 0; i < tx_ring->ring_size; i++) { 1167 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 1168 1169 if (tx_info->mbuf == NULL) 1170 continue; 1171 1172 if (print_once) { 1173 device_printf(adapter->pdev, 1174 "free uncompleted tx mbuf qid %d idx 0x%x\n", 1175 qid, i); 1176 print_once = false; 1177 } else { 1178 ena_trace(ENA_DBG, 1179 "free uncompleted tx mbuf qid %d idx 0x%x\n", 1180 qid, i); 1181 } 1182 1183 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, 1184 BUS_DMASYNC_POSTWRITE); 1185 bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); 1186 1187 m_free(tx_info->mbuf); 1188 tx_info->mbuf = NULL; 1189 } 1190 ENA_RING_MTX_UNLOCK(tx_ring); 1191 } 1192 1193 static void 1194 ena_free_all_tx_bufs(struct ena_adapter *adapter) 1195 { 1196 1197 for (int i = 0; i < adapter->num_queues; i++) 1198 ena_free_tx_bufs(adapter, i); 1199 } 1200 1201 static void 1202 ena_destroy_all_tx_queues(struct ena_adapter *adapter) 1203 { 1204 uint16_t ena_qid; 1205 int i; 1206 1207 for (i = 0; i < adapter->num_queues; i++) { 1208 ena_qid = ENA_IO_TXQ_IDX(i); 1209 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 1210 } 1211 } 1212 1213 static void 1214 ena_destroy_all_rx_queues(struct ena_adapter *adapter) 1215 { 1216 uint16_t ena_qid; 1217 int i; 1218 1219 for (i = 0; i < adapter->num_queues; i++) { 1220 ena_qid = ENA_IO_RXQ_IDX(i); 1221 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 1222 } 1223 } 1224 1225 static void 1226 ena_destroy_all_io_queues(struct ena_adapter *adapter) 1227 { 1228 struct ena_que *queue; 1229 int i; 1230 1231 for (i = 0; i < adapter->num_queues; i++) { 1232 queue = &adapter->que[i]; 1233 while (taskqueue_cancel(queue->cleanup_tq, 1234 &queue->cleanup_task, NULL)) 1235 taskqueue_drain(queue->cleanup_tq, 1236 &queue->cleanup_task); 1237 taskqueue_free(queue->cleanup_tq); 1238 } 1239 1240 ena_destroy_all_tx_queues(adapter); 1241 ena_destroy_all_rx_queues(adapter); 1242 } 1243 1244 static int 1245 ena_create_io_queues(struct ena_adapter *adapter) 1246 { 1247 struct ena_com_dev *ena_dev = adapter->ena_dev; 1248 struct ena_com_create_io_ctx ctx; 1249 struct ena_ring *ring; 1250 struct ena_que *queue; 1251 uint16_t ena_qid; 1252 uint32_t msix_vector; 1253 int rc, i; 1254 1255 /* Create TX queues */ 1256 for (i = 0; i < adapter->num_queues; i++) { 1257 msix_vector = ENA_IO_IRQ_IDX(i); 1258 ena_qid = ENA_IO_TXQ_IDX(i); 1259 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1260 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1261 ctx.queue_size = adapter->tx_ring_size; 1262 ctx.msix_vector = msix_vector; 1263 ctx.qid = ena_qid; 1264 rc = ena_com_create_io_queue(ena_dev, &ctx); 1265 if (rc != 0) { 1266 device_printf(adapter->pdev, 1267 "Failed to create io TX queue #%d rc: %d\n", i, rc); 1268 goto err_tx; 1269 } 1270 ring = &adapter->tx_ring[i]; 1271 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1272 &ring->ena_com_io_sq, 1273 &ring->ena_com_io_cq); 1274 if (rc != 0) { 1275 device_printf(adapter->pdev, 1276 "Failed to get TX queue handlers. TX queue num" 1277 " %d rc: %d\n", i, rc); 1278 ena_com_destroy_io_queue(ena_dev, ena_qid); 1279 goto err_tx; 1280 } 1281 } 1282 1283 /* Create RX queues */ 1284 for (i = 0; i < adapter->num_queues; i++) { 1285 msix_vector = ENA_IO_IRQ_IDX(i); 1286 ena_qid = ENA_IO_RXQ_IDX(i); 1287 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1288 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1289 ctx.queue_size = adapter->rx_ring_size; 1290 ctx.msix_vector = msix_vector; 1291 ctx.qid = ena_qid; 1292 rc = ena_com_create_io_queue(ena_dev, &ctx); 1293 if (unlikely(rc != 0)) { 1294 device_printf(adapter->pdev, 1295 "Failed to create io RX queue[%d] rc: %d\n", i, rc); 1296 goto err_rx; 1297 } 1298 1299 ring = &adapter->rx_ring[i]; 1300 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1301 &ring->ena_com_io_sq, 1302 &ring->ena_com_io_cq); 1303 if (unlikely(rc != 0)) { 1304 device_printf(adapter->pdev, 1305 "Failed to get RX queue handlers. RX queue num" 1306 " %d rc: %d\n", i, rc); 1307 ena_com_destroy_io_queue(ena_dev, ena_qid); 1308 goto err_rx; 1309 } 1310 } 1311 1312 for (i = 0; i < adapter->num_queues; i++) { 1313 queue = &adapter->que[i]; 1314 1315 NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 1316 queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 1317 M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 1318 1319 taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET, 1320 "%s queue %d cleanup", 1321 device_get_nameunit(adapter->pdev), i); 1322 } 1323 1324 return (0); 1325 1326 err_rx: 1327 while (i--) 1328 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 1329 i = adapter->num_queues; 1330 err_tx: 1331 while (i--) 1332 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 1333 1334 return (ENXIO); 1335 } 1336 1337 /********************************************************************* 1338 * 1339 * MSIX & Interrupt Service routine 1340 * 1341 **********************************************************************/ 1342 1343 /** 1344 * ena_handle_msix - MSIX Interrupt Handler for admin/async queue 1345 * @arg: interrupt number 1346 **/ 1347 static void 1348 ena_intr_msix_mgmnt(void *arg) 1349 { 1350 struct ena_adapter *adapter = (struct ena_adapter *)arg; 1351 1352 ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 1353 if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))) 1354 ena_com_aenq_intr_handler(adapter->ena_dev, arg); 1355 } 1356 1357 /** 1358 * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx 1359 * @arg: queue 1360 **/ 1361 static int 1362 ena_handle_msix(void *arg) 1363 { 1364 struct ena_que *queue = arg; 1365 struct ena_adapter *adapter = queue->adapter; 1366 if_t ifp = adapter->ifp; 1367 1368 if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 1369 return (FILTER_STRAY); 1370 1371 taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); 1372 1373 return (FILTER_HANDLED); 1374 } 1375 1376 static int 1377 ena_enable_msix(struct ena_adapter *adapter) 1378 { 1379 device_t dev = adapter->pdev; 1380 int msix_vecs, msix_req; 1381 int i, rc = 0; 1382 1383 if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 1384 device_printf(dev, "Error, MSI-X is already enabled\n"); 1385 return (EINVAL); 1386 } 1387 1388 /* Reserved the max msix vectors we might need */ 1389 msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues); 1390 1391 adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1392 M_DEVBUF, M_WAITOK | M_ZERO); 1393 1394 ena_trace(ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); 1395 1396 for (i = 0; i < msix_vecs; i++) { 1397 adapter->msix_entries[i].entry = i; 1398 /* Vectors must start from 1 */ 1399 adapter->msix_entries[i].vector = i + 1; 1400 } 1401 1402 msix_req = msix_vecs; 1403 rc = pci_alloc_msix(dev, &msix_vecs); 1404 if (unlikely(rc != 0)) { 1405 device_printf(dev, 1406 "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc); 1407 1408 rc = ENOSPC; 1409 goto err_msix_free; 1410 } 1411 1412 if (msix_vecs != msix_req) { 1413 if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 1414 device_printf(dev, 1415 "Not enough number of MSI-x allocated: %d\n", 1416 msix_vecs); 1417 pci_release_msi(dev); 1418 rc = ENOSPC; 1419 goto err_msix_free; 1420 } 1421 device_printf(dev, "Enable only %d MSI-x (out of %d), reduce " 1422 "the number of queues\n", msix_vecs, msix_req); 1423 adapter->num_queues = msix_vecs - ENA_ADMIN_MSIX_VEC; 1424 } 1425 1426 adapter->msix_vecs = msix_vecs; 1427 ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 1428 1429 return (0); 1430 1431 err_msix_free: 1432 free(adapter->msix_entries, M_DEVBUF); 1433 adapter->msix_entries = NULL; 1434 1435 return (rc); 1436 } 1437 1438 static void 1439 ena_setup_mgmnt_intr(struct ena_adapter *adapter) 1440 { 1441 1442 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, 1443 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", 1444 device_get_nameunit(adapter->pdev)); 1445 /* 1446 * Handler is NULL on purpose, it will be set 1447 * when mgmnt interrupt is acquired 1448 */ 1449 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 1450 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 1451 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 1452 adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; 1453 } 1454 1455 static int 1456 ena_setup_io_intr(struct ena_adapter *adapter) 1457 { 1458 static int last_bind_cpu = -1; 1459 int irq_idx; 1460 1461 if (adapter->msix_entries == NULL) 1462 return (EINVAL); 1463 1464 for (int i = 0; i < adapter->num_queues; i++) { 1465 irq_idx = ENA_IO_IRQ_IDX(i); 1466 1467 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 1468 "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); 1469 adapter->irq_tbl[irq_idx].handler = ena_handle_msix; 1470 adapter->irq_tbl[irq_idx].data = &adapter->que[i]; 1471 adapter->irq_tbl[irq_idx].vector = 1472 adapter->msix_entries[irq_idx].vector; 1473 ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n", 1474 adapter->msix_entries[irq_idx].vector); 1475 1476 /* 1477 * We want to bind rings to the corresponding cpu 1478 * using something similar to the RSS round-robin technique. 1479 */ 1480 if (unlikely(last_bind_cpu < 0)) 1481 last_bind_cpu = CPU_FIRST(); 1482 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 1483 last_bind_cpu; 1484 last_bind_cpu = CPU_NEXT(last_bind_cpu); 1485 } 1486 1487 return (0); 1488 } 1489 1490 static int 1491 ena_request_mgmnt_irq(struct ena_adapter *adapter) 1492 { 1493 struct ena_irq *irq; 1494 unsigned long flags; 1495 int rc, rcc; 1496 1497 flags = RF_ACTIVE | RF_SHAREABLE; 1498 1499 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 1500 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 1501 &irq->vector, flags); 1502 1503 if (unlikely(irq->res == NULL)) { 1504 device_printf(adapter->pdev, "could not allocate " 1505 "irq vector: %d\n", irq->vector); 1506 return (ENXIO); 1507 } 1508 1509 rc = bus_setup_intr(adapter->pdev, irq->res, 1510 INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, 1511 irq->data, &irq->cookie); 1512 if (unlikely(rc != 0)) { 1513 device_printf(adapter->pdev, "failed to register " 1514 "interrupt handler for irq %ju: %d\n", 1515 rman_get_start(irq->res), rc); 1516 goto err_res_free; 1517 } 1518 irq->requested = true; 1519 1520 return (rc); 1521 1522 err_res_free: 1523 ena_trace(ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n", 1524 irq->vector); 1525 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1526 irq->vector, irq->res); 1527 if (unlikely(rcc != 0)) 1528 device_printf(adapter->pdev, "dev has no parent while " 1529 "releasing res for irq: %d\n", irq->vector); 1530 irq->res = NULL; 1531 1532 return (rc); 1533 } 1534 1535 static int 1536 ena_request_io_irq(struct ena_adapter *adapter) 1537 { 1538 struct ena_irq *irq; 1539 unsigned long flags = 0; 1540 int rc = 0, i, rcc; 1541 1542 if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) { 1543 device_printf(adapter->pdev, 1544 "failed to request I/O IRQ: MSI-X is not enabled\n"); 1545 return (EINVAL); 1546 } else { 1547 flags = RF_ACTIVE | RF_SHAREABLE; 1548 } 1549 1550 for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 1551 irq = &adapter->irq_tbl[i]; 1552 1553 if (unlikely(irq->requested)) 1554 continue; 1555 1556 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 1557 &irq->vector, flags); 1558 if (unlikely(irq->res == NULL)) { 1559 rc = ENOMEM; 1560 device_printf(adapter->pdev, "could not allocate " 1561 "irq vector: %d\n", irq->vector); 1562 goto err; 1563 } 1564 1565 rc = bus_setup_intr(adapter->pdev, irq->res, 1566 INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, 1567 irq->data, &irq->cookie); 1568 if (unlikely(rc != 0)) { 1569 device_printf(adapter->pdev, "failed to register " 1570 "interrupt handler for irq %ju: %d\n", 1571 rman_get_start(irq->res), rc); 1572 goto err; 1573 } 1574 irq->requested = true; 1575 1576 ena_trace(ENA_INFO, "queue %d - cpu %d\n", 1577 i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); 1578 } 1579 1580 return (rc); 1581 1582 err: 1583 1584 for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { 1585 irq = &adapter->irq_tbl[i]; 1586 rcc = 0; 1587 1588 /* Once we entered err: section and irq->requested is true we 1589 free both intr and resources */ 1590 if (irq->requested) 1591 rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 1592 if (unlikely(rcc != 0)) 1593 device_printf(adapter->pdev, "could not release" 1594 " irq: %d, error: %d\n", irq->vector, rcc); 1595 1596 /* If we entred err: section without irq->requested set we know 1597 it was bus_alloc_resource_any() that needs cleanup, provided 1598 res is not NULL. In case res is NULL no work in needed in 1599 this iteration */ 1600 rcc = 0; 1601 if (irq->res != NULL) { 1602 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1603 irq->vector, irq->res); 1604 } 1605 if (unlikely(rcc != 0)) 1606 device_printf(adapter->pdev, "dev has no parent while " 1607 "releasing res for irq: %d\n", irq->vector); 1608 irq->requested = false; 1609 irq->res = NULL; 1610 } 1611 1612 return (rc); 1613 } 1614 1615 static void 1616 ena_free_mgmnt_irq(struct ena_adapter *adapter) 1617 { 1618 struct ena_irq *irq; 1619 int rc; 1620 1621 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 1622 if (irq->requested) { 1623 ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n", 1624 irq->vector); 1625 rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 1626 if (unlikely(rc != 0)) 1627 device_printf(adapter->pdev, "failed to tear " 1628 "down irq: %d\n", irq->vector); 1629 irq->requested = 0; 1630 } 1631 1632 if (irq->res != NULL) { 1633 ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n", 1634 irq->vector); 1635 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1636 irq->vector, irq->res); 1637 irq->res = NULL; 1638 if (unlikely(rc != 0)) 1639 device_printf(adapter->pdev, "dev has no parent while " 1640 "releasing res for irq: %d\n", irq->vector); 1641 } 1642 } 1643 1644 static void 1645 ena_free_io_irq(struct ena_adapter *adapter) 1646 { 1647 struct ena_irq *irq; 1648 int rc; 1649 1650 for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 1651 irq = &adapter->irq_tbl[i]; 1652 if (irq->requested) { 1653 ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n", 1654 irq->vector); 1655 rc = bus_teardown_intr(adapter->pdev, irq->res, 1656 irq->cookie); 1657 if (unlikely(rc != 0)) { 1658 device_printf(adapter->pdev, "failed to tear " 1659 "down irq: %d\n", irq->vector); 1660 } 1661 irq->requested = 0; 1662 } 1663 1664 if (irq->res != NULL) { 1665 ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n", 1666 irq->vector); 1667 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 1668 irq->vector, irq->res); 1669 irq->res = NULL; 1670 if (unlikely(rc != 0)) { 1671 device_printf(adapter->pdev, "dev has no parent" 1672 " while releasing res for irq: %d\n", 1673 irq->vector); 1674 } 1675 } 1676 } 1677 } 1678 1679 static void 1680 ena_free_irqs(struct ena_adapter* adapter) 1681 { 1682 1683 ena_free_io_irq(adapter); 1684 ena_free_mgmnt_irq(adapter); 1685 ena_disable_msix(adapter); 1686 } 1687 1688 static void 1689 ena_disable_msix(struct ena_adapter *adapter) 1690 { 1691 1692 if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 1693 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 1694 pci_release_msi(adapter->pdev); 1695 } 1696 1697 adapter->msix_vecs = 0; 1698 if (adapter->msix_entries != NULL) 1699 free(adapter->msix_entries, M_DEVBUF); 1700 adapter->msix_entries = NULL; 1701 } 1702 1703 static void 1704 ena_unmask_all_io_irqs(struct ena_adapter *adapter) 1705 { 1706 struct ena_com_io_cq* io_cq; 1707 struct ena_eth_io_intr_reg intr_reg; 1708 uint16_t ena_qid; 1709 int i; 1710 1711 /* Unmask interrupts for all queues */ 1712 for (i = 0; i < adapter->num_queues; i++) { 1713 ena_qid = ENA_IO_TXQ_IDX(i); 1714 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 1715 ena_com_update_intr_reg(&intr_reg, 0, 0, true); 1716 ena_com_unmask_intr(io_cq, &intr_reg); 1717 } 1718 } 1719 1720 /* Configure the Rx forwarding */ 1721 static int 1722 ena_rss_configure(struct ena_adapter *adapter) 1723 { 1724 struct ena_com_dev *ena_dev = adapter->ena_dev; 1725 int rc; 1726 1727 /* Set indirect table */ 1728 rc = ena_com_indirect_table_set(ena_dev); 1729 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 1730 return (rc); 1731 1732 /* Configure hash function (if supported) */ 1733 rc = ena_com_set_hash_function(ena_dev); 1734 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 1735 return (rc); 1736 1737 /* Configure hash inputs (if supported) */ 1738 rc = ena_com_set_hash_ctrl(ena_dev); 1739 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 1740 return (rc); 1741 1742 return (0); 1743 } 1744 1745 static int 1746 ena_up_complete(struct ena_adapter *adapter) 1747 { 1748 int rc; 1749 1750 if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { 1751 rc = ena_rss_configure(adapter); 1752 if (rc != 0) 1753 return (rc); 1754 } 1755 1756 rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu); 1757 if (unlikely(rc != 0)) 1758 return (rc); 1759 1760 ena_refill_all_rx_bufs(adapter); 1761 ena_reset_counters((counter_u64_t *)&adapter->hw_stats, 1762 sizeof(adapter->hw_stats)); 1763 1764 return (0); 1765 } 1766 1767 int 1768 ena_up(struct ena_adapter *adapter) 1769 { 1770 int rc = 0; 1771 1772 if (unlikely(device_is_attached(adapter->pdev) == 0)) { 1773 device_printf(adapter->pdev, "device is not attached!\n"); 1774 return (ENXIO); 1775 } 1776 1777 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { 1778 device_printf(adapter->pdev, "device is going UP\n"); 1779 1780 /* setup interrupts for IO queues */ 1781 rc = ena_setup_io_intr(adapter); 1782 if (unlikely(rc != 0)) { 1783 ena_trace(ENA_ALERT, "error setting up IO interrupt\n"); 1784 goto error; 1785 } 1786 rc = ena_request_io_irq(adapter); 1787 if (unlikely(rc != 0)) { 1788 ena_trace(ENA_ALERT, "err_req_irq\n"); 1789 goto error; 1790 } 1791 1792 /* allocate transmit descriptors */ 1793 rc = ena_setup_all_tx_resources(adapter); 1794 if (unlikely(rc != 0)) { 1795 ena_trace(ENA_ALERT, "err_setup_tx\n"); 1796 goto err_setup_tx; 1797 } 1798 1799 /* allocate receive descriptors */ 1800 rc = ena_setup_all_rx_resources(adapter); 1801 if (unlikely(rc != 0)) { 1802 ena_trace(ENA_ALERT, "err_setup_rx\n"); 1803 goto err_setup_rx; 1804 } 1805 1806 /* create IO queues for Rx & Tx */ 1807 rc = ena_create_io_queues(adapter); 1808 if (unlikely(rc != 0)) { 1809 ena_trace(ENA_ALERT, 1810 "create IO queues failed\n"); 1811 goto err_io_que; 1812 } 1813 1814 if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 1815 if_link_state_change(adapter->ifp, LINK_STATE_UP); 1816 1817 rc = ena_up_complete(adapter); 1818 if (unlikely(rc != 0)) 1819 goto err_up_complete; 1820 1821 counter_u64_add(adapter->dev_stats.interface_up, 1); 1822 1823 ena_update_hwassist(adapter); 1824 1825 if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, 1826 IFF_DRV_OACTIVE); 1827 1828 /* Activate timer service only if the device is running. 1829 * If this flag is not set, it means that the driver is being 1830 * reset and timer service will be activated afterwards. 1831 */ 1832 if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) { 1833 callout_reset_sbt(&adapter->timer_service, SBT_1S, 1834 SBT_1S, ena_timer_service, (void *)adapter, 0); 1835 } 1836 1837 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); 1838 1839 ena_unmask_all_io_irqs(adapter); 1840 } 1841 1842 return (0); 1843 1844 err_up_complete: 1845 ena_destroy_all_io_queues(adapter); 1846 err_io_que: 1847 ena_free_all_rx_resources(adapter); 1848 err_setup_rx: 1849 ena_free_all_tx_resources(adapter); 1850 err_setup_tx: 1851 ena_free_io_irq(adapter); 1852 error: 1853 return (rc); 1854 } 1855 1856 static uint64_t 1857 ena_get_counter(if_t ifp, ift_counter cnt) 1858 { 1859 struct ena_adapter *adapter; 1860 struct ena_hw_stats *stats; 1861 1862 adapter = if_getsoftc(ifp); 1863 stats = &adapter->hw_stats; 1864 1865 switch (cnt) { 1866 case IFCOUNTER_IPACKETS: 1867 return (counter_u64_fetch(stats->rx_packets)); 1868 case IFCOUNTER_OPACKETS: 1869 return (counter_u64_fetch(stats->tx_packets)); 1870 case IFCOUNTER_IBYTES: 1871 return (counter_u64_fetch(stats->rx_bytes)); 1872 case IFCOUNTER_OBYTES: 1873 return (counter_u64_fetch(stats->tx_bytes)); 1874 case IFCOUNTER_IQDROPS: 1875 return (counter_u64_fetch(stats->rx_drops)); 1876 default: 1877 return (if_get_counter_default(ifp, cnt)); 1878 } 1879 } 1880 1881 static int 1882 ena_media_change(if_t ifp) 1883 { 1884 /* Media Change is not supported by firmware */ 1885 return (0); 1886 } 1887 1888 static void 1889 ena_media_status(if_t ifp, struct ifmediareq *ifmr) 1890 { 1891 struct ena_adapter *adapter = if_getsoftc(ifp); 1892 ena_trace(ENA_DBG, "enter\n"); 1893 1894 mtx_lock(&adapter->global_mtx); 1895 1896 ifmr->ifm_status = IFM_AVALID; 1897 ifmr->ifm_active = IFM_ETHER; 1898 1899 if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) { 1900 mtx_unlock(&adapter->global_mtx); 1901 ena_trace(ENA_INFO, "Link is down\n"); 1902 return; 1903 } 1904 1905 ifmr->ifm_status |= IFM_ACTIVE; 1906 ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX; 1907 1908 mtx_unlock(&adapter->global_mtx); 1909 } 1910 1911 static void 1912 ena_init(void *arg) 1913 { 1914 struct ena_adapter *adapter = (struct ena_adapter *)arg; 1915 1916 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { 1917 sx_xlock(&adapter->ioctl_sx); 1918 ena_up(adapter); 1919 sx_unlock(&adapter->ioctl_sx); 1920 } 1921 } 1922 1923 static int 1924 ena_ioctl(if_t ifp, u_long command, caddr_t data) 1925 { 1926 struct ena_adapter *adapter; 1927 struct ifreq *ifr; 1928 int rc; 1929 1930 adapter = ifp->if_softc; 1931 ifr = (struct ifreq *)data; 1932 1933 /* 1934 * Acquiring lock to prevent from running up and down routines parallel. 1935 */ 1936 rc = 0; 1937 switch (command) { 1938 case SIOCSIFMTU: 1939 if (ifp->if_mtu == ifr->ifr_mtu) 1940 break; 1941 sx_xlock(&adapter->ioctl_sx); 1942 ena_down(adapter); 1943 1944 ena_change_mtu(ifp, ifr->ifr_mtu); 1945 1946 rc = ena_up(adapter); 1947 sx_unlock(&adapter->ioctl_sx); 1948 break; 1949 1950 case SIOCSIFFLAGS: 1951 if ((ifp->if_flags & IFF_UP) != 0) { 1952 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1953 if ((ifp->if_flags & (IFF_PROMISC | 1954 IFF_ALLMULTI)) != 0) { 1955 device_printf(adapter->pdev, 1956 "ioctl promisc/allmulti\n"); 1957 } 1958 } else { 1959 sx_xlock(&adapter->ioctl_sx); 1960 rc = ena_up(adapter); 1961 sx_unlock(&adapter->ioctl_sx); 1962 } 1963 } else { 1964 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1965 sx_xlock(&adapter->ioctl_sx); 1966 ena_down(adapter); 1967 sx_unlock(&adapter->ioctl_sx); 1968 } 1969 } 1970 break; 1971 1972 case SIOCADDMULTI: 1973 case SIOCDELMULTI: 1974 break; 1975 1976 case SIOCSIFMEDIA: 1977 case SIOCGIFMEDIA: 1978 rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 1979 break; 1980 1981 case SIOCSIFCAP: 1982 { 1983 int reinit = 0; 1984 1985 if (ifr->ifr_reqcap != ifp->if_capenable) { 1986 ifp->if_capenable = ifr->ifr_reqcap; 1987 reinit = 1; 1988 } 1989 1990 if ((reinit != 0) && 1991 ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { 1992 sx_xlock(&adapter->ioctl_sx); 1993 ena_down(adapter); 1994 rc = ena_up(adapter); 1995 sx_unlock(&adapter->ioctl_sx); 1996 } 1997 } 1998 1999 break; 2000 default: 2001 rc = ether_ioctl(ifp, command, data); 2002 break; 2003 } 2004 2005 return (rc); 2006 } 2007 2008 static int 2009 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) 2010 { 2011 int caps = 0; 2012 2013 if ((feat->offload.tx & 2014 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 2015 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | 2016 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) 2017 caps |= IFCAP_TXCSUM; 2018 2019 if ((feat->offload.tx & 2020 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | 2021 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) 2022 caps |= IFCAP_TXCSUM_IPV6; 2023 2024 if ((feat->offload.tx & 2025 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) 2026 caps |= IFCAP_TSO4; 2027 2028 if ((feat->offload.tx & 2029 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) 2030 caps |= IFCAP_TSO6; 2031 2032 if ((feat->offload.rx_supported & 2033 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | 2034 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) 2035 caps |= IFCAP_RXCSUM; 2036 2037 if ((feat->offload.rx_supported & 2038 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) 2039 caps |= IFCAP_RXCSUM_IPV6; 2040 2041 caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; 2042 2043 return (caps); 2044 } 2045 2046 static void 2047 ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) 2048 { 2049 2050 host_info->supported_network_features[0] = 2051 (uint32_t)if_getcapabilities(ifp); 2052 } 2053 2054 static void 2055 ena_update_hwassist(struct ena_adapter *adapter) 2056 { 2057 if_t ifp = adapter->ifp; 2058 uint32_t feat = adapter->tx_offload_cap; 2059 int cap = if_getcapenable(ifp); 2060 int flags = 0; 2061 2062 if_clearhwassist(ifp); 2063 2064 if ((cap & IFCAP_TXCSUM) != 0) { 2065 if ((feat & 2066 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) 2067 flags |= CSUM_IP; 2068 if ((feat & 2069 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 2070 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) 2071 flags |= CSUM_IP_UDP | CSUM_IP_TCP; 2072 } 2073 2074 if ((cap & IFCAP_TXCSUM_IPV6) != 0) 2075 flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; 2076 2077 if ((cap & IFCAP_TSO4) != 0) 2078 flags |= CSUM_IP_TSO; 2079 2080 if ((cap & IFCAP_TSO6) != 0) 2081 flags |= CSUM_IP6_TSO; 2082 2083 if_sethwassistbits(ifp, flags, 0); 2084 } 2085 2086 static int 2087 ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, 2088 struct ena_com_dev_get_features_ctx *feat) 2089 { 2090 if_t ifp; 2091 int caps = 0; 2092 2093 ifp = adapter->ifp = if_gethandle(IFT_ETHER); 2094 if (unlikely(ifp == NULL)) { 2095 ena_trace(ENA_ALERT, "can not allocate ifnet structure\n"); 2096 return (ENXIO); 2097 } 2098 if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); 2099 if_setdev(ifp, pdev); 2100 if_setsoftc(ifp, adapter); 2101 2102 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 2103 if_setinitfn(ifp, ena_init); 2104 if_settransmitfn(ifp, ena_mq_start); 2105 if_setqflushfn(ifp, ena_qflush); 2106 if_setioctlfn(ifp, ena_ioctl); 2107 if_setgetcounterfn(ifp, ena_get_counter); 2108 2109 if_setsendqlen(ifp, adapter->tx_ring_size); 2110 if_setsendqready(ifp); 2111 if_setmtu(ifp, ETHERMTU); 2112 if_setbaudrate(ifp, 0); 2113 /* Zeroize capabilities... */ 2114 if_setcapabilities(ifp, 0); 2115 if_setcapenable(ifp, 0); 2116 /* check hardware support */ 2117 caps = ena_get_dev_offloads(feat); 2118 /* ... and set them */ 2119 if_setcapabilitiesbit(ifp, caps, 0); 2120 2121 /* TSO parameters */ 2122 ifp->if_hw_tsomax = ENA_TSO_MAXSIZE - 2123 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 2124 ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1; 2125 ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE; 2126 2127 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 2128 if_setcapenable(ifp, if_getcapabilities(ifp)); 2129 2130 /* 2131 * Specify the media types supported by this adapter and register 2132 * callbacks to update media and link information 2133 */ 2134 ifmedia_init(&adapter->media, IFM_IMASK, 2135 ena_media_change, ena_media_status); 2136 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2137 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2138 2139 ether_ifattach(ifp, adapter->mac_addr); 2140 2141 return (0); 2142 } 2143 2144 void 2145 ena_down(struct ena_adapter *adapter) 2146 { 2147 int rc; 2148 2149 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { 2150 device_printf(adapter->pdev, "device is going DOWN\n"); 2151 2152 callout_drain(&adapter->timer_service); 2153 2154 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); 2155 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, 2156 IFF_DRV_RUNNING); 2157 2158 ena_free_io_irq(adapter); 2159 2160 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { 2161 rc = ena_com_dev_reset(adapter->ena_dev, 2162 adapter->reset_reason); 2163 if (unlikely(rc != 0)) 2164 device_printf(adapter->pdev, 2165 "Device reset failed\n"); 2166 } 2167 2168 ena_destroy_all_io_queues(adapter); 2169 2170 ena_free_all_tx_bufs(adapter); 2171 ena_free_all_rx_bufs(adapter); 2172 ena_free_all_tx_resources(adapter); 2173 ena_free_all_rx_resources(adapter); 2174 2175 counter_u64_add(adapter->dev_stats.interface_down, 1); 2176 } 2177 } 2178 2179 static int 2180 ena_calc_io_queue_num(struct ena_adapter *adapter, 2181 struct ena_com_dev_get_features_ctx *get_feat_ctx) 2182 { 2183 struct ena_com_dev *ena_dev = adapter->ena_dev; 2184 int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num; 2185 2186 /* Regular queues capabilities */ 2187 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 2188 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 2189 &get_feat_ctx->max_queue_ext.max_queue_ext; 2190 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, 2191 max_queue_ext->max_rx_cq_num); 2192 2193 io_tx_sq_num = max_queue_ext->max_tx_sq_num; 2194 io_tx_cq_num = max_queue_ext->max_tx_cq_num; 2195 } else { 2196 struct ena_admin_queue_feature_desc *max_queues = 2197 &get_feat_ctx->max_queues; 2198 io_tx_sq_num = max_queues->max_sq_num; 2199 io_tx_cq_num = max_queues->max_cq_num; 2200 io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); 2201 } 2202 2203 /* In case of LLQ use the llq fields for the tx SQ/CQ */ 2204 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 2205 io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 2206 2207 io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); 2208 io_queue_num = min_t(int, io_queue_num, io_rx_num); 2209 io_queue_num = min_t(int, io_queue_num, io_tx_sq_num); 2210 io_queue_num = min_t(int, io_queue_num, io_tx_cq_num); 2211 /* 1 IRQ for for mgmnt and 1 IRQ for each TX/RX pair */ 2212 io_queue_num = min_t(int, io_queue_num, 2213 pci_msix_count(adapter->pdev) - 1); 2214 2215 return (io_queue_num); 2216 } 2217 2218 static int 2219 ena_enable_wc(struct resource *res) 2220 { 2221 #if defined(__i386) || defined(__amd64) || defined(__aarch64__) 2222 vm_offset_t va; 2223 vm_size_t len; 2224 int rc; 2225 2226 va = (vm_offset_t)rman_get_virtual(res); 2227 len = rman_get_size(res); 2228 /* Enable write combining */ 2229 rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING); 2230 if (unlikely(rc != 0)) { 2231 ena_trace(ENA_ALERT, "pmap_change_attr failed, %d\n", rc); 2232 return (rc); 2233 } 2234 2235 return (0); 2236 #endif 2237 return (EOPNOTSUPP); 2238 } 2239 2240 static int 2241 ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, 2242 struct ena_admin_feature_llq_desc *llq, 2243 struct ena_llq_configurations *llq_default_configurations) 2244 { 2245 struct ena_adapter *adapter = device_get_softc(pdev); 2246 int rc, rid; 2247 uint32_t llq_feature_mask; 2248 2249 llq_feature_mask = 1 << ENA_ADMIN_LLQ; 2250 if (!(ena_dev->supported_features & llq_feature_mask)) { 2251 device_printf(pdev, 2252 "LLQ is not supported. Fallback to host mode policy.\n"); 2253 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2254 return (0); 2255 } 2256 2257 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 2258 if (unlikely(rc != 0)) { 2259 device_printf(pdev, "Failed to configure the device mode. " 2260 "Fallback to host mode policy.\n"); 2261 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2262 return (0); 2263 } 2264 2265 /* Nothing to config, exit */ 2266 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 2267 return (0); 2268 2269 /* Try to allocate resources for LLQ bar */ 2270 rid = PCIR_BAR(ENA_MEM_BAR); 2271 adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 2272 &rid, RF_ACTIVE); 2273 if (unlikely(adapter->memory == NULL)) { 2274 device_printf(pdev, "unable to allocate LLQ bar resource. " 2275 "Fallback to host mode policy.\n"); 2276 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2277 return (0); 2278 } 2279 2280 /* Enable write combining for better LLQ performance */ 2281 rc = ena_enable_wc(adapter->memory); 2282 if (unlikely(rc != 0)) { 2283 device_printf(pdev, "failed to enable write combining.\n"); 2284 return (rc); 2285 } 2286 2287 /* 2288 * Save virtual address of the device's memory region 2289 * for the ena_com layer. 2290 */ 2291 ena_dev->mem_bar = rman_get_virtual(adapter->memory); 2292 2293 return (0); 2294 } 2295 2296 static inline 2297 void set_default_llq_configurations(struct ena_llq_configurations *llq_config) 2298 { 2299 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 2300 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 2301 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 2302 llq_config->llq_num_decs_before_header = 2303 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 2304 llq_config->llq_ring_entry_size_value = 128; 2305 } 2306 2307 static int 2308 ena_calc_queue_size(struct ena_adapter *adapter, 2309 struct ena_calc_queue_size_ctx *ctx) 2310 { 2311 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 2312 struct ena_com_dev *ena_dev = ctx->ena_dev; 2313 uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; 2314 uint32_t rx_queue_size = adapter->rx_ring_size; 2315 2316 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 2317 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 2318 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 2319 rx_queue_size = min_t(uint32_t, rx_queue_size, 2320 max_queue_ext->max_rx_cq_depth); 2321 rx_queue_size = min_t(uint32_t, rx_queue_size, 2322 max_queue_ext->max_rx_sq_depth); 2323 tx_queue_size = min_t(uint32_t, tx_queue_size, 2324 max_queue_ext->max_tx_cq_depth); 2325 2326 if (ena_dev->tx_mem_queue_type == 2327 ENA_ADMIN_PLACEMENT_POLICY_DEV) 2328 tx_queue_size = min_t(uint32_t, tx_queue_size, 2329 llq->max_llq_depth); 2330 else 2331 tx_queue_size = min_t(uint32_t, tx_queue_size, 2332 max_queue_ext->max_tx_sq_depth); 2333 2334 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 2335 max_queue_ext->max_per_packet_rx_descs); 2336 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 2337 max_queue_ext->max_per_packet_tx_descs); 2338 } else { 2339 struct ena_admin_queue_feature_desc *max_queues = 2340 &ctx->get_feat_ctx->max_queues; 2341 rx_queue_size = min_t(uint32_t, rx_queue_size, 2342 max_queues->max_cq_depth); 2343 rx_queue_size = min_t(uint32_t, rx_queue_size, 2344 max_queues->max_sq_depth); 2345 tx_queue_size = min_t(uint32_t, tx_queue_size, 2346 max_queues->max_cq_depth); 2347 2348 if (ena_dev->tx_mem_queue_type == 2349 ENA_ADMIN_PLACEMENT_POLICY_DEV) 2350 tx_queue_size = min_t(uint32_t, tx_queue_size, 2351 llq->max_llq_depth); 2352 else 2353 tx_queue_size = min_t(uint32_t, tx_queue_size, 2354 max_queues->max_sq_depth); 2355 2356 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 2357 max_queues->max_packet_tx_descs); 2358 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 2359 max_queues->max_packet_rx_descs); 2360 } 2361 2362 /* round down to the nearest power of 2 */ 2363 rx_queue_size = 1 << (fls(rx_queue_size) - 1); 2364 tx_queue_size = 1 << (fls(tx_queue_size) - 1); 2365 2366 if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) { 2367 device_printf(ctx->pdev, "Invalid queue size\n"); 2368 return (EFAULT); 2369 } 2370 2371 ctx->rx_queue_size = rx_queue_size; 2372 ctx->tx_queue_size = tx_queue_size; 2373 2374 return (0); 2375 } 2376 2377 static int 2378 ena_handle_updated_queues(struct ena_adapter *adapter, 2379 struct ena_com_dev_get_features_ctx *get_feat_ctx) 2380 { 2381 struct ena_com_dev *ena_dev = adapter->ena_dev; 2382 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 2383 device_t pdev = adapter->pdev; 2384 bool are_queues_changed = false; 2385 int io_queue_num, rc; 2386 2387 calc_queue_ctx.ena_dev = ena_dev; 2388 calc_queue_ctx.get_feat_ctx = get_feat_ctx; 2389 calc_queue_ctx.pdev = pdev; 2390 2391 io_queue_num = ena_calc_io_queue_num(adapter, get_feat_ctx); 2392 rc = ena_calc_queue_size(adapter, &calc_queue_ctx); 2393 if (unlikely(rc != 0 || io_queue_num <= 0)) 2394 return EFAULT; 2395 2396 if (adapter->tx_ring->buf_ring_size != adapter->buf_ring_size) 2397 are_queues_changed = true; 2398 2399 if (unlikely(adapter->tx_ring_size > calc_queue_ctx.tx_queue_size || 2400 adapter->rx_ring_size > calc_queue_ctx.rx_queue_size)) { 2401 device_printf(pdev, 2402 "Not enough resources to allocate requested queue sizes " 2403 "(TX,RX)=(%d,%d), falling back to queue sizes " 2404 "(TX,RX)=(%d,%d)\n", 2405 adapter->tx_ring_size, 2406 adapter->rx_ring_size, 2407 calc_queue_ctx.tx_queue_size, 2408 calc_queue_ctx.rx_queue_size); 2409 adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 2410 adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 2411 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 2412 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 2413 are_queues_changed = true; 2414 } 2415 2416 if (unlikely(adapter->num_queues > io_queue_num)) { 2417 device_printf(pdev, 2418 "Not enough resources to allocate %d queues, " 2419 "falling back to %d queues\n", 2420 adapter->num_queues, io_queue_num); 2421 adapter->num_queues = io_queue_num; 2422 if (ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)) { 2423 ena_com_rss_destroy(ena_dev); 2424 rc = ena_rss_init_default(adapter); 2425 if (unlikely(rc != 0) && (rc != EOPNOTSUPP)) { 2426 device_printf(pdev, "Cannot init RSS rc: %d\n", 2427 rc); 2428 return (rc); 2429 } 2430 } 2431 are_queues_changed = true; 2432 } 2433 2434 if (unlikely(are_queues_changed)) { 2435 ena_free_all_io_rings_resources(adapter); 2436 ena_init_io_rings(adapter); 2437 } 2438 2439 return (0); 2440 } 2441 2442 static int 2443 ena_rss_init_default(struct ena_adapter *adapter) 2444 { 2445 struct ena_com_dev *ena_dev = adapter->ena_dev; 2446 device_t dev = adapter->pdev; 2447 int qid, rc, i; 2448 2449 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 2450 if (unlikely(rc != 0)) { 2451 device_printf(dev, "Cannot init indirect table\n"); 2452 return (rc); 2453 } 2454 2455 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 2456 qid = i % adapter->num_queues; 2457 rc = ena_com_indirect_table_fill_entry(ena_dev, i, 2458 ENA_IO_RXQ_IDX(qid)); 2459 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 2460 device_printf(dev, "Cannot fill indirect table\n"); 2461 goto err_rss_destroy; 2462 } 2463 } 2464 2465 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 2466 ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 2467 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 2468 device_printf(dev, "Cannot fill hash function\n"); 2469 goto err_rss_destroy; 2470 } 2471 2472 rc = ena_com_set_default_hash_ctrl(ena_dev); 2473 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 2474 device_printf(dev, "Cannot fill hash control\n"); 2475 goto err_rss_destroy; 2476 } 2477 2478 return (0); 2479 2480 err_rss_destroy: 2481 ena_com_rss_destroy(ena_dev); 2482 return (rc); 2483 } 2484 2485 static void 2486 ena_rss_init_default_deferred(void *arg) 2487 { 2488 struct ena_adapter *adapter; 2489 devclass_t dc; 2490 int max; 2491 int rc; 2492 2493 dc = devclass_find("ena"); 2494 if (unlikely(dc == NULL)) { 2495 ena_trace(ENA_ALERT, "No devclass ena\n"); 2496 return; 2497 } 2498 2499 max = devclass_get_maxunit(dc); 2500 while (max-- >= 0) { 2501 adapter = devclass_get_softc(dc, max); 2502 if (adapter != NULL) { 2503 rc = ena_rss_init_default(adapter); 2504 ENA_FLAG_SET_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter); 2505 if (unlikely(rc != 0)) { 2506 device_printf(adapter->pdev, 2507 "WARNING: RSS was not properly initialized," 2508 " it will affect bandwidth\n"); 2509 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter); 2510 } 2511 } 2512 } 2513 } 2514 SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL); 2515 2516 static void 2517 ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) 2518 { 2519 struct ena_admin_host_info *host_info; 2520 uintptr_t rid; 2521 int rc; 2522 2523 /* Allocate only the host info */ 2524 rc = ena_com_allocate_host_info(ena_dev); 2525 if (unlikely(rc != 0)) { 2526 ena_trace(ENA_ALERT, "Cannot allocate host info\n"); 2527 return; 2528 } 2529 2530 host_info = ena_dev->host_attr.host_info; 2531 2532 if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) 2533 host_info->bdf = rid; 2534 host_info->os_type = ENA_ADMIN_OS_FREEBSD; 2535 host_info->kernel_ver = osreldate; 2536 2537 sprintf(host_info->kernel_ver_str, "%d", osreldate); 2538 host_info->os_dist = 0; 2539 strncpy(host_info->os_dist_str, osrelease, 2540 sizeof(host_info->os_dist_str) - 1); 2541 2542 host_info->driver_version = 2543 (DRV_MODULE_VER_MAJOR) | 2544 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 2545 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 2546 host_info->num_cpus = mp_ncpus; 2547 2548 rc = ena_com_set_host_attributes(ena_dev); 2549 if (unlikely(rc != 0)) { 2550 if (rc == EOPNOTSUPP) 2551 ena_trace(ENA_WARNING, "Cannot set host attributes\n"); 2552 else 2553 ena_trace(ENA_ALERT, "Cannot set host attributes\n"); 2554 2555 goto err; 2556 } 2557 2558 return; 2559 2560 err: 2561 ena_com_delete_host_info(ena_dev); 2562 } 2563 2564 static int 2565 ena_device_init(struct ena_adapter *adapter, device_t pdev, 2566 struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 2567 { 2568 struct ena_com_dev* ena_dev = adapter->ena_dev; 2569 bool readless_supported; 2570 uint32_t aenq_groups; 2571 int dma_width; 2572 int rc; 2573 2574 rc = ena_com_mmio_reg_read_request_init(ena_dev); 2575 if (unlikely(rc != 0)) { 2576 device_printf(pdev, "failed to init mmio read less\n"); 2577 return (rc); 2578 } 2579 2580 /* 2581 * The PCIe configuration space revision id indicate if mmio reg 2582 * read is disabled 2583 */ 2584 readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); 2585 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 2586 2587 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 2588 if (unlikely(rc != 0)) { 2589 device_printf(pdev, "Can not reset device\n"); 2590 goto err_mmio_read_less; 2591 } 2592 2593 rc = ena_com_validate_version(ena_dev); 2594 if (unlikely(rc != 0)) { 2595 device_printf(pdev, "device version is too low\n"); 2596 goto err_mmio_read_less; 2597 } 2598 2599 dma_width = ena_com_get_dma_width(ena_dev); 2600 if (unlikely(dma_width < 0)) { 2601 device_printf(pdev, "Invalid dma width value %d", dma_width); 2602 rc = dma_width; 2603 goto err_mmio_read_less; 2604 } 2605 adapter->dma_width = dma_width; 2606 2607 /* ENA admin level init */ 2608 rc = ena_com_admin_init(ena_dev, &aenq_handlers); 2609 if (unlikely(rc != 0)) { 2610 device_printf(pdev, 2611 "Can not initialize ena admin queue with device\n"); 2612 goto err_mmio_read_less; 2613 } 2614 2615 /* 2616 * To enable the msix interrupts the driver needs to know the number 2617 * of queues. So the driver uses polling mode to retrieve this 2618 * information 2619 */ 2620 ena_com_set_admin_polling_mode(ena_dev, true); 2621 2622 ena_config_host_info(ena_dev, pdev); 2623 2624 /* Get Device Attributes */ 2625 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 2626 if (unlikely(rc != 0)) { 2627 device_printf(pdev, 2628 "Cannot get attribute for ena device rc: %d\n", rc); 2629 goto err_admin_init; 2630 } 2631 2632 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 2633 BIT(ENA_ADMIN_FATAL_ERROR) | 2634 BIT(ENA_ADMIN_WARNING) | 2635 BIT(ENA_ADMIN_NOTIFICATION) | 2636 BIT(ENA_ADMIN_KEEP_ALIVE); 2637 2638 aenq_groups &= get_feat_ctx->aenq.supported_groups; 2639 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 2640 if (unlikely(rc != 0)) { 2641 device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc); 2642 goto err_admin_init; 2643 } 2644 2645 *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 2646 2647 return (0); 2648 2649 err_admin_init: 2650 ena_com_delete_host_info(ena_dev); 2651 ena_com_admin_destroy(ena_dev); 2652 err_mmio_read_less: 2653 ena_com_mmio_reg_read_request_destroy(ena_dev); 2654 2655 return (rc); 2656 } 2657 2658 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, 2659 int io_vectors) 2660 { 2661 struct ena_com_dev *ena_dev = adapter->ena_dev; 2662 int rc; 2663 2664 rc = ena_enable_msix(adapter); 2665 if (unlikely(rc != 0)) { 2666 device_printf(adapter->pdev, "Error with MSI-X enablement\n"); 2667 return (rc); 2668 } 2669 2670 ena_setup_mgmnt_intr(adapter); 2671 2672 rc = ena_request_mgmnt_irq(adapter); 2673 if (unlikely(rc != 0)) { 2674 device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n"); 2675 goto err_disable_msix; 2676 } 2677 2678 ena_com_set_admin_polling_mode(ena_dev, false); 2679 2680 ena_com_admin_aenq_enable(ena_dev); 2681 2682 return (0); 2683 2684 err_disable_msix: 2685 ena_disable_msix(adapter); 2686 2687 return (rc); 2688 } 2689 2690 /* Function called on ENA_ADMIN_KEEP_ALIVE event */ 2691 static void ena_keep_alive_wd(void *adapter_data, 2692 struct ena_admin_aenq_entry *aenq_e) 2693 { 2694 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 2695 struct ena_admin_aenq_keep_alive_desc *desc; 2696 sbintime_t stime; 2697 uint64_t rx_drops; 2698 2699 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 2700 2701 rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 2702 counter_u64_zero(adapter->hw_stats.rx_drops); 2703 counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); 2704 2705 stime = getsbinuptime(); 2706 atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 2707 } 2708 2709 /* Check for keep alive expiration */ 2710 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 2711 { 2712 sbintime_t timestamp, time; 2713 2714 if (adapter->wd_active == 0) 2715 return; 2716 2717 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2718 return; 2719 2720 timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); 2721 time = getsbinuptime() - timestamp; 2722 if (unlikely(time > adapter->keep_alive_timeout)) { 2723 device_printf(adapter->pdev, 2724 "Keep alive watchdog timeout.\n"); 2725 counter_u64_add(adapter->dev_stats.wd_expired, 1); 2726 if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 2727 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 2728 ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 2729 } 2730 } 2731 } 2732 2733 /* Check if admin queue is enabled */ 2734 static void check_for_admin_com_state(struct ena_adapter *adapter) 2735 { 2736 if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == 2737 false)) { 2738 device_printf(adapter->pdev, 2739 "ENA admin queue is not in running state!\n"); 2740 counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 2741 if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 2742 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 2743 ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 2744 } 2745 } 2746 } 2747 2748 static int 2749 check_for_rx_interrupt_queue(struct ena_adapter *adapter, 2750 struct ena_ring *rx_ring) 2751 { 2752 if (likely(rx_ring->first_interrupt)) 2753 return (0); 2754 2755 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 2756 return (0); 2757 2758 rx_ring->no_interrupt_event_cnt++; 2759 2760 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { 2761 device_printf(adapter->pdev, "Potential MSIX issue on Rx side " 2762 "Queue = %d. Reset the device\n", rx_ring->qid); 2763 if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 2764 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; 2765 ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 2766 } 2767 return (EIO); 2768 } 2769 2770 return (0); 2771 } 2772 2773 static int 2774 check_missing_comp_in_tx_queue(struct ena_adapter *adapter, 2775 struct ena_ring *tx_ring) 2776 { 2777 struct bintime curtime, time; 2778 struct ena_tx_buffer *tx_buf; 2779 sbintime_t time_offset; 2780 uint32_t missed_tx = 0; 2781 int i, rc = 0; 2782 2783 getbinuptime(&curtime); 2784 2785 for (i = 0; i < tx_ring->ring_size; i++) { 2786 tx_buf = &tx_ring->tx_buffer_info[i]; 2787 2788 if (bintime_isset(&tx_buf->timestamp) == 0) 2789 continue; 2790 2791 time = curtime; 2792 bintime_sub(&time, &tx_buf->timestamp); 2793 time_offset = bttosbt(time); 2794 2795 if (unlikely(!tx_ring->first_interrupt && 2796 time_offset > 2 * adapter->missing_tx_timeout)) { 2797 /* 2798 * If after graceful period interrupt is still not 2799 * received, we schedule a reset. 2800 */ 2801 device_printf(adapter->pdev, 2802 "Potential MSIX issue on Tx side Queue = %d. " 2803 "Reset the device\n", tx_ring->qid); 2804 if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, 2805 adapter))) { 2806 adapter->reset_reason = 2807 ENA_REGS_RESET_MISS_INTERRUPT; 2808 ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, 2809 adapter); 2810 } 2811 return (EIO); 2812 } 2813 2814 /* Check again if packet is still waiting */ 2815 if (unlikely(time_offset > adapter->missing_tx_timeout)) { 2816 2817 if (!tx_buf->print_once) 2818 ena_trace(ENA_WARNING, "Found a Tx that wasn't " 2819 "completed on time, qid %d, index %d.\n", 2820 tx_ring->qid, i); 2821 2822 tx_buf->print_once = true; 2823 missed_tx++; 2824 } 2825 } 2826 2827 if (unlikely(missed_tx > adapter->missing_tx_threshold)) { 2828 device_printf(adapter->pdev, 2829 "The number of lost tx completion is above the threshold " 2830 "(%d > %d). Reset the device\n", 2831 missed_tx, adapter->missing_tx_threshold); 2832 if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 2833 adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 2834 ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 2835 } 2836 rc = EIO; 2837 } 2838 2839 counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx); 2840 2841 return (rc); 2842 } 2843 2844 /* 2845 * Check for TX which were not completed on time. 2846 * Timeout is defined by "missing_tx_timeout". 2847 * Reset will be performed if number of incompleted 2848 * transactions exceeds "missing_tx_threshold". 2849 */ 2850 static void 2851 check_for_missing_completions(struct ena_adapter *adapter) 2852 { 2853 struct ena_ring *tx_ring; 2854 struct ena_ring *rx_ring; 2855 int i, budget, rc; 2856 2857 /* Make sure the driver doesn't turn the device in other process */ 2858 rmb(); 2859 2860 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2861 return; 2862 2863 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 2864 return; 2865 2866 if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2867 return; 2868 2869 budget = adapter->missing_tx_max_queues; 2870 2871 for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) { 2872 tx_ring = &adapter->tx_ring[i]; 2873 rx_ring = &adapter->rx_ring[i]; 2874 2875 rc = check_missing_comp_in_tx_queue(adapter, tx_ring); 2876 if (unlikely(rc != 0)) 2877 return; 2878 2879 rc = check_for_rx_interrupt_queue(adapter, rx_ring); 2880 if (unlikely(rc != 0)) 2881 return; 2882 2883 budget--; 2884 if (budget == 0) { 2885 i++; 2886 break; 2887 } 2888 } 2889 2890 adapter->next_monitored_tx_qid = i % adapter->num_queues; 2891 } 2892 2893 /* trigger rx cleanup after 2 consecutive detections */ 2894 #define EMPTY_RX_REFILL 2 2895 /* For the rare case where the device runs out of Rx descriptors and the 2896 * msix handler failed to refill new Rx descriptors (due to a lack of memory 2897 * for example). 2898 * This case will lead to a deadlock: 2899 * The device won't send interrupts since all the new Rx packets will be dropped 2900 * The msix handler won't allocate new Rx descriptors so the device won't be 2901 * able to send new packets. 2902 * 2903 * When such a situation is detected - execute rx cleanup task in another thread 2904 */ 2905 static void 2906 check_for_empty_rx_ring(struct ena_adapter *adapter) 2907 { 2908 struct ena_ring *rx_ring; 2909 int i, refill_required; 2910 2911 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2912 return; 2913 2914 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 2915 return; 2916 2917 for (i = 0; i < adapter->num_queues; i++) { 2918 rx_ring = &adapter->rx_ring[i]; 2919 2920 refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); 2921 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 2922 rx_ring->empty_rx_queue++; 2923 2924 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 2925 counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 2926 1); 2927 2928 device_printf(adapter->pdev, 2929 "trigger refill for ring %d\n", i); 2930 2931 taskqueue_enqueue(rx_ring->que->cleanup_tq, 2932 &rx_ring->que->cleanup_task); 2933 rx_ring->empty_rx_queue = 0; 2934 } 2935 } else { 2936 rx_ring->empty_rx_queue = 0; 2937 } 2938 } 2939 } 2940 2941 static void ena_update_hints(struct ena_adapter *adapter, 2942 struct ena_admin_ena_hw_hints *hints) 2943 { 2944 struct ena_com_dev *ena_dev = adapter->ena_dev; 2945 2946 if (hints->admin_completion_tx_timeout) 2947 ena_dev->admin_queue.completion_timeout = 2948 hints->admin_completion_tx_timeout * 1000; 2949 2950 if (hints->mmio_read_timeout) 2951 /* convert to usec */ 2952 ena_dev->mmio_read.reg_read_to = 2953 hints->mmio_read_timeout * 1000; 2954 2955 if (hints->missed_tx_completion_count_threshold_to_reset) 2956 adapter->missing_tx_threshold = 2957 hints->missed_tx_completion_count_threshold_to_reset; 2958 2959 if (hints->missing_tx_completion_timeout) { 2960 if (hints->missing_tx_completion_timeout == 2961 ENA_HW_HINTS_NO_TIMEOUT) 2962 adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2963 else 2964 adapter->missing_tx_timeout = 2965 SBT_1MS * hints->missing_tx_completion_timeout; 2966 } 2967 2968 if (hints->driver_watchdog_timeout) { 2969 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2970 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2971 else 2972 adapter->keep_alive_timeout = 2973 SBT_1MS * hints->driver_watchdog_timeout; 2974 } 2975 } 2976 2977 static void 2978 ena_timer_service(void *data) 2979 { 2980 struct ena_adapter *adapter = (struct ena_adapter *)data; 2981 struct ena_admin_host_info *host_info = 2982 adapter->ena_dev->host_attr.host_info; 2983 2984 check_for_missing_keep_alive(adapter); 2985 2986 check_for_admin_com_state(adapter); 2987 2988 check_for_missing_completions(adapter); 2989 2990 check_for_empty_rx_ring(adapter); 2991 2992 if (host_info != NULL) 2993 ena_update_host_info(host_info, adapter->ifp); 2994 2995 if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 2996 device_printf(adapter->pdev, "Trigger reset is on\n"); 2997 taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task); 2998 return; 2999 } 3000 3001 /* 3002 * Schedule another timeout one second from now. 3003 */ 3004 callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0); 3005 } 3006 3007 void 3008 ena_destroy_device(struct ena_adapter *adapter, bool graceful) 3009 { 3010 if_t ifp = adapter->ifp; 3011 struct ena_com_dev *ena_dev = adapter->ena_dev; 3012 bool dev_up; 3013 3014 if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) 3015 return; 3016 3017 if_link_state_change(ifp, LINK_STATE_DOWN); 3018 3019 callout_drain(&adapter->timer_service); 3020 3021 dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 3022 if (dev_up) 3023 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 3024 else 3025 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 3026 3027 if (!graceful) 3028 ena_com_set_admin_running_state(ena_dev, false); 3029 3030 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 3031 ena_down(adapter); 3032 3033 /* 3034 * Stop the device from sending AENQ events (if the device was up, and 3035 * the trigger reset was on, ena_down already performs device reset) 3036 */ 3037 if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up)) 3038 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 3039 3040 ena_free_mgmnt_irq(adapter); 3041 3042 ena_disable_msix(adapter); 3043 3044 ena_com_abort_admin_commands(ena_dev); 3045 3046 ena_com_wait_for_abort_completion(ena_dev); 3047 3048 ena_com_admin_destroy(ena_dev); 3049 3050 ena_com_mmio_reg_read_request_destroy(ena_dev); 3051 3052 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 3053 3054 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 3055 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 3056 } 3057 3058 static int 3059 ena_device_validate_params(struct ena_adapter *adapter, 3060 struct ena_com_dev_get_features_ctx *get_feat_ctx) 3061 { 3062 3063 if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, 3064 ETHER_ADDR_LEN) != 0) { 3065 device_printf(adapter->pdev, 3066 "Error, mac address are different\n"); 3067 return (EINVAL); 3068 } 3069 3070 if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { 3071 device_printf(adapter->pdev, 3072 "Error, device max mtu is smaller than ifp MTU\n"); 3073 return (EINVAL); 3074 } 3075 3076 return 0; 3077 } 3078 3079 int 3080 ena_restore_device(struct ena_adapter *adapter) 3081 { 3082 struct ena_com_dev_get_features_ctx get_feat_ctx; 3083 struct ena_com_dev *ena_dev = adapter->ena_dev; 3084 if_t ifp = adapter->ifp; 3085 device_t dev = adapter->pdev; 3086 int wd_active; 3087 int rc; 3088 3089 ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 3090 3091 rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active); 3092 if (rc != 0) { 3093 device_printf(dev, "Cannot initialize device\n"); 3094 goto err; 3095 } 3096 /* 3097 * Only enable WD if it was enabled before reset, so it won't override 3098 * value set by the user by the sysctl. 3099 */ 3100 if (adapter->wd_active != 0) 3101 adapter->wd_active = wd_active; 3102 3103 rc = ena_device_validate_params(adapter, &get_feat_ctx); 3104 if (rc != 0) { 3105 device_printf(dev, "Validation of device parameters failed\n"); 3106 goto err_device_destroy; 3107 } 3108 3109 rc = ena_handle_updated_queues(adapter, &get_feat_ctx); 3110 if (rc != 0) 3111 goto err_device_destroy; 3112 3113 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 3114 /* Make sure we don't have a race with AENQ Links state handler */ 3115 if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 3116 if_link_state_change(ifp, LINK_STATE_UP); 3117 3118 rc = ena_enable_msix_and_set_admin_interrupts(adapter, 3119 adapter->num_queues); 3120 if (rc != 0) { 3121 device_printf(dev, "Enable MSI-X failed\n"); 3122 goto err_device_destroy; 3123 } 3124 3125 /* If the interface was up before the reset bring it up */ 3126 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { 3127 rc = ena_up(adapter); 3128 if (rc != 0) { 3129 device_printf(dev, "Failed to create I/O queues\n"); 3130 goto err_disable_msix; 3131 } 3132 } 3133 3134 /* Indicate that device is running again and ready to work */ 3135 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 3136 3137 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { 3138 /* 3139 * As the AENQ handlers weren't executed during reset because 3140 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the 3141 * timestamp must be updated again That will prevent next reset 3142 * caused by missing keep alive. 3143 */ 3144 adapter->keep_alive_timestamp = getsbinuptime(); 3145 callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 3146 ena_timer_service, (void *)adapter, 0); 3147 } 3148 3149 device_printf(dev, 3150 "Device reset completed successfully, Driver info: %s\n", ena_version); 3151 3152 return (rc); 3153 3154 err_disable_msix: 3155 ena_free_mgmnt_irq(adapter); 3156 ena_disable_msix(adapter); 3157 err_device_destroy: 3158 ena_com_abort_admin_commands(ena_dev); 3159 ena_com_wait_for_abort_completion(ena_dev); 3160 ena_com_admin_destroy(ena_dev); 3161 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); 3162 ena_com_mmio_reg_read_request_destroy(ena_dev); 3163 err: 3164 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 3165 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 3166 device_printf(dev, "Reset attempt failed. Can not reset the device\n"); 3167 3168 return (rc); 3169 } 3170 3171 static void 3172 ena_reset_task(void *arg, int pending) 3173 { 3174 struct ena_adapter *adapter = (struct ena_adapter *)arg; 3175 3176 if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 3177 device_printf(adapter->pdev, 3178 "device reset scheduled but trigger_reset is off\n"); 3179 return; 3180 } 3181 3182 sx_xlock(&adapter->ioctl_sx); 3183 ena_destroy_device(adapter, false); 3184 ena_restore_device(adapter); 3185 sx_unlock(&adapter->ioctl_sx); 3186 } 3187 3188 /** 3189 * ena_attach - Device Initialization Routine 3190 * @pdev: device information struct 3191 * 3192 * Returns 0 on success, otherwise on failure. 3193 * 3194 * ena_attach initializes an adapter identified by a device structure. 3195 * The OS initialization, configuring of the adapter private structure, 3196 * and a hardware reset occur. 3197 **/ 3198 static int 3199 ena_attach(device_t pdev) 3200 { 3201 struct ena_com_dev_get_features_ctx get_feat_ctx; 3202 struct ena_llq_configurations llq_config; 3203 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 3204 static int version_printed; 3205 struct ena_adapter *adapter; 3206 struct ena_com_dev *ena_dev = NULL; 3207 const char *queue_type_str; 3208 int io_queue_num; 3209 int rid, rc; 3210 3211 adapter = device_get_softc(pdev); 3212 adapter->pdev = pdev; 3213 3214 mtx_init(&adapter->global_mtx, "ENA global mtx", NULL, MTX_DEF); 3215 sx_init(&adapter->ioctl_sx, "ENA ioctl sx"); 3216 3217 /* Set up the timer service */ 3218 callout_init_mtx(&adapter->timer_service, &adapter->global_mtx, 0); 3219 adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO; 3220 adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO; 3221 adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES; 3222 adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD; 3223 3224 if (version_printed++ == 0) 3225 device_printf(pdev, "%s\n", ena_version); 3226 3227 /* Allocate memory for ena_dev structure */ 3228 ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, 3229 M_WAITOK | M_ZERO); 3230 3231 adapter->ena_dev = ena_dev; 3232 ena_dev->dmadev = pdev; 3233 3234 rid = PCIR_BAR(ENA_REG_BAR); 3235 adapter->memory = NULL; 3236 adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 3237 &rid, RF_ACTIVE); 3238 if (unlikely(adapter->registers == NULL)) { 3239 device_printf(pdev, 3240 "unable to allocate bus resource: registers!\n"); 3241 rc = ENOMEM; 3242 goto err_dev_free; 3243 } 3244 3245 ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, 3246 M_WAITOK | M_ZERO); 3247 3248 /* Store register resources */ 3249 ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = 3250 rman_get_bustag(adapter->registers); 3251 ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = 3252 rman_get_bushandle(adapter->registers); 3253 3254 if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) { 3255 device_printf(pdev, "failed to pmap registers bar\n"); 3256 rc = ENXIO; 3257 goto err_bus_free; 3258 } 3259 3260 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3261 3262 /* Initially clear all the flags */ 3263 ENA_FLAG_ZERO(adapter); 3264 3265 /* Device initialization */ 3266 rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); 3267 if (unlikely(rc != 0)) { 3268 device_printf(pdev, "ENA device init failed! (err: %d)\n", rc); 3269 rc = ENXIO; 3270 goto err_bus_free; 3271 } 3272 3273 set_default_llq_configurations(&llq_config); 3274 3275 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, 3276 &llq_config); 3277 if (unlikely(rc != 0)) { 3278 device_printf(pdev, "failed to set placement policy\n"); 3279 goto err_com_free; 3280 } 3281 3282 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 3283 queue_type_str = "Regular"; 3284 else 3285 queue_type_str = "Low Latency"; 3286 device_printf(pdev, "Placement policy: %s\n", queue_type_str); 3287 3288 adapter->keep_alive_timestamp = getsbinuptime(); 3289 3290 adapter->tx_offload_cap = get_feat_ctx.offload.tx; 3291 3292 memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, 3293 ETHER_ADDR_LEN); 3294 3295 calc_queue_ctx.ena_dev = ena_dev; 3296 calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 3297 calc_queue_ctx.pdev = pdev; 3298 3299 /* calculate IO queue number to create */ 3300 io_queue_num = ena_calc_io_queue_num(adapter, &get_feat_ctx); 3301 3302 ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n", 3303 io_queue_num); 3304 adapter->num_queues = io_queue_num; 3305 3306 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 3307 // Set the requested Rx ring size 3308 adapter->rx_ring_size = ENA_DEFAULT_RING_SIZE; 3309 /* calculatre ring sizes */ 3310 rc = ena_calc_queue_size(adapter, &calc_queue_ctx); 3311 if (unlikely((rc != 0) || (io_queue_num <= 0))) { 3312 rc = EFAULT; 3313 goto err_com_free; 3314 } 3315 3316 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 3317 3318 adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 3319 adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 3320 3321 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 3322 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 3323 3324 adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; 3325 3326 /* set up dma tags for rx and tx buffers */ 3327 rc = ena_setup_tx_dma_tag(adapter); 3328 if (unlikely(rc != 0)) { 3329 device_printf(pdev, "Failed to create TX DMA tag\n"); 3330 goto err_com_free; 3331 } 3332 3333 rc = ena_setup_rx_dma_tag(adapter); 3334 if (unlikely(rc != 0)) { 3335 device_printf(pdev, "Failed to create RX DMA tag\n"); 3336 goto err_tx_tag_free; 3337 } 3338 3339 /* initialize rings basic information */ 3340 device_printf(pdev, 3341 "Creating %d io queues. Rx queue size: %d, Tx queue size: %d\n", 3342 io_queue_num, 3343 calc_queue_ctx.rx_queue_size, 3344 calc_queue_ctx.tx_queue_size); 3345 ena_init_io_rings(adapter); 3346 3347 rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); 3348 if (unlikely(rc != 0)) { 3349 device_printf(pdev, 3350 "Failed to enable and set the admin interrupts\n"); 3351 goto err_io_free; 3352 } 3353 3354 /* setup network interface */ 3355 rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx); 3356 if (unlikely(rc != 0)) { 3357 device_printf(pdev, "Error with network interface setup\n"); 3358 goto err_msix_free; 3359 } 3360 3361 /* Initialize reset task queue */ 3362 TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); 3363 adapter->reset_tq = taskqueue_create("ena_reset_enqueue", 3364 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); 3365 taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, 3366 "%s rstq", device_get_nameunit(adapter->pdev)); 3367 3368 /* Initialize statistics */ 3369 ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, 3370 sizeof(struct ena_stats_dev)); 3371 ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, 3372 sizeof(struct ena_hw_stats)); 3373 ena_sysctl_add_nodes(adapter); 3374 3375 #ifdef DEV_NETMAP 3376 rc = ena_netmap_attach(adapter); 3377 if (rc != 0) { 3378 device_printf(pdev, "netmap attach failed: %d\n", rc); 3379 goto err_detach; 3380 } 3381 #endif /* DEV_NETMAP */ 3382 3383 /* Tell the stack that the interface is not active */ 3384 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 3385 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 3386 3387 return (0); 3388 3389 #ifdef DEV_NETMAP 3390 err_detach: 3391 ether_ifdetach(adapter->ifp); 3392 #endif /* DEV_NETMAP */ 3393 err_msix_free: 3394 ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); 3395 ena_free_mgmnt_irq(adapter); 3396 ena_disable_msix(adapter); 3397 err_io_free: 3398 ena_free_all_io_rings_resources(adapter); 3399 ena_free_rx_dma_tag(adapter); 3400 err_tx_tag_free: 3401 ena_free_tx_dma_tag(adapter); 3402 err_com_free: 3403 ena_com_admin_destroy(ena_dev); 3404 ena_com_delete_host_info(ena_dev); 3405 ena_com_mmio_reg_read_request_destroy(ena_dev); 3406 err_bus_free: 3407 free(ena_dev->bus, M_DEVBUF); 3408 ena_free_pci_resources(adapter); 3409 err_dev_free: 3410 free(ena_dev, M_DEVBUF); 3411 3412 return (rc); 3413 } 3414 3415 /** 3416 * ena_detach - Device Removal Routine 3417 * @pdev: device information struct 3418 * 3419 * ena_detach is called by the device subsystem to alert the driver 3420 * that it should release a PCI device. 3421 **/ 3422 static int 3423 ena_detach(device_t pdev) 3424 { 3425 struct ena_adapter *adapter = device_get_softc(pdev); 3426 struct ena_com_dev *ena_dev = adapter->ena_dev; 3427 int rc; 3428 3429 /* Make sure VLANS are not using driver */ 3430 if (adapter->ifp->if_vlantrunk != NULL) { 3431 device_printf(adapter->pdev ,"VLAN is in use, detach first\n"); 3432 return (EBUSY); 3433 } 3434 3435 ether_ifdetach(adapter->ifp); 3436 3437 /* Free reset task and callout */ 3438 callout_drain(&adapter->timer_service); 3439 while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) 3440 taskqueue_drain(adapter->reset_tq, &adapter->reset_task); 3441 taskqueue_free(adapter->reset_tq); 3442 3443 sx_xlock(&adapter->ioctl_sx); 3444 ena_down(adapter); 3445 ena_destroy_device(adapter, true); 3446 sx_unlock(&adapter->ioctl_sx); 3447 3448 #ifdef DEV_NETMAP 3449 netmap_detach(adapter->ifp); 3450 #endif /* DEV_NETMAP */ 3451 3452 ena_free_all_io_rings_resources(adapter); 3453 3454 ena_free_counters((counter_u64_t *)&adapter->hw_stats, 3455 sizeof(struct ena_hw_stats)); 3456 ena_free_counters((counter_u64_t *)&adapter->dev_stats, 3457 sizeof(struct ena_stats_dev)); 3458 3459 rc = ena_free_rx_dma_tag(adapter); 3460 if (unlikely(rc != 0)) 3461 device_printf(adapter->pdev, 3462 "Unmapped RX DMA tag associations\n"); 3463 3464 rc = ena_free_tx_dma_tag(adapter); 3465 if (unlikely(rc != 0)) 3466 device_printf(adapter->pdev, 3467 "Unmapped TX DMA tag associations\n"); 3468 3469 ena_free_irqs(adapter); 3470 3471 ena_free_pci_resources(adapter); 3472 3473 if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) 3474 ena_com_rss_destroy(ena_dev); 3475 3476 ena_com_delete_host_info(ena_dev); 3477 3478 mtx_destroy(&adapter->global_mtx); 3479 sx_destroy(&adapter->ioctl_sx); 3480 3481 if_free(adapter->ifp); 3482 3483 if (ena_dev->bus != NULL) 3484 free(ena_dev->bus, M_DEVBUF); 3485 3486 if (ena_dev != NULL) 3487 free(ena_dev, M_DEVBUF); 3488 3489 return (bus_generic_detach(pdev)); 3490 } 3491 3492 /****************************************************************************** 3493 ******************************** AENQ Handlers ******************************* 3494 *****************************************************************************/ 3495 /** 3496 * ena_update_on_link_change: 3497 * Notify the network interface about the change in link status 3498 **/ 3499 static void 3500 ena_update_on_link_change(void *adapter_data, 3501 struct ena_admin_aenq_entry *aenq_e) 3502 { 3503 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3504 struct ena_admin_aenq_link_change_desc *aenq_desc; 3505 int status; 3506 if_t ifp; 3507 3508 aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 3509 ifp = adapter->ifp; 3510 status = aenq_desc->flags & 3511 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; 3512 3513 if (status != 0) { 3514 device_printf(adapter->pdev, "link is UP\n"); 3515 ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter); 3516 if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter)) 3517 if_link_state_change(ifp, LINK_STATE_UP); 3518 } else { 3519 device_printf(adapter->pdev, "link is DOWN\n"); 3520 if_link_state_change(ifp, LINK_STATE_DOWN); 3521 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter); 3522 } 3523 } 3524 3525 static void ena_notification(void *adapter_data, 3526 struct ena_admin_aenq_entry *aenq_e) 3527 { 3528 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3529 struct ena_admin_ena_hw_hints *hints; 3530 3531 ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, 3532 "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, 3533 ENA_ADMIN_NOTIFICATION); 3534 3535 switch (aenq_e->aenq_common_desc.syndrom) { 3536 case ENA_ADMIN_UPDATE_HINTS: 3537 hints = 3538 (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); 3539 ena_update_hints(adapter, hints); 3540 break; 3541 default: 3542 device_printf(adapter->pdev, 3543 "Invalid aenq notification link state %d\n", 3544 aenq_e->aenq_common_desc.syndrom); 3545 } 3546 } 3547 3548 /** 3549 * This handler will called for unknown event group or unimplemented handlers 3550 **/ 3551 static void 3552 unimplemented_aenq_handler(void *adapter_data, 3553 struct ena_admin_aenq_entry *aenq_e) 3554 { 3555 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 3556 3557 device_printf(adapter->pdev, 3558 "Unknown event was received or event with unimplemented handler\n"); 3559 } 3560 3561 static struct ena_aenq_handlers aenq_handlers = { 3562 .handlers = { 3563 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 3564 [ENA_ADMIN_NOTIFICATION] = ena_notification, 3565 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, 3566 }, 3567 .unimplemented_handler = unimplemented_aenq_handler 3568 }; 3569 3570 /********************************************************************* 3571 * FreeBSD Device Interface Entry Points 3572 *********************************************************************/ 3573 3574 static device_method_t ena_methods[] = { 3575 /* Device interface */ 3576 DEVMETHOD(device_probe, ena_probe), 3577 DEVMETHOD(device_attach, ena_attach), 3578 DEVMETHOD(device_detach, ena_detach), 3579 DEVMETHOD_END 3580 }; 3581 3582 static driver_t ena_driver = { 3583 "ena", ena_methods, sizeof(struct ena_adapter), 3584 }; 3585 3586 devclass_t ena_devclass; 3587 DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0); 3588 MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array, 3589 nitems(ena_vendor_info_array) - 1); 3590 MODULE_DEPEND(ena, pci, 1, 1, 1); 3591 MODULE_DEPEND(ena, ether, 1, 1, 1); 3592 #ifdef DEV_NETMAP 3593 MODULE_DEPEND(ena, netmap, 1, 1, 1); 3594 #endif /* DEV_NETMAP */ 3595 3596 /*********************************************************************/ 3597