/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/in_cksum.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include "ena.h"
#include "ena_sysctl.h"

/*********************************************************
 *  Function prototypes
 *********************************************************/
static int ena_probe(device_t);
static void ena_intr_msix_mgmnt(void *);
static void ena_free_pci_resources(struct ena_adapter *);
static int ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void ena_init_io_rings_common(struct ena_adapter *,
    struct ena_ring *, uint16_t);
static void ena_init_io_rings(struct ena_adapter *);
static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void ena_free_all_io_rings_resources(struct ena_adapter *);
static int ena_setup_tx_dma_tag(struct ena_adapter *);
static int ena_free_tx_dma_tag(struct ena_adapter *);
static int ena_setup_rx_dma_tag(struct ena_adapter *);
static int ena_free_rx_dma_tag(struct ena_adapter *);
static int ena_setup_tx_resources(struct ena_adapter *, int);
static void ena_free_tx_resources(struct ena_adapter *, int);
static int ena_setup_all_tx_resources(struct ena_adapter *);
static void ena_free_all_tx_resources(struct ena_adapter *);
static inline int validate_rx_req_id(struct ena_ring *, uint16_t);
static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int ena_setup_all_rx_resources(struct ena_adapter *);
static void ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static int ena_refill_rx_bufs(struct ena_ring *, uint32_t);
static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void ena_refill_all_rx_bufs(struct ena_adapter *);
static void ena_free_all_rx_bufs(struct ena_adapter *);
static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void ena_free_all_tx_bufs(struct ena_adapter *);
static void ena_destroy_all_tx_queues(struct ena_adapter *);
static void ena_destroy_all_rx_queues(struct ena_adapter *);
static void ena_destroy_all_io_queues(struct ena_adapter *);
static int ena_create_io_queues(struct ena_adapter *);
static int ena_tx_cleanup(struct ena_ring *);
static int ena_rx_cleanup(struct ena_ring *);
static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
static void ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *,
    struct mbuf *);
static struct mbuf* ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *,
    struct ena_com_rx_ctx *, uint16_t *);
static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *,
    struct mbuf *);
static void ena_cleanup(void *arg, int pending);
static int ena_handle_msix(void *);
static int ena_enable_msix(struct ena_adapter *);
static void ena_setup_mgmnt_intr(struct ena_adapter *);
static void ena_setup_io_intr(struct ena_adapter *);
static int ena_request_mgmnt_irq(struct ena_adapter *);
static int ena_request_io_irq(struct ena_adapter *);
static void ena_free_mgmnt_irq(struct ena_adapter *);
static void ena_free_io_irq(struct ena_adapter *);
static void ena_free_irqs(struct ena_adapter *);
static void ena_disable_msix(struct ena_adapter *);
static void ena_unmask_all_io_irqs(struct ena_adapter *);
static int ena_rss_configure(struct ena_adapter *);
static int ena_up_complete(struct ena_adapter *);
static int ena_up(struct ena_adapter *);
static void ena_down(struct ena_adapter *);
static uint64_t ena_get_counter(if_t, ift_counter);
static int ena_media_change(if_t);
static void ena_media_status(if_t, struct ifmediareq *);
static void ena_init(void *);
static int ena_ioctl(if_t, u_long, caddr_t);
static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void ena_update_host_info(struct ena_admin_host_info *, if_t);
static void ena_update_hwassist(struct ena_adapter *);
static int ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static void ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *);
static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
    struct mbuf **mbuf);
static void ena_dmamap_llq(void *, bus_dma_segment_t *, int, int);
static int ena_xmit_mbuf(struct ena_ring *, struct mbuf **);
static void ena_start_xmit(struct ena_ring *);
static int ena_mq_start(if_t, struct mbuf *);
static void ena_deferred_mq_start(void *, int);
static void ena_qflush(if_t);
static int ena_enable_wc(struct resource *);
static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static int ena_calc_io_queue_num(struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int ena_calc_queue_size(struct ena_adapter *,
    struct ena_calc_queue_size_ctx *);
static int ena_handle_updated_queues(struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int ena_rss_init_default(struct ena_adapter *);
static void ena_rss_init_default_deferred(void *);
static void ena_config_host_info(struct ena_com_dev *);
static int ena_attach(device_t);
static int ena_detach(device_t);
static int ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *,
    int);
static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void unimplemented_aenq_handler(void *,
    struct ena_admin_aenq_entry *);
static void ena_timer_service(void *);

static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;

static ena_vendor_info_t ena_vendor_info_array[] = {
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0},
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0},
    /* Last entry */
    { 0, 0, 0 }
};

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;

void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    if (error != 0)
        return;
    *(bus_addr_t *) arg = segs[0].ds_addr;
}

int
ena_dma_alloc(device_t dmadev, bus_size_t size,
    ena_mem_handle_t *dma, int mapflags)
{
    struct ena_adapter* adapter = device_get_softc(dmadev);
    uint32_t maxsize;
    uint64_t dma_space_addr;
    int error;

    maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

    dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
    if (unlikely(dma_space_addr == 0))
        dma_space_addr = BUS_SPACE_MAXADDR;

    error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
        8, 0,             /* alignment, bounds */
        dma_space_addr,   /* lowaddr of exclusion window */
        BUS_SPACE_MAXADDR,/* highaddr of exclusion window */
        NULL, NULL,       /* filter, filterarg */
        maxsize,          /* maxsize */
        1,                /* nsegments */
        maxsize,          /* maxsegsize */
        BUS_DMA_ALLOCNOW, /* flags */
        NULL,             /* lockfunc */
        NULL,             /* lockarg */
        &dma->tag);
    if (unlikely(error != 0)) {
        ena_trace(ENA_ALERT, "bus_dma_tag_create failed: %d\n", error);
        goto fail_tag;
    }

    error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
    if (unlikely(error != 0)) {
        ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n",
            (uintmax_t)size, error);
        goto fail_map_create;
    }

    dma->paddr = 0;
    error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
        size, ena_dmamap_callback, &dma->paddr, mapflags);
    if (unlikely((error != 0) || (dma->paddr == 0))) {
        ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error);
        goto fail_map_load;
    }

    bus_dmamap_sync(dma->tag, dma->map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);

fail_map_load:
    bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
    bus_dma_tag_destroy(dma->tag);
fail_tag:
    dma->tag = NULL;
    dma->vaddr = NULL;
    dma->paddr = 0;

    return (error);
}

static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
    device_t pdev = adapter->pdev;

    if (adapter->memory != NULL) {
        bus_release_resource(pdev, SYS_RES_MEMORY,
            PCIR_BAR(ENA_MEM_BAR), adapter->memory);
    }

    if (adapter->registers != NULL) {
        bus_release_resource(pdev, SYS_RES_MEMORY,
            PCIR_BAR(ENA_REG_BAR), adapter->registers);
    }
}

static int
ena_probe(device_t dev)
{
    ena_vendor_info_t *ent;
    char adapter_name[60];
    uint16_t pci_vendor_id = 0;
    uint16_t pci_device_id = 0;

    pci_vendor_id = pci_get_vendor(dev);
    pci_device_id = pci_get_device(dev);

    ent = ena_vendor_info_array;
    while (ent->vendor_id != 0) {
        if ((pci_vendor_id == ent->vendor_id) &&
            (pci_device_id == ent->device_id)) {
            ena_trace(ENA_DBG, "vendor=%x device=%x ",
                pci_vendor_id, pci_device_id);

            sprintf(adapter_name, DEVICE_DESC);
            device_set_desc_copy(dev, adapter_name);
            return (BUS_PROBE_DEFAULT);
        }

        ent++;

    }

    return (ENXIO);
}

static int
ena_change_mtu(if_t ifp, int new_mtu)
{
    struct ena_adapter *adapter = if_getsoftc(ifp);
    int rc;

    if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
        device_printf(adapter->pdev, "Invalid MTU setting. "
            "new_mtu: %d max mtu: %d min mtu: %d\n",
            new_mtu, adapter->max_mtu, ENA_MIN_MTU);
        return (EINVAL);
    }

    rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
    if (likely(rc == 0)) {
        ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu);
        if_setmtu(ifp, new_mtu);
    } else {
        device_printf(adapter->pdev, "Failed to set MTU to %d\n",
            new_mtu);
    }

    return (rc);
}

static inline void
ena_alloc_counters(counter_u64_t *begin, int size)
{
    counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

    for (; begin < end; ++begin)
        *begin = counter_u64_alloc(M_WAITOK);
}

static inline void
ena_free_counters(counter_u64_t *begin, int size)
{
    counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

    for (; begin < end; ++begin)
        counter_u64_free(*begin);
}

static inline void
ena_reset_counters(counter_u64_t *begin, int size)
{
    counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

    for (; begin < end; ++begin)
        counter_u64_zero(*begin);
}

static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{

    ring->qid = qid;
    ring->adapter = adapter;
    ring->ena_dev = adapter->ena_dev;
    ring->first_interrupt = false;
    ring->no_interrupt_event_cnt = 0;
}

static void
ena_init_io_rings(struct ena_adapter *adapter)
{
    struct ena_com_dev *ena_dev;
    struct ena_ring *txr, *rxr;
    struct ena_que *que;
    int i;

    ena_dev = adapter->ena_dev;

    for (i = 0; i < adapter->num_queues; i++) {
        txr = &adapter->tx_ring[i];
        rxr = &adapter->rx_ring[i];

        /* TX/RX common ring state */
        ena_init_io_rings_common(adapter, txr, i);
        ena_init_io_rings_common(adapter, rxr, i);

        /* TX specific ring state */
        txr->ring_size = adapter->tx_ring_size;
        txr->tx_max_header_size = ena_dev->tx_max_header_size;
        txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
        txr->smoothed_interval =
            ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

        /* Allocate a buf ring */
        txr->buf_ring_size = adapter->buf_ring_size;
        txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF,
            M_WAITOK, &txr->ring_mtx);

        /* Alloc TX statistics. */
        ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
            sizeof(txr->tx_stats));

        /* RX specific ring state */
        rxr->ring_size = adapter->rx_ring_size;
        rxr->smoothed_interval =
            ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);

        /* Alloc RX statistics. */
        ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
            sizeof(rxr->rx_stats));

        /* Initialize locks */
        snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
            device_get_nameunit(adapter->pdev), i);
        snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
            device_get_nameunit(adapter->pdev), i);

        mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);

        que = &adapter->que[i];
        que->adapter = adapter;
        que->id = i;
        que->tx_ring = txr;
        que->rx_ring = rxr;

        txr->que = que;
        rxr->que = que;

        rxr->empty_rx_queue = 0;
    }
}

static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_ring *txr = &adapter->tx_ring[qid];
    struct ena_ring *rxr = &adapter->rx_ring[qid];

    ena_free_counters((counter_u64_t *)&txr->tx_stats,
        sizeof(txr->tx_stats));
    ena_free_counters((counter_u64_t *)&rxr->rx_stats,
        sizeof(rxr->rx_stats));

    ENA_RING_MTX_LOCK(txr);
    drbr_free(txr->br, M_DEVBUF);
    ENA_RING_MTX_UNLOCK(txr);

    mtx_destroy(&txr->ring_mtx);
}

static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_queues; i++)
        ena_free_io_ring_resources(adapter, i);

}

static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    /* Create DMA tag for Tx buffers */
    ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
        1, 0,                                  /* alignment, bounds */
        ENA_DMA_BIT_MASK(adapter->dma_width),  /* lowaddr of excl window */
        BUS_SPACE_MAXADDR,                     /* highaddr of excl window */
        NULL, NULL,                            /* filter, filterarg */
        ENA_TSO_MAXSIZE,                       /* maxsize */
        adapter->max_tx_sgl_size - 1,          /* nsegments */
        ENA_TSO_MAXSIZE,                       /* maxsegsize */
        0,                                     /* flags */
        NULL,                                  /* lockfunc */
        NULL,                                  /* lockfuncarg */
        &adapter->tx_buf_tag);
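    /*
     * Note: Tx mappings are bounded to ENA_TSO_MAXSIZE bytes spread over
     * at most max_tx_sgl_size - 1 segments.  Reserving one segment below
     * the device limit is assumed to leave room for a separately
     * described header/metadata descriptor; the rationale is not spelled
     * out in this file.
     */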

    return (ret);
}

static int
ena_free_tx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    ret = bus_dma_tag_destroy(adapter->tx_buf_tag);

    if (likely(ret == 0))
        adapter->tx_buf_tag = NULL;

    return (ret);
}

static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    /* Create DMA tag for Rx buffers */
    ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
        1, 0,                                  /* alignment, bounds */
        ENA_DMA_BIT_MASK(adapter->dma_width),  /* lowaddr of excl window */
        BUS_SPACE_MAXADDR,                     /* highaddr of excl window */
        NULL, NULL,                            /* filter, filterarg */
        MJUM16BYTES,                           /* maxsize */
        adapter->max_rx_sgl_size,              /* nsegments */
        MJUM16BYTES,                           /* maxsegsize */
        0,                                     /* flags */
        NULL,                                  /* lockfunc */
        NULL,                                  /* lockarg */
        &adapter->rx_buf_tag);

    return (ret);
}

static int
ena_free_rx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    ret = bus_dma_tag_destroy(adapter->rx_buf_tag);

    if (likely(ret == 0))
        adapter->rx_buf_tag = NULL;

    return (ret);
}

/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, or an error code on failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
    struct ena_que *que = &adapter->que[qid];
    struct ena_ring *tx_ring = que->tx_ring;
    int size, i, err;

    size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

    tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (unlikely(tx_ring->tx_buffer_info == NULL))
        return (ENOMEM);

    size = sizeof(uint16_t) * tx_ring->ring_size;
    tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (unlikely(tx_ring->free_tx_ids == NULL))
        goto err_buf_info_free;

    size = tx_ring->tx_max_header_size;
    tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
        M_NOWAIT | M_ZERO);
    if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
        goto err_tx_ids_free;

    /* Req id stack for TX OOO completions */
    for (i = 0; i < tx_ring->ring_size; i++)
        tx_ring->free_tx_ids[i] = i;

    /* Reset TX statistics. */
    ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
        sizeof(tx_ring->tx_stats));

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;

    /* Make sure that drbr is empty */
    ENA_RING_MTX_LOCK(tx_ring);
    drbr_flush(adapter->ifp, tx_ring->br);
    ENA_RING_MTX_UNLOCK(tx_ring);

    /* ... and create the buffer DMA maps */
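    /*
     * Two DMA maps are kept per Tx buffer: map_head and map_seg.  Both
     * are only created here; which one is actually loaded for a given
     * packet (the header part vs. the remaining data segments) is decided
     * later in the transmit path, so both *_mapped flags start out false.
     */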
    for (i = 0; i < tx_ring->ring_size; i++) {
        err = bus_dmamap_create(adapter->tx_buf_tag, 0,
            &tx_ring->tx_buffer_info[i].map_head);
        if (unlikely(err != 0)) {
            ena_trace(ENA_ALERT,
                "Unable to create Tx DMA map_head for buffer %d\n",
                i);
            goto err_buf_info_unmap;
        }
        tx_ring->tx_buffer_info[i].seg_mapped = false;

        err = bus_dmamap_create(adapter->tx_buf_tag, 0,
            &tx_ring->tx_buffer_info[i].map_seg);
        if (unlikely(err != 0)) {
            ena_trace(ENA_ALERT,
                "Unable to create Tx DMA map_seg for buffer %d\n",
                i);
            goto err_buf_info_head_unmap;
        }
        tx_ring->tx_buffer_info[i].head_mapped = false;
    }

    /* Allocate taskqueues */
    TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
    tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
        taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
    if (unlikely(tx_ring->enqueue_tq == NULL)) {
        ena_trace(ENA_ALERT,
            "Unable to create taskqueue for enqueue task\n");
        i = tx_ring->ring_size;
        goto err_buf_info_unmap;
    }

    tx_ring->running = true;

    taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
        "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);

    return (0);

err_buf_info_head_unmap:
    bus_dmamap_destroy(adapter->tx_buf_tag,
        tx_ring->tx_buffer_info[i].map_head);
err_buf_info_unmap:
    while (i--) {
        bus_dmamap_destroy(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].map_head);
        bus_dmamap_destroy(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].map_seg);
    }
    free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
err_tx_ids_free:
    free(tx_ring->free_tx_ids, M_DEVBUF);
    tx_ring->free_tx_ids = NULL;
err_buf_info_free:
    free(tx_ring->tx_buffer_info, M_DEVBUF);
    tx_ring->tx_buffer_info = NULL;

    return (ENOMEM);
}

/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
    struct ena_ring *tx_ring = &adapter->tx_ring[qid];

    while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
        NULL))
        taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

    taskqueue_free(tx_ring->enqueue_tq);

    ENA_RING_MTX_LOCK(tx_ring);
    /* Flush buffer ring, */
    drbr_flush(adapter->ifp, tx_ring->br);

    /* Free buffer DMA maps, */
    for (int i = 0; i < tx_ring->ring_size; i++) {
        if (tx_ring->tx_buffer_info[i].head_mapped == true) {
            bus_dmamap_sync(adapter->tx_buf_tag,
                tx_ring->tx_buffer_info[i].map_head,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(adapter->tx_buf_tag,
                tx_ring->tx_buffer_info[i].map_head);
            tx_ring->tx_buffer_info[i].head_mapped = false;
        }
        bus_dmamap_destroy(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].map_head);

        if (tx_ring->tx_buffer_info[i].seg_mapped == true) {
            bus_dmamap_sync(adapter->tx_buf_tag,
                tx_ring->tx_buffer_info[i].map_seg,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(adapter->tx_buf_tag,
                tx_ring->tx_buffer_info[i].map_seg);
            tx_ring->tx_buffer_info[i].seg_mapped = false;
        }
        bus_dmamap_destroy(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].map_seg);

        m_freem(tx_ring->tx_buffer_info[i].mbuf);
        tx_ring->tx_buffer_info[i].mbuf = NULL;
    }
    ENA_RING_MTX_UNLOCK(tx_ring);

    /* And free allocated memory. */
    free(tx_ring->tx_buffer_info, M_DEVBUF);
    tx_ring->tx_buffer_info = NULL;

    free(tx_ring->free_tx_ids, M_DEVBUF);
    tx_ring->free_tx_ids = NULL;

    ENA_MEM_FREE(adapter->ena_dev->dmadev,
        tx_ring->push_buf_intermediate_buf);
    tx_ring->push_buf_intermediate_buf = NULL;
}

/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, or an error code on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
    int i, rc;

    for (i = 0; i < adapter->num_queues; i++) {
        rc = ena_setup_tx_resources(adapter, i);
        if (rc != 0) {
            device_printf(adapter->pdev,
                "Allocation for Tx Queue %u failed\n", i);
            goto err_setup_tx;
        }
    }

    return (0);

err_setup_tx:
    /* Rewind the index freeing the rings as we go */
    while (i--)
        ena_free_tx_resources(adapter, i);
    return (rc);
}

/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_queues; i++)
        ena_free_tx_resources(adapter, i);
}

static inline int
validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
    if (likely(req_id < rx_ring->ring_size))
        return (0);

    device_printf(rx_ring->adapter->pdev, "Invalid rx req_id: %hu\n",
        req_id);
    counter_u64_add(rx_ring->rx_stats.bad_req_id, 1);

    /* Trigger device reset */
    rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
    rx_ring->adapter->trigger_reset = true;

    return (EFAULT);
}

/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, or an error code on failure.
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_que *que = &adapter->que[qid];
    struct ena_ring *rx_ring = que->rx_ring;
    int size, err, i;

    size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

    /*
     * Alloc extra element so in rx path
     * we can always prefetch rx_info + 1
     */
    size += sizeof(struct ena_rx_buffer);

    rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

    size = sizeof(uint16_t) * rx_ring->ring_size;
    rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

    for (i = 0; i < rx_ring->ring_size; i++)
        rx_ring->free_rx_ids[i] = i;

    /* Reset RX statistics. */
    ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
        sizeof(rx_ring->rx_stats));

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    /* ... and create the buffer DMA maps */
    for (i = 0; i < rx_ring->ring_size; i++) {
        err = bus_dmamap_create(adapter->rx_buf_tag, 0,
            &(rx_ring->rx_buffer_info[i].map));
        if (err != 0) {
            ena_trace(ENA_ALERT,
                "Unable to create Rx DMA map for buffer %d\n", i);
            goto err_buf_info_unmap;
        }
    }

    /* Create LRO for the ring */
    if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
        int err = tcp_lro_init(&rx_ring->lro);
        if (err != 0) {
            device_printf(adapter->pdev,
                "LRO[%d] Initialization failed!\n", qid);
        } else {
            ena_trace(ENA_INFO,
                "RX Soft LRO[%d] Initialized\n", qid);
            rx_ring->lro.ifp = adapter->ifp;
        }
    }

    return (0);

err_buf_info_unmap:
    while (i--) {
        bus_dmamap_destroy(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map);
    }

    free(rx_ring->free_rx_ids, M_DEVBUF);
    rx_ring->free_rx_ids = NULL;
    free(rx_ring->rx_buffer_info, M_DEVBUF);
    rx_ring->rx_buffer_info = NULL;
    return (ENOMEM);
}

/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_ring *rx_ring = &adapter->rx_ring[qid];

    /* Free buffer DMA maps, */
    for (int i = 0; i < rx_ring->ring_size; i++) {
        bus_dmamap_sync(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
        m_freem(rx_ring->rx_buffer_info[i].mbuf);
        rx_ring->rx_buffer_info[i].mbuf = NULL;
        bus_dmamap_unload(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map);
        bus_dmamap_destroy(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map);
    }

    /* free LRO resources, */
    tcp_lro_free(&rx_ring->lro);

    /* free allocated memory */
    free(rx_ring->rx_buffer_info, M_DEVBUF);
    rx_ring->rx_buffer_info = NULL;

    free(rx_ring->free_rx_ids, M_DEVBUF);
    rx_ring->free_rx_ids = NULL;
}

/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, or an error code on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
    int i, rc = 0;

    for (i = 0; i < adapter->num_queues; i++) {
        rc = ena_setup_rx_resources(adapter, i);
        if (rc != 0) {
            device_printf(adapter->pdev,
                "Allocation for Rx Queue %u failed\n", i);
            goto err_setup_rx;
        }
    }
    return (0);

err_setup_rx:
    /* rewind the index freeing the rings as we go */
    while (i--)
        ena_free_rx_resources(adapter, i);
    return (rc);
}

/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_queues; i++)
        ena_free_rx_resources(adapter, i);
}

static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
    struct ena_com_buf *ena_buf;
    bus_dma_segment_t segs[1];
    int nsegs, error;
    int mlen;

    /* If a previously allocated mbuf is still attached, there is nothing to do. */
    if (unlikely(rx_info->mbuf != NULL))
        return (0);

    /* Get mbuf using UMA allocator */
    rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES);

    if (unlikely(rx_info->mbuf == NULL)) {
        counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
        rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (unlikely(rx_info->mbuf == NULL)) {
            counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
            return (ENOMEM);
        }
        mlen = MCLBYTES;
    } else {
        mlen = MJUM16BYTES;
    }
    /* Set mbuf length */
    rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

    /* Map packets for DMA */
    ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
        "Using tag %p for buffers' DMA mapping, mbuf %p len: %d",
        adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
    error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
        rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
    if (unlikely((error != 0) || (nsegs != 1))) {
        ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, "
            "nsegs: %d\n", error, nsegs);
        counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
        goto exit;

    }

    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

    ena_buf = &rx_info->ena_buf;
    ena_buf->paddr = segs[0].ds_addr;
    ena_buf->len = mlen;

    ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
        "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
        rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

    return (0);

exit:
    m_freem(rx_info->mbuf);
    rx_info->mbuf = NULL;
    return (EFAULT);
}

static void
ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{

    if (rx_info->mbuf == NULL) {
        ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n");
        return;
    }

    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
        BUS_DMASYNC_POSTREAD);
    bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
    m_freem(rx_info->mbuf);
    rx_info->mbuf = NULL;
}

/**
 * ena_refill_rx_bufs - Refills ring with descriptors
 * @rx_ring: the ring which we want to feed with free descriptors
 * @num: number of descriptors to refill
 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
 **/
static int
ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
{
    struct ena_adapter *adapter = rx_ring->adapter;
    uint16_t next_to_use, req_id;
    uint32_t i;
    int rc;

    ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d",
        rx_ring->qid);

    next_to_use = rx_ring->next_to_use;

    for (i = 0; i < num; i++) {
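        /*
         * Each iteration takes the next free req_id, attaches a freshly
         * allocated and DMA-mapped mbuf to it and posts the descriptor
         * to the Rx submission queue.
         */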
        struct ena_rx_buffer *rx_info;

        ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC,
            "RX buffer - next to use: %d", next_to_use);

        req_id = rx_ring->free_rx_ids[next_to_use];
        rx_info = &rx_ring->rx_buffer_info[req_id];

        rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
        if (unlikely(rc != 0)) {
            ena_trace(ENA_WARNING,
                "failed to alloc buffer for rx queue %d\n",
                rx_ring->qid);
            break;
        }
        rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
            &rx_info->ena_buf, req_id);
        if (unlikely(rc != 0)) {
            ena_trace(ENA_WARNING,
                "failed to add buffer for rx queue %d\n",
                rx_ring->qid);
            break;
        }
        next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
            rx_ring->ring_size);
    }

    if (unlikely(i < num)) {
        counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
        ena_trace(ENA_WARNING,
            "refilled rx qid %d with only %d mbufs (from %d)\n",
            rx_ring->qid, i, num);
    }

    if (likely(i != 0)) {
        wmb();
        ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
    }
    rx_ring->next_to_use = next_to_use;
    return (i);
}

static void
ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_ring *rx_ring = &adapter->rx_ring[qid];
    unsigned int i;

    for (i = 0; i < rx_ring->ring_size; i++) {
        struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

        if (rx_info->mbuf != NULL)
            ena_free_rx_mbuf(adapter, rx_ring, rx_info);
    }
}

/**
 * ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: network interface device structure
 *
 */
static void
ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
    struct ena_ring *rx_ring;
    int i, rc, bufs_num;

    for (i = 0; i < adapter->num_queues; i++) {
        rx_ring = &adapter->rx_ring[i];
        bufs_num = rx_ring->ring_size - 1;
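        /*
         * Refill all but one descriptor.  Leaving a single slot unused is
         * a common ring convention that keeps a completely full queue
         * distinguishable from an empty one; the rationale is assumed
         * here, not spelled out in this file.
         */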
11089b8d05b8SZbigniew Bodek rc = ena_refill_rx_bufs(rx_ring, bufs_num); 11099b8d05b8SZbigniew Bodek 11109b8d05b8SZbigniew Bodek if (unlikely(rc != bufs_num)) 11114e8acd84SMarcin Wojtas ena_trace(ENA_WARNING, "refilling Queue %d failed. " 11124e8acd84SMarcin Wojtas "Allocated %d buffers from: %d\n", i, rc, bufs_num); 11139b8d05b8SZbigniew Bodek } 11149b8d05b8SZbigniew Bodek } 11159b8d05b8SZbigniew Bodek 11169b8d05b8SZbigniew Bodek static void 11179b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(struct ena_adapter *adapter) 11189b8d05b8SZbigniew Bodek { 11199b8d05b8SZbigniew Bodek int i; 11209b8d05b8SZbigniew Bodek 11219b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) 11229b8d05b8SZbigniew Bodek ena_free_rx_bufs(adapter, i); 11239b8d05b8SZbigniew Bodek } 11249b8d05b8SZbigniew Bodek 11259b8d05b8SZbigniew Bodek /** 11269b8d05b8SZbigniew Bodek * ena_free_tx_bufs - Free Tx Buffers per Queue 11279b8d05b8SZbigniew Bodek * @adapter: network interface device structure 11289b8d05b8SZbigniew Bodek * @qid: queue index 11299b8d05b8SZbigniew Bodek **/ 11309b8d05b8SZbigniew Bodek static void 11319b8d05b8SZbigniew Bodek ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) 11329b8d05b8SZbigniew Bodek { 11334e8acd84SMarcin Wojtas bool print_once = true; 11349b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 11359b8d05b8SZbigniew Bodek 1136416e8864SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 11379b8d05b8SZbigniew Bodek for (int i = 0; i < tx_ring->ring_size; i++) { 11389b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 11399b8d05b8SZbigniew Bodek 11409b8d05b8SZbigniew Bodek if (tx_info->mbuf == NULL) 11419b8d05b8SZbigniew Bodek continue; 11429b8d05b8SZbigniew Bodek 11434e8acd84SMarcin Wojtas if (print_once) { 11444e8acd84SMarcin Wojtas device_printf(adapter->pdev, 11454e8acd84SMarcin Wojtas "free uncompleted tx mbuf qid %d idx 0x%x", 11464e8acd84SMarcin Wojtas qid, i); 11474e8acd84SMarcin Wojtas print_once = false; 11484e8acd84SMarcin Wojtas } else { 11494e8acd84SMarcin Wojtas ena_trace(ENA_DBG, 11504e8acd84SMarcin Wojtas "free uncompleted tx mbuf qid %d idx 0x%x", 11514e8acd84SMarcin Wojtas qid, i); 11524e8acd84SMarcin Wojtas } 11539b8d05b8SZbigniew Bodek 11544fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) { 11554fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, 1156e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 11574fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 11584fa9e02dSMarcin Wojtas tx_info->map_head); 11594fa9e02dSMarcin Wojtas tx_info->head_mapped = false; 11604fa9e02dSMarcin Wojtas } 11614fa9e02dSMarcin Wojtas 11624fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) { 11634fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, 11644fa9e02dSMarcin Wojtas BUS_DMASYNC_POSTWRITE); 11654fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 11664fa9e02dSMarcin Wojtas tx_info->map_seg); 11674fa9e02dSMarcin Wojtas tx_info->seg_mapped = false; 11684fa9e02dSMarcin Wojtas } 11694fa9e02dSMarcin Wojtas 11709b8d05b8SZbigniew Bodek m_free(tx_info->mbuf); 11719b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 11729b8d05b8SZbigniew Bodek } 1173416e8864SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 11749b8d05b8SZbigniew Bodek } 11759b8d05b8SZbigniew Bodek 11769b8d05b8SZbigniew Bodek static void 11779b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(struct ena_adapter *adapter) 11789b8d05b8SZbigniew Bodek { 11799b8d05b8SZbigniew Bodek 11809b8d05b8SZbigniew Bodek for 
(int i = 0; i < adapter->num_queues; i++) 11819b8d05b8SZbigniew Bodek ena_free_tx_bufs(adapter, i); 11829b8d05b8SZbigniew Bodek } 11839b8d05b8SZbigniew Bodek 11849b8d05b8SZbigniew Bodek static void 11859b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(struct ena_adapter *adapter) 11869b8d05b8SZbigniew Bodek { 11879b8d05b8SZbigniew Bodek uint16_t ena_qid; 11889b8d05b8SZbigniew Bodek int i; 11899b8d05b8SZbigniew Bodek 11909b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 11919b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 11929b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 11939b8d05b8SZbigniew Bodek } 11949b8d05b8SZbigniew Bodek } 11959b8d05b8SZbigniew Bodek 11969b8d05b8SZbigniew Bodek static void 11979b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(struct ena_adapter *adapter) 11989b8d05b8SZbigniew Bodek { 11999b8d05b8SZbigniew Bodek uint16_t ena_qid; 12009b8d05b8SZbigniew Bodek int i; 12019b8d05b8SZbigniew Bodek 12029b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 12039b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 12049b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 12059b8d05b8SZbigniew Bodek } 12069b8d05b8SZbigniew Bodek } 12079b8d05b8SZbigniew Bodek 12089b8d05b8SZbigniew Bodek static void 12099b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(struct ena_adapter *adapter) 12109b8d05b8SZbigniew Bodek { 12115cb9db07SMarcin Wojtas struct ena_que *queue; 12125cb9db07SMarcin Wojtas int i; 12135cb9db07SMarcin Wojtas 12145cb9db07SMarcin Wojtas for (i = 0; i < adapter->num_queues; i++) { 12155cb9db07SMarcin Wojtas queue = &adapter->que[i]; 12165cb9db07SMarcin Wojtas while (taskqueue_cancel(queue->cleanup_tq, 12175cb9db07SMarcin Wojtas &queue->cleanup_task, NULL)) 12185cb9db07SMarcin Wojtas taskqueue_drain(queue->cleanup_tq, 12195cb9db07SMarcin Wojtas &queue->cleanup_task); 12205cb9db07SMarcin Wojtas taskqueue_free(queue->cleanup_tq); 12215cb9db07SMarcin Wojtas } 12225cb9db07SMarcin Wojtas 12239b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(adapter); 12249b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(adapter); 12259b8d05b8SZbigniew Bodek } 12269b8d05b8SZbigniew Bodek 1227fceb9387SMarcin Wojtas static inline int 12289b8d05b8SZbigniew Bodek validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id) 12299b8d05b8SZbigniew Bodek { 12304e8acd84SMarcin Wojtas struct ena_adapter *adapter = tx_ring->adapter; 12319b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = NULL; 12329b8d05b8SZbigniew Bodek 12339b8d05b8SZbigniew Bodek if (likely(req_id < tx_ring->ring_size)) { 12349b8d05b8SZbigniew Bodek tx_info = &tx_ring->tx_buffer_info[req_id]; 12350bdffe59SMarcin Wojtas if (tx_info->mbuf != NULL) 12360bdffe59SMarcin Wojtas return (0); 12374e8acd84SMarcin Wojtas device_printf(adapter->pdev, 12384e8acd84SMarcin Wojtas "tx_info doesn't have valid mbuf\n"); 12394e306999SMarcin Wojtas } 12404e8acd84SMarcin Wojtas 12414e306999SMarcin Wojtas device_printf(adapter->pdev, "Invalid req_id: %hu\n", req_id); 12429b8d05b8SZbigniew Bodek counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); 12439b8d05b8SZbigniew Bodek 12449b8d05b8SZbigniew Bodek return (EFAULT); 12459b8d05b8SZbigniew Bodek } 12469b8d05b8SZbigniew Bodek 12479b8d05b8SZbigniew Bodek static int 12489b8d05b8SZbigniew Bodek ena_create_io_queues(struct ena_adapter *adapter) 12499b8d05b8SZbigniew Bodek { 12509b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 12519b8d05b8SZbigniew Bodek struct ena_com_create_io_ctx ctx; 
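/*
 * Summary of the flow below: one TX and one RX I/O queue pair is created
 * per configured queue, each tied to its MSI-X vector, and a per-queue
 * cleanup taskqueue is started once all queues exist. On any failure the
 * err_tx / err_rx labels destroy whatever queues were already created.
 */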
12529b8d05b8SZbigniew Bodek struct ena_ring *ring; 12535cb9db07SMarcin Wojtas struct ena_que *queue; 12549b8d05b8SZbigniew Bodek uint16_t ena_qid; 12559b8d05b8SZbigniew Bodek uint32_t msix_vector; 12569b8d05b8SZbigniew Bodek int rc, i; 12579b8d05b8SZbigniew Bodek 12589b8d05b8SZbigniew Bodek /* Create TX queues */ 12599b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 12609b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 12619b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 12629b8d05b8SZbigniew Bodek ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 12639b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 12649b8d05b8SZbigniew Bodek ctx.queue_size = adapter->tx_ring_size; 12659b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 12669b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 12679b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 12680bdffe59SMarcin Wojtas if (rc != 0) { 12699b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 12709b8d05b8SZbigniew Bodek "Failed to create io TX queue #%d rc: %d\n", i, rc); 12719b8d05b8SZbigniew Bodek goto err_tx; 12729b8d05b8SZbigniew Bodek } 12739b8d05b8SZbigniew Bodek ring = &adapter->tx_ring[i]; 12749b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 12759b8d05b8SZbigniew Bodek &ring->ena_com_io_sq, 12769b8d05b8SZbigniew Bodek &ring->ena_com_io_cq); 12770bdffe59SMarcin Wojtas if (rc != 0) { 12789b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 12799b8d05b8SZbigniew Bodek "Failed to get TX queue handlers. TX queue num" 12809b8d05b8SZbigniew Bodek " %d rc: %d\n", i, rc); 12819b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 12829b8d05b8SZbigniew Bodek goto err_tx; 12839b8d05b8SZbigniew Bodek } 12849b8d05b8SZbigniew Bodek } 12859b8d05b8SZbigniew Bodek 12869b8d05b8SZbigniew Bodek /* Create RX queues */ 12879b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 12889b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 12899b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 12909b8d05b8SZbigniew Bodek ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 12919b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 12929b8d05b8SZbigniew Bodek ctx.queue_size = adapter->rx_ring_size; 12939b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 12949b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 12959b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 12963f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 12979b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 12989b8d05b8SZbigniew Bodek "Failed to create io RX queue[%d] rc: %d\n", i, rc); 12999b8d05b8SZbigniew Bodek goto err_rx; 13009b8d05b8SZbigniew Bodek } 13019b8d05b8SZbigniew Bodek 13029b8d05b8SZbigniew Bodek ring = &adapter->rx_ring[i]; 13039b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 13049b8d05b8SZbigniew Bodek &ring->ena_com_io_sq, 13059b8d05b8SZbigniew Bodek &ring->ena_com_io_cq); 13063f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 13079b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 13089b8d05b8SZbigniew Bodek "Failed to get RX queue handlers. 
RX queue num" 13099b8d05b8SZbigniew Bodek " %d rc: %d\n", i, rc); 13109b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 13119b8d05b8SZbigniew Bodek goto err_rx; 13129b8d05b8SZbigniew Bodek } 13139b8d05b8SZbigniew Bodek } 13149b8d05b8SZbigniew Bodek 13155cb9db07SMarcin Wojtas for (i = 0; i < adapter->num_queues; i++) { 13165cb9db07SMarcin Wojtas queue = &adapter->que[i]; 13175cb9db07SMarcin Wojtas 13185cb9db07SMarcin Wojtas TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 13195cb9db07SMarcin Wojtas queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 13205cb9db07SMarcin Wojtas M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 13215cb9db07SMarcin Wojtas 13225cb9db07SMarcin Wojtas taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET, 13235cb9db07SMarcin Wojtas "%s queue %d cleanup", 13245cb9db07SMarcin Wojtas device_get_nameunit(adapter->pdev), i); 13255cb9db07SMarcin Wojtas } 13265cb9db07SMarcin Wojtas 13279b8d05b8SZbigniew Bodek return (0); 13289b8d05b8SZbigniew Bodek 13299b8d05b8SZbigniew Bodek err_rx: 13309b8d05b8SZbigniew Bodek while (i--) 13319b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 13329b8d05b8SZbigniew Bodek i = adapter->num_queues; 13339b8d05b8SZbigniew Bodek err_tx: 13349b8d05b8SZbigniew Bodek while (i--) 13359b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 13369b8d05b8SZbigniew Bodek 13379b8d05b8SZbigniew Bodek return (ENXIO); 13389b8d05b8SZbigniew Bodek } 13399b8d05b8SZbigniew Bodek 13409b8d05b8SZbigniew Bodek /** 13419b8d05b8SZbigniew Bodek * ena_tx_cleanup - clear sent packets and corresponding descriptors 13429b8d05b8SZbigniew Bodek * @tx_ring: ring for which we want to clean packets 13439b8d05b8SZbigniew Bodek * 13449b8d05b8SZbigniew Bodek * Once packets are sent, we ask the device in a loop for no longer used 13459b8d05b8SZbigniew Bodek * descriptors. We find the related mbuf chain in a map (index in an array) 13469b8d05b8SZbigniew Bodek * and free it, then update ring state. 13479b8d05b8SZbigniew Bodek * This is performed in "endless" loop, updating ring pointers every 13489b8d05b8SZbigniew Bodek * TX_COMMIT. The first check of free descriptor is performed before the actual 13499b8d05b8SZbigniew Bodek * loop, then repeated at the loop end. 
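 *
 * A rough sketch of the loop (illustrative pseudocode, not driver code):
 *
 *	while (budget--):
 *		req_id = next completed descriptor from the TX completion queue
 *		unmap and m_freem() tx_buffer_info[req_id].mbuf
 *		recycle req_id into free_tx_ids[] and advance next_to_clean
 *		every TX_COMMIT completions, publish next_to_clean and ack the
 *		processed descriptors back to the device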
13509b8d05b8SZbigniew Bodek **/ 13519b8d05b8SZbigniew Bodek static int 13529b8d05b8SZbigniew Bodek ena_tx_cleanup(struct ena_ring *tx_ring) 13539b8d05b8SZbigniew Bodek { 13549b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 13559b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 13569b8d05b8SZbigniew Bodek uint16_t next_to_clean; 13579b8d05b8SZbigniew Bodek uint16_t req_id; 13589b8d05b8SZbigniew Bodek uint16_t ena_qid; 13599b8d05b8SZbigniew Bodek unsigned int total_done = 0; 13609b8d05b8SZbigniew Bodek int rc; 13619b8d05b8SZbigniew Bodek int commit = TX_COMMIT; 13629b8d05b8SZbigniew Bodek int budget = TX_BUDGET; 13639b8d05b8SZbigniew Bodek int work_done; 13645cb9db07SMarcin Wojtas bool above_thresh; 13659b8d05b8SZbigniew Bodek 13669b8d05b8SZbigniew Bodek adapter = tx_ring->que->adapter; 13679b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 13689b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 13699b8d05b8SZbigniew Bodek next_to_clean = tx_ring->next_to_clean; 13709b8d05b8SZbigniew Bodek 13719b8d05b8SZbigniew Bodek do { 13729b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info; 13739b8d05b8SZbigniew Bodek struct mbuf *mbuf; 13749b8d05b8SZbigniew Bodek 13759b8d05b8SZbigniew Bodek rc = ena_com_tx_comp_req_id_get(io_cq, &req_id); 13763f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 13779b8d05b8SZbigniew Bodek break; 13789b8d05b8SZbigniew Bodek 13799b8d05b8SZbigniew Bodek rc = validate_tx_req_id(tx_ring, req_id); 13803f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 13819b8d05b8SZbigniew Bodek break; 13829b8d05b8SZbigniew Bodek 13839b8d05b8SZbigniew Bodek tx_info = &tx_ring->tx_buffer_info[req_id]; 13849b8d05b8SZbigniew Bodek 13859b8d05b8SZbigniew Bodek mbuf = tx_info->mbuf; 13869b8d05b8SZbigniew Bodek 13879b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 13889b8d05b8SZbigniew Bodek bintime_clear(&tx_info->timestamp); 13899b8d05b8SZbigniew Bodek 13909b8d05b8SZbigniew Bodek /* Map is no longer required */ 13914fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) { 13924fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, 1393e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 13944fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 13954fa9e02dSMarcin Wojtas tx_info->map_head); 13964fa9e02dSMarcin Wojtas tx_info->head_mapped = false; 13974fa9e02dSMarcin Wojtas } 13984fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) { 13994fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, 14004fa9e02dSMarcin Wojtas BUS_DMASYNC_POSTWRITE); 14014fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 14024fa9e02dSMarcin Wojtas tx_info->map_seg); 14034fa9e02dSMarcin Wojtas tx_info->seg_mapped = false; 14049b8d05b8SZbigniew Bodek } 14059b8d05b8SZbigniew Bodek 14064e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed", 14074e8acd84SMarcin Wojtas tx_ring->qid, mbuf); 14084e8acd84SMarcin Wojtas 14099b8d05b8SZbigniew Bodek m_freem(mbuf); 14109b8d05b8SZbigniew Bodek 14119b8d05b8SZbigniew Bodek total_done += tx_info->tx_descs; 14129b8d05b8SZbigniew Bodek 14139b8d05b8SZbigniew Bodek tx_ring->free_tx_ids[next_to_clean] = req_id; 14149b8d05b8SZbigniew Bodek next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, 14159b8d05b8SZbigniew Bodek tx_ring->ring_size); 14169b8d05b8SZbigniew Bodek 14173f9ed7abSMarcin Wojtas if (unlikely(--commit == 0)) { 14189b8d05b8SZbigniew Bodek commit = TX_COMMIT; 14199b8d05b8SZbigniew Bodek /* update ring state every TX_COMMIT descriptor */ 14209b8d05b8SZbigniew Bodek 
tx_ring->next_to_clean = next_to_clean; 14210bdffe59SMarcin Wojtas ena_com_comp_ack( 14220bdffe59SMarcin Wojtas &adapter->ena_dev->io_sq_queues[ena_qid], 14230bdffe59SMarcin Wojtas total_done); 14249b8d05b8SZbigniew Bodek ena_com_update_dev_comp_head(io_cq); 14259b8d05b8SZbigniew Bodek total_done = 0; 14269b8d05b8SZbigniew Bodek } 14273f9ed7abSMarcin Wojtas } while (likely(--budget)); 14289b8d05b8SZbigniew Bodek 14299b8d05b8SZbigniew Bodek work_done = TX_BUDGET - budget; 14309b8d05b8SZbigniew Bodek 14314e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d", 14324e8acd84SMarcin Wojtas tx_ring->qid, work_done); 14334e8acd84SMarcin Wojtas 14349b8d05b8SZbigniew Bodek /* If there is still something to commit update ring state */ 14353f9ed7abSMarcin Wojtas if (likely(commit != TX_COMMIT)) { 14369b8d05b8SZbigniew Bodek tx_ring->next_to_clean = next_to_clean; 14370bdffe59SMarcin Wojtas ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], 14380bdffe59SMarcin Wojtas total_done); 14399b8d05b8SZbigniew Bodek ena_com_update_dev_comp_head(io_cq); 14409b8d05b8SZbigniew Bodek } 14419b8d05b8SZbigniew Bodek 14425cb9db07SMarcin Wojtas /* 14435cb9db07SMarcin Wojtas * Need to make the rings circular update visible to 14445cb9db07SMarcin Wojtas * ena_xmit_mbuf() before checking for tx_ring->running. 14455cb9db07SMarcin Wojtas */ 14465cb9db07SMarcin Wojtas mb(); 14475cb9db07SMarcin Wojtas 14485cb9db07SMarcin Wojtas above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 14495cb9db07SMarcin Wojtas ENA_TX_RESUME_THRESH); 14505cb9db07SMarcin Wojtas if (unlikely(!tx_ring->running && above_thresh)) { 14515cb9db07SMarcin Wojtas ENA_RING_MTX_LOCK(tx_ring); 14525cb9db07SMarcin Wojtas above_thresh = 14535cb9db07SMarcin Wojtas ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 14545cb9db07SMarcin Wojtas ENA_TX_RESUME_THRESH); 14555cb9db07SMarcin Wojtas if (!tx_ring->running && above_thresh) { 14565cb9db07SMarcin Wojtas tx_ring->running = true; 14575cb9db07SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); 14585cb9db07SMarcin Wojtas taskqueue_enqueue(tx_ring->enqueue_tq, 14595cb9db07SMarcin Wojtas &tx_ring->enqueue_task); 14605cb9db07SMarcin Wojtas } 14615cb9db07SMarcin Wojtas ENA_RING_MTX_UNLOCK(tx_ring); 14625cb9db07SMarcin Wojtas } 14639b8d05b8SZbigniew Bodek 14649b8d05b8SZbigniew Bodek return (work_done); 14659b8d05b8SZbigniew Bodek } 14669b8d05b8SZbigniew Bodek 14679b8d05b8SZbigniew Bodek static void 14689b8d05b8SZbigniew Bodek ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, 14699b8d05b8SZbigniew Bodek struct mbuf *mbuf) 14709b8d05b8SZbigniew Bodek { 14719b8d05b8SZbigniew Bodek struct ena_adapter *adapter = rx_ring->adapter; 14729b8d05b8SZbigniew Bodek 14733f9ed7abSMarcin Wojtas if (likely(adapter->rss_support)) { 14749b8d05b8SZbigniew Bodek mbuf->m_pkthdr.flowid = ena_rx_ctx->hash; 14759b8d05b8SZbigniew Bodek 14769b8d05b8SZbigniew Bodek if (ena_rx_ctx->frag && 1477bfea0e93SMarcin Wojtas (ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN)) { 14789b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); 14799b8d05b8SZbigniew Bodek return; 14809b8d05b8SZbigniew Bodek } 14819b8d05b8SZbigniew Bodek 14829b8d05b8SZbigniew Bodek switch (ena_rx_ctx->l3_proto) { 14839b8d05b8SZbigniew Bodek case ENA_ETH_IO_L3_PROTO_IPV4: 14849b8d05b8SZbigniew Bodek switch (ena_rx_ctx->l4_proto) { 14859b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_TCP: 14869b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4); 
14879b8d05b8SZbigniew Bodek break; 14889b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_UDP: 14899b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4); 14909b8d05b8SZbigniew Bodek break; 14919b8d05b8SZbigniew Bodek default: 14929b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4); 14939b8d05b8SZbigniew Bodek } 14949b8d05b8SZbigniew Bodek break; 14959b8d05b8SZbigniew Bodek case ENA_ETH_IO_L3_PROTO_IPV6: 14969b8d05b8SZbigniew Bodek switch (ena_rx_ctx->l4_proto) { 14979b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_TCP: 14989b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6); 14999b8d05b8SZbigniew Bodek break; 15009b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_UDP: 15019b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6); 15029b8d05b8SZbigniew Bodek break; 15039b8d05b8SZbigniew Bodek default: 15049b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6); 15059b8d05b8SZbigniew Bodek } 15069b8d05b8SZbigniew Bodek break; 15079b8d05b8SZbigniew Bodek case ENA_ETH_IO_L3_PROTO_UNKNOWN: 15089b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); 15099b8d05b8SZbigniew Bodek break; 15109b8d05b8SZbigniew Bodek default: 15119b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); 15129b8d05b8SZbigniew Bodek } 15139b8d05b8SZbigniew Bodek } else { 15149b8d05b8SZbigniew Bodek mbuf->m_pkthdr.flowid = rx_ring->qid; 15159b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); 15169b8d05b8SZbigniew Bodek } 15179b8d05b8SZbigniew Bodek } 15189b8d05b8SZbigniew Bodek 15199b8d05b8SZbigniew Bodek /** 15209b8d05b8SZbigniew Bodek * ena_rx_mbuf - assemble mbuf from descriptors 15219b8d05b8SZbigniew Bodek * @rx_ring: ring for which we want to clean packets 15229b8d05b8SZbigniew Bodek * @ena_bufs: buffer info 15239b8d05b8SZbigniew Bodek * @ena_rx_ctx: metadata for this packet(s) 152443fefd16SMarcin Wojtas * @next_to_clean: ring pointer, will be updated only upon success 15259b8d05b8SZbigniew Bodek * 15269b8d05b8SZbigniew Bodek **/ 15279b8d05b8SZbigniew Bodek static struct mbuf* 15289b8d05b8SZbigniew Bodek ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, 15299b8d05b8SZbigniew Bodek struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean) 15309b8d05b8SZbigniew Bodek { 15319b8d05b8SZbigniew Bodek struct mbuf *mbuf; 15329b8d05b8SZbigniew Bodek struct ena_rx_buffer *rx_info; 15339b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 15349b8d05b8SZbigniew Bodek unsigned int descs = ena_rx_ctx->descs; 1535c51a229cSMarcin Wojtas int rc; 153643fefd16SMarcin Wojtas uint16_t ntc, len, req_id, buf = 0; 15379b8d05b8SZbigniew Bodek 153843fefd16SMarcin Wojtas ntc = *next_to_clean; 15399b8d05b8SZbigniew Bodek adapter = rx_ring->adapter; 15409b8d05b8SZbigniew Bodek 154143fefd16SMarcin Wojtas len = ena_bufs[buf].len; 154243fefd16SMarcin Wojtas req_id = ena_bufs[buf].req_id; 1543c51a229cSMarcin Wojtas rc = validate_rx_req_id(rx_ring, req_id); 1544c51a229cSMarcin Wojtas if (unlikely(rc != 0)) 1545c51a229cSMarcin Wojtas return (NULL); 1546c51a229cSMarcin Wojtas 154743fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 15481d65b4c0SMarcin Wojtas if (unlikely(rx_info->mbuf == NULL)) { 15491d65b4c0SMarcin Wojtas device_printf(adapter->pdev, "NULL mbuf in rx_info"); 15501d65b4c0SMarcin Wojtas return (NULL); 15511d65b4c0SMarcin Wojtas } 155243fefd16SMarcin Wojtas 15539b8d05b8SZbigniew Bodek ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx", 15549b8d05b8SZbigniew Bodek rx_info, rx_info->mbuf, 
(uintmax_t)rx_info->ena_buf.paddr); 15559b8d05b8SZbigniew Bodek 1556e8073738SMarcin Wojtas bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 1557e8073738SMarcin Wojtas BUS_DMASYNC_POSTREAD); 15589b8d05b8SZbigniew Bodek mbuf = rx_info->mbuf; 15599b8d05b8SZbigniew Bodek mbuf->m_flags |= M_PKTHDR; 15609b8d05b8SZbigniew Bodek mbuf->m_pkthdr.len = len; 15619b8d05b8SZbigniew Bodek mbuf->m_len = len; 15629b8d05b8SZbigniew Bodek mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp; 15639b8d05b8SZbigniew Bodek 15649b8d05b8SZbigniew Bodek /* Fill mbuf with hash key and its interpretation for optimization */ 15659b8d05b8SZbigniew Bodek ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf); 15669b8d05b8SZbigniew Bodek 15679b8d05b8SZbigniew Bodek ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d", 15689b8d05b8SZbigniew Bodek mbuf, mbuf->m_flags, mbuf->m_pkthdr.len); 15699b8d05b8SZbigniew Bodek 15709b8d05b8SZbigniew Bodek /* DMA address is not needed anymore, unmap it */ 15719b8d05b8SZbigniew Bodek bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); 15729b8d05b8SZbigniew Bodek 15739b8d05b8SZbigniew Bodek rx_info->mbuf = NULL; 157443fefd16SMarcin Wojtas rx_ring->free_rx_ids[ntc] = req_id; 157543fefd16SMarcin Wojtas ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); 15769b8d05b8SZbigniew Bodek 15779b8d05b8SZbigniew Bodek /* 15789b8d05b8SZbigniew Bodek * While we have more than 1 descriptor for one received packet, append 15799b8d05b8SZbigniew Bodek * other mbufs to the main one 15809b8d05b8SZbigniew Bodek */ 15819b8d05b8SZbigniew Bodek while (--descs) { 158243fefd16SMarcin Wojtas ++buf; 158343fefd16SMarcin Wojtas len = ena_bufs[buf].len; 158443fefd16SMarcin Wojtas req_id = ena_bufs[buf].req_id; 1585c51a229cSMarcin Wojtas rc = validate_rx_req_id(rx_ring, req_id); 1586c51a229cSMarcin Wojtas if (unlikely(rc != 0)) { 1587c51a229cSMarcin Wojtas /* 1588c51a229cSMarcin Wojtas * If the req_id is invalid, then the device will be 1589c51a229cSMarcin Wojtas * reset. In that case we must free all mbufs that 1590c51a229cSMarcin Wojtas * were already gathered. 1591c51a229cSMarcin Wojtas */ 1592c51a229cSMarcin Wojtas m_freem(mbuf); 1593c51a229cSMarcin Wojtas return (NULL); 1594c51a229cSMarcin Wojtas } 159543fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 15969b8d05b8SZbigniew Bodek 159743fefd16SMarcin Wojtas if (unlikely(rx_info->mbuf == NULL)) { 159843fefd16SMarcin Wojtas device_printf(adapter->pdev, "NULL mbuf in rx_info"); 159943fefd16SMarcin Wojtas /* 160043fefd16SMarcin Wojtas * If one of the required mbufs was not allocated yet, 160143fefd16SMarcin Wojtas * we can break here. 160243fefd16SMarcin Wojtas * All earlier used descriptors will be reallocated 160343fefd16SMarcin Wojtas * later and unused mbufs can be reused. 160443fefd16SMarcin Wojtas * The next_to_clean pointer will not be updated in case 160543fefd16SMarcin Wojtas * of an error, so the caller should advance it manually 160643fefd16SMarcin Wojtas * in the error handling routine to keep it up to date 160743fefd16SMarcin Wojtas * with hw ring.
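 * (For reference: the caller, ena_rx_cleanup(), performs that advancement
 * when this function returns NULL, by walking ena_rx_ctx.descs and putting
 * each req_id back on free_rx_ids[] while moving next_to_clean forward.)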
160843fefd16SMarcin Wojtas */ 160943fefd16SMarcin Wojtas m_freem(mbuf); 161043fefd16SMarcin Wojtas return (NULL); 161143fefd16SMarcin Wojtas } 161243fefd16SMarcin Wojtas 1613e8073738SMarcin Wojtas bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 1614e8073738SMarcin Wojtas BUS_DMASYNC_POSTREAD); 16153f9ed7abSMarcin Wojtas if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) { 16169b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); 16179b8d05b8SZbigniew Bodek ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p", 16189b8d05b8SZbigniew Bodek mbuf); 16199b8d05b8SZbigniew Bodek } 16204e8acd84SMarcin Wojtas 16214e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, 16224e8acd84SMarcin Wojtas "rx mbuf updated. len %d", mbuf->m_pkthdr.len); 16234e8acd84SMarcin Wojtas 16249b8d05b8SZbigniew Bodek /* Free already appended mbuf, it won't be useful anymore */ 16259b8d05b8SZbigniew Bodek bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); 16269b8d05b8SZbigniew Bodek m_freem(rx_info->mbuf); 16279b8d05b8SZbigniew Bodek rx_info->mbuf = NULL; 16289b8d05b8SZbigniew Bodek 162943fefd16SMarcin Wojtas rx_ring->free_rx_ids[ntc] = req_id; 163043fefd16SMarcin Wojtas ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); 16319b8d05b8SZbigniew Bodek } 16329b8d05b8SZbigniew Bodek 163343fefd16SMarcin Wojtas *next_to_clean = ntc; 163443fefd16SMarcin Wojtas 16359b8d05b8SZbigniew Bodek return (mbuf); 16369b8d05b8SZbigniew Bodek } 16379b8d05b8SZbigniew Bodek 16389b8d05b8SZbigniew Bodek /** 16399b8d05b8SZbigniew Bodek * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum 16409b8d05b8SZbigniew Bodek **/ 16419b8d05b8SZbigniew Bodek static inline void 16429b8d05b8SZbigniew Bodek ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, 16439b8d05b8SZbigniew Bodek struct mbuf *mbuf) 16449b8d05b8SZbigniew Bodek { 16459b8d05b8SZbigniew Bodek 16469b8d05b8SZbigniew Bodek /* if IP and error */ 16473f9ed7abSMarcin Wojtas if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && 16483f9ed7abSMarcin Wojtas ena_rx_ctx->l3_csum_err)) { 16499b8d05b8SZbigniew Bodek /* ipv4 checksum error */ 16509b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags = 0; 16519b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.bad_csum, 1); 16524e8acd84SMarcin Wojtas ena_trace(ENA_DBG, "RX IPv4 header checksum error"); 16539b8d05b8SZbigniew Bodek return; 16549b8d05b8SZbigniew Bodek } 16559b8d05b8SZbigniew Bodek 16569b8d05b8SZbigniew Bodek /* if TCP/UDP */ 16579b8d05b8SZbigniew Bodek if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || 16589b8d05b8SZbigniew Bodek (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) { 16599b8d05b8SZbigniew Bodek if (ena_rx_ctx->l4_csum_err) { 16609b8d05b8SZbigniew Bodek /* TCP/UDP checksum error */ 16619b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags = 0; 16629b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.bad_csum, 1); 16634e8acd84SMarcin Wojtas ena_trace(ENA_DBG, "RX L4 checksum error"); 16649b8d05b8SZbigniew Bodek } else { 16659b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 16669b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 16679b8d05b8SZbigniew Bodek } 16689b8d05b8SZbigniew Bodek } 16699b8d05b8SZbigniew Bodek } 16709b8d05b8SZbigniew Bodek 16719b8d05b8SZbigniew Bodek /** 16729b8d05b8SZbigniew Bodek * ena_rx_cleanup - handle rx irq 16739b8d05b8SZbigniew Bodek * @arg: ring for which irq is being handled 16749b8d05b8SZbigniew Bodek **/ 16759b8d05b8SZbigniew Bodek static int 
16769b8d05b8SZbigniew Bodek ena_rx_cleanup(struct ena_ring *rx_ring) 16779b8d05b8SZbigniew Bodek { 16789b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 16799b8d05b8SZbigniew Bodek struct mbuf *mbuf; 16809b8d05b8SZbigniew Bodek struct ena_com_rx_ctx ena_rx_ctx; 16819b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 16829b8d05b8SZbigniew Bodek struct ena_com_io_sq* io_sq; 16839b8d05b8SZbigniew Bodek if_t ifp; 16849b8d05b8SZbigniew Bodek uint16_t ena_qid; 16859b8d05b8SZbigniew Bodek uint16_t next_to_clean; 16869b8d05b8SZbigniew Bodek uint32_t refill_required; 16879b8d05b8SZbigniew Bodek uint32_t refill_threshold; 16889b8d05b8SZbigniew Bodek uint32_t do_if_input = 0; 16899b8d05b8SZbigniew Bodek unsigned int qid; 169043fefd16SMarcin Wojtas int rc, i; 16919b8d05b8SZbigniew Bodek int budget = RX_BUDGET; 16929b8d05b8SZbigniew Bodek 16939b8d05b8SZbigniew Bodek adapter = rx_ring->que->adapter; 16949b8d05b8SZbigniew Bodek ifp = adapter->ifp; 16959b8d05b8SZbigniew Bodek qid = rx_ring->que->id; 16969b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(qid); 16979b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 16989b8d05b8SZbigniew Bodek io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; 16999b8d05b8SZbigniew Bodek next_to_clean = rx_ring->next_to_clean; 17009b8d05b8SZbigniew Bodek 17014e8acd84SMarcin Wojtas ena_trace(ENA_DBG, "rx: qid %d", qid); 17024e8acd84SMarcin Wojtas 17039b8d05b8SZbigniew Bodek do { 17049b8d05b8SZbigniew Bodek ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 17059b8d05b8SZbigniew Bodek ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size; 17069b8d05b8SZbigniew Bodek ena_rx_ctx.descs = 0; 1707e8073738SMarcin Wojtas bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, 1708e8073738SMarcin Wojtas io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD); 17099b8d05b8SZbigniew Bodek rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx); 17109b8d05b8SZbigniew Bodek 17110bdffe59SMarcin Wojtas if (unlikely(rc != 0)) 17129b8d05b8SZbigniew Bodek goto error; 17139b8d05b8SZbigniew Bodek 17149b8d05b8SZbigniew Bodek if (unlikely(ena_rx_ctx.descs == 0)) 17159b8d05b8SZbigniew Bodek break; 17169b8d05b8SZbigniew Bodek 17174e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. 
" 17184e8acd84SMarcin Wojtas "descs #: %d l3 proto %d l4 proto %d hash: %x", 17194e8acd84SMarcin Wojtas rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, 17204e8acd84SMarcin Wojtas ena_rx_ctx.l4_proto, ena_rx_ctx.hash); 17214e8acd84SMarcin Wojtas 17229b8d05b8SZbigniew Bodek /* Receive mbuf from the ring */ 17239b8d05b8SZbigniew Bodek mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs, 17249b8d05b8SZbigniew Bodek &ena_rx_ctx, &next_to_clean); 1725e8073738SMarcin Wojtas bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, 1726e8073738SMarcin Wojtas io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD); 17279b8d05b8SZbigniew Bodek /* Exit if we failed to retrieve a buffer */ 17280bdffe59SMarcin Wojtas if (unlikely(mbuf == NULL)) { 172943fefd16SMarcin Wojtas for (i = 0; i < ena_rx_ctx.descs; ++i) { 173043fefd16SMarcin Wojtas rx_ring->free_rx_ids[next_to_clean] = 173143fefd16SMarcin Wojtas rx_ring->ena_bufs[i].req_id; 173243fefd16SMarcin Wojtas next_to_clean = 173343fefd16SMarcin Wojtas ENA_RX_RING_IDX_NEXT(next_to_clean, 173443fefd16SMarcin Wojtas rx_ring->ring_size); 173543fefd16SMarcin Wojtas 173643fefd16SMarcin Wojtas } 17379b8d05b8SZbigniew Bodek break; 17389b8d05b8SZbigniew Bodek } 17399b8d05b8SZbigniew Bodek 17400bdffe59SMarcin Wojtas if (((ifp->if_capenable & IFCAP_RXCSUM) != 0) || 17410bdffe59SMarcin Wojtas ((ifp->if_capenable & IFCAP_RXCSUM_IPV6) != 0)) { 17429b8d05b8SZbigniew Bodek ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf); 17439b8d05b8SZbigniew Bodek } 17449b8d05b8SZbigniew Bodek 174530217e2dSMarcin Wojtas counter_enter(); 174630217e2dSMarcin Wojtas counter_u64_add_protected(rx_ring->rx_stats.bytes, 174730217e2dSMarcin Wojtas mbuf->m_pkthdr.len); 174830217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.rx_bytes, 174930217e2dSMarcin Wojtas mbuf->m_pkthdr.len); 175030217e2dSMarcin Wojtas counter_exit(); 17519b8d05b8SZbigniew Bodek /* 17529b8d05b8SZbigniew Bodek * LRO is only for IP/TCP packets and TCP checksum of the packet 17539b8d05b8SZbigniew Bodek * should be computed by hardware. 
17549b8d05b8SZbigniew Bodek */ 17559b8d05b8SZbigniew Bodek do_if_input = 1; 17560bdffe59SMarcin Wojtas if (((ifp->if_capenable & IFCAP_LRO) != 0) && 17570bdffe59SMarcin Wojtas ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && 17580bdffe59SMarcin Wojtas (ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP)) { 17599b8d05b8SZbigniew Bodek /* 17609b8d05b8SZbigniew Bodek * Send to the stack if: 17619b8d05b8SZbigniew Bodek * - LRO not enabled, or 17629b8d05b8SZbigniew Bodek * - no LRO resources, or 17639b8d05b8SZbigniew Bodek * - lro enqueue fails 17649b8d05b8SZbigniew Bodek */ 17650bdffe59SMarcin Wojtas if ((rx_ring->lro.lro_cnt != 0) && 17660bdffe59SMarcin Wojtas (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)) 17679b8d05b8SZbigniew Bodek do_if_input = 0; 17689b8d05b8SZbigniew Bodek } 17690bdffe59SMarcin Wojtas if (do_if_input != 0) { 17700bdffe59SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, 17710bdffe59SMarcin Wojtas "calling if_input() with mbuf %p", mbuf); 17729b8d05b8SZbigniew Bodek (*ifp->if_input)(ifp, mbuf); 17739b8d05b8SZbigniew Bodek } 17749b8d05b8SZbigniew Bodek 177530217e2dSMarcin Wojtas counter_enter(); 177630217e2dSMarcin Wojtas counter_u64_add_protected(rx_ring->rx_stats.cnt, 1); 177730217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.rx_packets, 1); 177830217e2dSMarcin Wojtas counter_exit(); 17799b8d05b8SZbigniew Bodek } while (--budget); 17809b8d05b8SZbigniew Bodek 17819b8d05b8SZbigniew Bodek rx_ring->next_to_clean = next_to_clean; 17829b8d05b8SZbigniew Bodek 1783a195fab0SMarcin Wojtas refill_required = ena_com_free_desc(io_sq); 1784*82f5a792SMarcin Wojtas refill_threshold = min_t(int, 1785*82f5a792SMarcin Wojtas rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, 1786*82f5a792SMarcin Wojtas ENA_RX_REFILL_THRESH_PACKET); 17879b8d05b8SZbigniew Bodek 17889b8d05b8SZbigniew Bodek if (refill_required > refill_threshold) { 17899b8d05b8SZbigniew Bodek ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 17909b8d05b8SZbigniew Bodek ena_refill_rx_bufs(rx_ring, refill_required); 17919b8d05b8SZbigniew Bodek } 17929b8d05b8SZbigniew Bodek 17939b8d05b8SZbigniew Bodek tcp_lro_flush_all(&rx_ring->lro); 17949b8d05b8SZbigniew Bodek 17959b8d05b8SZbigniew Bodek return (RX_BUDGET - budget); 17969b8d05b8SZbigniew Bodek 17979b8d05b8SZbigniew Bodek error: 17989b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1); 1799c9b099ecSMarcin Wojtas 1800c9b099ecSMarcin Wojtas /* Too many desc from the device. 
Trigger reset */ 1801c9b099ecSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; 1802c9b099ecSMarcin Wojtas adapter->trigger_reset = true; 1803c9b099ecSMarcin Wojtas 1804c9b099ecSMarcin Wojtas return (0); 18059b8d05b8SZbigniew Bodek } 18069b8d05b8SZbigniew Bodek 18079b8d05b8SZbigniew Bodek /********************************************************************* 18089b8d05b8SZbigniew Bodek * 18099b8d05b8SZbigniew Bodek * MSIX & Interrupt Service routine 18109b8d05b8SZbigniew Bodek * 18119b8d05b8SZbigniew Bodek **********************************************************************/ 18129b8d05b8SZbigniew Bodek 18139b8d05b8SZbigniew Bodek /** 18149b8d05b8SZbigniew Bodek * ena_intr_msix_mgmnt - MSIX Interrupt Handler for admin/async queue 18159b8d05b8SZbigniew Bodek * @arg: pointer to the driver's ena_adapter 18169b8d05b8SZbigniew Bodek **/ 18179b8d05b8SZbigniew Bodek static void 18189b8d05b8SZbigniew Bodek ena_intr_msix_mgmnt(void *arg) 18199b8d05b8SZbigniew Bodek { 18209b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 18219b8d05b8SZbigniew Bodek 18229b8d05b8SZbigniew Bodek ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 18239b8d05b8SZbigniew Bodek if (likely(adapter->running)) 18249b8d05b8SZbigniew Bodek ena_com_aenq_intr_handler(adapter->ena_dev, arg); 18259b8d05b8SZbigniew Bodek } 18269b8d05b8SZbigniew Bodek 18279b8d05b8SZbigniew Bodek static void 18285cb9db07SMarcin Wojtas ena_cleanup(void *arg, int pending) 18299b8d05b8SZbigniew Bodek { 18309b8d05b8SZbigniew Bodek struct ena_que *que = arg; 18319b8d05b8SZbigniew Bodek struct ena_adapter *adapter = que->adapter; 18329b8d05b8SZbigniew Bodek if_t ifp = adapter->ifp; 18339b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 18349b8d05b8SZbigniew Bodek struct ena_ring *rx_ring; 18359b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 18369b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 18379b8d05b8SZbigniew Bodek int qid, ena_qid; 18389b8d05b8SZbigniew Bodek int txc, rxc, i; 18399b8d05b8SZbigniew Bodek 18403f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 18419b8d05b8SZbigniew Bodek return; 18429b8d05b8SZbigniew Bodek 18439b8d05b8SZbigniew Bodek ena_trace(ENA_DBG, "MSI-X TX/RX routine"); 18449b8d05b8SZbigniew Bodek 18459b8d05b8SZbigniew Bodek tx_ring = que->tx_ring; 18469b8d05b8SZbigniew Bodek rx_ring = que->rx_ring; 18479b8d05b8SZbigniew Bodek qid = que->id; 18489b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(qid); 18499b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 18509b8d05b8SZbigniew Bodek 1851d12f7bfcSMarcin Wojtas tx_ring->first_interrupt = true; 1852d12f7bfcSMarcin Wojtas rx_ring->first_interrupt = true; 1853d12f7bfcSMarcin Wojtas 18549b8d05b8SZbigniew Bodek for (i = 0; i < CLEAN_BUDGET; ++i) { 18559b8d05b8SZbigniew Bodek rxc = ena_rx_cleanup(rx_ring); 18569b8d05b8SZbigniew Bodek txc = ena_tx_cleanup(tx_ring); 18579b8d05b8SZbigniew Bodek 18583f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 18599b8d05b8SZbigniew Bodek return; 18609b8d05b8SZbigniew Bodek 18610bdffe59SMarcin Wojtas if ((txc != TX_BUDGET) && (rxc != RX_BUDGET)) 18629b8d05b8SZbigniew Bodek break; 18639b8d05b8SZbigniew Bodek } 18649b8d05b8SZbigniew Bodek 18659b8d05b8SZbigniew Bodek /* Signal that work is done and unmask interrupt */ 18669b8d05b8SZbigniew Bodek ena_com_update_intr_reg(&intr_reg, 18679b8d05b8SZbigniew Bodek RX_IRQ_INTERVAL, 18689b8d05b8SZbigniew Bodek TX_IRQ_INTERVAL, 18699b8d05b8SZbigniew Bodek true); 18709b8d05b8SZbigniew
Bodek ena_com_unmask_intr(io_cq, &intr_reg); 18719b8d05b8SZbigniew Bodek } 18729b8d05b8SZbigniew Bodek 18735cb9db07SMarcin Wojtas /** 18745cb9db07SMarcin Wojtas * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx 18755cb9db07SMarcin Wojtas * @arg: queue 18765cb9db07SMarcin Wojtas **/ 18775cb9db07SMarcin Wojtas static int 18785cb9db07SMarcin Wojtas ena_handle_msix(void *arg) 18795cb9db07SMarcin Wojtas { 18805cb9db07SMarcin Wojtas struct ena_que *queue = arg; 18815cb9db07SMarcin Wojtas struct ena_adapter *adapter = queue->adapter; 18825cb9db07SMarcin Wojtas if_t ifp = adapter->ifp; 18835cb9db07SMarcin Wojtas 18845cb9db07SMarcin Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 18855cb9db07SMarcin Wojtas return (FILTER_STRAY); 18865cb9db07SMarcin Wojtas 18875cb9db07SMarcin Wojtas taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); 18885cb9db07SMarcin Wojtas 18895cb9db07SMarcin Wojtas return (FILTER_HANDLED); 18905cb9db07SMarcin Wojtas } 18915cb9db07SMarcin Wojtas 18929b8d05b8SZbigniew Bodek static int 18939b8d05b8SZbigniew Bodek ena_enable_msix(struct ena_adapter *adapter) 18949b8d05b8SZbigniew Bodek { 18959b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 18968805021aSMarcin Wojtas int msix_vecs, msix_req; 18978805021aSMarcin Wojtas int i, rc = 0; 18989b8d05b8SZbigniew Bodek 18999b8d05b8SZbigniew Bodek /* Reserved the max msix vectors we might need */ 19009b8d05b8SZbigniew Bodek msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues); 19019b8d05b8SZbigniew Bodek 1902cd5d5804SMarcin Wojtas adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1903cd5d5804SMarcin Wojtas M_DEVBUF, M_WAITOK | M_ZERO); 1904cd5d5804SMarcin Wojtas 19054e8acd84SMarcin Wojtas ena_trace(ENA_DBG, "trying to enable MSI-X, vectors: %d", msix_vecs); 19069b8d05b8SZbigniew Bodek 19079b8d05b8SZbigniew Bodek for (i = 0; i < msix_vecs; i++) { 19089b8d05b8SZbigniew Bodek adapter->msix_entries[i].entry = i; 19099b8d05b8SZbigniew Bodek /* Vectors must start from 1 */ 19109b8d05b8SZbigniew Bodek adapter->msix_entries[i].vector = i + 1; 19119b8d05b8SZbigniew Bodek } 19129b8d05b8SZbigniew Bodek 19138805021aSMarcin Wojtas msix_req = msix_vecs; 19149b8d05b8SZbigniew Bodek rc = pci_alloc_msix(dev, &msix_vecs); 19153f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 19169b8d05b8SZbigniew Bodek device_printf(dev, 19179b8d05b8SZbigniew Bodek "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc); 19187d2544e6SMarcin Wojtas 19199b8d05b8SZbigniew Bodek rc = ENOSPC; 19207d2544e6SMarcin Wojtas goto err_msix_free; 19219b8d05b8SZbigniew Bodek } 19229b8d05b8SZbigniew Bodek 19238805021aSMarcin Wojtas if (msix_vecs != msix_req) { 19242b5b60feSMarcin Wojtas if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 19252b5b60feSMarcin Wojtas device_printf(dev, 19262b5b60feSMarcin Wojtas "Not enough number of MSI-x allocated: %d\n", 19272b5b60feSMarcin Wojtas msix_vecs); 19282b5b60feSMarcin Wojtas pci_release_msi(dev); 19292b5b60feSMarcin Wojtas rc = ENOSPC; 19302b5b60feSMarcin Wojtas goto err_msix_free; 19312b5b60feSMarcin Wojtas } 19328805021aSMarcin Wojtas device_printf(dev, "Enable only %d MSI-x (out of %d), reduce " 19338805021aSMarcin Wojtas "the number of queues\n", msix_vecs, msix_req); 19348805021aSMarcin Wojtas adapter->num_queues = msix_vecs - ENA_ADMIN_MSIX_VEC; 19358805021aSMarcin Wojtas } 19368805021aSMarcin Wojtas 19379b8d05b8SZbigniew Bodek adapter->msix_vecs = msix_vecs; 19389b8d05b8SZbigniew Bodek adapter->msix_enabled = true; 19399b8d05b8SZbigniew Bodek 19407d2544e6SMarcin Wojtas return (0); 19417d2544e6SMarcin 
Wojtas 19427d2544e6SMarcin Wojtas err_msix_free: 19437d2544e6SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 19447d2544e6SMarcin Wojtas adapter->msix_entries = NULL; 19457d2544e6SMarcin Wojtas 19469b8d05b8SZbigniew Bodek return (rc); 19479b8d05b8SZbigniew Bodek } 19489b8d05b8SZbigniew Bodek 19499b8d05b8SZbigniew Bodek static void 19509b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(struct ena_adapter *adapter) 19519b8d05b8SZbigniew Bodek { 19529b8d05b8SZbigniew Bodek 19539b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, 19549b8d05b8SZbigniew Bodek ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", 19559b8d05b8SZbigniew Bodek device_get_nameunit(adapter->pdev)); 19569b8d05b8SZbigniew Bodek /* 19579b8d05b8SZbigniew Bodek * Handler is NULL on purpose, it will be set 19589b8d05b8SZbigniew Bodek * when mgmnt interrupt is acquired 19599b8d05b8SZbigniew Bodek */ 19609b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 19619b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 19629b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 19639b8d05b8SZbigniew Bodek adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; 19649b8d05b8SZbigniew Bodek } 19659b8d05b8SZbigniew Bodek 19669b8d05b8SZbigniew Bodek static void 19679b8d05b8SZbigniew Bodek ena_setup_io_intr(struct ena_adapter *adapter) 19689b8d05b8SZbigniew Bodek { 19699b8d05b8SZbigniew Bodek static int last_bind_cpu = -1; 19709b8d05b8SZbigniew Bodek int irq_idx; 19719b8d05b8SZbigniew Bodek 19729b8d05b8SZbigniew Bodek for (int i = 0; i < adapter->num_queues; i++) { 19739b8d05b8SZbigniew Bodek irq_idx = ENA_IO_IRQ_IDX(i); 19749b8d05b8SZbigniew Bodek 19759b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 19769b8d05b8SZbigniew Bodek "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); 19779b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].handler = ena_handle_msix; 19789b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].data = &adapter->que[i]; 19799b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].vector = 19809b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector; 19819b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n", 19829b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector); 1983277f11c4SMarcin Wojtas 19849b8d05b8SZbigniew Bodek /* 1985277f11c4SMarcin Wojtas * We want to bind rings to the corresponding cpu 19869b8d05b8SZbigniew Bodek * using something similar to the RSS round-robin technique. 
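 * Concretely, the first queue is bound to CPU_FIRST() and each subsequent
 * queue to CPU_NEXT() of the previous one, so the I/O vectors are spread
 * across the available CPUs (presumably wrapping around when there are
 * more queues than CPUs).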
19879b8d05b8SZbigniew Bodek */ 19883f9ed7abSMarcin Wojtas if (unlikely(last_bind_cpu < 0)) 19899b8d05b8SZbigniew Bodek last_bind_cpu = CPU_FIRST(); 19909b8d05b8SZbigniew Bodek adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 19919b8d05b8SZbigniew Bodek last_bind_cpu; 19929b8d05b8SZbigniew Bodek last_bind_cpu = CPU_NEXT(last_bind_cpu); 19939b8d05b8SZbigniew Bodek } 19949b8d05b8SZbigniew Bodek } 19959b8d05b8SZbigniew Bodek 19969b8d05b8SZbigniew Bodek static int 19979b8d05b8SZbigniew Bodek ena_request_mgmnt_irq(struct ena_adapter *adapter) 19989b8d05b8SZbigniew Bodek { 19999b8d05b8SZbigniew Bodek struct ena_irq *irq; 20009b8d05b8SZbigniew Bodek unsigned long flags; 20019b8d05b8SZbigniew Bodek int rc, rcc; 20029b8d05b8SZbigniew Bodek 20039b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 20049b8d05b8SZbigniew Bodek 20059b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 20069b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 20079b8d05b8SZbigniew Bodek &irq->vector, flags); 20089b8d05b8SZbigniew Bodek 20093f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 20109b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "could not allocate " 20119b8d05b8SZbigniew Bodek "irq vector: %d\n", irq->vector); 20127d2544e6SMarcin Wojtas return (ENXIO); 20139b8d05b8SZbigniew Bodek } 20149b8d05b8SZbigniew Bodek 20150bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 20160bdffe59SMarcin Wojtas INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, 20170bdffe59SMarcin Wojtas irq->data, &irq->cookie); 20183f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 20199b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "failed to register " 20209b8d05b8SZbigniew Bodek "interrupt handler for irq %ju: %d\n", 20219b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 20227d2544e6SMarcin Wojtas goto err_res_free; 20239b8d05b8SZbigniew Bodek } 20249b8d05b8SZbigniew Bodek irq->requested = true; 20259b8d05b8SZbigniew Bodek 20269b8d05b8SZbigniew Bodek return (rc); 20279b8d05b8SZbigniew Bodek 20287d2544e6SMarcin Wojtas err_res_free: 20294e8acd84SMarcin Wojtas ena_trace(ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n", 20307d2544e6SMarcin Wojtas irq->vector); 20319b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 20329b8d05b8SZbigniew Bodek irq->vector, irq->res); 20333f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 20349b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent while " 20359b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 20369b8d05b8SZbigniew Bodek irq->res = NULL; 20379b8d05b8SZbigniew Bodek 20389b8d05b8SZbigniew Bodek return (rc); 20399b8d05b8SZbigniew Bodek } 20409b8d05b8SZbigniew Bodek 20419b8d05b8SZbigniew Bodek static int 20429b8d05b8SZbigniew Bodek ena_request_io_irq(struct ena_adapter *adapter) 20439b8d05b8SZbigniew Bodek { 20449b8d05b8SZbigniew Bodek struct ena_irq *irq; 20459b8d05b8SZbigniew Bodek unsigned long flags = 0; 20469b8d05b8SZbigniew Bodek int rc = 0, i, rcc; 20479b8d05b8SZbigniew Bodek 20483f9ed7abSMarcin Wojtas if (unlikely(adapter->msix_enabled == 0)) { 20494e8acd84SMarcin Wojtas device_printf(adapter->pdev, 20504e8acd84SMarcin Wojtas "failed to request I/O IRQ: MSI-X is not enabled\n"); 20519b8d05b8SZbigniew Bodek return (EINVAL); 20529b8d05b8SZbigniew Bodek } else { 20539b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 20549b8d05b8SZbigniew Bodek } 20559b8d05b8SZbigniew Bodek 20569b8d05b8SZbigniew Bodek for (i = ENA_IO_IRQ_FIRST_IDX; i < 
adapter->msix_vecs; i++) { 20579b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 20589b8d05b8SZbigniew Bodek 20593f9ed7abSMarcin Wojtas if (unlikely(irq->requested)) 20609b8d05b8SZbigniew Bodek continue; 20619b8d05b8SZbigniew Bodek 20629b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 20639b8d05b8SZbigniew Bodek &irq->vector, flags); 20643f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 2065469a8407SMarcin Wojtas rc = ENOMEM; 20669b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "could not allocate " 20679b8d05b8SZbigniew Bodek "irq vector: %d\n", irq->vector); 20689b8d05b8SZbigniew Bodek goto err; 20699b8d05b8SZbigniew Bodek } 20709b8d05b8SZbigniew Bodek 20710bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 20725cb9db07SMarcin Wojtas INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, 20735cb9db07SMarcin Wojtas irq->data, &irq->cookie); 20743f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 20759b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "failed to register " 20769b8d05b8SZbigniew Bodek "interrupt handler for irq %ju: %d\n", 20779b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 20789b8d05b8SZbigniew Bodek goto err; 20799b8d05b8SZbigniew Bodek } 20809b8d05b8SZbigniew Bodek irq->requested = true; 20819b8d05b8SZbigniew Bodek 20824e8acd84SMarcin Wojtas ena_trace(ENA_INFO, "queue %d - cpu %d\n", 20839b8d05b8SZbigniew Bodek i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); 20849b8d05b8SZbigniew Bodek } 20859b8d05b8SZbigniew Bodek 20869b8d05b8SZbigniew Bodek return (rc); 20879b8d05b8SZbigniew Bodek 20889b8d05b8SZbigniew Bodek err: 20899b8d05b8SZbigniew Bodek 20909b8d05b8SZbigniew Bodek for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { 20919b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 20929b8d05b8SZbigniew Bodek rcc = 0; 20939b8d05b8SZbigniew Bodek 20949b8d05b8SZbigniew Bodek /* Once we entered err: section and irq->requested is true we 20959b8d05b8SZbigniew Bodek free both intr and resources */ 20960bdffe59SMarcin Wojtas if (irq->requested) 20979b8d05b8SZbigniew Bodek rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 20983f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 20999b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "could not release" 21009b8d05b8SZbigniew Bodek " irq: %d, error: %d\n", irq->vector, rcc); 21019b8d05b8SZbigniew Bodek 21029b8d05b8SZbigniew Bodek /* If we entred err: section without irq->requested set we know 21039b8d05b8SZbigniew Bodek it was bus_alloc_resource_any() that needs cleanup, provided 21049b8d05b8SZbigniew Bodek res is not NULL. 
In case res is NULL no work in needed in 21059b8d05b8SZbigniew Bodek this iteration */ 21069b8d05b8SZbigniew Bodek rcc = 0; 21079b8d05b8SZbigniew Bodek if (irq->res != NULL) { 21089b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 21099b8d05b8SZbigniew Bodek irq->vector, irq->res); 21109b8d05b8SZbigniew Bodek } 21113f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 21129b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent while " 21139b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 21149b8d05b8SZbigniew Bodek irq->requested = false; 21159b8d05b8SZbigniew Bodek irq->res = NULL; 21169b8d05b8SZbigniew Bodek } 21179b8d05b8SZbigniew Bodek 21189b8d05b8SZbigniew Bodek return (rc); 21199b8d05b8SZbigniew Bodek } 21209b8d05b8SZbigniew Bodek 21219b8d05b8SZbigniew Bodek static void 21229b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(struct ena_adapter *adapter) 21239b8d05b8SZbigniew Bodek { 21249b8d05b8SZbigniew Bodek struct ena_irq *irq; 21259b8d05b8SZbigniew Bodek int rc; 21269b8d05b8SZbigniew Bodek 21279b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 21289b8d05b8SZbigniew Bodek if (irq->requested) { 21299b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n", 21309b8d05b8SZbigniew Bodek irq->vector); 21319b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 21323f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 21339b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "failed to tear " 21349b8d05b8SZbigniew Bodek "down irq: %d\n", irq->vector); 21359b8d05b8SZbigniew Bodek irq->requested = 0; 21369b8d05b8SZbigniew Bodek } 21379b8d05b8SZbigniew Bodek 21389b8d05b8SZbigniew Bodek if (irq->res != NULL) { 21399b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n", 21409b8d05b8SZbigniew Bodek irq->vector); 21419b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 21429b8d05b8SZbigniew Bodek irq->vector, irq->res); 21439b8d05b8SZbigniew Bodek irq->res = NULL; 21443f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 21459b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent while " 21469b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 21479b8d05b8SZbigniew Bodek } 21489b8d05b8SZbigniew Bodek } 21499b8d05b8SZbigniew Bodek 21509b8d05b8SZbigniew Bodek static void 21519b8d05b8SZbigniew Bodek ena_free_io_irq(struct ena_adapter *adapter) 21529b8d05b8SZbigniew Bodek { 21539b8d05b8SZbigniew Bodek struct ena_irq *irq; 21549b8d05b8SZbigniew Bodek int rc; 21559b8d05b8SZbigniew Bodek 21569b8d05b8SZbigniew Bodek for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 21579b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 21589b8d05b8SZbigniew Bodek if (irq->requested) { 21599b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n", 21609b8d05b8SZbigniew Bodek irq->vector); 21619b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, 21629b8d05b8SZbigniew Bodek irq->cookie); 21633f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 21649b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "failed to tear " 21659b8d05b8SZbigniew Bodek "down irq: %d\n", irq->vector); 21669b8d05b8SZbigniew Bodek } 21679b8d05b8SZbigniew Bodek irq->requested = 0; 21689b8d05b8SZbigniew Bodek } 21699b8d05b8SZbigniew Bodek 21709b8d05b8SZbigniew Bodek if (irq->res != NULL) { 21719b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n", 21729b8d05b8SZbigniew Bodek irq->vector); 
21739b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 21749b8d05b8SZbigniew Bodek irq->vector, irq->res); 21759b8d05b8SZbigniew Bodek irq->res = NULL; 21763f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 21779b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent" 21789b8d05b8SZbigniew Bodek " while releasing res for irq: %d\n", 21799b8d05b8SZbigniew Bodek irq->vector); 21809b8d05b8SZbigniew Bodek } 21819b8d05b8SZbigniew Bodek } 21829b8d05b8SZbigniew Bodek } 21839b8d05b8SZbigniew Bodek } 21849b8d05b8SZbigniew Bodek 21859b8d05b8SZbigniew Bodek static void 21869b8d05b8SZbigniew Bodek ena_free_irqs(struct ena_adapter* adapter) 21879b8d05b8SZbigniew Bodek { 21889b8d05b8SZbigniew Bodek 21899b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 21909b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(adapter); 21919b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 21929b8d05b8SZbigniew Bodek } 21939b8d05b8SZbigniew Bodek 21949b8d05b8SZbigniew Bodek static void 21959b8d05b8SZbigniew Bodek ena_disable_msix(struct ena_adapter *adapter) 21969b8d05b8SZbigniew Bodek { 21979b8d05b8SZbigniew Bodek 21989b8d05b8SZbigniew Bodek pci_release_msi(adapter->pdev); 21999b8d05b8SZbigniew Bodek 22009b8d05b8SZbigniew Bodek adapter->msix_vecs = 0; 2201cd5d5804SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 22029b8d05b8SZbigniew Bodek adapter->msix_entries = NULL; 22039b8d05b8SZbigniew Bodek } 22049b8d05b8SZbigniew Bodek 22059b8d05b8SZbigniew Bodek static void 22069b8d05b8SZbigniew Bodek ena_unmask_all_io_irqs(struct ena_adapter *adapter) 22079b8d05b8SZbigniew Bodek { 22089b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 22099b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 22109b8d05b8SZbigniew Bodek uint16_t ena_qid; 22119b8d05b8SZbigniew Bodek int i; 22129b8d05b8SZbigniew Bodek 22139b8d05b8SZbigniew Bodek /* Unmask interrupts for all queues */ 22149b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 22159b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 22169b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 22179b8d05b8SZbigniew Bodek ena_com_update_intr_reg(&intr_reg, 0, 0, true); 22189b8d05b8SZbigniew Bodek ena_com_unmask_intr(io_cq, &intr_reg); 22199b8d05b8SZbigniew Bodek } 22209b8d05b8SZbigniew Bodek } 22219b8d05b8SZbigniew Bodek 22229b8d05b8SZbigniew Bodek /* Configure the Rx forwarding */ 22230bdffe59SMarcin Wojtas static int 22240bdffe59SMarcin Wojtas ena_rss_configure(struct ena_adapter *adapter) 22259b8d05b8SZbigniew Bodek { 22269b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 22279b8d05b8SZbigniew Bodek int rc; 22289b8d05b8SZbigniew Bodek 22299b8d05b8SZbigniew Bodek /* Set indirect table */ 22309b8d05b8SZbigniew Bodek rc = ena_com_indirect_table_set(ena_dev); 22310bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 22320bdffe59SMarcin Wojtas return (rc); 22339b8d05b8SZbigniew Bodek 22349b8d05b8SZbigniew Bodek /* Configure hash function (if supported) */ 22359b8d05b8SZbigniew Bodek rc = ena_com_set_hash_function(ena_dev); 22360bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 22370bdffe59SMarcin Wojtas return (rc); 22389b8d05b8SZbigniew Bodek 22399b8d05b8SZbigniew Bodek /* Configure hash inputs (if supported) */ 22409b8d05b8SZbigniew Bodek rc = ena_com_set_hash_ctrl(ena_dev); 22410bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 22420bdffe59SMarcin Wojtas return (rc); 22439b8d05b8SZbigniew Bodek 22440bdffe59SMarcin Wojtas return (0); 
22459b8d05b8SZbigniew Bodek } 22469b8d05b8SZbigniew Bodek 22479b8d05b8SZbigniew Bodek static int 22489b8d05b8SZbigniew Bodek ena_up_complete(struct ena_adapter *adapter) 22499b8d05b8SZbigniew Bodek { 22509b8d05b8SZbigniew Bodek int rc; 22519b8d05b8SZbigniew Bodek 22523f9ed7abSMarcin Wojtas if (likely(adapter->rss_support)) { 22539b8d05b8SZbigniew Bodek rc = ena_rss_configure(adapter); 22540bdffe59SMarcin Wojtas if (rc != 0) 22559b8d05b8SZbigniew Bodek return (rc); 22569b8d05b8SZbigniew Bodek } 22579b8d05b8SZbigniew Bodek 22587d2544e6SMarcin Wojtas rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu); 22593f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 22607d2544e6SMarcin Wojtas return (rc); 22617d2544e6SMarcin Wojtas 22629b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(adapter); 226330217e2dSMarcin Wojtas ena_reset_counters((counter_u64_t *)&adapter->hw_stats, 226430217e2dSMarcin Wojtas sizeof(adapter->hw_stats)); 22659b8d05b8SZbigniew Bodek 22669b8d05b8SZbigniew Bodek return (0); 22679b8d05b8SZbigniew Bodek } 22689b8d05b8SZbigniew Bodek 22699b8d05b8SZbigniew Bodek static int 22709b8d05b8SZbigniew Bodek ena_up(struct ena_adapter *adapter) 22719b8d05b8SZbigniew Bodek { 22729b8d05b8SZbigniew Bodek int rc = 0; 22739b8d05b8SZbigniew Bodek 22743f9ed7abSMarcin Wojtas if (unlikely(device_is_attached(adapter->pdev) == 0)) { 22759b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "device is not attached!\n"); 22769b8d05b8SZbigniew Bodek return (ENXIO); 22779b8d05b8SZbigniew Bodek } 22789b8d05b8SZbigniew Bodek 22793f9ed7abSMarcin Wojtas if (unlikely(!adapter->running)) { 22809b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "device is not running!\n"); 22819b8d05b8SZbigniew Bodek return (ENXIO); 22829b8d05b8SZbigniew Bodek } 22839b8d05b8SZbigniew Bodek 22849b8d05b8SZbigniew Bodek if (!adapter->up) { 22859b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "device is going UP\n"); 22869b8d05b8SZbigniew Bodek 22879b8d05b8SZbigniew Bodek /* setup interrupts for IO queues */ 22889b8d05b8SZbigniew Bodek ena_setup_io_intr(adapter); 22899b8d05b8SZbigniew Bodek rc = ena_request_io_irq(adapter); 22903f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 22919b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, "err_req_irq"); 22929b8d05b8SZbigniew Bodek goto err_req_irq; 22939b8d05b8SZbigniew Bodek } 22949b8d05b8SZbigniew Bodek 22959b8d05b8SZbigniew Bodek /* allocate transmit descriptors */ 22969b8d05b8SZbigniew Bodek rc = ena_setup_all_tx_resources(adapter); 22973f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 22989b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, "err_setup_tx"); 22999b8d05b8SZbigniew Bodek goto err_setup_tx; 23009b8d05b8SZbigniew Bodek } 23019b8d05b8SZbigniew Bodek 23029b8d05b8SZbigniew Bodek /* allocate receive descriptors */ 23039b8d05b8SZbigniew Bodek rc = ena_setup_all_rx_resources(adapter); 23043f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 23059b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, "err_setup_rx"); 23069b8d05b8SZbigniew Bodek goto err_setup_rx; 23079b8d05b8SZbigniew Bodek } 23089b8d05b8SZbigniew Bodek 23099b8d05b8SZbigniew Bodek /* create IO queues for Rx & Tx */ 23109b8d05b8SZbigniew Bodek rc = ena_create_io_queues(adapter); 23113f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 23129b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, 23139b8d05b8SZbigniew Bodek "create IO queues failed"); 23149b8d05b8SZbigniew Bodek goto err_io_que; 23159b8d05b8SZbigniew Bodek } 23169b8d05b8SZbigniew Bodek 23173f9ed7abSMarcin Wojtas if (unlikely(adapter->link_status)) 23189b8d05b8SZbigniew Bodek 
if_link_state_change(adapter->ifp, LINK_STATE_UP); 23199b8d05b8SZbigniew Bodek 23209b8d05b8SZbigniew Bodek rc = ena_up_complete(adapter); 23213f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 23229b8d05b8SZbigniew Bodek goto err_up_complete; 23239b8d05b8SZbigniew Bodek 23249b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_up, 1); 23259b8d05b8SZbigniew Bodek 23269b8d05b8SZbigniew Bodek ena_update_hwassist(adapter); 23279b8d05b8SZbigniew Bodek 23289b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, 23299b8d05b8SZbigniew Bodek IFF_DRV_OACTIVE); 23309b8d05b8SZbigniew Bodek 23319b8d05b8SZbigniew Bodek callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 23329b8d05b8SZbigniew Bodek ena_timer_service, (void *)adapter, 0); 23339b8d05b8SZbigniew Bodek 23349b8d05b8SZbigniew Bodek adapter->up = true; 233593471047SZbigniew Bodek 233693471047SZbigniew Bodek ena_unmask_all_io_irqs(adapter); 23379b8d05b8SZbigniew Bodek } 23389b8d05b8SZbigniew Bodek 23399b8d05b8SZbigniew Bodek return (0); 23409b8d05b8SZbigniew Bodek 23419b8d05b8SZbigniew Bodek err_up_complete: 23429b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 23439b8d05b8SZbigniew Bodek err_io_que: 23449b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 23459b8d05b8SZbigniew Bodek err_setup_rx: 23469b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 23479b8d05b8SZbigniew Bodek err_setup_tx: 23489b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 23499b8d05b8SZbigniew Bodek err_req_irq: 23509b8d05b8SZbigniew Bodek return (rc); 23519b8d05b8SZbigniew Bodek } 23529b8d05b8SZbigniew Bodek 23539b8d05b8SZbigniew Bodek static uint64_t 23549b8d05b8SZbigniew Bodek ena_get_counter(if_t ifp, ift_counter cnt) 23559b8d05b8SZbigniew Bodek { 23569b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 23579b8d05b8SZbigniew Bodek struct ena_hw_stats *stats; 23589b8d05b8SZbigniew Bodek 23599b8d05b8SZbigniew Bodek adapter = if_getsoftc(ifp); 23609b8d05b8SZbigniew Bodek stats = &adapter->hw_stats; 23619b8d05b8SZbigniew Bodek 23629b8d05b8SZbigniew Bodek switch (cnt) { 23639b8d05b8SZbigniew Bodek case IFCOUNTER_IPACKETS: 236430217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_packets)); 23659b8d05b8SZbigniew Bodek case IFCOUNTER_OPACKETS: 236630217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_packets)); 23679b8d05b8SZbigniew Bodek case IFCOUNTER_IBYTES: 236830217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_bytes)); 23699b8d05b8SZbigniew Bodek case IFCOUNTER_OBYTES: 237030217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_bytes)); 23719b8d05b8SZbigniew Bodek case IFCOUNTER_IQDROPS: 237230217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_drops)); 23739b8d05b8SZbigniew Bodek default: 23749b8d05b8SZbigniew Bodek return (if_get_counter_default(ifp, cnt)); 23759b8d05b8SZbigniew Bodek } 23769b8d05b8SZbigniew Bodek } 23779b8d05b8SZbigniew Bodek 23789b8d05b8SZbigniew Bodek static int 23799b8d05b8SZbigniew Bodek ena_media_change(if_t ifp) 23809b8d05b8SZbigniew Bodek { 23819b8d05b8SZbigniew Bodek /* Media Change is not supported by firmware */ 23829b8d05b8SZbigniew Bodek return (0); 23839b8d05b8SZbigniew Bodek } 23849b8d05b8SZbigniew Bodek 23859b8d05b8SZbigniew Bodek static void 23869b8d05b8SZbigniew Bodek ena_media_status(if_t ifp, struct ifmediareq *ifmr) 23879b8d05b8SZbigniew Bodek { 23889b8d05b8SZbigniew Bodek struct ena_adapter *adapter = if_getsoftc(ifp); 23899b8d05b8SZbigniew Bodek ena_trace(ENA_DBG, "enter"); 23909b8d05b8SZbigniew Bodek 23915a990212SMarcin Wojtas 
mtx_lock(&adapter->global_mtx); 23929b8d05b8SZbigniew Bodek 23939b8d05b8SZbigniew Bodek ifmr->ifm_status = IFM_AVALID; 23949b8d05b8SZbigniew Bodek ifmr->ifm_active = IFM_ETHER; 23959b8d05b8SZbigniew Bodek 23969b8d05b8SZbigniew Bodek if (!adapter->link_status) { 23975a990212SMarcin Wojtas mtx_unlock(&adapter->global_mtx); 23984e8acd84SMarcin Wojtas ena_trace(ENA_INFO, "link_status = false"); 23999b8d05b8SZbigniew Bodek return; 24009b8d05b8SZbigniew Bodek } 24019b8d05b8SZbigniew Bodek 24029b8d05b8SZbigniew Bodek ifmr->ifm_status |= IFM_ACTIVE; 2403b8ca5dbeSMarcin Wojtas ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX; 24049b8d05b8SZbigniew Bodek 24055a990212SMarcin Wojtas mtx_unlock(&adapter->global_mtx); 24069b8d05b8SZbigniew Bodek } 24079b8d05b8SZbigniew Bodek 24089b8d05b8SZbigniew Bodek static void 24099b8d05b8SZbigniew Bodek ena_init(void *arg) 24109b8d05b8SZbigniew Bodek { 24119b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 24129b8d05b8SZbigniew Bodek 24130bdffe59SMarcin Wojtas if (!adapter->up) { 24143d3a90f9SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 24159b8d05b8SZbigniew Bodek ena_up(adapter); 24163d3a90f9SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 24173d3a90f9SZbigniew Bodek } 24189b8d05b8SZbigniew Bodek } 24199b8d05b8SZbigniew Bodek 24209b8d05b8SZbigniew Bodek static int 24219b8d05b8SZbigniew Bodek ena_ioctl(if_t ifp, u_long command, caddr_t data) 24229b8d05b8SZbigniew Bodek { 24239b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 24249b8d05b8SZbigniew Bodek struct ifreq *ifr; 24259b8d05b8SZbigniew Bodek int rc; 24269b8d05b8SZbigniew Bodek 24279b8d05b8SZbigniew Bodek adapter = ifp->if_softc; 24289b8d05b8SZbigniew Bodek ifr = (struct ifreq *)data; 24299b8d05b8SZbigniew Bodek 24309b8d05b8SZbigniew Bodek /* 24319b8d05b8SZbigniew Bodek * Acquire the lock to prevent the up and down routines from running in parallel.
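 * The same ioctl_sx lock is taken around ena_up() in ena_init(), so the
 * MTU, flags and capability handlers below cannot race with the interface
 * being brought up from ena_init().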
24329b8d05b8SZbigniew Bodek */ 24339b8d05b8SZbigniew Bodek rc = 0; 24349b8d05b8SZbigniew Bodek switch (command) { 24359b8d05b8SZbigniew Bodek case SIOCSIFMTU: 2436dbf2eb54SMarcin Wojtas if (ifp->if_mtu == ifr->ifr_mtu) 2437dbf2eb54SMarcin Wojtas break; 2438e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 24399b8d05b8SZbigniew Bodek ena_down(adapter); 24409b8d05b8SZbigniew Bodek 24419b8d05b8SZbigniew Bodek ena_change_mtu(ifp, ifr->ifr_mtu); 24429b8d05b8SZbigniew Bodek 24439b8d05b8SZbigniew Bodek rc = ena_up(adapter); 2444e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 24459b8d05b8SZbigniew Bodek break; 24469b8d05b8SZbigniew Bodek 24479b8d05b8SZbigniew Bodek case SIOCSIFFLAGS: 24480bdffe59SMarcin Wojtas if ((ifp->if_flags & IFF_UP) != 0) { 24490bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 24500bdffe59SMarcin Wojtas if ((ifp->if_flags & (IFF_PROMISC | 24510bdffe59SMarcin Wojtas IFF_ALLMULTI)) != 0) { 24529b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 24539b8d05b8SZbigniew Bodek "ioctl promisc/allmulti\n"); 24549b8d05b8SZbigniew Bodek } 24559b8d05b8SZbigniew Bodek } else { 2456e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 24579b8d05b8SZbigniew Bodek rc = ena_up(adapter); 2458e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 24599b8d05b8SZbigniew Bodek } 24609b8d05b8SZbigniew Bodek } else { 24610bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 2462e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 24639b8d05b8SZbigniew Bodek ena_down(adapter); 2464e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 2465e67c6554SZbigniew Bodek } 24669b8d05b8SZbigniew Bodek } 24679b8d05b8SZbigniew Bodek break; 24689b8d05b8SZbigniew Bodek 24699b8d05b8SZbigniew Bodek case SIOCADDMULTI: 24709b8d05b8SZbigniew Bodek case SIOCDELMULTI: 24719b8d05b8SZbigniew Bodek break; 24729b8d05b8SZbigniew Bodek 24739b8d05b8SZbigniew Bodek case SIOCSIFMEDIA: 24749b8d05b8SZbigniew Bodek case SIOCGIFMEDIA: 24759b8d05b8SZbigniew Bodek rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 24769b8d05b8SZbigniew Bodek break; 24779b8d05b8SZbigniew Bodek 24789b8d05b8SZbigniew Bodek case SIOCSIFCAP: 24799b8d05b8SZbigniew Bodek { 24809b8d05b8SZbigniew Bodek int reinit = 0; 24819b8d05b8SZbigniew Bodek 24829b8d05b8SZbigniew Bodek if (ifr->ifr_reqcap != ifp->if_capenable) { 24839b8d05b8SZbigniew Bodek ifp->if_capenable = ifr->ifr_reqcap; 24849b8d05b8SZbigniew Bodek reinit = 1; 24859b8d05b8SZbigniew Bodek } 24869b8d05b8SZbigniew Bodek 24870bdffe59SMarcin Wojtas if ((reinit != 0) && 24880bdffe59SMarcin Wojtas ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { 2489e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 24909b8d05b8SZbigniew Bodek ena_down(adapter); 24919b8d05b8SZbigniew Bodek rc = ena_up(adapter); 2492e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 24939b8d05b8SZbigniew Bodek } 24949b8d05b8SZbigniew Bodek } 24959b8d05b8SZbigniew Bodek 24969b8d05b8SZbigniew Bodek break; 24979b8d05b8SZbigniew Bodek default: 24989b8d05b8SZbigniew Bodek rc = ether_ioctl(ifp, command, data); 24999b8d05b8SZbigniew Bodek break; 25009b8d05b8SZbigniew Bodek } 25019b8d05b8SZbigniew Bodek 25029b8d05b8SZbigniew Bodek return (rc); 25039b8d05b8SZbigniew Bodek } 25049b8d05b8SZbigniew Bodek 25059b8d05b8SZbigniew Bodek static int 25069b8d05b8SZbigniew Bodek ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) 25079b8d05b8SZbigniew Bodek { 25089b8d05b8SZbigniew Bodek int caps = 0; 25099b8d05b8SZbigniew Bodek 25100bdffe59SMarcin Wojtas if ((feat->offload.tx & 
25119b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 25129b8d05b8SZbigniew Bodek ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | 25130bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) 25149b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM; 25159b8d05b8SZbigniew Bodek 25160bdffe59SMarcin Wojtas if ((feat->offload.tx & 25179b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | 25180bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) 25199b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM_IPV6; 25209b8d05b8SZbigniew Bodek 25210bdffe59SMarcin Wojtas if ((feat->offload.tx & 25220bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) 25239b8d05b8SZbigniew Bodek caps |= IFCAP_TSO4; 25249b8d05b8SZbigniew Bodek 25250bdffe59SMarcin Wojtas if ((feat->offload.tx & 25260bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) 25279b8d05b8SZbigniew Bodek caps |= IFCAP_TSO6; 25289b8d05b8SZbigniew Bodek 25290bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 25309b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | 25310bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) 25329b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM; 25339b8d05b8SZbigniew Bodek 25340bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 25350bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) 25369b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM_IPV6; 25379b8d05b8SZbigniew Bodek 25389b8d05b8SZbigniew Bodek caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; 25399b8d05b8SZbigniew Bodek 25409b8d05b8SZbigniew Bodek return (caps); 25419b8d05b8SZbigniew Bodek } 25429b8d05b8SZbigniew Bodek 25439b8d05b8SZbigniew Bodek static void 25449b8d05b8SZbigniew Bodek ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) 25459b8d05b8SZbigniew Bodek { 25469b8d05b8SZbigniew Bodek 25479b8d05b8SZbigniew Bodek host_info->supported_network_features[0] = 25489b8d05b8SZbigniew Bodek (uint32_t)if_getcapabilities(ifp); 25499b8d05b8SZbigniew Bodek } 25509b8d05b8SZbigniew Bodek 25519b8d05b8SZbigniew Bodek static void 25529b8d05b8SZbigniew Bodek ena_update_hwassist(struct ena_adapter *adapter) 25539b8d05b8SZbigniew Bodek { 25549b8d05b8SZbigniew Bodek if_t ifp = adapter->ifp; 25559b8d05b8SZbigniew Bodek uint32_t feat = adapter->tx_offload_cap; 25569b8d05b8SZbigniew Bodek int cap = if_getcapenable(ifp); 25579b8d05b8SZbigniew Bodek int flags = 0; 25589b8d05b8SZbigniew Bodek 25599b8d05b8SZbigniew Bodek if_clearhwassist(ifp); 25609b8d05b8SZbigniew Bodek 25610bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM) != 0) { 25620bdffe59SMarcin Wojtas if ((feat & 25630bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) 25649b8d05b8SZbigniew Bodek flags |= CSUM_IP; 25650bdffe59SMarcin Wojtas if ((feat & 25669b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 25670bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) 25689b8d05b8SZbigniew Bodek flags |= CSUM_IP_UDP | CSUM_IP_TCP; 25699b8d05b8SZbigniew Bodek } 25709b8d05b8SZbigniew Bodek 25710bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM_IPV6) != 0) 25729b8d05b8SZbigniew Bodek flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; 25739b8d05b8SZbigniew Bodek 25740bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO4) != 0) 25759b8d05b8SZbigniew Bodek flags |= CSUM_IP_TSO; 25769b8d05b8SZbigniew Bodek 25770bdffe59SMarcin 
Wojtas if ((cap & IFCAP_TSO6) != 0) 25789b8d05b8SZbigniew Bodek flags |= CSUM_IP6_TSO; 25799b8d05b8SZbigniew Bodek 25809b8d05b8SZbigniew Bodek if_sethwassistbits(ifp, flags, 0); 25819b8d05b8SZbigniew Bodek } 25829b8d05b8SZbigniew Bodek 25839b8d05b8SZbigniew Bodek static int 25849b8d05b8SZbigniew Bodek ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, 25859b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *feat) 25869b8d05b8SZbigniew Bodek { 25879b8d05b8SZbigniew Bodek if_t ifp; 25889b8d05b8SZbigniew Bodek int caps = 0; 25899b8d05b8SZbigniew Bodek 25909b8d05b8SZbigniew Bodek ifp = adapter->ifp = if_gethandle(IFT_ETHER); 25913f9ed7abSMarcin Wojtas if (unlikely(ifp == NULL)) { 25924e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, "can not allocate ifnet structure\n"); 25939b8d05b8SZbigniew Bodek return (ENXIO); 25949b8d05b8SZbigniew Bodek } 25959b8d05b8SZbigniew Bodek if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); 25969b8d05b8SZbigniew Bodek if_setdev(ifp, pdev); 25979b8d05b8SZbigniew Bodek if_setsoftc(ifp, adapter); 25989b8d05b8SZbigniew Bodek 25999b8d05b8SZbigniew Bodek if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 26009b8d05b8SZbigniew Bodek if_setinitfn(ifp, ena_init); 26019b8d05b8SZbigniew Bodek if_settransmitfn(ifp, ena_mq_start); 26029b8d05b8SZbigniew Bodek if_setqflushfn(ifp, ena_qflush); 26039b8d05b8SZbigniew Bodek if_setioctlfn(ifp, ena_ioctl); 26049b8d05b8SZbigniew Bodek if_setgetcounterfn(ifp, ena_get_counter); 26059b8d05b8SZbigniew Bodek 26069b8d05b8SZbigniew Bodek if_setsendqlen(ifp, adapter->tx_ring_size); 26079b8d05b8SZbigniew Bodek if_setsendqready(ifp); 26089b8d05b8SZbigniew Bodek if_setmtu(ifp, ETHERMTU); 26099b8d05b8SZbigniew Bodek if_setbaudrate(ifp, 0); 26109b8d05b8SZbigniew Bodek /* Zeroize capabilities... */ 26119b8d05b8SZbigniew Bodek if_setcapabilities(ifp, 0); 26129b8d05b8SZbigniew Bodek if_setcapenable(ifp, 0); 26139b8d05b8SZbigniew Bodek /* check hardware support */ 26149b8d05b8SZbigniew Bodek caps = ena_get_dev_offloads(feat); 26159b8d05b8SZbigniew Bodek /* ... 
and set them */ 26169b8d05b8SZbigniew Bodek if_setcapabilitiesbit(ifp, caps, 0); 26179b8d05b8SZbigniew Bodek 26189b8d05b8SZbigniew Bodek /* TSO parameters */ 26198a573700SZbigniew Bodek ifp->if_hw_tsomax = ENA_TSO_MAXSIZE - 26208a573700SZbigniew Bodek (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 26218a573700SZbigniew Bodek ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1; 26228a573700SZbigniew Bodek ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE; 26239b8d05b8SZbigniew Bodek 26249b8d05b8SZbigniew Bodek if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 26259b8d05b8SZbigniew Bodek if_setcapenable(ifp, if_getcapabilities(ifp)); 26269b8d05b8SZbigniew Bodek 26279b8d05b8SZbigniew Bodek /* 26289b8d05b8SZbigniew Bodek * Specify the media types supported by this adapter and register 26299b8d05b8SZbigniew Bodek * callbacks to update media and link information 26309b8d05b8SZbigniew Bodek */ 26319b8d05b8SZbigniew Bodek ifmedia_init(&adapter->media, IFM_IMASK, 26329b8d05b8SZbigniew Bodek ena_media_change, ena_media_status); 26339b8d05b8SZbigniew Bodek ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 26349b8d05b8SZbigniew Bodek ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 26359b8d05b8SZbigniew Bodek 26369b8d05b8SZbigniew Bodek ether_ifattach(ifp, adapter->mac_addr); 26379b8d05b8SZbigniew Bodek 26389b8d05b8SZbigniew Bodek return (0); 26399b8d05b8SZbigniew Bodek } 26409b8d05b8SZbigniew Bodek 26419b8d05b8SZbigniew Bodek static void 26429b8d05b8SZbigniew Bodek ena_down(struct ena_adapter *adapter) 26439b8d05b8SZbigniew Bodek { 2644a195fab0SMarcin Wojtas int rc; 26459b8d05b8SZbigniew Bodek 26469b8d05b8SZbigniew Bodek if (adapter->up) { 26479b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "device is going DOWN\n"); 26489b8d05b8SZbigniew Bodek 26499b8d05b8SZbigniew Bodek callout_drain(&adapter->timer_service); 26509b8d05b8SZbigniew Bodek 26519b8d05b8SZbigniew Bodek adapter->up = false; 26529b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, 26539b8d05b8SZbigniew Bodek IFF_DRV_RUNNING); 26549b8d05b8SZbigniew Bodek 26559b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 26569b8d05b8SZbigniew Bodek 2657a195fab0SMarcin Wojtas if (adapter->trigger_reset) { 2658a195fab0SMarcin Wojtas rc = ena_com_dev_reset(adapter->ena_dev, 2659a195fab0SMarcin Wojtas adapter->reset_reason); 26603f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 2661a195fab0SMarcin Wojtas device_printf(adapter->pdev, 2662a195fab0SMarcin Wojtas "Device reset failed\n"); 2663a195fab0SMarcin Wojtas } 2664a195fab0SMarcin Wojtas 26659b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 26669b8d05b8SZbigniew Bodek 26679b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(adapter); 26689b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(adapter); 26699b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 26709b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 26719b8d05b8SZbigniew Bodek 26729b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_down, 1); 26739b8d05b8SZbigniew Bodek } 26749b8d05b8SZbigniew Bodek } 26759b8d05b8SZbigniew Bodek 26769b8d05b8SZbigniew Bodek static void 26779b8d05b8SZbigniew Bodek ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf) 26789b8d05b8SZbigniew Bodek { 26799b8d05b8SZbigniew Bodek struct ena_com_tx_meta *ena_meta; 26809b8d05b8SZbigniew Bodek struct ether_vlan_header *eh; 26819b8d05b8SZbigniew Bodek u32 mss; 26829b8d05b8SZbigniew Bodek bool offload; 26839b8d05b8SZbigniew Bodek uint16_t etype; 26849b8d05b8SZbigniew Bodek int ehdrlen; 
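/*
 * Parsed L3/L4 header state; the offsets computed below assume the IP
 * header sits right after the Ethernet/VLAN header.
 */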
26859b8d05b8SZbigniew Bodek struct ip *ip; 26869b8d05b8SZbigniew Bodek int iphlen; 26879b8d05b8SZbigniew Bodek struct tcphdr *th; 26889b8d05b8SZbigniew Bodek 26899b8d05b8SZbigniew Bodek offload = false; 26909b8d05b8SZbigniew Bodek ena_meta = &ena_tx_ctx->ena_meta; 26919b8d05b8SZbigniew Bodek mss = mbuf->m_pkthdr.tso_segsz; 26929b8d05b8SZbigniew Bodek 26939b8d05b8SZbigniew Bodek if (mss != 0) 26949b8d05b8SZbigniew Bodek offload = true; 26959b8d05b8SZbigniew Bodek 26969b8d05b8SZbigniew Bodek if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) 26979b8d05b8SZbigniew Bodek offload = true; 26989b8d05b8SZbigniew Bodek 26999b8d05b8SZbigniew Bodek if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0) 27009b8d05b8SZbigniew Bodek offload = true; 27019b8d05b8SZbigniew Bodek 27020bdffe59SMarcin Wojtas if (!offload) { 27039b8d05b8SZbigniew Bodek ena_tx_ctx->meta_valid = 0; 27049b8d05b8SZbigniew Bodek return; 27059b8d05b8SZbigniew Bodek } 27069b8d05b8SZbigniew Bodek 27079b8d05b8SZbigniew Bodek /* Determine where frame payload starts. */ 27089b8d05b8SZbigniew Bodek eh = mtod(mbuf, struct ether_vlan_header *); 27099b8d05b8SZbigniew Bodek if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 27109b8d05b8SZbigniew Bodek etype = ntohs(eh->evl_proto); 27119b8d05b8SZbigniew Bodek ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 27129b8d05b8SZbigniew Bodek } else { 27139b8d05b8SZbigniew Bodek etype = ntohs(eh->evl_encap_proto); 27149b8d05b8SZbigniew Bodek ehdrlen = ETHER_HDR_LEN; 27159b8d05b8SZbigniew Bodek } 27169b8d05b8SZbigniew Bodek 27179b8d05b8SZbigniew Bodek ip = (struct ip *)(mbuf->m_data + ehdrlen); 27189b8d05b8SZbigniew Bodek iphlen = ip->ip_hl << 2; 27199b8d05b8SZbigniew Bodek th = (struct tcphdr *)((caddr_t)ip + iphlen); 27209b8d05b8SZbigniew Bodek 27219b8d05b8SZbigniew Bodek if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) { 27229b8d05b8SZbigniew Bodek ena_tx_ctx->l3_csum_enable = 1; 27239b8d05b8SZbigniew Bodek } 27249b8d05b8SZbigniew Bodek if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 27259b8d05b8SZbigniew Bodek ena_tx_ctx->tso_enable = 1; 27269b8d05b8SZbigniew Bodek ena_meta->l4_hdr_len = (th->th_off); 27279b8d05b8SZbigniew Bodek } 27289b8d05b8SZbigniew Bodek 27299b8d05b8SZbigniew Bodek switch (etype) { 27309b8d05b8SZbigniew Bodek case ETHERTYPE_IP: 27319b8d05b8SZbigniew Bodek ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 2732cd433385SMarcin Wojtas if ((ip->ip_off & htons(IP_DF)) != 0) 27339b8d05b8SZbigniew Bodek ena_tx_ctx->df = 1; 27349b8d05b8SZbigniew Bodek break; 27359b8d05b8SZbigniew Bodek case ETHERTYPE_IPV6: 27369b8d05b8SZbigniew Bodek ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 27379b8d05b8SZbigniew Bodek 27389b8d05b8SZbigniew Bodek default: 27399b8d05b8SZbigniew Bodek break; 27409b8d05b8SZbigniew Bodek } 27419b8d05b8SZbigniew Bodek 27429b8d05b8SZbigniew Bodek if (ip->ip_p == IPPROTO_TCP) { 27439b8d05b8SZbigniew Bodek ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 27440bdffe59SMarcin Wojtas if ((mbuf->m_pkthdr.csum_flags & 27450bdffe59SMarcin Wojtas (CSUM_IP_TCP | CSUM_IP6_TCP)) != 0) 27469b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 1; 27479b8d05b8SZbigniew Bodek else 27489b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 0; 27499b8d05b8SZbigniew Bodek } else if (ip->ip_p == IPPROTO_UDP) { 27509b8d05b8SZbigniew Bodek ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 27510bdffe59SMarcin Wojtas if ((mbuf->m_pkthdr.csum_flags & 27520bdffe59SMarcin Wojtas (CSUM_IP_UDP | CSUM_IP6_UDP)) != 0) 27539b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 1; 27549b8d05b8SZbigniew Bodek 
else 27559b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 0; 27569b8d05b8SZbigniew Bodek } else { 27579b8d05b8SZbigniew Bodek ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 27589b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 0; 27599b8d05b8SZbigniew Bodek } 27609b8d05b8SZbigniew Bodek 27619b8d05b8SZbigniew Bodek ena_meta->mss = mss; 27629b8d05b8SZbigniew Bodek ena_meta->l3_hdr_len = iphlen; 27639b8d05b8SZbigniew Bodek ena_meta->l3_hdr_offset = ehdrlen; 27649b8d05b8SZbigniew Bodek ena_tx_ctx->meta_valid = 1; 27659b8d05b8SZbigniew Bodek } 27669b8d05b8SZbigniew Bodek 27679b8d05b8SZbigniew Bodek static int 27681b069f1cSZbigniew Bodek ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) 27691e9fb899SZbigniew Bodek { 27701e9fb899SZbigniew Bodek struct ena_adapter *adapter; 27711b069f1cSZbigniew Bodek struct mbuf *collapsed_mbuf; 27721e9fb899SZbigniew Bodek int num_frags; 27731e9fb899SZbigniew Bodek 27741e9fb899SZbigniew Bodek adapter = tx_ring->adapter; 27751e9fb899SZbigniew Bodek num_frags = ena_mbuf_count(*mbuf); 27761e9fb899SZbigniew Bodek 27771e9fb899SZbigniew Bodek /* One segment must be reserved for configuration descriptor. */ 27781e9fb899SZbigniew Bodek if (num_frags < adapter->max_tx_sgl_size) 27791e9fb899SZbigniew Bodek return (0); 27801b069f1cSZbigniew Bodek counter_u64_add(tx_ring->tx_stats.collapse, 1); 27811e9fb899SZbigniew Bodek 27821b069f1cSZbigniew Bodek collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT, 27831b069f1cSZbigniew Bodek adapter->max_tx_sgl_size - 1); 27843f9ed7abSMarcin Wojtas if (unlikely(collapsed_mbuf == NULL)) { 27851b069f1cSZbigniew Bodek counter_u64_add(tx_ring->tx_stats.collapse_err, 1); 27861e9fb899SZbigniew Bodek return (ENOMEM); 27871e9fb899SZbigniew Bodek } 27881e9fb899SZbigniew Bodek 27891b069f1cSZbigniew Bodek /* If the mbuf was collapsed successfully, the original mbuf is released.
*/ 27901b069f1cSZbigniew Bodek *mbuf = collapsed_mbuf; 27911e9fb899SZbigniew Bodek 27921e9fb899SZbigniew Bodek return (0); 27931e9fb899SZbigniew Bodek } 27941e9fb899SZbigniew Bodek 27954fa9e02dSMarcin Wojtas static void 27964fa9e02dSMarcin Wojtas ena_dmamap_llq(void *arg, bus_dma_segment_t *segs, int nseg, int error) 27974fa9e02dSMarcin Wojtas { 27984fa9e02dSMarcin Wojtas struct ena_com_buf *ena_buf = arg; 27994fa9e02dSMarcin Wojtas 28004fa9e02dSMarcin Wojtas if (unlikely(error != 0)) { 28014fa9e02dSMarcin Wojtas ena_buf->paddr = 0; 28024fa9e02dSMarcin Wojtas return; 28034fa9e02dSMarcin Wojtas } 28044fa9e02dSMarcin Wojtas 28054fa9e02dSMarcin Wojtas KASSERT(nseg == 1, ("Invalid num of segments for LLQ dma")); 28064fa9e02dSMarcin Wojtas 28074fa9e02dSMarcin Wojtas ena_buf->paddr = segs->ds_addr; 28084fa9e02dSMarcin Wojtas ena_buf->len = segs->ds_len; 28094fa9e02dSMarcin Wojtas } 28104fa9e02dSMarcin Wojtas 28114fa9e02dSMarcin Wojtas static int 28124fa9e02dSMarcin Wojtas ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, 28134fa9e02dSMarcin Wojtas struct mbuf *mbuf, void **push_hdr, u16 *header_len) 28144fa9e02dSMarcin Wojtas { 28154fa9e02dSMarcin Wojtas struct ena_adapter *adapter = tx_ring->adapter; 28164fa9e02dSMarcin Wojtas struct ena_com_buf *ena_buf; 28174fa9e02dSMarcin Wojtas bus_dma_segment_t segs[ENA_BUS_DMA_SEGS]; 28184fa9e02dSMarcin Wojtas uint32_t mbuf_head_len, frag_len; 28194fa9e02dSMarcin Wojtas uint16_t push_len = 0; 28204fa9e02dSMarcin Wojtas uint16_t delta = 0; 28214fa9e02dSMarcin Wojtas int i, rc, nsegs; 28224fa9e02dSMarcin Wojtas 28234fa9e02dSMarcin Wojtas mbuf_head_len = mbuf->m_len; 28244fa9e02dSMarcin Wojtas tx_info->mbuf = mbuf; 28254fa9e02dSMarcin Wojtas ena_buf = tx_info->bufs; 28264fa9e02dSMarcin Wojtas 28274fa9e02dSMarcin Wojtas if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 28284fa9e02dSMarcin Wojtas /* 28294fa9e02dSMarcin Wojtas * When the device is LLQ mode, the driver will copy 28304fa9e02dSMarcin Wojtas * the header into the device memory space. 28314fa9e02dSMarcin Wojtas * the ena_com layer assumes the header is in a linear 28324fa9e02dSMarcin Wojtas * memory space. 28334fa9e02dSMarcin Wojtas * This assumption might be wrong since part of the header 28344fa9e02dSMarcin Wojtas * can be in the fragmented buffers. 28354fa9e02dSMarcin Wojtas * First check if header fits in the mbuf. If not, copy it to 28364fa9e02dSMarcin Wojtas * separate buffer that will be holding linearized data. 28374fa9e02dSMarcin Wojtas */ 28384fa9e02dSMarcin Wojtas push_len = min_t(uint32_t, mbuf->m_pkthdr.len, 28394fa9e02dSMarcin Wojtas tx_ring->tx_max_header_size); 28404fa9e02dSMarcin Wojtas *header_len = push_len; 28414fa9e02dSMarcin Wojtas /* If header is in linear space, just point into mbuf's data. */ 28424fa9e02dSMarcin Wojtas if (likely(push_len <= mbuf_head_len)) { 28434fa9e02dSMarcin Wojtas *push_hdr = mbuf->m_data; 28444fa9e02dSMarcin Wojtas /* 28454fa9e02dSMarcin Wojtas * Otherwise, copy whole portion of header from multiple mbufs 28464fa9e02dSMarcin Wojtas * to intermediate buffer. 
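 * For example, assuming a sufficiently long packet, a tx_max_header_size
 * of 96 and a first mbuf holding only 64 bytes: push_len becomes 96, the
 * header is linearized into push_buf_intermediate_buf and delta is set to
 * 32, so the header bytes living in later mbufs are skipped when mapping
 * below.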
28474fa9e02dSMarcin Wojtas */ 28484fa9e02dSMarcin Wojtas } else { 28494fa9e02dSMarcin Wojtas m_copydata(mbuf, 0, push_len, 28504fa9e02dSMarcin Wojtas tx_ring->push_buf_intermediate_buf); 28514fa9e02dSMarcin Wojtas *push_hdr = tx_ring->push_buf_intermediate_buf; 28524fa9e02dSMarcin Wojtas 28534fa9e02dSMarcin Wojtas counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1); 28544fa9e02dSMarcin Wojtas delta = push_len - mbuf_head_len; 28554fa9e02dSMarcin Wojtas } 28564fa9e02dSMarcin Wojtas 28574fa9e02dSMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, 28584fa9e02dSMarcin Wojtas "mbuf: %p header_buf->vaddr: %p push_len: %d\n", 28594fa9e02dSMarcin Wojtas mbuf, *push_hdr, push_len); 28604fa9e02dSMarcin Wojtas 28614fa9e02dSMarcin Wojtas /* 28624fa9e02dSMarcin Wojtas * If the header was in linear memory space, map the rest of the data 28634fa9e02dSMarcin Wojtas * in the first mbuf of the mbuf chain for DMA. 28644fa9e02dSMarcin Wojtas */ 28654fa9e02dSMarcin Wojtas if (mbuf_head_len > push_len) { 28664fa9e02dSMarcin Wojtas rc = bus_dmamap_load(adapter->tx_buf_tag, 28674fa9e02dSMarcin Wojtas tx_info->map_head, 28684fa9e02dSMarcin Wojtas mbuf->m_data + push_len, mbuf_head_len - push_len, 28694fa9e02dSMarcin Wojtas ena_dmamap_llq, ena_buf, BUS_DMA_NOWAIT); 28704fa9e02dSMarcin Wojtas if (unlikely((rc != 0) || (ena_buf->paddr == 0))) 28714fa9e02dSMarcin Wojtas goto single_dma_error; 28724fa9e02dSMarcin Wojtas 28734fa9e02dSMarcin Wojtas ena_buf++; 28744fa9e02dSMarcin Wojtas tx_info->num_of_bufs++; 28754fa9e02dSMarcin Wojtas 28764fa9e02dSMarcin Wojtas tx_info->head_mapped = true; 28774fa9e02dSMarcin Wojtas } 28784fa9e02dSMarcin Wojtas mbuf = mbuf->m_next; 28794fa9e02dSMarcin Wojtas } else { 28804fa9e02dSMarcin Wojtas *push_hdr = NULL; 28814fa9e02dSMarcin Wojtas /* 28824fa9e02dSMarcin Wojtas * header_len is just a hint for the device. Because FreeBSD does not 28834fa9e02dSMarcin Wojtas * provide the packet header length and it is not 28844fa9e02dSMarcin Wojtas * guaranteed that all packet headers will be in the 1st mbuf, setting 28854fa9e02dSMarcin Wojtas * header_len to 0 makes the device ignore this value and resolve the 28864fa9e02dSMarcin Wojtas * header on its own. 28874fa9e02dSMarcin Wojtas */ 28884fa9e02dSMarcin Wojtas *header_len = 0; 28894fa9e02dSMarcin Wojtas } 28904fa9e02dSMarcin Wojtas 28914fa9e02dSMarcin Wojtas /* 28924fa9e02dSMarcin Wojtas * If the header is in non-linear space (delta > 0), then skip the mbufs 28934fa9e02dSMarcin Wojtas * containing only header bytes and map the last one containing both header 28944fa9e02dSMarcin Wojtas * and packet data. 28954fa9e02dSMarcin Wojtas * The first segment is already counted in. 28964fa9e02dSMarcin Wojtas * If LLQ is not supported, the loop will be skipped. 28974fa9e02dSMarcin Wojtas */ 28984fa9e02dSMarcin Wojtas while (delta > 0) { 28994fa9e02dSMarcin Wojtas frag_len = mbuf->m_len; 29004fa9e02dSMarcin Wojtas 29014fa9e02dSMarcin Wojtas /* 29024fa9e02dSMarcin Wojtas * If the whole segment contains only header bytes, just move to the 29034fa9e02dSMarcin Wojtas * next one and reduce delta. 29044fa9e02dSMarcin Wojtas */ 29054fa9e02dSMarcin Wojtas if (unlikely(delta >= frag_len)) { 29064fa9e02dSMarcin Wojtas delta -= frag_len; 29074fa9e02dSMarcin Wojtas } else { 29084fa9e02dSMarcin Wojtas /* 29094fa9e02dSMarcin Wojtas * Map the rest of the packet data that was contained in 29104fa9e02dSMarcin Wojtas * the mbuf.
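 * For example, if 32 header bytes (delta) spilled into this mbuf and its
 * m_len is 100, only the remaining 68 bytes starting at m_data + delta
 * are mapped here.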
29114fa9e02dSMarcin Wojtas */ 29124fa9e02dSMarcin Wojtas rc = bus_dmamap_load(adapter->tx_buf_tag, 29134fa9e02dSMarcin Wojtas tx_info->map_head, mbuf->m_data + delta, 29144fa9e02dSMarcin Wojtas frag_len - delta, ena_dmamap_llq, ena_buf, 29154fa9e02dSMarcin Wojtas BUS_DMA_NOWAIT); 29164fa9e02dSMarcin Wojtas if (unlikely((rc != 0) || (ena_buf->paddr == 0))) 29174fa9e02dSMarcin Wojtas goto single_dma_error; 29184fa9e02dSMarcin Wojtas 29194fa9e02dSMarcin Wojtas ena_buf++; 29204fa9e02dSMarcin Wojtas tx_info->num_of_bufs++; 29214fa9e02dSMarcin Wojtas tx_info->head_mapped = true; 29224fa9e02dSMarcin Wojtas 29234fa9e02dSMarcin Wojtas delta = 0; 29244fa9e02dSMarcin Wojtas } 29254fa9e02dSMarcin Wojtas 29264fa9e02dSMarcin Wojtas mbuf = mbuf->m_next; 29274fa9e02dSMarcin Wojtas } 29284fa9e02dSMarcin Wojtas 29294fa9e02dSMarcin Wojtas if (mbuf == NULL) { 29304fa9e02dSMarcin Wojtas return (0); 29314fa9e02dSMarcin Wojtas } 29324fa9e02dSMarcin Wojtas 29334fa9e02dSMarcin Wojtas /* Map rest of the mbufs */ 29344fa9e02dSMarcin Wojtas rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map_seg, mbuf, 29354fa9e02dSMarcin Wojtas segs, &nsegs, BUS_DMA_NOWAIT); 29364fa9e02dSMarcin Wojtas if (unlikely((rc != 0) || (nsegs == 0))) { 29374fa9e02dSMarcin Wojtas ena_trace(ENA_WARNING, 29384fa9e02dSMarcin Wojtas "dmamap load failed! err: %d nsegs: %d", rc, nsegs); 29394fa9e02dSMarcin Wojtas goto dma_error; 29404fa9e02dSMarcin Wojtas } 29414fa9e02dSMarcin Wojtas 29424fa9e02dSMarcin Wojtas for (i = 0; i < nsegs; i++) { 29434fa9e02dSMarcin Wojtas ena_buf->len = segs[i].ds_len; 29444fa9e02dSMarcin Wojtas ena_buf->paddr = segs[i].ds_addr; 29454fa9e02dSMarcin Wojtas ena_buf++; 29464fa9e02dSMarcin Wojtas } 29474fa9e02dSMarcin Wojtas tx_info->num_of_bufs += nsegs; 29484fa9e02dSMarcin Wojtas tx_info->seg_mapped = true; 29494fa9e02dSMarcin Wojtas 29504fa9e02dSMarcin Wojtas return (0); 29514fa9e02dSMarcin Wojtas 29524fa9e02dSMarcin Wojtas dma_error: 29534fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) 29544fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head); 29554fa9e02dSMarcin Wojtas single_dma_error: 29564fa9e02dSMarcin Wojtas counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1); 29574fa9e02dSMarcin Wojtas tx_info->mbuf = NULL; 29584fa9e02dSMarcin Wojtas return (rc); 29594fa9e02dSMarcin Wojtas } 29604fa9e02dSMarcin Wojtas 29611e9fb899SZbigniew Bodek static int 29621e9fb899SZbigniew Bodek ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) 29639b8d05b8SZbigniew Bodek { 29649b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 29659b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info; 29669b8d05b8SZbigniew Bodek struct ena_com_tx_ctx ena_tx_ctx; 29679b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev; 29689b8d05b8SZbigniew Bodek struct ena_com_io_sq* io_sq; 29699b8d05b8SZbigniew Bodek void *push_hdr; 29709b8d05b8SZbigniew Bodek uint16_t next_to_use; 29719b8d05b8SZbigniew Bodek uint16_t req_id; 29729b8d05b8SZbigniew Bodek uint16_t ena_qid; 29734fa9e02dSMarcin Wojtas uint16_t header_len; 29744fa9e02dSMarcin Wojtas int rc; 29751e9fb899SZbigniew Bodek int nb_hw_desc; 29769b8d05b8SZbigniew Bodek 29779b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 29789b8d05b8SZbigniew Bodek adapter = tx_ring->que->adapter; 29799b8d05b8SZbigniew Bodek ena_dev = adapter->ena_dev; 298090f4da8bSMarcin Wojtas io_sq = &ena_dev->io_sq_queues[ena_qid]; 29819b8d05b8SZbigniew Bodek 29821b069f1cSZbigniew Bodek rc = ena_check_and_collapse_mbuf(tx_ring, mbuf); 29833f9ed7abSMarcin Wojtas if 
(unlikely(rc != 0)) { 29841e9fb899SZbigniew Bodek ena_trace(ENA_WARNING, 29851b069f1cSZbigniew Bodek "Failed to collapse mbuf! err: %d", rc); 29861e9fb899SZbigniew Bodek return (rc); 29879b8d05b8SZbigniew Bodek } 29889b8d05b8SZbigniew Bodek 29894fa9e02dSMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes", (*mbuf)->m_pkthdr.len); 29904fa9e02dSMarcin Wojtas 29919b8d05b8SZbigniew Bodek next_to_use = tx_ring->next_to_use; 29929b8d05b8SZbigniew Bodek req_id = tx_ring->free_tx_ids[next_to_use]; 29939b8d05b8SZbigniew Bodek tx_info = &tx_ring->tx_buffer_info[req_id]; 29949b8d05b8SZbigniew Bodek tx_info->num_of_bufs = 0; 29959b8d05b8SZbigniew Bodek 29964fa9e02dSMarcin Wojtas rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len); 29974fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 29984fa9e02dSMarcin Wojtas ena_trace(ENA_WARNING, "Failed to map TX mbuf\n"); 29994fa9e02dSMarcin Wojtas return (rc); 30009b8d05b8SZbigniew Bodek } 30019b8d05b8SZbigniew Bodek memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 30029b8d05b8SZbigniew Bodek ena_tx_ctx.ena_bufs = tx_info->bufs; 30039b8d05b8SZbigniew Bodek ena_tx_ctx.push_header = push_hdr; 30049b8d05b8SZbigniew Bodek ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 30059b8d05b8SZbigniew Bodek ena_tx_ctx.req_id = req_id; 30069b8d05b8SZbigniew Bodek ena_tx_ctx.header_len = header_len; 30079b8d05b8SZbigniew Bodek 30089b8d05b8SZbigniew Bodek /* Set flags and meta data */ 30091e9fb899SZbigniew Bodek ena_tx_csum(&ena_tx_ctx, *mbuf); 30109b8d05b8SZbigniew Bodek /* Prepare the packet's descriptors and send them to device */ 30119b8d05b8SZbigniew Bodek rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc); 30123f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 3013e3cecf70SMarcin Wojtas if (likely(rc == ENA_COM_NO_MEM)) { 3014e3cecf70SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, 3015e3cecf70SMarcin Wojtas "tx ring[%d] is out of space\n", tx_ring->que->id); 3016e3cecf70SMarcin Wojtas } else { 3017e3cecf70SMarcin Wojtas device_printf(adapter->pdev, 3018e3cecf70SMarcin Wojtas "failed to prepare tx bufs\n"); 3019e3cecf70SMarcin Wojtas } 30200052f3b5SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1); 30219b8d05b8SZbigniew Bodek goto dma_error; 30229b8d05b8SZbigniew Bodek } 30239b8d05b8SZbigniew Bodek 30249b8d05b8SZbigniew Bodek counter_enter(); 30259b8d05b8SZbigniew Bodek counter_u64_add_protected(tx_ring->tx_stats.cnt, 1); 30260bdffe59SMarcin Wojtas counter_u64_add_protected(tx_ring->tx_stats.bytes, 30270bdffe59SMarcin Wojtas (*mbuf)->m_pkthdr.len); 302830217e2dSMarcin Wojtas 302930217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.tx_packets, 1); 303030217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.tx_bytes, 303130217e2dSMarcin Wojtas (*mbuf)->m_pkthdr.len); 30329b8d05b8SZbigniew Bodek counter_exit(); 30339b8d05b8SZbigniew Bodek 30349b8d05b8SZbigniew Bodek tx_info->tx_descs = nb_hw_desc; 30359b8d05b8SZbigniew Bodek getbinuptime(&tx_info->timestamp); 30369b8d05b8SZbigniew Bodek tx_info->print_once = true; 30379b8d05b8SZbigniew Bodek 30389b8d05b8SZbigniew Bodek tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 30399b8d05b8SZbigniew Bodek tx_ring->ring_size); 30409b8d05b8SZbigniew Bodek 30415cb9db07SMarcin Wojtas /* Stop the queue when no more space is available. A packet can need up 30425cb9db07SMarcin Wojtas * to sgl_size + 2 descriptors: one for the meta descriptor and one for 30435cb9db07SMarcin Wojtas * the header (if the header is larger than tx_max_header_size).
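 * The queue is normally restarted from the Tx cleanup path once enough
 * descriptors are completed; the check below the memory barrier also
 * restarts it here if at least ENA_TX_RESUME_THRESH descriptors are
 * already free.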
30445cb9db07SMarcin Wojtas */ 30455cb9db07SMarcin Wojtas if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 30465cb9db07SMarcin Wojtas adapter->max_tx_sgl_size + 2))) { 30475cb9db07SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "Stop queue %d\n", 30485cb9db07SMarcin Wojtas tx_ring->que->id); 30495cb9db07SMarcin Wojtas 30505cb9db07SMarcin Wojtas tx_ring->running = false; 30515cb9db07SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.queue_stop, 1); 30525cb9db07SMarcin Wojtas 30535cb9db07SMarcin Wojtas /* There is a rare condition where this function decides to 30545cb9db07SMarcin Wojtas * stop the queue but meanwhile tx_cleanup() updates 30555cb9db07SMarcin Wojtas * next_to_completion and terminates. 30565cb9db07SMarcin Wojtas * The queue will remain stopped forever. 30575cb9db07SMarcin Wojtas * To solve this issue this function performs mb(), checks 30585cb9db07SMarcin Wojtas * the wakeup condition and wakes up the queue if needed. 30595cb9db07SMarcin Wojtas */ 30605cb9db07SMarcin Wojtas mb(); 30615cb9db07SMarcin Wojtas 30625cb9db07SMarcin Wojtas if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 30635cb9db07SMarcin Wojtas ENA_TX_RESUME_THRESH)) { 30645cb9db07SMarcin Wojtas tx_ring->running = true; 30655cb9db07SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); 30665cb9db07SMarcin Wojtas } 30675cb9db07SMarcin Wojtas } 30685cb9db07SMarcin Wojtas 30694fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) 30704fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, 30714fa9e02dSMarcin Wojtas BUS_DMASYNC_PREWRITE); 30724fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) 30734fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, 30740bdffe59SMarcin Wojtas BUS_DMASYNC_PREWRITE); 30759b8d05b8SZbigniew Bodek 30769b8d05b8SZbigniew Bodek return (0); 30779b8d05b8SZbigniew Bodek 30789b8d05b8SZbigniew Bodek dma_error: 30799b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 30804fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) { 30814fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_seg); 30824fa9e02dSMarcin Wojtas tx_info->seg_mapped = false; 30834fa9e02dSMarcin Wojtas } 30844fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) { 30854fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head); 30864fa9e02dSMarcin Wojtas tx_info->head_mapped = false; 30874fa9e02dSMarcin Wojtas } 30889b8d05b8SZbigniew Bodek 30899b8d05b8SZbigniew Bodek return (rc); 30909b8d05b8SZbigniew Bodek } 30919b8d05b8SZbigniew Bodek 30929b8d05b8SZbigniew Bodek static void 30939b8d05b8SZbigniew Bodek ena_start_xmit(struct ena_ring *tx_ring) 30949b8d05b8SZbigniew Bodek { 30959b8d05b8SZbigniew Bodek struct mbuf *mbuf; 30969b8d05b8SZbigniew Bodek struct ena_adapter *adapter = tx_ring->adapter; 30979b8d05b8SZbigniew Bodek struct ena_com_io_sq* io_sq; 30989b8d05b8SZbigniew Bodek int ena_qid; 30999b8d05b8SZbigniew Bodek int acum_pkts = 0; 31009b8d05b8SZbigniew Bodek int ret = 0; 31019b8d05b8SZbigniew Bodek 31023f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) 31039b8d05b8SZbigniew Bodek return; 31049b8d05b8SZbigniew Bodek 31053f9ed7abSMarcin Wojtas if (unlikely(!adapter->link_status)) 31069b8d05b8SZbigniew Bodek return; 31079b8d05b8SZbigniew Bodek 31089b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 31099b8d05b8SZbigniew Bodek io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; 31109b8d05b8SZbigniew Bodek 31119b8d05b8SZbigniew Bodek while ((mbuf = 
drbr_peek(adapter->ifp, tx_ring->br)) != NULL) { 31129b8d05b8SZbigniew Bodek ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and" 31139b8d05b8SZbigniew Bodek " header csum flags %#jx", 31144e8acd84SMarcin Wojtas mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags); 31159b8d05b8SZbigniew Bodek 31165cb9db07SMarcin Wojtas if (unlikely(!tx_ring->running)) { 31175cb9db07SMarcin Wojtas drbr_putback(adapter->ifp, tx_ring->br, mbuf); 31185cb9db07SMarcin Wojtas break; 31195cb9db07SMarcin Wojtas } 31209b8d05b8SZbigniew Bodek 31213f9ed7abSMarcin Wojtas if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) { 31229b8d05b8SZbigniew Bodek if (ret == ENA_COM_NO_MEM) { 31239b8d05b8SZbigniew Bodek drbr_putback(adapter->ifp, tx_ring->br, mbuf); 31249b8d05b8SZbigniew Bodek } else if (ret == ENA_COM_NO_SPACE) { 31259b8d05b8SZbigniew Bodek drbr_putback(adapter->ifp, tx_ring->br, mbuf); 31269b8d05b8SZbigniew Bodek } else { 31279b8d05b8SZbigniew Bodek m_freem(mbuf); 31289b8d05b8SZbigniew Bodek drbr_advance(adapter->ifp, tx_ring->br); 31299b8d05b8SZbigniew Bodek } 31309b8d05b8SZbigniew Bodek 31319b8d05b8SZbigniew Bodek break; 31329b8d05b8SZbigniew Bodek } 31339b8d05b8SZbigniew Bodek 3134b4b29032SZbigniew Bodek drbr_advance(adapter->ifp, tx_ring->br); 3135b4b29032SZbigniew Bodek 31363f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(adapter->ifp) & 31373f9ed7abSMarcin Wojtas IFF_DRV_RUNNING) == 0)) 31389b8d05b8SZbigniew Bodek return; 31399b8d05b8SZbigniew Bodek 31409b8d05b8SZbigniew Bodek acum_pkts++; 31419b8d05b8SZbigniew Bodek 31429b8d05b8SZbigniew Bodek BPF_MTAP(adapter->ifp, mbuf); 31439b8d05b8SZbigniew Bodek 31443f9ed7abSMarcin Wojtas if (unlikely(acum_pkts == DB_THRESHOLD)) { 31459b8d05b8SZbigniew Bodek acum_pkts = 0; 31469b8d05b8SZbigniew Bodek wmb(); 31479b8d05b8SZbigniew Bodek /* Trigger the dma engine */ 31489b8d05b8SZbigniew Bodek ena_com_write_sq_doorbell(io_sq); 31499b8d05b8SZbigniew Bodek counter_u64_add(tx_ring->tx_stats.doorbells, 1); 31509b8d05b8SZbigniew Bodek } 31519b8d05b8SZbigniew Bodek 31529b8d05b8SZbigniew Bodek } 31539b8d05b8SZbigniew Bodek 31543f9ed7abSMarcin Wojtas if (likely(acum_pkts != 0)) { 31559b8d05b8SZbigniew Bodek wmb(); 31569b8d05b8SZbigniew Bodek /* Trigger the dma engine */ 31579b8d05b8SZbigniew Bodek ena_com_write_sq_doorbell(io_sq); 31589b8d05b8SZbigniew Bodek counter_u64_add(tx_ring->tx_stats.doorbells, 1); 31599b8d05b8SZbigniew Bodek } 31609b8d05b8SZbigniew Bodek 31615cb9db07SMarcin Wojtas if (unlikely(!tx_ring->running)) 31625cb9db07SMarcin Wojtas taskqueue_enqueue(tx_ring->que->cleanup_tq, 31635cb9db07SMarcin Wojtas &tx_ring->que->cleanup_task); 31649b8d05b8SZbigniew Bodek } 31659b8d05b8SZbigniew Bodek 31669b8d05b8SZbigniew Bodek static void 31679b8d05b8SZbigniew Bodek ena_deferred_mq_start(void *arg, int pending) 31689b8d05b8SZbigniew Bodek { 31699b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = (struct ena_ring *)arg; 31709b8d05b8SZbigniew Bodek struct ifnet *ifp = tx_ring->adapter->ifp; 31719b8d05b8SZbigniew Bodek 31720bdffe59SMarcin Wojtas while (!drbr_empty(ifp, tx_ring->br) && 31735cb9db07SMarcin Wojtas tx_ring->running && 31740bdffe59SMarcin Wojtas (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 31759b8d05b8SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 31769b8d05b8SZbigniew Bodek ena_start_xmit(tx_ring); 31779b8d05b8SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 31789b8d05b8SZbigniew Bodek } 31799b8d05b8SZbigniew Bodek } 31809b8d05b8SZbigniew Bodek 31819b8d05b8SZbigniew Bodek static int 31829b8d05b8SZbigniew Bodek ena_mq_start(if_t ifp, 
struct mbuf *m) 31839b8d05b8SZbigniew Bodek { 31849b8d05b8SZbigniew Bodek struct ena_adapter *adapter = ifp->if_softc; 31859b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 31869b8d05b8SZbigniew Bodek int ret, is_drbr_empty; 31879b8d05b8SZbigniew Bodek uint32_t i; 31889b8d05b8SZbigniew Bodek 31893f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) 31909b8d05b8SZbigniew Bodek return (ENODEV); 31919b8d05b8SZbigniew Bodek 31929b8d05b8SZbigniew Bodek /* Which queue to use */ 31939b8d05b8SZbigniew Bodek /* 31949b8d05b8SZbigniew Bodek * If everything is set up correctly, this maps the flow to the 31959b8d05b8SZbigniew Bodek * same bucket as the CPU we are currently running on, 31969b8d05b8SZbigniew Bodek * which should improve performance. 31979b8d05b8SZbigniew Bodek */ 31989b8d05b8SZbigniew Bodek if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 31999b8d05b8SZbigniew Bodek i = m->m_pkthdr.flowid % adapter->num_queues; 32009b8d05b8SZbigniew Bodek } else { 32019b8d05b8SZbigniew Bodek i = curcpu % adapter->num_queues; 32029b8d05b8SZbigniew Bodek } 32039b8d05b8SZbigniew Bodek tx_ring = &adapter->tx_ring[i]; 32049b8d05b8SZbigniew Bodek 32059b8d05b8SZbigniew Bodek /* Check if drbr is empty before putting packet */ 32069b8d05b8SZbigniew Bodek is_drbr_empty = drbr_empty(ifp, tx_ring->br); 32079b8d05b8SZbigniew Bodek ret = drbr_enqueue(ifp, tx_ring->br, m); 32083f9ed7abSMarcin Wojtas if (unlikely(ret != 0)) { 32099b8d05b8SZbigniew Bodek taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 32109b8d05b8SZbigniew Bodek return (ret); 32119b8d05b8SZbigniew Bodek } 32129b8d05b8SZbigniew Bodek 321367ec48bbSMarcin Wojtas if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) { 32149b8d05b8SZbigniew Bodek ena_start_xmit(tx_ring); 32159b8d05b8SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 32169b8d05b8SZbigniew Bodek } else { 32179b8d05b8SZbigniew Bodek taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 32189b8d05b8SZbigniew Bodek } 32199b8d05b8SZbigniew Bodek 32209b8d05b8SZbigniew Bodek return (0); 32219b8d05b8SZbigniew Bodek } 32229b8d05b8SZbigniew Bodek 32239b8d05b8SZbigniew Bodek static void 32249b8d05b8SZbigniew Bodek ena_qflush(if_t ifp) 32259b8d05b8SZbigniew Bodek { 32269b8d05b8SZbigniew Bodek struct ena_adapter *adapter = ifp->if_softc; 32279b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = adapter->tx_ring; 32289b8d05b8SZbigniew Bodek int i; 32299b8d05b8SZbigniew Bodek 32309b8d05b8SZbigniew Bodek for(i = 0; i < adapter->num_queues; ++i, ++tx_ring) 32310bdffe59SMarcin Wojtas if (!drbr_empty(ifp, tx_ring->br)) { 32329b8d05b8SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 32339b8d05b8SZbigniew Bodek drbr_flush(ifp, tx_ring->br); 32349b8d05b8SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 32359b8d05b8SZbigniew Bodek } 32369b8d05b8SZbigniew Bodek 32379b8d05b8SZbigniew Bodek if_qflush(ifp); 32389b8d05b8SZbigniew Bodek } 32399b8d05b8SZbigniew Bodek 32400bdffe59SMarcin Wojtas static int 32410bdffe59SMarcin Wojtas ena_calc_io_queue_num(struct ena_adapter *adapter, 32429b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx) 32439b8d05b8SZbigniew Bodek { 32446064f289SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 32454fa9e02dSMarcin Wojtas int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num; 32469b8d05b8SZbigniew Bodek 32476064f289SMarcin Wojtas /* Regular queues capabilities */ 32486064f289SMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 32496064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields
*max_queue_ext = 32506064f289SMarcin Wojtas &get_feat_ctx->max_queue_ext.max_queue_ext; 32514fa9e02dSMarcin Wojtas io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, 32524fa9e02dSMarcin Wojtas max_queue_ext->max_rx_cq_num); 32536064f289SMarcin Wojtas 32544fa9e02dSMarcin Wojtas io_tx_sq_num = max_queue_ext->max_tx_sq_num; 32554fa9e02dSMarcin Wojtas io_tx_cq_num = max_queue_ext->max_tx_cq_num; 32566064f289SMarcin Wojtas } else { 32576064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 32586064f289SMarcin Wojtas &get_feat_ctx->max_queues; 32594fa9e02dSMarcin Wojtas io_tx_sq_num = max_queues->max_sq_num; 32604fa9e02dSMarcin Wojtas io_tx_cq_num = max_queues->max_cq_num; 32614fa9e02dSMarcin Wojtas io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); 32626064f289SMarcin Wojtas } 32639b8d05b8SZbigniew Bodek 32644fa9e02dSMarcin Wojtas /* In case of LLQ use the llq fields for the tx SQ/CQ */ 32654fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 32664fa9e02dSMarcin Wojtas io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 32674fa9e02dSMarcin Wojtas 32689b8d05b8SZbigniew Bodek io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); 32694fa9e02dSMarcin Wojtas io_queue_num = min_t(int, io_queue_num, io_rx_num); 32704fa9e02dSMarcin Wojtas io_queue_num = min_t(int, io_queue_num, io_tx_sq_num); 32714fa9e02dSMarcin Wojtas io_queue_num = min_t(int, io_queue_num, io_tx_cq_num); 32729b8d05b8SZbigniew Bodek /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */ 32739b8d05b8SZbigniew Bodek io_queue_num = min_t(int, io_queue_num, 32749b8d05b8SZbigniew Bodek pci_msix_count(adapter->pdev) - 1); 32759b8d05b8SZbigniew Bodek 32760bdffe59SMarcin Wojtas return (io_queue_num); 32779b8d05b8SZbigniew Bodek } 32789b8d05b8SZbigniew Bodek 32790bdffe59SMarcin Wojtas static int 32804fa9e02dSMarcin Wojtas ena_enable_wc(struct resource *res) 32814fa9e02dSMarcin Wojtas { 32824fa9e02dSMarcin Wojtas #if defined(__i386) || defined(__amd64) 32834fa9e02dSMarcin Wojtas vm_offset_t va; 32844fa9e02dSMarcin Wojtas vm_size_t len; 32854fa9e02dSMarcin Wojtas int rc; 32864fa9e02dSMarcin Wojtas 32874fa9e02dSMarcin Wojtas va = (vm_offset_t)rman_get_virtual(res); 32884fa9e02dSMarcin Wojtas len = rman_get_size(res); 32894fa9e02dSMarcin Wojtas /* Enable write combining */ 32904fa9e02dSMarcin Wojtas rc = pmap_change_attr(va, len, PAT_WRITE_COMBINING); 32914fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 32924fa9e02dSMarcin Wojtas ena_trace(ENA_ALERT, "pmap_change_attr failed, %d\n", rc); 32934fa9e02dSMarcin Wojtas return (rc); 32944fa9e02dSMarcin Wojtas } 32954fa9e02dSMarcin Wojtas 32964fa9e02dSMarcin Wojtas return (0); 32974fa9e02dSMarcin Wojtas #endif 32984fa9e02dSMarcin Wojtas return (EOPNOTSUPP); 32994fa9e02dSMarcin Wojtas } 33004fa9e02dSMarcin Wojtas 33014fa9e02dSMarcin Wojtas static int 33024fa9e02dSMarcin Wojtas ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, 33034fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq, 33044fa9e02dSMarcin Wojtas struct ena_llq_configurations *llq_default_configurations) 33054fa9e02dSMarcin Wojtas { 33064fa9e02dSMarcin Wojtas struct ena_adapter *adapter = device_get_softc(pdev); 33074fa9e02dSMarcin Wojtas int rc, rid; 33084fa9e02dSMarcin Wojtas uint32_t llq_feature_mask; 33094fa9e02dSMarcin Wojtas 33104fa9e02dSMarcin Wojtas llq_feature_mask = 1 << ENA_ADMIN_LLQ; 33114fa9e02dSMarcin Wojtas if (!(ena_dev->supported_features & llq_feature_mask)) { 33124fa9e02dSMarcin Wojtas device_printf(pdev, 33134fa9e02dSMarcin Wojtas
"LLQ is not supported. Fallback to host mode policy.\n"); 33144fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 33154fa9e02dSMarcin Wojtas return (0); 33164fa9e02dSMarcin Wojtas } 33174fa9e02dSMarcin Wojtas 33184fa9e02dSMarcin Wojtas rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 33194fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 33204fa9e02dSMarcin Wojtas device_printf(pdev, "Failed to configure the device mode. " 33214fa9e02dSMarcin Wojtas "Fallback to host mode policy.\n"); 33224fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 33234fa9e02dSMarcin Wojtas return (0); 33244fa9e02dSMarcin Wojtas } 33254fa9e02dSMarcin Wojtas 33264fa9e02dSMarcin Wojtas /* Nothing to config, exit */ 33274fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 33284fa9e02dSMarcin Wojtas return (0); 33294fa9e02dSMarcin Wojtas 33304fa9e02dSMarcin Wojtas /* Try to allocate resources for LLQ bar */ 33314fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_MEM_BAR); 33324fa9e02dSMarcin Wojtas adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 33334fa9e02dSMarcin Wojtas &rid, RF_ACTIVE); 33344fa9e02dSMarcin Wojtas if (unlikely(adapter->memory == NULL)) { 33354fa9e02dSMarcin Wojtas device_printf(pdev, "unable to allocate LLQ bar resource. " 33364fa9e02dSMarcin Wojtas "Fallback to host mode policy.\n"); 33374fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 33384fa9e02dSMarcin Wojtas return (0); 33394fa9e02dSMarcin Wojtas } 33404fa9e02dSMarcin Wojtas 33414fa9e02dSMarcin Wojtas /* Enable write combining for better LLQ performance */ 33424fa9e02dSMarcin Wojtas rc = ena_enable_wc(adapter->memory); 33434fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 33444fa9e02dSMarcin Wojtas device_printf(pdev, "failed to enable write combining.\n"); 33454fa9e02dSMarcin Wojtas return (rc); 33464fa9e02dSMarcin Wojtas } 33474fa9e02dSMarcin Wojtas 33484fa9e02dSMarcin Wojtas /* 33494fa9e02dSMarcin Wojtas * Save virtual address of the device's memory region 33504fa9e02dSMarcin Wojtas * for the ena_com layer. 
33514fa9e02dSMarcin Wojtas */ 33524fa9e02dSMarcin Wojtas ena_dev->mem_bar = rman_get_virtual(adapter->memory); 33534fa9e02dSMarcin Wojtas 33544fa9e02dSMarcin Wojtas return (0); 33554fa9e02dSMarcin Wojtas } 33564fa9e02dSMarcin Wojtas 33574fa9e02dSMarcin Wojtas static inline 33584fa9e02dSMarcin Wojtas void set_default_llq_configurations(struct ena_llq_configurations *llq_config) 33594fa9e02dSMarcin Wojtas { 33604fa9e02dSMarcin Wojtas llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 33614fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 33624fa9e02dSMarcin Wojtas llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 33634fa9e02dSMarcin Wojtas llq_config->llq_num_decs_before_header = 33644fa9e02dSMarcin Wojtas ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 33654fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size_value = 128; 33664fa9e02dSMarcin Wojtas } 33674fa9e02dSMarcin Wojtas 33684fa9e02dSMarcin Wojtas static int 33696064f289SMarcin Wojtas ena_calc_queue_size(struct ena_adapter *adapter, 33706064f289SMarcin Wojtas struct ena_calc_queue_size_ctx *ctx) 33719b8d05b8SZbigniew Bodek { 33724fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 33734fa9e02dSMarcin Wojtas struct ena_com_dev *ena_dev = ctx->ena_dev; 33746064f289SMarcin Wojtas uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; 33756064f289SMarcin Wojtas uint32_t rx_queue_size = adapter->rx_ring_size; 33769b8d05b8SZbigniew Bodek 33774fa9e02dSMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 33786064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 33796064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 33806064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 33816064f289SMarcin Wojtas max_queue_ext->max_rx_cq_depth); 33826064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 33836064f289SMarcin Wojtas max_queue_ext->max_rx_sq_depth); 33846064f289SMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 33856064f289SMarcin Wojtas max_queue_ext->max_tx_cq_depth); 33864fa9e02dSMarcin Wojtas 33874fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 33884fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 33894fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 33904fa9e02dSMarcin Wojtas llq->max_llq_depth); 33914fa9e02dSMarcin Wojtas else 33926064f289SMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 33936064f289SMarcin Wojtas max_queue_ext->max_tx_sq_depth); 33944fa9e02dSMarcin Wojtas 33956064f289SMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 33966064f289SMarcin Wojtas max_queue_ext->max_per_packet_rx_descs); 33976064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 33986064f289SMarcin Wojtas max_queue_ext->max_per_packet_tx_descs); 33996064f289SMarcin Wojtas } else { 34006064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 34016064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queues; 34026064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 34036064f289SMarcin Wojtas max_queues->max_cq_depth); 34046064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 34056064f289SMarcin Wojtas max_queues->max_sq_depth); 34064fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 34074fa9e02dSMarcin Wojtas max_queues->max_cq_depth); 34084fa9e02dSMarcin Wojtas 34094fa9e02dSMarcin Wojtas if 
(ena_dev->tx_mem_queue_type ==
34104fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV)
34114fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size,
34124fa9e02dSMarcin Wojtas llq->max_llq_depth);
34134fa9e02dSMarcin Wojtas else
34144fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size,
34154fa9e02dSMarcin Wojtas max_queues->max_sq_depth);
34164fa9e02dSMarcin Wojtas
34176064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
34186064f289SMarcin Wojtas max_queues->max_packet_tx_descs);
34196064f289SMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
34206064f289SMarcin Wojtas max_queues->max_packet_rx_descs);
34216064f289SMarcin Wojtas }
34229b8d05b8SZbigniew Bodek
34239b8d05b8SZbigniew Bodek /* round down to the nearest power of 2 */
34246064f289SMarcin Wojtas rx_queue_size = 1 << (fls(rx_queue_size) - 1);
34256064f289SMarcin Wojtas tx_queue_size = 1 << (fls(tx_queue_size) - 1);
34266064f289SMarcin Wojtas
34276064f289SMarcin Wojtas if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) {
34286064f289SMarcin Wojtas device_printf(ctx->pdev, "Invalid queue size\n");
34296064f289SMarcin Wojtas return (EFAULT);
34309b8d05b8SZbigniew Bodek }
34319b8d05b8SZbigniew Bodek
34326064f289SMarcin Wojtas ctx->rx_queue_size = rx_queue_size;
34336064f289SMarcin Wojtas ctx->tx_queue_size = tx_queue_size;
34346064f289SMarcin Wojtas
34356064f289SMarcin Wojtas return (0);
34369b8d05b8SZbigniew Bodek }
34379b8d05b8SZbigniew Bodek
34386064f289SMarcin Wojtas static int
34396064f289SMarcin Wojtas ena_handle_updated_queues(struct ena_adapter *adapter,
34406064f289SMarcin Wojtas struct ena_com_dev_get_features_ctx *get_feat_ctx)
34416064f289SMarcin Wojtas {
34426064f289SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev;
34436064f289SMarcin Wojtas struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
34446064f289SMarcin Wojtas device_t pdev = adapter->pdev;
34456064f289SMarcin Wojtas bool are_queues_changed = false;
34466064f289SMarcin Wojtas int io_queue_num, rc;
34479b8d05b8SZbigniew Bodek
34486064f289SMarcin Wojtas calc_queue_ctx.ena_dev = ena_dev;
34496064f289SMarcin Wojtas calc_queue_ctx.get_feat_ctx = get_feat_ctx;
34506064f289SMarcin Wojtas calc_queue_ctx.pdev = pdev;
34516064f289SMarcin Wojtas
34526064f289SMarcin Wojtas io_queue_num = ena_calc_io_queue_num(adapter, get_feat_ctx);
34536064f289SMarcin Wojtas rc = ena_calc_queue_size(adapter, &calc_queue_ctx);
34546064f289SMarcin Wojtas if (unlikely(rc != 0 || io_queue_num <= 0))
34556064f289SMarcin Wojtas return (EFAULT);
34566064f289SMarcin Wojtas
34576064f289SMarcin Wojtas if (adapter->tx_ring->buf_ring_size != adapter->buf_ring_size)
34586064f289SMarcin Wojtas are_queues_changed = true;
34596064f289SMarcin Wojtas
34606064f289SMarcin Wojtas if (unlikely(adapter->tx_ring_size > calc_queue_ctx.tx_queue_size ||
34616064f289SMarcin Wojtas adapter->rx_ring_size > calc_queue_ctx.rx_queue_size)) {
34626064f289SMarcin Wojtas device_printf(pdev,
34636064f289SMarcin Wojtas "Not enough resources to allocate requested queue sizes "
34646064f289SMarcin Wojtas "(TX,RX)=(%d,%d), falling back to queue sizes "
34656064f289SMarcin Wojtas "(TX,RX)=(%d,%d)\n",
34666064f289SMarcin Wojtas adapter->tx_ring_size,
34676064f289SMarcin Wojtas adapter->rx_ring_size,
34686064f289SMarcin Wojtas calc_queue_ctx.tx_queue_size,
34696064f289SMarcin Wojtas calc_queue_ctx.rx_queue_size);
34706064f289SMarcin Wojtas adapter->tx_ring_size = calc_queue_ctx.tx_queue_size;
34716064f289SMarcin Wojtas
adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 34726064f289SMarcin Wojtas adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 34736064f289SMarcin Wojtas adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 34746064f289SMarcin Wojtas are_queues_changed = true; 34756064f289SMarcin Wojtas } 34766064f289SMarcin Wojtas 34776064f289SMarcin Wojtas if (unlikely(adapter->num_queues > io_queue_num)) { 34786064f289SMarcin Wojtas device_printf(pdev, 34796064f289SMarcin Wojtas "Not enough resources to allocate %d queues, " 34806064f289SMarcin Wojtas "falling back to %d queues\n", 34816064f289SMarcin Wojtas adapter->num_queues, io_queue_num); 34826064f289SMarcin Wojtas adapter->num_queues = io_queue_num; 34836064f289SMarcin Wojtas if (adapter->rss_support) { 34846064f289SMarcin Wojtas ena_com_rss_destroy(ena_dev); 34856064f289SMarcin Wojtas rc = ena_rss_init_default(adapter); 34866064f289SMarcin Wojtas if (unlikely(rc != 0) && (rc != EOPNOTSUPP)) { 34876064f289SMarcin Wojtas device_printf(pdev, "Cannot init RSS rc: %d\n", 34886064f289SMarcin Wojtas rc); 34896064f289SMarcin Wojtas return (rc); 34906064f289SMarcin Wojtas } 34916064f289SMarcin Wojtas } 34926064f289SMarcin Wojtas are_queues_changed = true; 34936064f289SMarcin Wojtas } 34946064f289SMarcin Wojtas 34956064f289SMarcin Wojtas if (unlikely(are_queues_changed)) { 34966064f289SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 34976064f289SMarcin Wojtas ena_init_io_rings(adapter); 34986064f289SMarcin Wojtas } 34996064f289SMarcin Wojtas 35006064f289SMarcin Wojtas return (0); 35019b8d05b8SZbigniew Bodek } 35029b8d05b8SZbigniew Bodek 35030bdffe59SMarcin Wojtas static int 35040bdffe59SMarcin Wojtas ena_rss_init_default(struct ena_adapter *adapter) 35059b8d05b8SZbigniew Bodek { 35069b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 35079b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 35089b8d05b8SZbigniew Bodek int qid, rc, i; 35099b8d05b8SZbigniew Bodek 35109b8d05b8SZbigniew Bodek rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 35110bdffe59SMarcin Wojtas if (unlikely(rc != 0)) { 35124e8acd84SMarcin Wojtas device_printf(dev, "Cannot init indirect table\n"); 35137d2544e6SMarcin Wojtas return (rc); 35149b8d05b8SZbigniew Bodek } 35159b8d05b8SZbigniew Bodek 35169b8d05b8SZbigniew Bodek for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 35179b8d05b8SZbigniew Bodek qid = i % adapter->num_queues; 35189b8d05b8SZbigniew Bodek rc = ena_com_indirect_table_fill_entry(ena_dev, i, 35199b8d05b8SZbigniew Bodek ENA_IO_RXQ_IDX(qid)); 35200bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 35219b8d05b8SZbigniew Bodek device_printf(dev, "Cannot fill indirect table\n"); 35227d2544e6SMarcin Wojtas goto err_rss_destroy; 35239b8d05b8SZbigniew Bodek } 35249b8d05b8SZbigniew Bodek } 35259b8d05b8SZbigniew Bodek 35269b8d05b8SZbigniew Bodek rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 35279b8d05b8SZbigniew Bodek ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 35280bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 35299b8d05b8SZbigniew Bodek device_printf(dev, "Cannot fill hash function\n"); 35307d2544e6SMarcin Wojtas goto err_rss_destroy; 35319b8d05b8SZbigniew Bodek } 35329b8d05b8SZbigniew Bodek 35339b8d05b8SZbigniew Bodek rc = ena_com_set_default_hash_ctrl(ena_dev); 35340bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 35359b8d05b8SZbigniew Bodek device_printf(dev, "Cannot fill hash control\n"); 35367d2544e6SMarcin Wojtas goto err_rss_destroy; 
35379b8d05b8SZbigniew Bodek } 35389b8d05b8SZbigniew Bodek 35399b8d05b8SZbigniew Bodek return (0); 35409b8d05b8SZbigniew Bodek 35417d2544e6SMarcin Wojtas err_rss_destroy: 35429b8d05b8SZbigniew Bodek ena_com_rss_destroy(ena_dev); 35439b8d05b8SZbigniew Bodek return (rc); 35449b8d05b8SZbigniew Bodek } 35459b8d05b8SZbigniew Bodek 35469b8d05b8SZbigniew Bodek static void 35479b8d05b8SZbigniew Bodek ena_rss_init_default_deferred(void *arg) 35489b8d05b8SZbigniew Bodek { 35499b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 35509b8d05b8SZbigniew Bodek devclass_t dc; 35519b8d05b8SZbigniew Bodek int max; 35529b8d05b8SZbigniew Bodek int rc; 35539b8d05b8SZbigniew Bodek 35549b8d05b8SZbigniew Bodek dc = devclass_find("ena"); 35553f9ed7abSMarcin Wojtas if (unlikely(dc == NULL)) { 35564e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, "No devclass ena\n"); 35579b8d05b8SZbigniew Bodek return; 35589b8d05b8SZbigniew Bodek } 35599b8d05b8SZbigniew Bodek 35609b8d05b8SZbigniew Bodek max = devclass_get_maxunit(dc); 35619b8d05b8SZbigniew Bodek while (max-- >= 0) { 35629b8d05b8SZbigniew Bodek adapter = devclass_get_softc(dc, max); 35639b8d05b8SZbigniew Bodek if (adapter != NULL) { 35649b8d05b8SZbigniew Bodek rc = ena_rss_init_default(adapter); 35659b8d05b8SZbigniew Bodek adapter->rss_support = true; 35663f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 35679b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 35689b8d05b8SZbigniew Bodek "WARNING: RSS was not properly initialized," 35690bdffe59SMarcin Wojtas " it will affect bandwidth\n"); 35709b8d05b8SZbigniew Bodek adapter->rss_support = false; 35719b8d05b8SZbigniew Bodek } 35729b8d05b8SZbigniew Bodek } 35739b8d05b8SZbigniew Bodek } 35749b8d05b8SZbigniew Bodek } 35759b8d05b8SZbigniew Bodek SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL); 35769b8d05b8SZbigniew Bodek 35770bdffe59SMarcin Wojtas static void 35780bdffe59SMarcin Wojtas ena_config_host_info(struct ena_com_dev *ena_dev) 35799b8d05b8SZbigniew Bodek { 35809b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info; 35819b8d05b8SZbigniew Bodek int rc; 35829b8d05b8SZbigniew Bodek 35839b8d05b8SZbigniew Bodek /* Allocate only the host info */ 35849b8d05b8SZbigniew Bodek rc = ena_com_allocate_host_info(ena_dev); 35853f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 35869b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, "Cannot allocate host info\n"); 35879b8d05b8SZbigniew Bodek return; 35889b8d05b8SZbigniew Bodek } 35899b8d05b8SZbigniew Bodek 35909b8d05b8SZbigniew Bodek host_info = ena_dev->host_attr.host_info; 35919b8d05b8SZbigniew Bodek 35929b8d05b8SZbigniew Bodek host_info->os_type = ENA_ADMIN_OS_FREEBSD; 35939b8d05b8SZbigniew Bodek host_info->kernel_ver = osreldate; 35949b8d05b8SZbigniew Bodek 35959b8d05b8SZbigniew Bodek sprintf(host_info->kernel_ver_str, "%d", osreldate); 35969b8d05b8SZbigniew Bodek host_info->os_dist = 0; 35979b8d05b8SZbigniew Bodek strncpy(host_info->os_dist_str, osrelease, 35989b8d05b8SZbigniew Bodek sizeof(host_info->os_dist_str) - 1); 35999b8d05b8SZbigniew Bodek 36009b8d05b8SZbigniew Bodek host_info->driver_version = 36019b8d05b8SZbigniew Bodek (DRV_MODULE_VER_MAJOR) | 36029b8d05b8SZbigniew Bodek (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 36039b8d05b8SZbigniew Bodek (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 36048ece6b25SMarcin Wojtas host_info->num_cpus = mp_ncpus; 36059b8d05b8SZbigniew Bodek 36069b8d05b8SZbigniew Bodek rc = ena_com_set_host_attributes(ena_dev); 36073f9ed7abSMarcin Wojtas if (unlikely(rc != 
0)) { 3608a195fab0SMarcin Wojtas if (rc == EOPNOTSUPP) 36099b8d05b8SZbigniew Bodek ena_trace(ENA_WARNING, "Cannot set host attributes\n"); 36109b8d05b8SZbigniew Bodek else 36119b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, "Cannot set host attributes\n"); 36129b8d05b8SZbigniew Bodek 36139b8d05b8SZbigniew Bodek goto err; 36149b8d05b8SZbigniew Bodek } 36159b8d05b8SZbigniew Bodek 36169b8d05b8SZbigniew Bodek return; 36179b8d05b8SZbigniew Bodek 36189b8d05b8SZbigniew Bodek err: 36199b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 36209b8d05b8SZbigniew Bodek } 36219b8d05b8SZbigniew Bodek 36229b8d05b8SZbigniew Bodek static int 36239b8d05b8SZbigniew Bodek ena_device_init(struct ena_adapter *adapter, device_t pdev, 36249b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 36259b8d05b8SZbigniew Bodek { 36269b8d05b8SZbigniew Bodek struct ena_com_dev* ena_dev = adapter->ena_dev; 36279b8d05b8SZbigniew Bodek bool readless_supported; 36289b8d05b8SZbigniew Bodek uint32_t aenq_groups; 36299b8d05b8SZbigniew Bodek int dma_width; 36309b8d05b8SZbigniew Bodek int rc; 36319b8d05b8SZbigniew Bodek 36329b8d05b8SZbigniew Bodek rc = ena_com_mmio_reg_read_request_init(ena_dev); 36333f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36349b8d05b8SZbigniew Bodek device_printf(pdev, "failed to init mmio read less\n"); 36350bdffe59SMarcin Wojtas return (rc); 36369b8d05b8SZbigniew Bodek } 36379b8d05b8SZbigniew Bodek 36389b8d05b8SZbigniew Bodek /* 36399b8d05b8SZbigniew Bodek * The PCIe configuration space revision id indicate if mmio reg 36409b8d05b8SZbigniew Bodek * read is disabled 36419b8d05b8SZbigniew Bodek */ 36429b8d05b8SZbigniew Bodek readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); 36439b8d05b8SZbigniew Bodek ena_com_set_mmio_read_mode(ena_dev, readless_supported); 36449b8d05b8SZbigniew Bodek 3645a195fab0SMarcin Wojtas rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 36463f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36479b8d05b8SZbigniew Bodek device_printf(pdev, "Can not reset device\n"); 36489b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36499b8d05b8SZbigniew Bodek } 36509b8d05b8SZbigniew Bodek 36519b8d05b8SZbigniew Bodek rc = ena_com_validate_version(ena_dev); 36523f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36539b8d05b8SZbigniew Bodek device_printf(pdev, "device version is too low\n"); 36549b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36559b8d05b8SZbigniew Bodek } 36569b8d05b8SZbigniew Bodek 36579b8d05b8SZbigniew Bodek dma_width = ena_com_get_dma_width(ena_dev); 36583f9ed7abSMarcin Wojtas if (unlikely(dma_width < 0)) { 36599b8d05b8SZbigniew Bodek device_printf(pdev, "Invalid dma width value %d", dma_width); 36609b8d05b8SZbigniew Bodek rc = dma_width; 36619b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36629b8d05b8SZbigniew Bodek } 36639b8d05b8SZbigniew Bodek adapter->dma_width = dma_width; 36649b8d05b8SZbigniew Bodek 36659b8d05b8SZbigniew Bodek /* ENA admin level init */ 366667ec48bbSMarcin Wojtas rc = ena_com_admin_init(ena_dev, &aenq_handlers); 36673f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36689b8d05b8SZbigniew Bodek device_printf(pdev, 36699b8d05b8SZbigniew Bodek "Can not initialize ena admin queue with device\n"); 36709b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36719b8d05b8SZbigniew Bodek } 36729b8d05b8SZbigniew Bodek 36739b8d05b8SZbigniew Bodek /* 36749b8d05b8SZbigniew Bodek * To enable the msix interrupts the driver needs to know the number 36759b8d05b8SZbigniew Bodek * of queues. 
So the driver uses polling mode to retrieve this 36769b8d05b8SZbigniew Bodek * information 36779b8d05b8SZbigniew Bodek */ 36789b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, true); 36799b8d05b8SZbigniew Bodek 36809b8d05b8SZbigniew Bodek ena_config_host_info(ena_dev); 36819b8d05b8SZbigniew Bodek 36829b8d05b8SZbigniew Bodek /* Get Device Attributes */ 36839b8d05b8SZbigniew Bodek rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 36843f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36859b8d05b8SZbigniew Bodek device_printf(pdev, 36869b8d05b8SZbigniew Bodek "Cannot get attribute for ena device rc: %d\n", rc); 36879b8d05b8SZbigniew Bodek goto err_admin_init; 36889b8d05b8SZbigniew Bodek } 36899b8d05b8SZbigniew Bodek 3690e6de9a83SMarcin Wojtas aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 3691e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_FATAL_ERROR) | 3692e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_WARNING) | 369340621d71SMarcin Wojtas BIT(ENA_ADMIN_NOTIFICATION) | 3694e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_KEEP_ALIVE); 36959b8d05b8SZbigniew Bodek 36969b8d05b8SZbigniew Bodek aenq_groups &= get_feat_ctx->aenq.supported_groups; 36979b8d05b8SZbigniew Bodek rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 36983f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36999b8d05b8SZbigniew Bodek device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc); 37009b8d05b8SZbigniew Bodek goto err_admin_init; 37019b8d05b8SZbigniew Bodek } 37029b8d05b8SZbigniew Bodek 37039b8d05b8SZbigniew Bodek *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 37049b8d05b8SZbigniew Bodek 37050bdffe59SMarcin Wojtas return (0); 37069b8d05b8SZbigniew Bodek 37079b8d05b8SZbigniew Bodek err_admin_init: 37089b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 37099b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 37109b8d05b8SZbigniew Bodek err_mmio_read_less: 37119b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 37129b8d05b8SZbigniew Bodek 37130bdffe59SMarcin Wojtas return (rc); 37149b8d05b8SZbigniew Bodek } 37159b8d05b8SZbigniew Bodek 37169b8d05b8SZbigniew Bodek static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, 37179b8d05b8SZbigniew Bodek int io_vectors) 37189b8d05b8SZbigniew Bodek { 37199b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 37209b8d05b8SZbigniew Bodek int rc; 37219b8d05b8SZbigniew Bodek 37229b8d05b8SZbigniew Bodek rc = ena_enable_msix(adapter); 37233f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37249b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Error with MSI-X enablement\n"); 37250bdffe59SMarcin Wojtas return (rc); 37269b8d05b8SZbigniew Bodek } 37279b8d05b8SZbigniew Bodek 37289b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(adapter); 37299b8d05b8SZbigniew Bodek 37309b8d05b8SZbigniew Bodek rc = ena_request_mgmnt_irq(adapter); 37313f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37329b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n"); 37339b8d05b8SZbigniew Bodek goto err_disable_msix; 37349b8d05b8SZbigniew Bodek } 37359b8d05b8SZbigniew Bodek 37369b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, false); 37379b8d05b8SZbigniew Bodek 37389b8d05b8SZbigniew Bodek ena_com_admin_aenq_enable(ena_dev); 37399b8d05b8SZbigniew Bodek 37400bdffe59SMarcin Wojtas return (0); 37419b8d05b8SZbigniew Bodek 37429b8d05b8SZbigniew Bodek err_disable_msix: 37439b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 37449b8d05b8SZbigniew Bodek 37450bdffe59SMarcin Wojtas return (rc); 37469b8d05b8SZbigniew 
Bodek } 37479b8d05b8SZbigniew Bodek 37489b8d05b8SZbigniew Bodek /* Function called on ENA_ADMIN_KEEP_ALIVE event */ 37499b8d05b8SZbigniew Bodek static void ena_keep_alive_wd(void *adapter_data, 37509b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 37519b8d05b8SZbigniew Bodek { 37529b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 375330217e2dSMarcin Wojtas struct ena_admin_aenq_keep_alive_desc *desc; 37549b8d05b8SZbigniew Bodek sbintime_t stime; 375530217e2dSMarcin Wojtas uint64_t rx_drops; 375630217e2dSMarcin Wojtas 375730217e2dSMarcin Wojtas desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 375830217e2dSMarcin Wojtas 375930217e2dSMarcin Wojtas rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 376030217e2dSMarcin Wojtas counter_u64_zero(adapter->hw_stats.rx_drops); 376130217e2dSMarcin Wojtas counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); 37629b8d05b8SZbigniew Bodek 37639b8d05b8SZbigniew Bodek stime = getsbinuptime(); 37649b8d05b8SZbigniew Bodek atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 37659b8d05b8SZbigniew Bodek } 37669b8d05b8SZbigniew Bodek 37679b8d05b8SZbigniew Bodek /* Check for keep alive expiration */ 37689b8d05b8SZbigniew Bodek static void check_for_missing_keep_alive(struct ena_adapter *adapter) 37699b8d05b8SZbigniew Bodek { 37709b8d05b8SZbigniew Bodek sbintime_t timestamp, time; 37719b8d05b8SZbigniew Bodek 37729b8d05b8SZbigniew Bodek if (adapter->wd_active == 0) 37739b8d05b8SZbigniew Bodek return; 37749b8d05b8SZbigniew Bodek 377540621d71SMarcin Wojtas if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 37769b8d05b8SZbigniew Bodek return; 37779b8d05b8SZbigniew Bodek 37789b8d05b8SZbigniew Bodek timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); 37799b8d05b8SZbigniew Bodek time = getsbinuptime() - timestamp; 37809b8d05b8SZbigniew Bodek if (unlikely(time > adapter->keep_alive_timeout)) { 37819b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 37829b8d05b8SZbigniew Bodek "Keep alive watchdog timeout.\n"); 37839b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.wd_expired, 1); 3784a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 37859b8d05b8SZbigniew Bodek adapter->trigger_reset = true; 37869b8d05b8SZbigniew Bodek } 37879b8d05b8SZbigniew Bodek } 37889b8d05b8SZbigniew Bodek 37899b8d05b8SZbigniew Bodek /* Check if admin queue is enabled */ 37909b8d05b8SZbigniew Bodek static void check_for_admin_com_state(struct ena_adapter *adapter) 37919b8d05b8SZbigniew Bodek { 37920bdffe59SMarcin Wojtas if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == 37930bdffe59SMarcin Wojtas false)) { 37949b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 37959b8d05b8SZbigniew Bodek "ENA admin queue is not in running state!\n"); 37969b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 3797a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 37989b8d05b8SZbigniew Bodek adapter->trigger_reset = true; 37999b8d05b8SZbigniew Bodek } 38009b8d05b8SZbigniew Bodek } 38019b8d05b8SZbigniew Bodek 380274dba3adSMarcin Wojtas static int 3803d12f7bfcSMarcin Wojtas check_for_rx_interrupt_queue(struct ena_adapter *adapter, 3804d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring) 3805d12f7bfcSMarcin Wojtas { 3806d12f7bfcSMarcin Wojtas if (likely(rx_ring->first_interrupt)) 3807d12f7bfcSMarcin Wojtas return (0); 3808d12f7bfcSMarcin Wojtas 3809d12f7bfcSMarcin Wojtas if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 
3810d12f7bfcSMarcin Wojtas return (0); 3811d12f7bfcSMarcin Wojtas 3812d12f7bfcSMarcin Wojtas rx_ring->no_interrupt_event_cnt++; 3813d12f7bfcSMarcin Wojtas 3814d12f7bfcSMarcin Wojtas if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { 3815d12f7bfcSMarcin Wojtas device_printf(adapter->pdev, "Potential MSIX issue on Rx side " 3816d12f7bfcSMarcin Wojtas "Queue = %d. Reset the device\n", rx_ring->qid); 3817d12f7bfcSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; 3818d12f7bfcSMarcin Wojtas adapter->trigger_reset = true; 3819d12f7bfcSMarcin Wojtas return (EIO); 3820d12f7bfcSMarcin Wojtas } 3821d12f7bfcSMarcin Wojtas 3822d12f7bfcSMarcin Wojtas return (0); 3823d12f7bfcSMarcin Wojtas } 3824d12f7bfcSMarcin Wojtas 3825d12f7bfcSMarcin Wojtas static int 3826d12f7bfcSMarcin Wojtas check_missing_comp_in_tx_queue(struct ena_adapter *adapter, 382774dba3adSMarcin Wojtas struct ena_ring *tx_ring) 382874dba3adSMarcin Wojtas { 382974dba3adSMarcin Wojtas struct bintime curtime, time; 383074dba3adSMarcin Wojtas struct ena_tx_buffer *tx_buf; 3831d12f7bfcSMarcin Wojtas sbintime_t time_offset; 383274dba3adSMarcin Wojtas uint32_t missed_tx = 0; 3833d12f7bfcSMarcin Wojtas int i, rc = 0; 383474dba3adSMarcin Wojtas 383574dba3adSMarcin Wojtas getbinuptime(&curtime); 383674dba3adSMarcin Wojtas 383774dba3adSMarcin Wojtas for (i = 0; i < tx_ring->ring_size; i++) { 383874dba3adSMarcin Wojtas tx_buf = &tx_ring->tx_buffer_info[i]; 383974dba3adSMarcin Wojtas 38400bdffe59SMarcin Wojtas if (bintime_isset(&tx_buf->timestamp) == 0) 384174dba3adSMarcin Wojtas continue; 384274dba3adSMarcin Wojtas 384374dba3adSMarcin Wojtas time = curtime; 384474dba3adSMarcin Wojtas bintime_sub(&time, &tx_buf->timestamp); 3845d12f7bfcSMarcin Wojtas time_offset = bttosbt(time); 3846d12f7bfcSMarcin Wojtas 3847d12f7bfcSMarcin Wojtas if (unlikely(!tx_ring->first_interrupt && 3848d12f7bfcSMarcin Wojtas time_offset > 2 * adapter->missing_tx_timeout)) { 3849d12f7bfcSMarcin Wojtas /* 3850d12f7bfcSMarcin Wojtas * If after graceful period interrupt is still not 3851d12f7bfcSMarcin Wojtas * received, we schedule a reset. 3852d12f7bfcSMarcin Wojtas */ 3853d12f7bfcSMarcin Wojtas device_printf(adapter->pdev, 3854d12f7bfcSMarcin Wojtas "Potential MSIX issue on Tx side Queue = %d. " 3855d12f7bfcSMarcin Wojtas "Reset the device\n", tx_ring->qid); 3856d12f7bfcSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; 3857d12f7bfcSMarcin Wojtas adapter->trigger_reset = true; 3858d12f7bfcSMarcin Wojtas return (EIO); 3859d12f7bfcSMarcin Wojtas } 386074dba3adSMarcin Wojtas 386174dba3adSMarcin Wojtas /* Check again if packet is still waiting */ 3862d12f7bfcSMarcin Wojtas if (unlikely(time_offset > adapter->missing_tx_timeout)) { 386374dba3adSMarcin Wojtas 386474dba3adSMarcin Wojtas if (!tx_buf->print_once) 386574dba3adSMarcin Wojtas ena_trace(ENA_WARNING, "Found a Tx that wasn't " 386674dba3adSMarcin Wojtas "completed on time, qid %d, index %d.\n", 386774dba3adSMarcin Wojtas tx_ring->qid, i); 386874dba3adSMarcin Wojtas 386974dba3adSMarcin Wojtas tx_buf->print_once = true; 387074dba3adSMarcin Wojtas missed_tx++; 3871d12f7bfcSMarcin Wojtas } 3872d12f7bfcSMarcin Wojtas } 387374dba3adSMarcin Wojtas 3874d12f7bfcSMarcin Wojtas if (unlikely(missed_tx > adapter->missing_tx_threshold)) { 387574dba3adSMarcin Wojtas device_printf(adapter->pdev, 3876d12f7bfcSMarcin Wojtas "The number of lost tx completion is above the threshold " 3877d12f7bfcSMarcin Wojtas "(%d > %d). 
Reset the device\n",
38784e8acd84SMarcin Wojtas missed_tx, adapter->missing_tx_threshold);
3879d12f7bfcSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
388074dba3adSMarcin Wojtas adapter->trigger_reset = true;
3881d12f7bfcSMarcin Wojtas rc = EIO;
388274dba3adSMarcin Wojtas }
388374dba3adSMarcin Wojtas
3884d12f7bfcSMarcin Wojtas counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);
3885d12f7bfcSMarcin Wojtas
3886d12f7bfcSMarcin Wojtas return (rc);
388774dba3adSMarcin Wojtas }
388874dba3adSMarcin Wojtas
38899b8d05b8SZbigniew Bodek /*
38909b8d05b8SZbigniew Bodek * Check for TX which were not completed on time.
38919b8d05b8SZbigniew Bodek * Timeout is defined by "missing_tx_timeout".
38929b8d05b8SZbigniew Bodek * Reset will be performed if number of incomplete
38939b8d05b8SZbigniew Bodek * transactions exceeds "missing_tx_threshold".
38949b8d05b8SZbigniew Bodek */
38950bdffe59SMarcin Wojtas static void
3896d12f7bfcSMarcin Wojtas check_for_missing_completions(struct ena_adapter *adapter)
38979b8d05b8SZbigniew Bodek {
38989b8d05b8SZbigniew Bodek struct ena_ring *tx_ring;
3899d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring;
390074dba3adSMarcin Wojtas int i, budget, rc;
39019b8d05b8SZbigniew Bodek
39029b8d05b8SZbigniew Bodek /* Make sure the driver isn't turning the device off in another process */
39039b8d05b8SZbigniew Bodek rmb();
39049b8d05b8SZbigniew Bodek
39059b8d05b8SZbigniew Bodek if (!adapter->up)
39069b8d05b8SZbigniew Bodek return;
39079b8d05b8SZbigniew Bodek
39089b8d05b8SZbigniew Bodek if (adapter->trigger_reset)
39099b8d05b8SZbigniew Bodek return;
39109b8d05b8SZbigniew Bodek
391140621d71SMarcin Wojtas if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
39129b8d05b8SZbigniew Bodek return;
39139b8d05b8SZbigniew Bodek
39149b8d05b8SZbigniew Bodek budget = adapter->missing_tx_max_queues;
39159b8d05b8SZbigniew Bodek
39169b8d05b8SZbigniew Bodek for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) {
39179b8d05b8SZbigniew Bodek tx_ring = &adapter->tx_ring[i];
3918d12f7bfcSMarcin Wojtas rx_ring = &adapter->rx_ring[i];
39199b8d05b8SZbigniew Bodek
3920d12f7bfcSMarcin Wojtas rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3921d12f7bfcSMarcin Wojtas if (unlikely(rc != 0))
3922d12f7bfcSMarcin Wojtas return;
3923d12f7bfcSMarcin Wojtas
3924d12f7bfcSMarcin Wojtas rc = check_for_rx_interrupt_queue(adapter, rx_ring);
39250bdffe59SMarcin Wojtas if (unlikely(rc != 0))
39269b8d05b8SZbigniew Bodek return;
39279b8d05b8SZbigniew Bodek
39289b8d05b8SZbigniew Bodek budget--;
3929cd5d5804SMarcin Wojtas if (budget == 0) {
39309b8d05b8SZbigniew Bodek i++;
39319b8d05b8SZbigniew Bodek break;
39329b8d05b8SZbigniew Bodek }
39339b8d05b8SZbigniew Bodek }
39349b8d05b8SZbigniew Bodek
39359b8d05b8SZbigniew Bodek adapter->next_monitored_tx_qid = i % adapter->num_queues;
39369b8d05b8SZbigniew Bodek }
39379b8d05b8SZbigniew Bodek
39385cb9db07SMarcin Wojtas /* trigger rx cleanup after 2 consecutive detections */
3939efe6ab18SMarcin Wojtas #define EMPTY_RX_REFILL 2
3940efe6ab18SMarcin Wojtas /* For the rare case where the device runs out of Rx descriptors and the
3941efe6ab18SMarcin Wojtas * msix handler failed to refill new Rx descriptors (due to a lack of memory
3942efe6ab18SMarcin Wojtas * for example).
3943efe6ab18SMarcin Wojtas * This case will lead to a deadlock: 3944efe6ab18SMarcin Wojtas * The device won't send interrupts since all the new Rx packets will be dropped 3945efe6ab18SMarcin Wojtas * The msix handler won't allocate new Rx descriptors so the device won't be 3946efe6ab18SMarcin Wojtas * able to send new packets. 3947efe6ab18SMarcin Wojtas * 3948efe6ab18SMarcin Wojtas * When such a situation is detected - execute rx cleanup task in another thread 3949efe6ab18SMarcin Wojtas */ 3950efe6ab18SMarcin Wojtas static void 3951efe6ab18SMarcin Wojtas check_for_empty_rx_ring(struct ena_adapter *adapter) 3952efe6ab18SMarcin Wojtas { 3953efe6ab18SMarcin Wojtas struct ena_ring *rx_ring; 3954efe6ab18SMarcin Wojtas int i, refill_required; 3955efe6ab18SMarcin Wojtas 3956efe6ab18SMarcin Wojtas if (!adapter->up) 3957efe6ab18SMarcin Wojtas return; 3958efe6ab18SMarcin Wojtas 3959efe6ab18SMarcin Wojtas if (adapter->trigger_reset) 3960efe6ab18SMarcin Wojtas return; 3961efe6ab18SMarcin Wojtas 3962efe6ab18SMarcin Wojtas for (i = 0; i < adapter->num_queues; i++) { 3963efe6ab18SMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 3964efe6ab18SMarcin Wojtas 3965efe6ab18SMarcin Wojtas refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); 3966efe6ab18SMarcin Wojtas if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 3967efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue++; 3968efe6ab18SMarcin Wojtas 3969efe6ab18SMarcin Wojtas if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 3970efe6ab18SMarcin Wojtas counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 3971efe6ab18SMarcin Wojtas 1); 3972efe6ab18SMarcin Wojtas 3973efe6ab18SMarcin Wojtas device_printf(adapter->pdev, 3974efe6ab18SMarcin Wojtas "trigger refill for ring %d\n", i); 3975efe6ab18SMarcin Wojtas 39765cb9db07SMarcin Wojtas taskqueue_enqueue(rx_ring->que->cleanup_tq, 39775cb9db07SMarcin Wojtas &rx_ring->que->cleanup_task); 3978efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3979efe6ab18SMarcin Wojtas } 3980efe6ab18SMarcin Wojtas } else { 3981efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3982efe6ab18SMarcin Wojtas } 3983efe6ab18SMarcin Wojtas } 3984efe6ab18SMarcin Wojtas } 39859b8d05b8SZbigniew Bodek 398640621d71SMarcin Wojtas static void ena_update_hints(struct ena_adapter *adapter, 398740621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints) 398840621d71SMarcin Wojtas { 398940621d71SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 399040621d71SMarcin Wojtas 399140621d71SMarcin Wojtas if (hints->admin_completion_tx_timeout) 399240621d71SMarcin Wojtas ena_dev->admin_queue.completion_timeout = 399340621d71SMarcin Wojtas hints->admin_completion_tx_timeout * 1000; 399440621d71SMarcin Wojtas 399540621d71SMarcin Wojtas if (hints->mmio_read_timeout) 399640621d71SMarcin Wojtas /* convert to usec */ 399740621d71SMarcin Wojtas ena_dev->mmio_read.reg_read_to = 399840621d71SMarcin Wojtas hints->mmio_read_timeout * 1000; 399940621d71SMarcin Wojtas 400040621d71SMarcin Wojtas if (hints->missed_tx_completion_count_threshold_to_reset) 400140621d71SMarcin Wojtas adapter->missing_tx_threshold = 400240621d71SMarcin Wojtas hints->missed_tx_completion_count_threshold_to_reset; 400340621d71SMarcin Wojtas 400440621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout) { 400540621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout == 400640621d71SMarcin Wojtas ENA_HW_HINTS_NO_TIMEOUT) 400740621d71SMarcin Wojtas adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 400840621d71SMarcin Wojtas else 400940621d71SMarcin Wojtas 
adapter->missing_tx_timeout = 401040621d71SMarcin Wojtas SBT_1MS * hints->missing_tx_completion_timeout; 401140621d71SMarcin Wojtas } 401240621d71SMarcin Wojtas 401340621d71SMarcin Wojtas if (hints->driver_watchdog_timeout) { 401440621d71SMarcin Wojtas if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 401540621d71SMarcin Wojtas adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 401640621d71SMarcin Wojtas else 401740621d71SMarcin Wojtas adapter->keep_alive_timeout = 401840621d71SMarcin Wojtas SBT_1MS * hints->driver_watchdog_timeout; 401940621d71SMarcin Wojtas } 402040621d71SMarcin Wojtas } 402140621d71SMarcin Wojtas 40229b8d05b8SZbigniew Bodek static void 40239b8d05b8SZbigniew Bodek ena_timer_service(void *data) 40249b8d05b8SZbigniew Bodek { 40259b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)data; 40269b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info = 40279b8d05b8SZbigniew Bodek adapter->ena_dev->host_attr.host_info; 40289b8d05b8SZbigniew Bodek 40299b8d05b8SZbigniew Bodek check_for_missing_keep_alive(adapter); 40309b8d05b8SZbigniew Bodek 40319b8d05b8SZbigniew Bodek check_for_admin_com_state(adapter); 40329b8d05b8SZbigniew Bodek 4033d12f7bfcSMarcin Wojtas check_for_missing_completions(adapter); 40349b8d05b8SZbigniew Bodek 4035efe6ab18SMarcin Wojtas check_for_empty_rx_ring(adapter); 4036efe6ab18SMarcin Wojtas 40370bdffe59SMarcin Wojtas if (host_info != NULL) 40389b8d05b8SZbigniew Bodek ena_update_host_info(host_info, adapter->ifp); 40399b8d05b8SZbigniew Bodek 40409b8d05b8SZbigniew Bodek if (unlikely(adapter->trigger_reset)) { 40419b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Trigger reset is on\n"); 40429b8d05b8SZbigniew Bodek taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task); 40439b8d05b8SZbigniew Bodek return; 40449b8d05b8SZbigniew Bodek } 40459b8d05b8SZbigniew Bodek 40469b8d05b8SZbigniew Bodek /* 40479b8d05b8SZbigniew Bodek * Schedule another timeout one second from now. 
40489b8d05b8SZbigniew Bodek */ 40499b8d05b8SZbigniew Bodek callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0); 40509b8d05b8SZbigniew Bodek } 40519b8d05b8SZbigniew Bodek 40529b8d05b8SZbigniew Bodek static void 40539b8d05b8SZbigniew Bodek ena_reset_task(void *arg, int pending) 40549b8d05b8SZbigniew Bodek { 40559b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx get_feat_ctx; 40569b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 40579b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 40589b8d05b8SZbigniew Bodek bool dev_up; 40599b8d05b8SZbigniew Bodek int rc; 40609b8d05b8SZbigniew Bodek 40619b8d05b8SZbigniew Bodek if (unlikely(!adapter->trigger_reset)) { 40629b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 40639b8d05b8SZbigniew Bodek "device reset scheduled but trigger_reset is off\n"); 40649b8d05b8SZbigniew Bodek return; 40659b8d05b8SZbigniew Bodek } 40669b8d05b8SZbigniew Bodek 40679b8d05b8SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 40689b8d05b8SZbigniew Bodek 40699b8d05b8SZbigniew Bodek callout_drain(&adapter->timer_service); 40709b8d05b8SZbigniew Bodek 40719b8d05b8SZbigniew Bodek dev_up = adapter->up; 40729b8d05b8SZbigniew Bodek 40739b8d05b8SZbigniew Bodek ena_com_set_admin_running_state(ena_dev, false); 40749b8d05b8SZbigniew Bodek ena_down(adapter); 4075a195fab0SMarcin Wojtas ena_free_mgmnt_irq(adapter); 40769b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 40779b8d05b8SZbigniew Bodek ena_com_abort_admin_commands(ena_dev); 40789b8d05b8SZbigniew Bodek ena_com_wait_for_abort_completion(ena_dev); 40799b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 40809b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 40819b8d05b8SZbigniew Bodek 4082a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 40839b8d05b8SZbigniew Bodek adapter->trigger_reset = false; 40849b8d05b8SZbigniew Bodek 40859b8d05b8SZbigniew Bodek /* Finished destroy part. Restart the device */ 40869b8d05b8SZbigniew Bodek rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, 40879b8d05b8SZbigniew Bodek &adapter->wd_active); 40883f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 40899b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 40909b8d05b8SZbigniew Bodek "ENA device init failed! 
(err: %d)\n", rc); 40919b8d05b8SZbigniew Bodek goto err_dev_free; 40929b8d05b8SZbigniew Bodek } 40939b8d05b8SZbigniew Bodek 40946064f289SMarcin Wojtas rc = ena_handle_updated_queues(adapter, &get_feat_ctx); 40956064f289SMarcin Wojtas if (unlikely(rc != 0)) 40966064f289SMarcin Wojtas goto err_dev_free; 40976064f289SMarcin Wojtas 40989b8d05b8SZbigniew Bodek rc = ena_enable_msix_and_set_admin_interrupts(adapter, 40999b8d05b8SZbigniew Bodek adapter->num_queues); 41003f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 41019b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Enable MSI-X failed\n"); 41029b8d05b8SZbigniew Bodek goto err_com_free; 41039b8d05b8SZbigniew Bodek } 41049b8d05b8SZbigniew Bodek 41059b8d05b8SZbigniew Bodek /* If the interface was up before the reset bring it up */ 41069b8d05b8SZbigniew Bodek if (dev_up) { 41079b8d05b8SZbigniew Bodek rc = ena_up(adapter); 41083f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 41099b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 41109b8d05b8SZbigniew Bodek "Failed to create I/O queues\n"); 41119b8d05b8SZbigniew Bodek goto err_msix_free; 41129b8d05b8SZbigniew Bodek } 41139b8d05b8SZbigniew Bodek } 41149b8d05b8SZbigniew Bodek 41159b8d05b8SZbigniew Bodek callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 41169b8d05b8SZbigniew Bodek ena_timer_service, (void *)adapter, 0); 41179b8d05b8SZbigniew Bodek 41189b8d05b8SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 41199b8d05b8SZbigniew Bodek 41209b8d05b8SZbigniew Bodek return; 41219b8d05b8SZbigniew Bodek 41229b8d05b8SZbigniew Bodek err_msix_free: 41239b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(adapter); 41249b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 41259b8d05b8SZbigniew Bodek err_com_free: 41269b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 41279b8d05b8SZbigniew Bodek err_dev_free: 41289b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "ENA reset failed!\n"); 41299b8d05b8SZbigniew Bodek adapter->running = false; 41309b8d05b8SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 41319b8d05b8SZbigniew Bodek } 41329b8d05b8SZbigniew Bodek 41339b8d05b8SZbigniew Bodek /** 41349b8d05b8SZbigniew Bodek * ena_attach - Device Initialization Routine 41359b8d05b8SZbigniew Bodek * @pdev: device information struct 41369b8d05b8SZbigniew Bodek * 41379b8d05b8SZbigniew Bodek * Returns 0 on success, otherwise on failure. 41389b8d05b8SZbigniew Bodek * 41399b8d05b8SZbigniew Bodek * ena_attach initializes an adapter identified by a device structure. 41409b8d05b8SZbigniew Bodek * The OS initialization, configuring of the adapter private structure, 41419b8d05b8SZbigniew Bodek * and a hardware reset occur. 
41429b8d05b8SZbigniew Bodek **/ 41439b8d05b8SZbigniew Bodek static int 41449b8d05b8SZbigniew Bodek ena_attach(device_t pdev) 41459b8d05b8SZbigniew Bodek { 41469b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx get_feat_ctx; 41474fa9e02dSMarcin Wojtas struct ena_llq_configurations llq_config; 41486064f289SMarcin Wojtas struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 41499b8d05b8SZbigniew Bodek static int version_printed; 41509b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 41519b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = NULL; 41524fa9e02dSMarcin Wojtas const char *queue_type_str; 41539b8d05b8SZbigniew Bodek int io_queue_num; 41544fa9e02dSMarcin Wojtas int rid, rc; 41554fa9e02dSMarcin Wojtas 41569b8d05b8SZbigniew Bodek adapter = device_get_softc(pdev); 41579b8d05b8SZbigniew Bodek adapter->pdev = pdev; 41589b8d05b8SZbigniew Bodek 41599b8d05b8SZbigniew Bodek mtx_init(&adapter->global_mtx, "ENA global mtx", NULL, MTX_DEF); 41609b8d05b8SZbigniew Bodek sx_init(&adapter->ioctl_sx, "ENA ioctl sx"); 41619b8d05b8SZbigniew Bodek 41629b8d05b8SZbigniew Bodek /* Set up the timer service */ 41639b8d05b8SZbigniew Bodek callout_init_mtx(&adapter->timer_service, &adapter->global_mtx, 0); 41649b8d05b8SZbigniew Bodek adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO; 41659b8d05b8SZbigniew Bodek adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO; 41669b8d05b8SZbigniew Bodek adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES; 41679b8d05b8SZbigniew Bodek adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD; 41689b8d05b8SZbigniew Bodek 41699b8d05b8SZbigniew Bodek if (version_printed++ == 0) 41709b8d05b8SZbigniew Bodek device_printf(pdev, "%s\n", ena_version); 41719b8d05b8SZbigniew Bodek 41729b8d05b8SZbigniew Bodek /* Allocate memory for ena_dev structure */ 4173cd5d5804SMarcin Wojtas ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, 4174cd5d5804SMarcin Wojtas M_WAITOK | M_ZERO); 41759b8d05b8SZbigniew Bodek 41769b8d05b8SZbigniew Bodek adapter->ena_dev = ena_dev; 41779b8d05b8SZbigniew Bodek ena_dev->dmadev = pdev; 41784fa9e02dSMarcin Wojtas 41794fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_REG_BAR); 41804fa9e02dSMarcin Wojtas adapter->memory = NULL; 41814fa9e02dSMarcin Wojtas adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 41824fa9e02dSMarcin Wojtas &rid, RF_ACTIVE); 41834fa9e02dSMarcin Wojtas if (unlikely(adapter->registers == NULL)) { 41844fa9e02dSMarcin Wojtas device_printf(pdev, 41854fa9e02dSMarcin Wojtas "unable to allocate bus resource: registers!\n"); 41864fa9e02dSMarcin Wojtas rc = ENOMEM; 41874fa9e02dSMarcin Wojtas goto err_dev_free; 41884fa9e02dSMarcin Wojtas } 41894fa9e02dSMarcin Wojtas 41909b8d05b8SZbigniew Bodek ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, 41919b8d05b8SZbigniew Bodek M_WAITOK | M_ZERO); 41929b8d05b8SZbigniew Bodek 41939b8d05b8SZbigniew Bodek /* Store register resources */ 41949b8d05b8SZbigniew Bodek ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = 41959b8d05b8SZbigniew Bodek rman_get_bustag(adapter->registers); 41969b8d05b8SZbigniew Bodek ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = 41979b8d05b8SZbigniew Bodek rman_get_bushandle(adapter->registers); 41989b8d05b8SZbigniew Bodek 41993f9ed7abSMarcin Wojtas if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) { 42009b8d05b8SZbigniew Bodek device_printf(pdev, "failed to pmap registers bar\n"); 42019b8d05b8SZbigniew Bodek rc = ENXIO; 4202cd5d5804SMarcin Wojtas goto err_bus_free; 42039b8d05b8SZbigniew Bodek } 42049b8d05b8SZbigniew Bodek 42059b8d05b8SZbigniew 
Bodek ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
42069b8d05b8SZbigniew Bodek
42079b8d05b8SZbigniew Bodek /* Device initialization */
42089b8d05b8SZbigniew Bodek rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
42093f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) {
42109b8d05b8SZbigniew Bodek device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
42119b8d05b8SZbigniew Bodek rc = ENXIO;
42129b8d05b8SZbigniew Bodek goto err_bus_free;
42139b8d05b8SZbigniew Bodek }
42149b8d05b8SZbigniew Bodek
42154fa9e02dSMarcin Wojtas set_default_llq_configurations(&llq_config);
42164fa9e02dSMarcin Wojtas
42174fa9e02dSMarcin Wojtas #if defined(__arm__) || defined(__aarch64__)
42184fa9e02dSMarcin Wojtas /*
42194fa9e02dSMarcin Wojtas * Force LLQ disable, as the driver does not support WC enablement
42204fa9e02dSMarcin Wojtas * on the ARM architecture. Using LLQ without WC would affect
42214fa9e02dSMarcin Wojtas * performance in a negative way.
42224fa9e02dSMarcin Wojtas */
42234fa9e02dSMarcin Wojtas ena_dev->supported_features &= ~(1 << ENA_ADMIN_LLQ);
42244fa9e02dSMarcin Wojtas #endif
42254fa9e02dSMarcin Wojtas rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
42264fa9e02dSMarcin Wojtas &llq_config);
42274fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) {
42284fa9e02dSMarcin Wojtas device_printf(pdev, "failed to set placement policy\n");
42294fa9e02dSMarcin Wojtas goto err_com_free;
42304fa9e02dSMarcin Wojtas }
42314fa9e02dSMarcin Wojtas
42324fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
42334fa9e02dSMarcin Wojtas queue_type_str = "Regular";
42344fa9e02dSMarcin Wojtas else
42354fa9e02dSMarcin Wojtas queue_type_str = "Low Latency";
42364fa9e02dSMarcin Wojtas device_printf(pdev, "Placement policy: %s\n", queue_type_str);
42374fa9e02dSMarcin Wojtas
42389b8d05b8SZbigniew Bodek adapter->keep_alive_timestamp = getsbinuptime();
42399b8d05b8SZbigniew Bodek
42409b8d05b8SZbigniew Bodek adapter->tx_offload_cap = get_feat_ctx.offload.tx;
42419b8d05b8SZbigniew Bodek
42429b8d05b8SZbigniew Bodek /* Make sure the interface starts out as not up */
42439b8d05b8SZbigniew Bodek adapter->up = false;
42449b8d05b8SZbigniew Bodek
42459b8d05b8SZbigniew Bodek memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
42469b8d05b8SZbigniew Bodek ETHER_ADDR_LEN);
42479b8d05b8SZbigniew Bodek
42486064f289SMarcin Wojtas calc_queue_ctx.ena_dev = ena_dev;
42496064f289SMarcin Wojtas calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
42506064f289SMarcin Wojtas calc_queue_ctx.pdev = pdev;
42516064f289SMarcin Wojtas
42529b8d05b8SZbigniew Bodek /* calculate IO queue number to create */
42539b8d05b8SZbigniew Bodek io_queue_num = ena_calc_io_queue_num(adapter, &get_feat_ctx);
42549b8d05b8SZbigniew Bodek
42559b8d05b8SZbigniew Bodek ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n",
42569b8d05b8SZbigniew Bodek io_queue_num);
42579b8d05b8SZbigniew Bodek adapter->num_queues = io_queue_num;
42589b8d05b8SZbigniew Bodek
42593cfadb28SMarcin Wojtas adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
42606064f289SMarcin Wojtas /* Set the requested Rx ring size */
42616064f289SMarcin Wojtas adapter->rx_ring_size = ENA_DEFAULT_RING_SIZE;
42629b8d05b8SZbigniew Bodek /* calculate ring sizes */
42636064f289SMarcin Wojtas rc = ena_calc_queue_size(adapter, &calc_queue_ctx);
42646064f289SMarcin Wojtas if (unlikely((rc != 0) || (io_queue_num <= 0))) {
42656064f289SMarcin Wojtas rc = EFAULT;
42669b8d05b8SZbigniew Bodek goto err_com_free;
42679b8d05b8SZbigniew Bodek }
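/*
 * Worked example of the ring-size calculation above (hypothetical device
 * limits, not values reported by real hardware): if the default ring size is
 * 1024 and the device advertises a maximum Tx CQ depth of 1000, then
 * ena_calc_queue_size() first clamps the Tx ring to min(1024, 1000) = 1000
 * and then rounds down to a power of two with 1 << (fls(1000) - 1) == 512,
 * so adapter->tx_ring_size below would be set to 512.
 */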
42689b8d05b8SZbigniew Bodek 4269a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 4270a195fab0SMarcin Wojtas 42716064f289SMarcin Wojtas adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 42726064f289SMarcin Wojtas adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 42739b8d05b8SZbigniew Bodek 42746064f289SMarcin Wojtas adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 42756064f289SMarcin Wojtas adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 42766064f289SMarcin Wojtas 42776064f289SMarcin Wojtas adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; 42789b8d05b8SZbigniew Bodek 42799b8d05b8SZbigniew Bodek /* set up dma tags for rx and tx buffers */ 42809b8d05b8SZbigniew Bodek rc = ena_setup_tx_dma_tag(adapter); 42814e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 42824e8acd84SMarcin Wojtas device_printf(pdev, "Failed to create TX DMA tag\n"); 4283cd5d5804SMarcin Wojtas goto err_com_free; 42844e8acd84SMarcin Wojtas } 42859b8d05b8SZbigniew Bodek 42869b8d05b8SZbigniew Bodek rc = ena_setup_rx_dma_tag(adapter); 42874e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 42884e8acd84SMarcin Wojtas device_printf(pdev, "Failed to create RX DMA tag\n"); 4289cd5d5804SMarcin Wojtas goto err_tx_tag_free; 42904e8acd84SMarcin Wojtas } 42919b8d05b8SZbigniew Bodek 42929b8d05b8SZbigniew Bodek /* initialize rings basic information */ 42936064f289SMarcin Wojtas device_printf(pdev, 42946064f289SMarcin Wojtas "Creating %d io queues. Rx queue size: %d, Tx queue size: %d\n", 42956064f289SMarcin Wojtas io_queue_num, 42966064f289SMarcin Wojtas calc_queue_ctx.rx_queue_size, 42976064f289SMarcin Wojtas calc_queue_ctx.tx_queue_size); 4298cd5d5804SMarcin Wojtas ena_init_io_rings(adapter); 42999b8d05b8SZbigniew Bodek 43009b8d05b8SZbigniew Bodek rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); 43013f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 43029b8d05b8SZbigniew Bodek device_printf(pdev, 43039b8d05b8SZbigniew Bodek "Failed to enable and set the admin interrupts\n"); 4304c115a1e2SMarcin Wojtas goto err_io_free; 4305c115a1e2SMarcin Wojtas } 4306c115a1e2SMarcin Wojtas 4307c115a1e2SMarcin Wojtas /* setup network interface */ 4308c115a1e2SMarcin Wojtas rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx); 4309c115a1e2SMarcin Wojtas if (unlikely(rc != 0)) { 4310c115a1e2SMarcin Wojtas device_printf(pdev, "Error with network interface setup\n"); 4311c115a1e2SMarcin Wojtas goto err_msix_free; 43129b8d05b8SZbigniew Bodek } 43139b8d05b8SZbigniew Bodek 4314081169f2SZbigniew Bodek /* Initialize reset task queue */ 4315081169f2SZbigniew Bodek TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); 4316081169f2SZbigniew Bodek adapter->reset_tq = taskqueue_create("ena_reset_enqueue", 4317081169f2SZbigniew Bodek M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); 4318081169f2SZbigniew Bodek taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, 4319081169f2SZbigniew Bodek "%s rstq", device_get_nameunit(adapter->pdev)); 4320081169f2SZbigniew Bodek 43219b8d05b8SZbigniew Bodek /* Initialize statistics */ 43229b8d05b8SZbigniew Bodek ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, 43239b8d05b8SZbigniew Bodek sizeof(struct ena_stats_dev)); 432430217e2dSMarcin Wojtas ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, 432530217e2dSMarcin Wojtas sizeof(struct ena_hw_stats)); 43269b8d05b8SZbigniew Bodek ena_sysctl_add_nodes(adapter); 43279b8d05b8SZbigniew Bodek 43289b8d05b8SZbigniew Bodek /* Tell the stack that the interface is not active */ 
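/*
 * (Descriptive note, assuming the generic if_setdrvflagbits(ifp, set, clear)
 * semantics: the call below sets IFF_DRV_OACTIVE and clears IFF_DRV_RUNNING,
 * so the stack treats the interface as inactive until ena_up() reverses the
 * flags once the I/O queues have been brought up.)
 */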
43299b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 43309b8d05b8SZbigniew Bodek 43319b8d05b8SZbigniew Bodek adapter->running = true; 43329b8d05b8SZbigniew Bodek return (0); 43339b8d05b8SZbigniew Bodek 4334c115a1e2SMarcin Wojtas err_msix_free: 4335c115a1e2SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); 4336c115a1e2SMarcin Wojtas ena_free_mgmnt_irq(adapter); 4337c115a1e2SMarcin Wojtas ena_disable_msix(adapter); 4338cd5d5804SMarcin Wojtas err_io_free: 43399b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(adapter); 43409b8d05b8SZbigniew Bodek ena_free_rx_dma_tag(adapter); 4341cd5d5804SMarcin Wojtas err_tx_tag_free: 43429b8d05b8SZbigniew Bodek ena_free_tx_dma_tag(adapter); 4343cd5d5804SMarcin Wojtas err_com_free: 43449b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 43459b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 4346cd5d5804SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 43479b8d05b8SZbigniew Bodek err_bus_free: 43489b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 43499b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 43504fa9e02dSMarcin Wojtas err_dev_free: 43514fa9e02dSMarcin Wojtas free(ena_dev, M_DEVBUF); 4352cd5d5804SMarcin Wojtas 43539b8d05b8SZbigniew Bodek return (rc); 43549b8d05b8SZbigniew Bodek } 43559b8d05b8SZbigniew Bodek 43569b8d05b8SZbigniew Bodek /** 43579b8d05b8SZbigniew Bodek * ena_detach - Device Removal Routine 43589b8d05b8SZbigniew Bodek * @pdev: device information struct 43599b8d05b8SZbigniew Bodek * 43609b8d05b8SZbigniew Bodek * ena_detach is called by the device subsystem to alert the driver 43619b8d05b8SZbigniew Bodek * that it should release a PCI device. 43629b8d05b8SZbigniew Bodek **/ 43639b8d05b8SZbigniew Bodek static int 43649b8d05b8SZbigniew Bodek ena_detach(device_t pdev) 43659b8d05b8SZbigniew Bodek { 43669b8d05b8SZbigniew Bodek struct ena_adapter *adapter = device_get_softc(pdev); 43679b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 43689b8d05b8SZbigniew Bodek int rc; 43699b8d05b8SZbigniew Bodek 43709b8d05b8SZbigniew Bodek /* Make sure VLANS are not using driver */ 43719b8d05b8SZbigniew Bodek if (adapter->ifp->if_vlantrunk != NULL) { 43729b8d05b8SZbigniew Bodek device_printf(adapter->pdev ,"VLAN is in use, detach first\n"); 43739b8d05b8SZbigniew Bodek return (EBUSY); 43749b8d05b8SZbigniew Bodek } 43759b8d05b8SZbigniew Bodek 43769151c55dSMarcin Wojtas ether_ifdetach(adapter->ifp); 43779151c55dSMarcin Wojtas 43789b8d05b8SZbigniew Bodek /* Free reset task and callout */ 43799b8d05b8SZbigniew Bodek callout_drain(&adapter->timer_service); 43809b8d05b8SZbigniew Bodek while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) 43819b8d05b8SZbigniew Bodek taskqueue_drain(adapter->reset_tq, &adapter->reset_task); 43829b8d05b8SZbigniew Bodek taskqueue_free(adapter->reset_tq); 43839b8d05b8SZbigniew Bodek 4384e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 43859b8d05b8SZbigniew Bodek ena_down(adapter); 4386e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 43879b8d05b8SZbigniew Bodek 43889b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(adapter); 43899b8d05b8SZbigniew Bodek 439030217e2dSMarcin Wojtas ena_free_counters((counter_u64_t *)&adapter->hw_stats, 439130217e2dSMarcin Wojtas sizeof(struct ena_hw_stats)); 43929b8d05b8SZbigniew Bodek ena_free_counters((counter_u64_t *)&adapter->dev_stats, 43939b8d05b8SZbigniew Bodek sizeof(struct ena_stats_dev)); 43949b8d05b8SZbigniew Bodek 43953f9ed7abSMarcin 
Wojtas if (likely(adapter->rss_support)) 43969b8d05b8SZbigniew Bodek ena_com_rss_destroy(ena_dev); 43979b8d05b8SZbigniew Bodek 43989b8d05b8SZbigniew Bodek rc = ena_free_rx_dma_tag(adapter); 43993f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 44009b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 44019b8d05b8SZbigniew Bodek "Unmapped RX DMA tag associations\n"); 44029b8d05b8SZbigniew Bodek 44039b8d05b8SZbigniew Bodek rc = ena_free_tx_dma_tag(adapter); 44043f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 44059b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 44069b8d05b8SZbigniew Bodek "Unmapped TX DMA tag associations\n"); 44079b8d05b8SZbigniew Bodek 44089b8d05b8SZbigniew Bodek /* Reset the device only if the device is running. */ 44099b8d05b8SZbigniew Bodek if (adapter->running) 4410a195fab0SMarcin Wojtas ena_com_dev_reset(ena_dev, adapter->reset_reason); 44119b8d05b8SZbigniew Bodek 44129b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 44139b8d05b8SZbigniew Bodek 44149b8d05b8SZbigniew Bodek ena_free_irqs(adapter); 44159b8d05b8SZbigniew Bodek 4416197f0284SMarcin Wojtas ena_com_abort_admin_commands(ena_dev); 4417197f0284SMarcin Wojtas 4418197f0284SMarcin Wojtas ena_com_wait_for_abort_completion(ena_dev); 4419197f0284SMarcin Wojtas 4420197f0284SMarcin Wojtas ena_com_admin_destroy(ena_dev); 4421197f0284SMarcin Wojtas 44229b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 44239b8d05b8SZbigniew Bodek 44249b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 44259b8d05b8SZbigniew Bodek 44269b8d05b8SZbigniew Bodek mtx_destroy(&adapter->global_mtx); 44279b8d05b8SZbigniew Bodek sx_destroy(&adapter->ioctl_sx); 44289b8d05b8SZbigniew Bodek 44299151c55dSMarcin Wojtas if_free(adapter->ifp); 44309151c55dSMarcin Wojtas 44319b8d05b8SZbigniew Bodek if (ena_dev->bus != NULL) 44329b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 44339b8d05b8SZbigniew Bodek 44349b8d05b8SZbigniew Bodek if (ena_dev != NULL) 44359b8d05b8SZbigniew Bodek free(ena_dev, M_DEVBUF); 44369b8d05b8SZbigniew Bodek 44379b8d05b8SZbigniew Bodek return (bus_generic_detach(pdev)); 44389b8d05b8SZbigniew Bodek } 44399b8d05b8SZbigniew Bodek 44409b8d05b8SZbigniew Bodek /****************************************************************************** 44419b8d05b8SZbigniew Bodek ******************************** AENQ Handlers ******************************* 44429b8d05b8SZbigniew Bodek *****************************************************************************/ 44439b8d05b8SZbigniew Bodek /** 44449b8d05b8SZbigniew Bodek * ena_update_on_link_change: 44459b8d05b8SZbigniew Bodek * Notify the network interface about the change in link status 44469b8d05b8SZbigniew Bodek **/ 44479b8d05b8SZbigniew Bodek static void 44489b8d05b8SZbigniew Bodek ena_update_on_link_change(void *adapter_data, 44499b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 44509b8d05b8SZbigniew Bodek { 44519b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 44529b8d05b8SZbigniew Bodek struct ena_admin_aenq_link_change_desc *aenq_desc; 44539b8d05b8SZbigniew Bodek int status; 44549b8d05b8SZbigniew Bodek if_t ifp; 44559b8d05b8SZbigniew Bodek 44569b8d05b8SZbigniew Bodek aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 44579b8d05b8SZbigniew Bodek ifp = adapter->ifp; 44589b8d05b8SZbigniew Bodek status = aenq_desc->flags & 44599b8d05b8SZbigniew Bodek ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; 44609b8d05b8SZbigniew Bodek 44619b8d05b8SZbigniew Bodek if (status != 0) { 44629b8d05b8SZbigniew Bodek 
		device_printf(adapter->pdev, "link is UP\n");
		if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		device_printf(adapter->pdev, "link is DOWN\n");
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}

	adapter->link_status = status;
}

static void
ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	    "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group,
	    ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints =
		    (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		device_printf(adapter->pdev,
		    "Invalid AENQ notification syndrome %d\n",
		    aenq_e->aenq_common_desc.syndrom);
	}
}

/**
 * This handler is called for an unknown event group or an event with an
 * unimplemented handler.
 **/
static void
unimplemented_aenq_handler(void *adapter_data,
    struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	device_printf(adapter->pdev,
	    "Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
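
/*
 * Newbus dispatch table: only probe, attach and detach are implemented here;
 * any other device method falls back to the kernel's generic defaults.
 */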
static device_method_t ena_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ena_probe),
	DEVMETHOD(device_attach, ena_attach),
	DEVMETHOD(device_detach, ena_detach),
	DEVMETHOD_END
};

static driver_t ena_driver = {
	"ena", ena_methods, sizeof(struct ena_adapter),
};

devclass_t ena_devclass;
DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
    nitems(ena_vendor_info_array) - 1);
MODULE_DEPEND(ena, pci, 1, 1, 1);
MODULE_DEPEND(ena, ether, 1, 1, 1);

/*********************************************************************/
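
/*
 * A minimal usage sketch (not part of the driver): once the driver is built
 * as the if_ena kernel module, it can typically be loaded and an interface
 * brought up from the shell with:
 *
 *	kldload if_ena
 *	ifconfig ena0 up
 *
 * "ena0" is an example unit name; the actual unit number depends on probe
 * order.
 */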