/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/in_cksum.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include "ena.h"
#include "ena_sysctl.h"

/*********************************************************
 *  Function prototypes
 *********************************************************/
static int	ena_probe(device_t);
static void	ena_intr_msix_mgmnt(void *);
static void	ena_free_pci_resources(struct ena_adapter *);
static int	ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void	ena_init_io_rings_common(struct ena_adapter *,
    struct ena_ring *, uint16_t);
static void	ena_init_io_rings(struct ena_adapter *);
static void	ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void	ena_free_all_io_rings_resources(struct ena_adapter *);
static int	ena_setup_tx_dma_tag(struct ena_adapter *);
static int	ena_free_tx_dma_tag(struct ena_adapter *);
static int	ena_setup_rx_dma_tag(struct ena_adapter *);
static int	ena_free_rx_dma_tag(struct ena_adapter *);
static int	ena_setup_tx_resources(struct ena_adapter *, int);
static void	ena_free_tx_resources(struct ena_adapter *, int);
static int	ena_setup_all_tx_resources(struct ena_adapter *);
static void	ena_free_all_tx_resources(struct ena_adapter *);
static inline int validate_rx_req_id(struct ena_ring *, uint16_t);
static int	ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void	ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int	ena_setup_all_rx_resources(struct ena_adapter *);
static void	ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void	ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static int	ena_refill_rx_bufs(struct ena_ring *, uint32_t);
static void	ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void	ena_refill_all_rx_bufs(struct ena_adapter *);
static void	ena_free_all_rx_bufs(struct ena_adapter *);
static void	ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void	ena_free_all_tx_bufs(struct ena_adapter *);
static void	ena_destroy_all_tx_queues(struct ena_adapter *);
static void	ena_destroy_all_rx_queues(struct ena_adapter *);
static void	ena_destroy_all_io_queues(struct ena_adapter *);
static int	ena_create_io_queues(struct ena_adapter *);
static int	ena_tx_cleanup(struct ena_ring *);
static int	ena_rx_cleanup(struct ena_ring *);
static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
static void	ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *,
    struct mbuf *);
static struct mbuf* ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *,
    struct ena_com_rx_ctx *, uint16_t *);
static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *,
    struct mbuf *);
static void	ena_cleanup(void *arg, int pending);
static int	ena_handle_msix(void *);
static int	ena_enable_msix(struct ena_adapter *);
static void	ena_setup_mgmnt_intr(struct ena_adapter *);
static void	ena_setup_io_intr(struct ena_adapter *);
static int	ena_request_mgmnt_irq(struct ena_adapter *);
static int	ena_request_io_irq(struct ena_adapter *);
static void	ena_free_mgmnt_irq(struct ena_adapter *);
static void	ena_free_io_irq(struct ena_adapter *);
static void	ena_free_irqs(struct ena_adapter*);
static void	ena_disable_msix(struct ena_adapter *);
static void	ena_unmask_all_io_irqs(struct ena_adapter *);
static int	ena_rss_configure(struct ena_adapter *);
static int	ena_up_complete(struct ena_adapter *);
static int	ena_up(struct ena_adapter *);
static void	ena_down(struct ena_adapter *);
static uint64_t	ena_get_counter(if_t, ift_counter);
static int	ena_media_change(if_t);
static void	ena_media_status(if_t, struct ifmediareq *);
static void	ena_init(void *);
static int	ena_ioctl(if_t, u_long, caddr_t);
static int	ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void	ena_update_host_info(struct ena_admin_host_info *, if_t);
static void	ena_update_hwassist(struct ena_adapter *);
static int	ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static void	ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *);
static int	ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
    struct mbuf **mbuf);
static void	ena_dmamap_llq(void *, bus_dma_segment_t *, int, int);
static int	ena_xmit_mbuf(struct ena_ring *, struct mbuf **);
static void	ena_start_xmit(struct ena_ring *);
static int	ena_mq_start(if_t, struct mbuf *);
static void	ena_deferred_mq_start(void *, int);
static void	ena_qflush(if_t);
static int	ena_enable_wc(struct resource *);
static int	ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static int	ena_calc_io_queue_num(struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int	ena_calc_queue_size(struct ena_adapter *,
    struct ena_calc_queue_size_ctx *);
static int	ena_handle_updated_queues(struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int	ena_rss_init_default(struct ena_adapter *);
static void	ena_rss_init_default_deferred(void *);
static void	ena_config_host_info(struct ena_com_dev *, device_t);
static int	ena_attach(device_t);
static int	ena_detach(device_t);
static int	ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int	ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *,
    int);
static void	ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void	unimplemented_aenq_handler(void *,
    struct ena_admin_aenq_entry *);
static void	ena_timer_service(void *);

static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;

static ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0},
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0},
	/* Last entry */
	{ 0, 0, 0 }
};

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;

void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *) arg = segs[0].ds_addr;
}

int
ena_dma_alloc(device_t dmadev, bus_size_t size,
    ena_mem_handle_t *dma, int mapflags)
{
	struct ena_adapter* adapter = device_get_softc(dmadev);
	uint32_t maxsize;
	uint64_t dma_space_addr;
	int error;

	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
	if (unlikely(dma_space_addr == 0))
		dma_space_addr = BUS_SPACE_MAXADDR;

	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
	    8, 0,	      /* alignment, bounds		*/
	    dma_space_addr,   /* lowaddr of exclusion window	*/
	    BUS_SPACE_MAXADDR,/* highaddr of exclusion window	*/
	    NULL, NULL,	      /* filter, filterarg		*/
	    maxsize,	      /* maxsize			*/
	    1,		      /* nsegments			*/
	    maxsize,	      /* maxsegsize			*/
	    BUS_DMA_ALLOCNOW, /* flags				*/
	    NULL,	      /* lockfunc			*/
	    NULL,	      /* lockarg			*/
	    &dma->tag);
	if (unlikely(error != 0)) {
		ena_trace(ENA_ALERT, "bus_dma_tag_create failed: %d\n", error);
		goto fail_tag;
	}

	error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
	if (unlikely(error != 0)) {
		ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n",
		    (uintmax_t)size, error);
		goto fail_map_create;
	}

	dma->paddr = 0;
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
	    size, ena_dmamap_callback, &dma->paddr, mapflags);
	if (unlikely((error != 0) || (dma->paddr == 0))) {
		ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error);
		goto fail_map_load;
	}

	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail_map_load:
	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
	bus_dma_tag_destroy(dma->tag);
fail_tag:
	dma->tag = NULL;
	dma->vaddr = NULL;
	dma->paddr = 0;

	return (error);
}
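
/*
 * A minimal usage sketch for ena_dma_alloc() above (illustration only, not
 * part of the driver; the "io_mem" handle name is made up).  The teardown
 * simply mirrors the resources created by the helper: the loaded map, the
 * coherent memory and the DMA tag.
 *
 *	ena_mem_handle_t io_mem;
 *
 *	if (ena_dma_alloc(adapter->pdev, 4096, &io_mem, 0) != 0)
 *		return (ENOMEM);
 *	// io_mem.vaddr / io_mem.paddr now point at zeroed, DMA-visible memory
 *
 *	bus_dmamap_unload(io_mem.tag, io_mem.map);
 *	bus_dmamem_free(io_mem.tag, io_mem.vaddr, io_mem.map);
 *	bus_dma_tag_destroy(io_mem.tag);
 */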

static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;

	if (adapter->memory != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
	}

	if (adapter->registers != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
	}
}

static int
ena_probe(device_t dev)
{
	ena_vendor_info_t *ent;
	char		adapter_name[60];
	uint16_t	pci_vendor_id = 0;
	uint16_t	pci_device_id = 0;

	pci_vendor_id = pci_get_vendor(dev);
	pci_device_id = pci_get_device(dev);

	ent = ena_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id)) {
			ena_trace(ENA_DBG, "vendor=%x device=%x ",
			    pci_vendor_id, pci_device_id);

			sprintf(adapter_name, DEVICE_DESC);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}

		ent++;

	}

	return (ENXIO);
}

static int
ena_change_mtu(if_t ifp, int new_mtu)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	int rc;

	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
		device_printf(adapter->pdev, "Invalid MTU setting. "
		    "new_mtu: %d max mtu: %d min mtu: %d\n",
		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
		return (EINVAL);
	}

	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (likely(rc == 0)) {
		ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu);
		if_setmtu(ifp, new_mtu);
	} else {
		device_printf(adapter->pdev, "Failed to set MTU to %d\n",
		    new_mtu);
	}

	return (rc);
}

static inline void
ena_alloc_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		*begin = counter_u64_alloc(M_WAITOK);
}

static inline void
ena_free_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_free(*begin);
}

static inline void
ena_reset_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_zero(*begin);
}
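
/*
 * The three helpers above treat a statistics structure as a flat array of
 * counter_u64_t entries: "size" is the size of the whole struct in bytes,
 * so they are only valid for structs whose every member is a counter_u64_t.
 * This is exactly how the driver calls them later in this file, e.g.:
 *
 *	ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
 *	    sizeof(txr->tx_stats));
 *	...
 *	ena_free_counters((counter_u64_t *)&txr->tx_stats,
 *	    sizeof(txr->tx_stats));
 */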

static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{

	ring->qid = qid;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
}

static void
ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	struct ena_que *que;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->smoothed_interval =
		    ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* Allocate a buf ring */
		txr->buf_ring_size = adapter->buf_ring_size;
		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF,
		    M_WAITOK, &txr->ring_mtx);

		/* Alloc TX statistics. */
		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
		    sizeof(txr->tx_stats));

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->smoothed_interval =
		    ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);

		/* Alloc RX statistics. */
		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
		    sizeof(rxr->rx_stats));

		/* Initialize locks */
		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(adapter->pdev), i);
		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(adapter->pdev), i);

		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);

		que = &adapter->que[i];
		que->adapter = adapter;
		que->id = i;
		que->tx_ring = txr;
		que->rx_ring = rxr;

		txr->que = que;
		rxr->que = que;

		rxr->empty_rx_queue = 0;
	}
}

static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *txr = &adapter->tx_ring[qid];
	struct ena_ring *rxr = &adapter->rx_ring[qid];

	ena_free_counters((counter_u64_t *)&txr->tx_stats,
	    sizeof(txr->tx_stats));
	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
	    sizeof(rxr->rx_stats));

	ENA_RING_MTX_LOCK(txr);
	drbr_free(txr->br, M_DEVBUF);
	ENA_RING_MTX_UNLOCK(txr);

	mtx_destroy(&txr->ring_mtx);
}

static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_io_ring_resources(adapter, i);

}

static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Tx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
	    1, 0,				  /* alignment, bounds	     */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg	     */
	    ENA_TSO_MAXSIZE,			  /* maxsize		     */
	    adapter->max_tx_sgl_size - 1,	  /* nsegments		     */
	    ENA_TSO_MAXSIZE,			  /* maxsegsize		     */
	    0,					  /* flags		     */
	    NULL,				  /* lockfunc		     */
	    NULL,				  /* lockfuncarg	     */
	    &adapter->tx_buf_tag);

	return (ret);
}

static int
ena_free_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);

	if (likely(ret == 0))
		adapter->tx_buf_tag = NULL;

	return (ret);
}

static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Rx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
	    1, 0,				  /* alignment, bounds	     */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg	     */
	    MJUM16BYTES,			  /* maxsize		     */
	    adapter->max_rx_sgl_size,		  /* nsegments		     */
	    MJUM16BYTES,			  /* maxsegsize		     */
	    0,					  /* flags		     */
	    NULL,				  /* lockfunc		     */
	    NULL,				  /* lockarg		     */
	    &adapter->rx_buf_tag);

	return (ret);
}

static int
ena_free_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);

	if (likely(ret == 0))
		adapter->rx_buf_tag = NULL;

	return (ret);
}

/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *tx_ring = que->tx_ring;
	int size, i, err;

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->tx_buffer_info == NULL))
		return (ENOMEM);

	size = sizeof(uint16_t) * tx_ring->ring_size;
	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->free_tx_ids == NULL))
		goto err_buf_info_free;

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
		goto err_tx_ids_free;

	/* Req id stack for TX OOO completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset TX statistics. */
	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
	    sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->acum_pkts = 0;

	/* Make sure that drbr is empty */
	ENA_RING_MTX_LOCK(tx_ring);
	drbr_flush(adapter->ifp, tx_ring->br);
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* ... and create the buffer DMA maps */
	for (i = 0; i < tx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
		    &tx_ring->tx_buffer_info[i].map_head);
		if (unlikely(err != 0)) {
			ena_trace(ENA_ALERT,
			    "Unable to create Tx DMA map_head for buffer %d\n",
			    i);
			goto err_buf_info_unmap;
		}
		tx_ring->tx_buffer_info[i].seg_mapped = false;

		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
		    &tx_ring->tx_buffer_info[i].map_seg);
		if (unlikely(err != 0)) {
			ena_trace(ENA_ALERT,
			    "Unable to create Tx DMA map_seg for buffer %d\n",
			    i);
			goto err_buf_info_head_unmap;
		}
		tx_ring->tx_buffer_info[i].head_mapped = false;
	}

	/* Allocate taskqueues */
	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
	if (unlikely(tx_ring->enqueue_tq == NULL)) {
		ena_trace(ENA_ALERT,
		    "Unable to create taskqueue for enqueue task\n");
		i = tx_ring->ring_size;
		goto err_buf_info_unmap;
	}

	tx_ring->running = true;

	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
	    "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);

	return (0);

err_buf_info_head_unmap:
	bus_dmamap_destroy(adapter->tx_buf_tag,
	    tx_ring->tx_buffer_info[i].map_head);
err_buf_info_unmap:
	while (i--) {
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].map_head);
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].map_seg);
	}
	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
err_tx_ids_free:
	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
err_buf_info_free:
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	return (ENOMEM);
}
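
/*
 * A few notes on the setup above, based only on what is visible in this
 * file: error handling unwinds in reverse order of allocation through the
 * err_* labels, so each label frees exactly what was successfully created
 * before the failure.  Every TX slot also gets two DMA maps; judging by the
 * names and the matching teardown in ena_free_tx_resources() below,
 * map_head covers the packet header area associated with
 * push_buf_intermediate_buf while map_seg covers the regular data segments,
 * with head_mapped/seg_mapped recording which of the two is currently
 * loaded.
 */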

/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
	    NULL))
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	taskqueue_free(tx_ring->enqueue_tq);

	ENA_RING_MTX_LOCK(tx_ring);
	/* Flush buffer ring, */
	drbr_flush(adapter->ifp, tx_ring->br);

	/* Free buffer DMA maps, */
	for (int i = 0; i < tx_ring->ring_size; i++) {
		if (tx_ring->tx_buffer_info[i].head_mapped == true) {
			bus_dmamap_sync(adapter->tx_buf_tag,
			    tx_ring->tx_buffer_info[i].map_head,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->tx_buf_tag,
			    tx_ring->tx_buffer_info[i].map_head);
			tx_ring->tx_buffer_info[i].head_mapped = false;
		}
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].map_head);

		if (tx_ring->tx_buffer_info[i].seg_mapped == true) {
			bus_dmamap_sync(adapter->tx_buf_tag,
			    tx_ring->tx_buffer_info[i].map_seg,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->tx_buf_tag,
			    tx_ring->tx_buffer_info[i].map_seg);
			tx_ring->tx_buffer_info[i].seg_mapped = false;
		}
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].map_seg);

		m_freem(tx_ring->tx_buffer_info[i].mbuf);
		tx_ring->tx_buffer_info[i].mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* And free allocated memory. */
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;

	ENA_MEM_FREE(adapter->ena_dev->dmadev,
	    tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc != 0) {
			device_printf(adapter->pdev,
			    "Allocation for Tx Queue %u failed\n", i);
			goto err_setup_tx;
		}
	}

	return (0);

err_setup_tx:
	/* Rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}

static inline int
validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return (0);

	device_printf(rx_ring->adapter->pdev, "Invalid rx req_id: %hu\n",
	    req_id);
	counter_u64_add(rx_ring->rx_stats.bad_req_id, 1);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	rx_ring->adapter->trigger_reset = true;

	return (EFAULT);
}
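
/*
 * validate_rx_req_id() (and its TX counterpart validate_tx_req_id() further
 * down) do not reset the device themselves: they only record a reset reason
 * and raise trigger_reset, and the actual recovery is assumed to happen
 * asynchronously elsewhere in the driver, outside of this section.  A
 * hypothetical caller therefore only pays for the bounds check:
 *
 *	if (unlikely(validate_rx_req_id(rx_ring, req_id) != 0))
 *		return (EFAULT);
 */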

/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *rx_ring = que->rx_ring;
	int size, err, i;

	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

	/*
	 * Alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size += sizeof(struct ena_rx_buffer);

	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	size = sizeof(uint16_t) * rx_ring->ring_size;
	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset RX statistics. */
	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
	    sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* ... and create the buffer DMA maps */
	for (i = 0; i < rx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
		    &(rx_ring->rx_buffer_info[i].map));
		if (err != 0) {
			ena_trace(ENA_ALERT,
			    "Unable to create Rx DMA map for buffer %d\n", i);
			goto err_buf_info_unmap;
		}
	}

	/* Create LRO for the ring */
	if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
		int err = tcp_lro_init(&rx_ring->lro);
		if (err != 0) {
			device_printf(adapter->pdev,
			    "LRO[%d] Initialization failed!\n", qid);
		} else {
			ena_trace(ENA_INFO,
			    "RX Soft LRO[%d] Initialized\n", qid);
			rx_ring->lro.ifp = adapter->ifp;
		}
	}

	return (0);

err_buf_info_unmap:
	while (i--) {
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;
	return (ENOMEM);
}

/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	/* Free buffer DMA maps, */
	for (int i = 0; i < rx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
		m_freem(rx_ring->rx_buffer_info[i].mbuf);
		rx_ring->rx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	/* free LRO resources, */
	tcp_lro_free(&rx_ring->lro);

	/* free allocated memory */
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
}

/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc != 0) {
			device_printf(adapter->pdev,
			    "Allocation for Rx Queue %u failed\n", i);
			goto err_setup_rx;
		}
	}
	return (0);

err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct ena_com_buf *ena_buf;
	bus_dma_segment_t segs[1];
	int nsegs, error;
	int mlen;

	/* if previous allocated frag is not used */
	if (unlikely(rx_info->mbuf != NULL))
		return (0);

	/* Get mbuf using UMA allocator */
	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES);

	if (unlikely(rx_info->mbuf == NULL)) {
		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(rx_info->mbuf == NULL)) {
			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
			return (ENOMEM);
		}
		mlen = MCLBYTES;
	} else {
		mlen = MJUM16BYTES;
	}
	/* Set mbuf length */
	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

	/* Map packets for DMA */
	ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d",
	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely((error != 0) || (nsegs != 1))) {
		ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, "
		    "nsegs: %d\n", error, nsegs);
		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
		goto exit;

	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = segs[0].ds_addr;
	ena_buf->len = mlen;

	ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

	return (0);

exit:
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
	return (EFAULT);
}

static void
ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{

	if (rx_info->mbuf == NULL) {
		ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n");
		return;
	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
}

/**
 * ena_refill_rx_bufs - Refills ring with descriptors
 * @rx_ring: the ring which we want to feed with free descriptors
 * @num: number of descriptors to refill
 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
 **/
static int
ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
{
	struct ena_adapter *adapter = rx_ring->adapter;
	uint16_t next_to_use, req_id;
	uint32_t i;
	int rc;

	ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d",
	    rx_ring->qid);

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC,
		    "RX buffer - next to use: %d", next_to_use);

		req_id = rx_ring->free_rx_ids[next_to_use];
		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
		if (unlikely(rc != 0)) {
			ena_trace(ENA_WARNING,
			    "failed to alloc buffer for rx queue %d\n",
			    rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
		    &rx_info->ena_buf, req_id);
		if (unlikely(rc != 0)) {
			ena_trace(ENA_WARNING,
			    "failed to add buffer for rx queue %d\n",
			    rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
		    rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
		ena_trace(ENA_WARNING,
		    "refilled rx qid %d with only %d mbufs (from %d)\n",
		    rx_ring->qid, i, num);
	}

	if (likely(i != 0)) {
		wmb();
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
	}
	rx_ring->next_to_use = next_to_use;
	return (i);
}
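
/*
 * Refill bookkeeping in ena_refill_rx_bufs() above: next_to_use walks the
 * software ring, while the req_id taken from free_rx_ids[] names the
 * rx_buffer_info slot handed to the hardware, which is what allows
 * completions to come back out of order.  The doorbell is written once for
 * the whole batch, behind wmb(), so the device only sees fully written
 * descriptors.  This describes the code above only; the completion path
 * that returns req_ids to free_rx_ids[] is outside this section.
 */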
11099b8d05b8SZbigniew Bodek rc = ena_refill_rx_bufs(rx_ring, bufs_num); 11109b8d05b8SZbigniew Bodek 11119b8d05b8SZbigniew Bodek if (unlikely(rc != bufs_num)) 11124e8acd84SMarcin Wojtas ena_trace(ENA_WARNING, "refilling Queue %d failed. " 11134e8acd84SMarcin Wojtas "Allocated %d buffers from: %d\n", i, rc, bufs_num); 11149b8d05b8SZbigniew Bodek } 11159b8d05b8SZbigniew Bodek } 11169b8d05b8SZbigniew Bodek 11179b8d05b8SZbigniew Bodek static void 11189b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(struct ena_adapter *adapter) 11199b8d05b8SZbigniew Bodek { 11209b8d05b8SZbigniew Bodek int i; 11219b8d05b8SZbigniew Bodek 11229b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) 11239b8d05b8SZbigniew Bodek ena_free_rx_bufs(adapter, i); 11249b8d05b8SZbigniew Bodek } 11259b8d05b8SZbigniew Bodek 11269b8d05b8SZbigniew Bodek /** 11279b8d05b8SZbigniew Bodek * ena_free_tx_bufs - Free Tx Buffers per Queue 11289b8d05b8SZbigniew Bodek * @adapter: network interface device structure 11299b8d05b8SZbigniew Bodek * @qid: queue index 11309b8d05b8SZbigniew Bodek **/ 11319b8d05b8SZbigniew Bodek static void 11329b8d05b8SZbigniew Bodek ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) 11339b8d05b8SZbigniew Bodek { 11344e8acd84SMarcin Wojtas bool print_once = true; 11359b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 11369b8d05b8SZbigniew Bodek 1137416e8864SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 11389b8d05b8SZbigniew Bodek for (int i = 0; i < tx_ring->ring_size; i++) { 11399b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 11409b8d05b8SZbigniew Bodek 11419b8d05b8SZbigniew Bodek if (tx_info->mbuf == NULL) 11429b8d05b8SZbigniew Bodek continue; 11439b8d05b8SZbigniew Bodek 11444e8acd84SMarcin Wojtas if (print_once) { 11454e8acd84SMarcin Wojtas device_printf(adapter->pdev, 11464e8acd84SMarcin Wojtas "free uncompleted tx mbuf qid %d idx 0x%x", 11474e8acd84SMarcin Wojtas qid, i); 11484e8acd84SMarcin Wojtas print_once = false; 11494e8acd84SMarcin Wojtas } else { 11504e8acd84SMarcin Wojtas ena_trace(ENA_DBG, 11514e8acd84SMarcin Wojtas "free uncompleted tx mbuf qid %d idx 0x%x", 11524e8acd84SMarcin Wojtas qid, i); 11534e8acd84SMarcin Wojtas } 11549b8d05b8SZbigniew Bodek 11554fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) { 11564fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, 1157e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 11584fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 11594fa9e02dSMarcin Wojtas tx_info->map_head); 11604fa9e02dSMarcin Wojtas tx_info->head_mapped = false; 11614fa9e02dSMarcin Wojtas } 11624fa9e02dSMarcin Wojtas 11634fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) { 11644fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, 11654fa9e02dSMarcin Wojtas BUS_DMASYNC_POSTWRITE); 11664fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 11674fa9e02dSMarcin Wojtas tx_info->map_seg); 11684fa9e02dSMarcin Wojtas tx_info->seg_mapped = false; 11694fa9e02dSMarcin Wojtas } 11704fa9e02dSMarcin Wojtas 11719b8d05b8SZbigniew Bodek m_free(tx_info->mbuf); 11729b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 11739b8d05b8SZbigniew Bodek } 1174416e8864SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 11759b8d05b8SZbigniew Bodek } 11769b8d05b8SZbigniew Bodek 11779b8d05b8SZbigniew Bodek static void 11789b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(struct ena_adapter *adapter) 11799b8d05b8SZbigniew Bodek { 11809b8d05b8SZbigniew Bodek 11819b8d05b8SZbigniew Bodek for 
(int i = 0; i < adapter->num_queues; i++) 11829b8d05b8SZbigniew Bodek ena_free_tx_bufs(adapter, i); 11839b8d05b8SZbigniew Bodek } 11849b8d05b8SZbigniew Bodek 11859b8d05b8SZbigniew Bodek static void 11869b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(struct ena_adapter *adapter) 11879b8d05b8SZbigniew Bodek { 11889b8d05b8SZbigniew Bodek uint16_t ena_qid; 11899b8d05b8SZbigniew Bodek int i; 11909b8d05b8SZbigniew Bodek 11919b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 11929b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 11939b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 11949b8d05b8SZbigniew Bodek } 11959b8d05b8SZbigniew Bodek } 11969b8d05b8SZbigniew Bodek 11979b8d05b8SZbigniew Bodek static void 11989b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(struct ena_adapter *adapter) 11999b8d05b8SZbigniew Bodek { 12009b8d05b8SZbigniew Bodek uint16_t ena_qid; 12019b8d05b8SZbigniew Bodek int i; 12029b8d05b8SZbigniew Bodek 12039b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 12049b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 12059b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 12069b8d05b8SZbigniew Bodek } 12079b8d05b8SZbigniew Bodek } 12089b8d05b8SZbigniew Bodek 12099b8d05b8SZbigniew Bodek static void 12109b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(struct ena_adapter *adapter) 12119b8d05b8SZbigniew Bodek { 12125cb9db07SMarcin Wojtas struct ena_que *queue; 12135cb9db07SMarcin Wojtas int i; 12145cb9db07SMarcin Wojtas 12155cb9db07SMarcin Wojtas for (i = 0; i < adapter->num_queues; i++) { 12165cb9db07SMarcin Wojtas queue = &adapter->que[i]; 12175cb9db07SMarcin Wojtas while (taskqueue_cancel(queue->cleanup_tq, 12185cb9db07SMarcin Wojtas &queue->cleanup_task, NULL)) 12195cb9db07SMarcin Wojtas taskqueue_drain(queue->cleanup_tq, 12205cb9db07SMarcin Wojtas &queue->cleanup_task); 12215cb9db07SMarcin Wojtas taskqueue_free(queue->cleanup_tq); 12225cb9db07SMarcin Wojtas } 12235cb9db07SMarcin Wojtas 12249b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(adapter); 12259b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(adapter); 12269b8d05b8SZbigniew Bodek } 12279b8d05b8SZbigniew Bodek 1228fceb9387SMarcin Wojtas static inline int 12299b8d05b8SZbigniew Bodek validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id) 12309b8d05b8SZbigniew Bodek { 12314e8acd84SMarcin Wojtas struct ena_adapter *adapter = tx_ring->adapter; 12329b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = NULL; 12339b8d05b8SZbigniew Bodek 12349b8d05b8SZbigniew Bodek if (likely(req_id < tx_ring->ring_size)) { 12359b8d05b8SZbigniew Bodek tx_info = &tx_ring->tx_buffer_info[req_id]; 12360bdffe59SMarcin Wojtas if (tx_info->mbuf != NULL) 12370bdffe59SMarcin Wojtas return (0); 12384e8acd84SMarcin Wojtas device_printf(adapter->pdev, 12394e8acd84SMarcin Wojtas "tx_info doesn't have valid mbuf\n"); 12404e306999SMarcin Wojtas } 12414e8acd84SMarcin Wojtas 12424e306999SMarcin Wojtas device_printf(adapter->pdev, "Invalid req_id: %hu\n", req_id); 12439b8d05b8SZbigniew Bodek counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); 12449b8d05b8SZbigniew Bodek 12459b8d05b8SZbigniew Bodek return (EFAULT); 12469b8d05b8SZbigniew Bodek } 12479b8d05b8SZbigniew Bodek 12489b8d05b8SZbigniew Bodek static int 12499b8d05b8SZbigniew Bodek ena_create_io_queues(struct ena_adapter *adapter) 12509b8d05b8SZbigniew Bodek { 12519b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 12529b8d05b8SZbigniew Bodek struct ena_com_create_io_ctx ctx; 
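	/*
	 * A note on the indexing used below: the ENA_IO_TXQ_IDX() and
	 * ENA_IO_RXQ_IDX() macros are assumed to interleave the TX and RX
	 * queues of a queue pair, roughly:
	 *
	 *	TX qid of pair i -> 2 * i
	 *	RX qid of pair i -> 2 * i + 1
	 *
	 * while both directions of pair i share the MSI-X vector
	 * ENA_IO_IRQ_IDX(i). See the macro definitions for the authoritative
	 * mapping.
	 */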
12539b8d05b8SZbigniew Bodek struct ena_ring *ring; 12545cb9db07SMarcin Wojtas struct ena_que *queue; 12559b8d05b8SZbigniew Bodek uint16_t ena_qid; 12569b8d05b8SZbigniew Bodek uint32_t msix_vector; 12579b8d05b8SZbigniew Bodek int rc, i; 12589b8d05b8SZbigniew Bodek 12599b8d05b8SZbigniew Bodek /* Create TX queues */ 12609b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 12619b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 12629b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 12639b8d05b8SZbigniew Bodek ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 12649b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 12659b8d05b8SZbigniew Bodek ctx.queue_size = adapter->tx_ring_size; 12669b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 12679b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 12689b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 12690bdffe59SMarcin Wojtas if (rc != 0) { 12709b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 12719b8d05b8SZbigniew Bodek "Failed to create io TX queue #%d rc: %d\n", i, rc); 12729b8d05b8SZbigniew Bodek goto err_tx; 12739b8d05b8SZbigniew Bodek } 12749b8d05b8SZbigniew Bodek ring = &adapter->tx_ring[i]; 12759b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 12769b8d05b8SZbigniew Bodek &ring->ena_com_io_sq, 12779b8d05b8SZbigniew Bodek &ring->ena_com_io_cq); 12780bdffe59SMarcin Wojtas if (rc != 0) { 12799b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 12809b8d05b8SZbigniew Bodek "Failed to get TX queue handlers. TX queue num" 12819b8d05b8SZbigniew Bodek " %d rc: %d\n", i, rc); 12829b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 12839b8d05b8SZbigniew Bodek goto err_tx; 12849b8d05b8SZbigniew Bodek } 12859b8d05b8SZbigniew Bodek } 12869b8d05b8SZbigniew Bodek 12879b8d05b8SZbigniew Bodek /* Create RX queues */ 12889b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 12899b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 12909b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 12919b8d05b8SZbigniew Bodek ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 12929b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 12939b8d05b8SZbigniew Bodek ctx.queue_size = adapter->rx_ring_size; 12949b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 12959b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 12969b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 12973f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 12989b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 12999b8d05b8SZbigniew Bodek "Failed to create io RX queue[%d] rc: %d\n", i, rc); 13009b8d05b8SZbigniew Bodek goto err_rx; 13019b8d05b8SZbigniew Bodek } 13029b8d05b8SZbigniew Bodek 13039b8d05b8SZbigniew Bodek ring = &adapter->rx_ring[i]; 13049b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 13059b8d05b8SZbigniew Bodek &ring->ena_com_io_sq, 13069b8d05b8SZbigniew Bodek &ring->ena_com_io_cq); 13073f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 13089b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 13099b8d05b8SZbigniew Bodek "Failed to get RX queue handlers. 
RX queue num" 13109b8d05b8SZbigniew Bodek " %d rc: %d\n", i, rc); 13119b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 13129b8d05b8SZbigniew Bodek goto err_rx; 13139b8d05b8SZbigniew Bodek } 13149b8d05b8SZbigniew Bodek } 13159b8d05b8SZbigniew Bodek 13165cb9db07SMarcin Wojtas for (i = 0; i < adapter->num_queues; i++) { 13175cb9db07SMarcin Wojtas queue = &adapter->que[i]; 13185cb9db07SMarcin Wojtas 13195cb9db07SMarcin Wojtas TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 13205cb9db07SMarcin Wojtas queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 13215cb9db07SMarcin Wojtas M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 13225cb9db07SMarcin Wojtas 13235cb9db07SMarcin Wojtas taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET, 13245cb9db07SMarcin Wojtas "%s queue %d cleanup", 13255cb9db07SMarcin Wojtas device_get_nameunit(adapter->pdev), i); 13265cb9db07SMarcin Wojtas } 13275cb9db07SMarcin Wojtas 13289b8d05b8SZbigniew Bodek return (0); 13299b8d05b8SZbigniew Bodek 13309b8d05b8SZbigniew Bodek err_rx: 13319b8d05b8SZbigniew Bodek while (i--) 13329b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 13339b8d05b8SZbigniew Bodek i = adapter->num_queues; 13349b8d05b8SZbigniew Bodek err_tx: 13359b8d05b8SZbigniew Bodek while (i--) 13369b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 13379b8d05b8SZbigniew Bodek 13389b8d05b8SZbigniew Bodek return (ENXIO); 13399b8d05b8SZbigniew Bodek } 13409b8d05b8SZbigniew Bodek 13419b8d05b8SZbigniew Bodek /** 13429b8d05b8SZbigniew Bodek * ena_tx_cleanup - clear sent packets and corresponding descriptors 13439b8d05b8SZbigniew Bodek * @tx_ring: ring for which we want to clean packets 13449b8d05b8SZbigniew Bodek * 13459b8d05b8SZbigniew Bodek * Once packets are sent, we ask the device in a loop for no longer used 13469b8d05b8SZbigniew Bodek * descriptors. We find the related mbuf chain in a map (index in an array) 13479b8d05b8SZbigniew Bodek * and free it, then update ring state. 13489b8d05b8SZbigniew Bodek * This is performed in "endless" loop, updating ring pointers every 13499b8d05b8SZbigniew Bodek * TX_COMMIT. The first check of free descriptor is performed before the actual 13509b8d05b8SZbigniew Bodek * loop, then repeated at the loop end. 
13519b8d05b8SZbigniew Bodek **/ 13529b8d05b8SZbigniew Bodek static int 13539b8d05b8SZbigniew Bodek ena_tx_cleanup(struct ena_ring *tx_ring) 13549b8d05b8SZbigniew Bodek { 13559b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 13569b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 13579b8d05b8SZbigniew Bodek uint16_t next_to_clean; 13589b8d05b8SZbigniew Bodek uint16_t req_id; 13599b8d05b8SZbigniew Bodek uint16_t ena_qid; 13609b8d05b8SZbigniew Bodek unsigned int total_done = 0; 13619b8d05b8SZbigniew Bodek int rc; 13629b8d05b8SZbigniew Bodek int commit = TX_COMMIT; 13639b8d05b8SZbigniew Bodek int budget = TX_BUDGET; 13649b8d05b8SZbigniew Bodek int work_done; 13655cb9db07SMarcin Wojtas bool above_thresh; 13669b8d05b8SZbigniew Bodek 13679b8d05b8SZbigniew Bodek adapter = tx_ring->que->adapter; 13689b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 13699b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 13709b8d05b8SZbigniew Bodek next_to_clean = tx_ring->next_to_clean; 13719b8d05b8SZbigniew Bodek 13729b8d05b8SZbigniew Bodek do { 13739b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info; 13749b8d05b8SZbigniew Bodek struct mbuf *mbuf; 13759b8d05b8SZbigniew Bodek 13769b8d05b8SZbigniew Bodek rc = ena_com_tx_comp_req_id_get(io_cq, &req_id); 13773f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 13789b8d05b8SZbigniew Bodek break; 13799b8d05b8SZbigniew Bodek 13809b8d05b8SZbigniew Bodek rc = validate_tx_req_id(tx_ring, req_id); 13813f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 13829b8d05b8SZbigniew Bodek break; 13839b8d05b8SZbigniew Bodek 13849b8d05b8SZbigniew Bodek tx_info = &tx_ring->tx_buffer_info[req_id]; 13859b8d05b8SZbigniew Bodek 13869b8d05b8SZbigniew Bodek mbuf = tx_info->mbuf; 13879b8d05b8SZbigniew Bodek 13889b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 13899b8d05b8SZbigniew Bodek bintime_clear(&tx_info->timestamp); 13909b8d05b8SZbigniew Bodek 13919b8d05b8SZbigniew Bodek /* Map is no longer required */ 13924fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) { 13934fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, 1394e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 13954fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 13964fa9e02dSMarcin Wojtas tx_info->map_head); 13974fa9e02dSMarcin Wojtas tx_info->head_mapped = false; 13984fa9e02dSMarcin Wojtas } 13994fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) { 14004fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, 14014fa9e02dSMarcin Wojtas BUS_DMASYNC_POSTWRITE); 14024fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 14034fa9e02dSMarcin Wojtas tx_info->map_seg); 14044fa9e02dSMarcin Wojtas tx_info->seg_mapped = false; 14059b8d05b8SZbigniew Bodek } 14069b8d05b8SZbigniew Bodek 14074e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed", 14084e8acd84SMarcin Wojtas tx_ring->qid, mbuf); 14094e8acd84SMarcin Wojtas 14109b8d05b8SZbigniew Bodek m_freem(mbuf); 14119b8d05b8SZbigniew Bodek 14129b8d05b8SZbigniew Bodek total_done += tx_info->tx_descs; 14139b8d05b8SZbigniew Bodek 14149b8d05b8SZbigniew Bodek tx_ring->free_tx_ids[next_to_clean] = req_id; 14159b8d05b8SZbigniew Bodek next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, 14169b8d05b8SZbigniew Bodek tx_ring->ring_size); 14179b8d05b8SZbigniew Bodek 14183f9ed7abSMarcin Wojtas if (unlikely(--commit == 0)) { 14199b8d05b8SZbigniew Bodek commit = TX_COMMIT; 14209b8d05b8SZbigniew Bodek /* update ring state every TX_COMMIT descriptor */ 14219b8d05b8SZbigniew Bodek 
tx_ring->next_to_clean = next_to_clean; 14220bdffe59SMarcin Wojtas ena_com_comp_ack( 14230bdffe59SMarcin Wojtas &adapter->ena_dev->io_sq_queues[ena_qid], 14240bdffe59SMarcin Wojtas total_done); 14259b8d05b8SZbigniew Bodek ena_com_update_dev_comp_head(io_cq); 14269b8d05b8SZbigniew Bodek total_done = 0; 14279b8d05b8SZbigniew Bodek } 14283f9ed7abSMarcin Wojtas } while (likely(--budget)); 14299b8d05b8SZbigniew Bodek 14309b8d05b8SZbigniew Bodek work_done = TX_BUDGET - budget; 14319b8d05b8SZbigniew Bodek 14324e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d", 14334e8acd84SMarcin Wojtas tx_ring->qid, work_done); 14344e8acd84SMarcin Wojtas 14359b8d05b8SZbigniew Bodek /* If there is still something to commit update ring state */ 14363f9ed7abSMarcin Wojtas if (likely(commit != TX_COMMIT)) { 14379b8d05b8SZbigniew Bodek tx_ring->next_to_clean = next_to_clean; 14380bdffe59SMarcin Wojtas ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], 14390bdffe59SMarcin Wojtas total_done); 14409b8d05b8SZbigniew Bodek ena_com_update_dev_comp_head(io_cq); 14419b8d05b8SZbigniew Bodek } 14429b8d05b8SZbigniew Bodek 14435cb9db07SMarcin Wojtas /* 14445cb9db07SMarcin Wojtas * Need to make the rings circular update visible to 14455cb9db07SMarcin Wojtas * ena_xmit_mbuf() before checking for tx_ring->running. 14465cb9db07SMarcin Wojtas */ 14475cb9db07SMarcin Wojtas mb(); 14485cb9db07SMarcin Wojtas 14495cb9db07SMarcin Wojtas above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 14505cb9db07SMarcin Wojtas ENA_TX_RESUME_THRESH); 14515cb9db07SMarcin Wojtas if (unlikely(!tx_ring->running && above_thresh)) { 14525cb9db07SMarcin Wojtas ENA_RING_MTX_LOCK(tx_ring); 14535cb9db07SMarcin Wojtas above_thresh = 14545cb9db07SMarcin Wojtas ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 14555cb9db07SMarcin Wojtas ENA_TX_RESUME_THRESH); 14565cb9db07SMarcin Wojtas if (!tx_ring->running && above_thresh) { 14575cb9db07SMarcin Wojtas tx_ring->running = true; 14585cb9db07SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); 14595cb9db07SMarcin Wojtas taskqueue_enqueue(tx_ring->enqueue_tq, 14605cb9db07SMarcin Wojtas &tx_ring->enqueue_task); 14615cb9db07SMarcin Wojtas } 14625cb9db07SMarcin Wojtas ENA_RING_MTX_UNLOCK(tx_ring); 14635cb9db07SMarcin Wojtas } 14649b8d05b8SZbigniew Bodek 14659b8d05b8SZbigniew Bodek return (work_done); 14669b8d05b8SZbigniew Bodek } 14679b8d05b8SZbigniew Bodek 14689b8d05b8SZbigniew Bodek static void 14699b8d05b8SZbigniew Bodek ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, 14709b8d05b8SZbigniew Bodek struct mbuf *mbuf) 14719b8d05b8SZbigniew Bodek { 14729b8d05b8SZbigniew Bodek struct ena_adapter *adapter = rx_ring->adapter; 14739b8d05b8SZbigniew Bodek 14743f9ed7abSMarcin Wojtas if (likely(adapter->rss_support)) { 14759b8d05b8SZbigniew Bodek mbuf->m_pkthdr.flowid = ena_rx_ctx->hash; 14769b8d05b8SZbigniew Bodek 14779b8d05b8SZbigniew Bodek if (ena_rx_ctx->frag && 1478bfea0e93SMarcin Wojtas (ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN)) { 14799b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); 14809b8d05b8SZbigniew Bodek return; 14819b8d05b8SZbigniew Bodek } 14829b8d05b8SZbigniew Bodek 14839b8d05b8SZbigniew Bodek switch (ena_rx_ctx->l3_proto) { 14849b8d05b8SZbigniew Bodek case ENA_ETH_IO_L3_PROTO_IPV4: 14859b8d05b8SZbigniew Bodek switch (ena_rx_ctx->l4_proto) { 14869b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_TCP: 14879b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4); 
14889b8d05b8SZbigniew Bodek break; 14899b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_UDP: 14909b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4); 14919b8d05b8SZbigniew Bodek break; 14929b8d05b8SZbigniew Bodek default: 14939b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4); 14949b8d05b8SZbigniew Bodek } 14959b8d05b8SZbigniew Bodek break; 14969b8d05b8SZbigniew Bodek case ENA_ETH_IO_L3_PROTO_IPV6: 14979b8d05b8SZbigniew Bodek switch (ena_rx_ctx->l4_proto) { 14989b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_TCP: 14999b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6); 15009b8d05b8SZbigniew Bodek break; 15019b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_UDP: 15029b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6); 15039b8d05b8SZbigniew Bodek break; 15049b8d05b8SZbigniew Bodek default: 15059b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6); 15069b8d05b8SZbigniew Bodek } 15079b8d05b8SZbigniew Bodek break; 15089b8d05b8SZbigniew Bodek case ENA_ETH_IO_L3_PROTO_UNKNOWN: 15099b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); 15109b8d05b8SZbigniew Bodek break; 15119b8d05b8SZbigniew Bodek default: 15129b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); 15139b8d05b8SZbigniew Bodek } 15149b8d05b8SZbigniew Bodek } else { 15159b8d05b8SZbigniew Bodek mbuf->m_pkthdr.flowid = rx_ring->qid; 15169b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); 15179b8d05b8SZbigniew Bodek } 15189b8d05b8SZbigniew Bodek } 15199b8d05b8SZbigniew Bodek 15209b8d05b8SZbigniew Bodek /** 15219b8d05b8SZbigniew Bodek * ena_rx_mbuf - assemble mbuf from descriptors 15229b8d05b8SZbigniew Bodek * @rx_ring: ring for which we want to clean packets 15239b8d05b8SZbigniew Bodek * @ena_bufs: buffer info 15249b8d05b8SZbigniew Bodek * @ena_rx_ctx: metadata for this packet(s) 152543fefd16SMarcin Wojtas * @next_to_clean: ring pointer, will be updated only upon success 15269b8d05b8SZbigniew Bodek * 15279b8d05b8SZbigniew Bodek **/ 15289b8d05b8SZbigniew Bodek static struct mbuf* 15299b8d05b8SZbigniew Bodek ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, 15309b8d05b8SZbigniew Bodek struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean) 15319b8d05b8SZbigniew Bodek { 15329b8d05b8SZbigniew Bodek struct mbuf *mbuf; 15339b8d05b8SZbigniew Bodek struct ena_rx_buffer *rx_info; 15349b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 15359b8d05b8SZbigniew Bodek unsigned int descs = ena_rx_ctx->descs; 1536c51a229cSMarcin Wojtas int rc; 153743fefd16SMarcin Wojtas uint16_t ntc, len, req_id, buf = 0; 15389b8d05b8SZbigniew Bodek 153943fefd16SMarcin Wojtas ntc = *next_to_clean; 15409b8d05b8SZbigniew Bodek adapter = rx_ring->adapter; 15419b8d05b8SZbigniew Bodek 154243fefd16SMarcin Wojtas len = ena_bufs[buf].len; 154343fefd16SMarcin Wojtas req_id = ena_bufs[buf].req_id; 1544c51a229cSMarcin Wojtas rc = validate_rx_req_id(rx_ring, req_id); 1545c51a229cSMarcin Wojtas if (unlikely(rc != 0)) 1546c51a229cSMarcin Wojtas return (NULL); 1547c51a229cSMarcin Wojtas 154843fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 15491d65b4c0SMarcin Wojtas if (unlikely(rx_info->mbuf == NULL)) { 15501d65b4c0SMarcin Wojtas device_printf(adapter->pdev, "NULL mbuf in rx_info"); 15511d65b4c0SMarcin Wojtas return (NULL); 15521d65b4c0SMarcin Wojtas } 155343fefd16SMarcin Wojtas 15549b8d05b8SZbigniew Bodek ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx", 15559b8d05b8SZbigniew Bodek rx_info, rx_info->mbuf, 
	    (uintmax_t)rx_info->ena_buf.paddr);

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	mbuf = rx_info->mbuf;
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = len;
	mbuf->m_len = len;
	mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp;

	/* Fill the mbuf with the hash key and its interpretation, for optimization */
	ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);

	ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d",
	    mbuf, mbuf->m_flags, mbuf->m_pkthdr.len);

	/* DMA address is not needed anymore, unmap it */
	bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);

	rx_info->mbuf = NULL;
	rx_ring->free_rx_ids[ntc] = req_id;
	ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);

	/*
	 * While there is more than one descriptor for the received packet,
	 * append the remaining mbufs to the main one.
	 */
	while (--descs) {
		++buf;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc != 0)) {
			/*
			 * If the req_id is invalid, then the device will be
			 * reset. In that case we must free all mbufs that
			 * were already gathered.
			 */
			m_freem(mbuf);
			return (NULL);
		}
		rx_info = &rx_ring->rx_buffer_info[req_id];

		if (unlikely(rx_info->mbuf == NULL)) {
			device_printf(adapter->pdev, "NULL mbuf in rx_info");
			/*
			 * If one of the required mbufs was not allocated yet,
			 * we can break here.
			 * All descriptors used so far will be reallocated
			 * later and the unused mbufs can be reused.
			 * The next_to_clean pointer will not be updated in
			 * case of an error, so the caller should advance it
			 * manually in the error handling routine to keep it
			 * up to date with the hw ring.
160943fefd16SMarcin Wojtas */ 161043fefd16SMarcin Wojtas m_freem(mbuf); 161143fefd16SMarcin Wojtas return (NULL); 161243fefd16SMarcin Wojtas } 161343fefd16SMarcin Wojtas 1614e8073738SMarcin Wojtas bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 1615e8073738SMarcin Wojtas BUS_DMASYNC_POSTREAD); 16163f9ed7abSMarcin Wojtas if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) { 16179b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); 16189b8d05b8SZbigniew Bodek ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p", 16199b8d05b8SZbigniew Bodek mbuf); 16209b8d05b8SZbigniew Bodek } 16214e8acd84SMarcin Wojtas 16224e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, 16234e8acd84SMarcin Wojtas "rx mbuf updated. len %d", mbuf->m_pkthdr.len); 16244e8acd84SMarcin Wojtas 16259b8d05b8SZbigniew Bodek /* Free already appended mbuf, it won't be useful anymore */ 16269b8d05b8SZbigniew Bodek bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); 16279b8d05b8SZbigniew Bodek m_freem(rx_info->mbuf); 16289b8d05b8SZbigniew Bodek rx_info->mbuf = NULL; 16299b8d05b8SZbigniew Bodek 163043fefd16SMarcin Wojtas rx_ring->free_rx_ids[ntc] = req_id; 163143fefd16SMarcin Wojtas ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); 16329b8d05b8SZbigniew Bodek } 16339b8d05b8SZbigniew Bodek 163443fefd16SMarcin Wojtas *next_to_clean = ntc; 163543fefd16SMarcin Wojtas 16369b8d05b8SZbigniew Bodek return (mbuf); 16379b8d05b8SZbigniew Bodek } 16389b8d05b8SZbigniew Bodek 16399b8d05b8SZbigniew Bodek /** 16409b8d05b8SZbigniew Bodek * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum 16419b8d05b8SZbigniew Bodek **/ 16429b8d05b8SZbigniew Bodek static inline void 16439b8d05b8SZbigniew Bodek ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, 16449b8d05b8SZbigniew Bodek struct mbuf *mbuf) 16459b8d05b8SZbigniew Bodek { 16469b8d05b8SZbigniew Bodek 16479b8d05b8SZbigniew Bodek /* if IP and error */ 16483f9ed7abSMarcin Wojtas if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && 16493f9ed7abSMarcin Wojtas ena_rx_ctx->l3_csum_err)) { 16509b8d05b8SZbigniew Bodek /* ipv4 checksum error */ 16519b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags = 0; 16529b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.bad_csum, 1); 16534e8acd84SMarcin Wojtas ena_trace(ENA_DBG, "RX IPv4 header checksum error"); 16549b8d05b8SZbigniew Bodek return; 16559b8d05b8SZbigniew Bodek } 16569b8d05b8SZbigniew Bodek 16579b8d05b8SZbigniew Bodek /* if TCP/UDP */ 16589b8d05b8SZbigniew Bodek if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || 16599b8d05b8SZbigniew Bodek (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) { 16609b8d05b8SZbigniew Bodek if (ena_rx_ctx->l4_csum_err) { 16619b8d05b8SZbigniew Bodek /* TCP/UDP checksum error */ 16629b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags = 0; 16639b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.bad_csum, 1); 16644e8acd84SMarcin Wojtas ena_trace(ENA_DBG, "RX L4 checksum error"); 16659b8d05b8SZbigniew Bodek } else { 16669b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 16679b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 16689b8d05b8SZbigniew Bodek } 16699b8d05b8SZbigniew Bodek } 16709b8d05b8SZbigniew Bodek } 16719b8d05b8SZbigniew Bodek 16729b8d05b8SZbigniew Bodek /** 16739b8d05b8SZbigniew Bodek * ena_rx_cleanup - handle rx irq 16749b8d05b8SZbigniew Bodek * @arg: ring for which irq is being handled 16759b8d05b8SZbigniew Bodek **/ 16769b8d05b8SZbigniew Bodek static int 
16779b8d05b8SZbigniew Bodek ena_rx_cleanup(struct ena_ring *rx_ring) 16789b8d05b8SZbigniew Bodek { 16799b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 16809b8d05b8SZbigniew Bodek struct mbuf *mbuf; 16819b8d05b8SZbigniew Bodek struct ena_com_rx_ctx ena_rx_ctx; 16829b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 16839b8d05b8SZbigniew Bodek struct ena_com_io_sq* io_sq; 16849b8d05b8SZbigniew Bodek if_t ifp; 16859b8d05b8SZbigniew Bodek uint16_t ena_qid; 16869b8d05b8SZbigniew Bodek uint16_t next_to_clean; 16879b8d05b8SZbigniew Bodek uint32_t refill_required; 16889b8d05b8SZbigniew Bodek uint32_t refill_threshold; 16899b8d05b8SZbigniew Bodek uint32_t do_if_input = 0; 16909b8d05b8SZbigniew Bodek unsigned int qid; 169143fefd16SMarcin Wojtas int rc, i; 16929b8d05b8SZbigniew Bodek int budget = RX_BUDGET; 16939b8d05b8SZbigniew Bodek 16949b8d05b8SZbigniew Bodek adapter = rx_ring->que->adapter; 16959b8d05b8SZbigniew Bodek ifp = adapter->ifp; 16969b8d05b8SZbigniew Bodek qid = rx_ring->que->id; 16979b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(qid); 16989b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 16999b8d05b8SZbigniew Bodek io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; 17009b8d05b8SZbigniew Bodek next_to_clean = rx_ring->next_to_clean; 17019b8d05b8SZbigniew Bodek 17024e8acd84SMarcin Wojtas ena_trace(ENA_DBG, "rx: qid %d", qid); 17034e8acd84SMarcin Wojtas 17049b8d05b8SZbigniew Bodek do { 17059b8d05b8SZbigniew Bodek ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 17069b8d05b8SZbigniew Bodek ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size; 17079b8d05b8SZbigniew Bodek ena_rx_ctx.descs = 0; 1708e8073738SMarcin Wojtas bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, 1709e8073738SMarcin Wojtas io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD); 17109b8d05b8SZbigniew Bodek rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx); 17119b8d05b8SZbigniew Bodek 17120bdffe59SMarcin Wojtas if (unlikely(rc != 0)) 17139b8d05b8SZbigniew Bodek goto error; 17149b8d05b8SZbigniew Bodek 17159b8d05b8SZbigniew Bodek if (unlikely(ena_rx_ctx.descs == 0)) 17169b8d05b8SZbigniew Bodek break; 17179b8d05b8SZbigniew Bodek 17184e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. 
" 17194e8acd84SMarcin Wojtas "descs #: %d l3 proto %d l4 proto %d hash: %x", 17204e8acd84SMarcin Wojtas rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, 17214e8acd84SMarcin Wojtas ena_rx_ctx.l4_proto, ena_rx_ctx.hash); 17224e8acd84SMarcin Wojtas 17239b8d05b8SZbigniew Bodek /* Receive mbuf from the ring */ 17249b8d05b8SZbigniew Bodek mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs, 17259b8d05b8SZbigniew Bodek &ena_rx_ctx, &next_to_clean); 1726e8073738SMarcin Wojtas bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, 1727e8073738SMarcin Wojtas io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD); 17289b8d05b8SZbigniew Bodek /* Exit if we failed to retrieve a buffer */ 17290bdffe59SMarcin Wojtas if (unlikely(mbuf == NULL)) { 173043fefd16SMarcin Wojtas for (i = 0; i < ena_rx_ctx.descs; ++i) { 173143fefd16SMarcin Wojtas rx_ring->free_rx_ids[next_to_clean] = 173243fefd16SMarcin Wojtas rx_ring->ena_bufs[i].req_id; 173343fefd16SMarcin Wojtas next_to_clean = 173443fefd16SMarcin Wojtas ENA_RX_RING_IDX_NEXT(next_to_clean, 173543fefd16SMarcin Wojtas rx_ring->ring_size); 173643fefd16SMarcin Wojtas 173743fefd16SMarcin Wojtas } 17389b8d05b8SZbigniew Bodek break; 17399b8d05b8SZbigniew Bodek } 17409b8d05b8SZbigniew Bodek 17410bdffe59SMarcin Wojtas if (((ifp->if_capenable & IFCAP_RXCSUM) != 0) || 17420bdffe59SMarcin Wojtas ((ifp->if_capenable & IFCAP_RXCSUM_IPV6) != 0)) { 17439b8d05b8SZbigniew Bodek ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf); 17449b8d05b8SZbigniew Bodek } 17459b8d05b8SZbigniew Bodek 174630217e2dSMarcin Wojtas counter_enter(); 174730217e2dSMarcin Wojtas counter_u64_add_protected(rx_ring->rx_stats.bytes, 174830217e2dSMarcin Wojtas mbuf->m_pkthdr.len); 174930217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.rx_bytes, 175030217e2dSMarcin Wojtas mbuf->m_pkthdr.len); 175130217e2dSMarcin Wojtas counter_exit(); 17529b8d05b8SZbigniew Bodek /* 17539b8d05b8SZbigniew Bodek * LRO is only for IP/TCP packets and TCP checksum of the packet 17549b8d05b8SZbigniew Bodek * should be computed by hardware. 
17559b8d05b8SZbigniew Bodek */ 17569b8d05b8SZbigniew Bodek do_if_input = 1; 17570bdffe59SMarcin Wojtas if (((ifp->if_capenable & IFCAP_LRO) != 0) && 17580bdffe59SMarcin Wojtas ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && 17590bdffe59SMarcin Wojtas (ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP)) { 17609b8d05b8SZbigniew Bodek /* 17619b8d05b8SZbigniew Bodek * Send to the stack if: 17629b8d05b8SZbigniew Bodek * - LRO not enabled, or 17639b8d05b8SZbigniew Bodek * - no LRO resources, or 17649b8d05b8SZbigniew Bodek * - lro enqueue fails 17659b8d05b8SZbigniew Bodek */ 17660bdffe59SMarcin Wojtas if ((rx_ring->lro.lro_cnt != 0) && 17670bdffe59SMarcin Wojtas (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)) 17689b8d05b8SZbigniew Bodek do_if_input = 0; 17699b8d05b8SZbigniew Bodek } 17700bdffe59SMarcin Wojtas if (do_if_input != 0) { 17710bdffe59SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, 17720bdffe59SMarcin Wojtas "calling if_input() with mbuf %p", mbuf); 17739b8d05b8SZbigniew Bodek (*ifp->if_input)(ifp, mbuf); 17749b8d05b8SZbigniew Bodek } 17759b8d05b8SZbigniew Bodek 177630217e2dSMarcin Wojtas counter_enter(); 177730217e2dSMarcin Wojtas counter_u64_add_protected(rx_ring->rx_stats.cnt, 1); 177830217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.rx_packets, 1); 177930217e2dSMarcin Wojtas counter_exit(); 17809b8d05b8SZbigniew Bodek } while (--budget); 17819b8d05b8SZbigniew Bodek 17829b8d05b8SZbigniew Bodek rx_ring->next_to_clean = next_to_clean; 17839b8d05b8SZbigniew Bodek 1784a195fab0SMarcin Wojtas refill_required = ena_com_free_desc(io_sq); 178582f5a792SMarcin Wojtas refill_threshold = min_t(int, 178682f5a792SMarcin Wojtas rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, 178782f5a792SMarcin Wojtas ENA_RX_REFILL_THRESH_PACKET); 17889b8d05b8SZbigniew Bodek 17899b8d05b8SZbigniew Bodek if (refill_required > refill_threshold) { 17909b8d05b8SZbigniew Bodek ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 17919b8d05b8SZbigniew Bodek ena_refill_rx_bufs(rx_ring, refill_required); 17929b8d05b8SZbigniew Bodek } 17939b8d05b8SZbigniew Bodek 17949b8d05b8SZbigniew Bodek tcp_lro_flush_all(&rx_ring->lro); 17959b8d05b8SZbigniew Bodek 17969b8d05b8SZbigniew Bodek return (RX_BUDGET - budget); 17979b8d05b8SZbigniew Bodek 17989b8d05b8SZbigniew Bodek error: 17999b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1); 1800c9b099ecSMarcin Wojtas 1801c9b099ecSMarcin Wojtas /* Too many desc from the device. 
Trigger reset */ 1802c9b099ecSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; 1803c9b099ecSMarcin Wojtas adapter->trigger_reset = true; 1804c9b099ecSMarcin Wojtas 1805c9b099ecSMarcin Wojtas return (0); 18069b8d05b8SZbigniew Bodek } 18079b8d05b8SZbigniew Bodek 18089b8d05b8SZbigniew Bodek /********************************************************************* 18099b8d05b8SZbigniew Bodek * 18109b8d05b8SZbigniew Bodek * MSIX & Interrupt Service routine 18119b8d05b8SZbigniew Bodek * 18129b8d05b8SZbigniew Bodek **********************************************************************/ 18139b8d05b8SZbigniew Bodek 18149b8d05b8SZbigniew Bodek /** 18159b8d05b8SZbigniew Bodek * ena_handle_msix - MSIX Interrupt Handler for admin/async queue 18169b8d05b8SZbigniew Bodek * @arg: interrupt number 18179b8d05b8SZbigniew Bodek **/ 18189b8d05b8SZbigniew Bodek static void 18199b8d05b8SZbigniew Bodek ena_intr_msix_mgmnt(void *arg) 18209b8d05b8SZbigniew Bodek { 18219b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 18229b8d05b8SZbigniew Bodek 18239b8d05b8SZbigniew Bodek ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 18249b8d05b8SZbigniew Bodek if (likely(adapter->running)) 18259b8d05b8SZbigniew Bodek ena_com_aenq_intr_handler(adapter->ena_dev, arg); 18269b8d05b8SZbigniew Bodek } 18279b8d05b8SZbigniew Bodek 18289b8d05b8SZbigniew Bodek static void 18295cb9db07SMarcin Wojtas ena_cleanup(void *arg, int pending) 18309b8d05b8SZbigniew Bodek { 18319b8d05b8SZbigniew Bodek struct ena_que *que = arg; 18329b8d05b8SZbigniew Bodek struct ena_adapter *adapter = que->adapter; 18339b8d05b8SZbigniew Bodek if_t ifp = adapter->ifp; 18349b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 18359b8d05b8SZbigniew Bodek struct ena_ring *rx_ring; 18369b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 18379b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 18389b8d05b8SZbigniew Bodek int qid, ena_qid; 18399b8d05b8SZbigniew Bodek int txc, rxc, i; 18409b8d05b8SZbigniew Bodek 18413f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 18429b8d05b8SZbigniew Bodek return; 18439b8d05b8SZbigniew Bodek 18449b8d05b8SZbigniew Bodek ena_trace(ENA_DBG, "MSI-X TX/RX routine"); 18459b8d05b8SZbigniew Bodek 18469b8d05b8SZbigniew Bodek tx_ring = que->tx_ring; 18479b8d05b8SZbigniew Bodek rx_ring = que->rx_ring; 18489b8d05b8SZbigniew Bodek qid = que->id; 18499b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(qid); 18509b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 18519b8d05b8SZbigniew Bodek 1852d12f7bfcSMarcin Wojtas tx_ring->first_interrupt = true; 1853d12f7bfcSMarcin Wojtas rx_ring->first_interrupt = true; 1854d12f7bfcSMarcin Wojtas 18559b8d05b8SZbigniew Bodek for (i = 0; i < CLEAN_BUDGET; ++i) { 18569b8d05b8SZbigniew Bodek rxc = ena_rx_cleanup(rx_ring); 18579b8d05b8SZbigniew Bodek txc = ena_tx_cleanup(tx_ring); 18589b8d05b8SZbigniew Bodek 18593f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 18609b8d05b8SZbigniew Bodek return; 18619b8d05b8SZbigniew Bodek 18620bdffe59SMarcin Wojtas if ((txc != TX_BUDGET) && (rxc != RX_BUDGET)) 18639b8d05b8SZbigniew Bodek break; 18649b8d05b8SZbigniew Bodek } 18659b8d05b8SZbigniew Bodek 18669b8d05b8SZbigniew Bodek /* Signal that work is done and unmask interrupt */ 18679b8d05b8SZbigniew Bodek ena_com_update_intr_reg(&intr_reg, 18689b8d05b8SZbigniew Bodek RX_IRQ_INTERVAL, 18699b8d05b8SZbigniew Bodek TX_IRQ_INTERVAL, 18709b8d05b8SZbigniew Bodek true); 18719b8d05b8SZbigniew 
Bodek ena_com_unmask_intr(io_cq, &intr_reg); 18729b8d05b8SZbigniew Bodek } 18739b8d05b8SZbigniew Bodek 18745cb9db07SMarcin Wojtas /** 18755cb9db07SMarcin Wojtas * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx 18765cb9db07SMarcin Wojtas * @arg: queue 18775cb9db07SMarcin Wojtas **/ 18785cb9db07SMarcin Wojtas static int 18795cb9db07SMarcin Wojtas ena_handle_msix(void *arg) 18805cb9db07SMarcin Wojtas { 18815cb9db07SMarcin Wojtas struct ena_que *queue = arg; 18825cb9db07SMarcin Wojtas struct ena_adapter *adapter = queue->adapter; 18835cb9db07SMarcin Wojtas if_t ifp = adapter->ifp; 18845cb9db07SMarcin Wojtas 18855cb9db07SMarcin Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 18865cb9db07SMarcin Wojtas return (FILTER_STRAY); 18875cb9db07SMarcin Wojtas 18885cb9db07SMarcin Wojtas taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); 18895cb9db07SMarcin Wojtas 18905cb9db07SMarcin Wojtas return (FILTER_HANDLED); 18915cb9db07SMarcin Wojtas } 18925cb9db07SMarcin Wojtas 18939b8d05b8SZbigniew Bodek static int 18949b8d05b8SZbigniew Bodek ena_enable_msix(struct ena_adapter *adapter) 18959b8d05b8SZbigniew Bodek { 18969b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 18978805021aSMarcin Wojtas int msix_vecs, msix_req; 18988805021aSMarcin Wojtas int i, rc = 0; 18999b8d05b8SZbigniew Bodek 19009b8d05b8SZbigniew Bodek /* Reserved the max msix vectors we might need */ 19019b8d05b8SZbigniew Bodek msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues); 19029b8d05b8SZbigniew Bodek 1903cd5d5804SMarcin Wojtas adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1904cd5d5804SMarcin Wojtas M_DEVBUF, M_WAITOK | M_ZERO); 1905cd5d5804SMarcin Wojtas 19064e8acd84SMarcin Wojtas ena_trace(ENA_DBG, "trying to enable MSI-X, vectors: %d", msix_vecs); 19079b8d05b8SZbigniew Bodek 19089b8d05b8SZbigniew Bodek for (i = 0; i < msix_vecs; i++) { 19099b8d05b8SZbigniew Bodek adapter->msix_entries[i].entry = i; 19109b8d05b8SZbigniew Bodek /* Vectors must start from 1 */ 19119b8d05b8SZbigniew Bodek adapter->msix_entries[i].vector = i + 1; 19129b8d05b8SZbigniew Bodek } 19139b8d05b8SZbigniew Bodek 19148805021aSMarcin Wojtas msix_req = msix_vecs; 19159b8d05b8SZbigniew Bodek rc = pci_alloc_msix(dev, &msix_vecs); 19163f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 19179b8d05b8SZbigniew Bodek device_printf(dev, 19189b8d05b8SZbigniew Bodek "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc); 19197d2544e6SMarcin Wojtas 19209b8d05b8SZbigniew Bodek rc = ENOSPC; 19217d2544e6SMarcin Wojtas goto err_msix_free; 19229b8d05b8SZbigniew Bodek } 19239b8d05b8SZbigniew Bodek 19248805021aSMarcin Wojtas if (msix_vecs != msix_req) { 19252b5b60feSMarcin Wojtas if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 19262b5b60feSMarcin Wojtas device_printf(dev, 19272b5b60feSMarcin Wojtas "Not enough number of MSI-x allocated: %d\n", 19282b5b60feSMarcin Wojtas msix_vecs); 19292b5b60feSMarcin Wojtas pci_release_msi(dev); 19302b5b60feSMarcin Wojtas rc = ENOSPC; 19312b5b60feSMarcin Wojtas goto err_msix_free; 19322b5b60feSMarcin Wojtas } 19338805021aSMarcin Wojtas device_printf(dev, "Enable only %d MSI-x (out of %d), reduce " 19348805021aSMarcin Wojtas "the number of queues\n", msix_vecs, msix_req); 19358805021aSMarcin Wojtas adapter->num_queues = msix_vecs - ENA_ADMIN_MSIX_VEC; 19368805021aSMarcin Wojtas } 19378805021aSMarcin Wojtas 19389b8d05b8SZbigniew Bodek adapter->msix_vecs = msix_vecs; 19399b8d05b8SZbigniew Bodek adapter->msix_enabled = true; 19409b8d05b8SZbigniew Bodek 19417d2544e6SMarcin Wojtas return (0); 19427d2544e6SMarcin 
Wojtas 19437d2544e6SMarcin Wojtas err_msix_free: 19447d2544e6SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 19457d2544e6SMarcin Wojtas adapter->msix_entries = NULL; 19467d2544e6SMarcin Wojtas 19479b8d05b8SZbigniew Bodek return (rc); 19489b8d05b8SZbigniew Bodek } 19499b8d05b8SZbigniew Bodek 19509b8d05b8SZbigniew Bodek static void 19519b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(struct ena_adapter *adapter) 19529b8d05b8SZbigniew Bodek { 19539b8d05b8SZbigniew Bodek 19549b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, 19559b8d05b8SZbigniew Bodek ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", 19569b8d05b8SZbigniew Bodek device_get_nameunit(adapter->pdev)); 19579b8d05b8SZbigniew Bodek /* 19589b8d05b8SZbigniew Bodek * Handler is NULL on purpose, it will be set 19599b8d05b8SZbigniew Bodek * when mgmnt interrupt is acquired 19609b8d05b8SZbigniew Bodek */ 19619b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 19629b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 19639b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 19649b8d05b8SZbigniew Bodek adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; 19659b8d05b8SZbigniew Bodek } 19669b8d05b8SZbigniew Bodek 19679b8d05b8SZbigniew Bodek static void 19689b8d05b8SZbigniew Bodek ena_setup_io_intr(struct ena_adapter *adapter) 19699b8d05b8SZbigniew Bodek { 19709b8d05b8SZbigniew Bodek static int last_bind_cpu = -1; 19719b8d05b8SZbigniew Bodek int irq_idx; 19729b8d05b8SZbigniew Bodek 19739b8d05b8SZbigniew Bodek for (int i = 0; i < adapter->num_queues; i++) { 19749b8d05b8SZbigniew Bodek irq_idx = ENA_IO_IRQ_IDX(i); 19759b8d05b8SZbigniew Bodek 19769b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 19779b8d05b8SZbigniew Bodek "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); 19789b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].handler = ena_handle_msix; 19799b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].data = &adapter->que[i]; 19809b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].vector = 19819b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector; 19829b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n", 19839b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector); 1984277f11c4SMarcin Wojtas 19859b8d05b8SZbigniew Bodek /* 1986277f11c4SMarcin Wojtas * We want to bind rings to the corresponding cpu 19879b8d05b8SZbigniew Bodek * using something similar to the RSS round-robin technique. 
19889b8d05b8SZbigniew Bodek */ 19893f9ed7abSMarcin Wojtas if (unlikely(last_bind_cpu < 0)) 19909b8d05b8SZbigniew Bodek last_bind_cpu = CPU_FIRST(); 19919b8d05b8SZbigniew Bodek adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 19929b8d05b8SZbigniew Bodek last_bind_cpu; 19939b8d05b8SZbigniew Bodek last_bind_cpu = CPU_NEXT(last_bind_cpu); 19949b8d05b8SZbigniew Bodek } 19959b8d05b8SZbigniew Bodek } 19969b8d05b8SZbigniew Bodek 19979b8d05b8SZbigniew Bodek static int 19989b8d05b8SZbigniew Bodek ena_request_mgmnt_irq(struct ena_adapter *adapter) 19999b8d05b8SZbigniew Bodek { 20009b8d05b8SZbigniew Bodek struct ena_irq *irq; 20019b8d05b8SZbigniew Bodek unsigned long flags; 20029b8d05b8SZbigniew Bodek int rc, rcc; 20039b8d05b8SZbigniew Bodek 20049b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 20059b8d05b8SZbigniew Bodek 20069b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 20079b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 20089b8d05b8SZbigniew Bodek &irq->vector, flags); 20099b8d05b8SZbigniew Bodek 20103f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 20119b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "could not allocate " 20129b8d05b8SZbigniew Bodek "irq vector: %d\n", irq->vector); 20137d2544e6SMarcin Wojtas return (ENXIO); 20149b8d05b8SZbigniew Bodek } 20159b8d05b8SZbigniew Bodek 20160bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 20170bdffe59SMarcin Wojtas INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, 20180bdffe59SMarcin Wojtas irq->data, &irq->cookie); 20193f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 20209b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "failed to register " 20219b8d05b8SZbigniew Bodek "interrupt handler for irq %ju: %d\n", 20229b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 20237d2544e6SMarcin Wojtas goto err_res_free; 20249b8d05b8SZbigniew Bodek } 20259b8d05b8SZbigniew Bodek irq->requested = true; 20269b8d05b8SZbigniew Bodek 20279b8d05b8SZbigniew Bodek return (rc); 20289b8d05b8SZbigniew Bodek 20297d2544e6SMarcin Wojtas err_res_free: 20304e8acd84SMarcin Wojtas ena_trace(ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n", 20317d2544e6SMarcin Wojtas irq->vector); 20329b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 20339b8d05b8SZbigniew Bodek irq->vector, irq->res); 20343f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 20359b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent while " 20369b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 20379b8d05b8SZbigniew Bodek irq->res = NULL; 20389b8d05b8SZbigniew Bodek 20399b8d05b8SZbigniew Bodek return (rc); 20409b8d05b8SZbigniew Bodek } 20419b8d05b8SZbigniew Bodek 20429b8d05b8SZbigniew Bodek static int 20439b8d05b8SZbigniew Bodek ena_request_io_irq(struct ena_adapter *adapter) 20449b8d05b8SZbigniew Bodek { 20459b8d05b8SZbigniew Bodek struct ena_irq *irq; 20469b8d05b8SZbigniew Bodek unsigned long flags = 0; 20479b8d05b8SZbigniew Bodek int rc = 0, i, rcc; 20489b8d05b8SZbigniew Bodek 20493f9ed7abSMarcin Wojtas if (unlikely(adapter->msix_enabled == 0)) { 20504e8acd84SMarcin Wojtas device_printf(adapter->pdev, 20514e8acd84SMarcin Wojtas "failed to request I/O IRQ: MSI-X is not enabled\n"); 20529b8d05b8SZbigniew Bodek return (EINVAL); 20539b8d05b8SZbigniew Bodek } else { 20549b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 20559b8d05b8SZbigniew Bodek } 20569b8d05b8SZbigniew Bodek 20579b8d05b8SZbigniew Bodek for (i = ENA_IO_IRQ_FIRST_IDX; i < 
	    adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];

		if (unlikely(irq->requested))
			continue;

		irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
		    &irq->vector, flags);
		if (unlikely(irq->res == NULL)) {
			rc = ENOMEM;
			device_printf(adapter->pdev, "could not allocate "
			    "irq vector: %d\n", irq->vector);
			goto err;
		}

		rc = bus_setup_intr(adapter->pdev, irq->res,
		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL,
		    irq->data, &irq->cookie);
		if (unlikely(rc != 0)) {
			device_printf(adapter->pdev, "failed to register "
			    "interrupt handler for irq %ju: %d\n",
			    rman_get_start(irq->res), rc);
			goto err;
		}
		irq->requested = true;

		ena_trace(ENA_INFO, "queue %d - cpu %d\n",
		    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
	}

	return (rc);

err:

	for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
		irq = &adapter->irq_tbl[i];
		rcc = 0;

		/*
		 * Once we have entered the err: section and irq->requested is
		 * true, we free both the interrupt and its resources.
		 */
		if (irq->requested)
			rcc = bus_teardown_intr(adapter->pdev, irq->res,
			    irq->cookie);
		if (unlikely(rcc != 0))
			device_printf(adapter->pdev, "could not release"
			    " irq: %d, error: %d\n", irq->vector, rcc);

		/*
		 * If we entered the err: section without irq->requested set,
		 * we know that it was bus_alloc_resource_any() that needs
		 * cleanup, provided res is not NULL. In case res is NULL, no
		 * work is needed in this iteration.
		 */
		rcc = 0;
		if (irq->res != NULL) {
			rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
			    irq->vector, irq->res);
		}
		if (unlikely(rcc != 0))
			device_printf(adapter->pdev, "dev has no parent while "
			    "releasing res for irq: %d\n", irq->vector);
		irq->requested = false;
		irq->res = NULL;
	}

	return (rc);
}

static void
ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	if (irq->requested) {
		ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
		    irq->vector);
		rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
		if (unlikely(rc != 0))
			device_printf(adapter->pdev, "failed to tear "
			    "down irq: %d\n", irq->vector);
		irq->requested = 0;
	}

	if (irq->res != NULL) {
		ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n",
		    irq->vector);
		rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
		    irq->vector, irq->res);
		irq->res = NULL;
		if (unlikely(rc != 0))
			device_printf(adapter->pdev, "dev has no parent while "
			    "releasing res for irq: %d\n", irq->vector);
	}
}

static void
ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int rc;

	for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		if (irq->requested) {
			ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n",
			    irq->vector);
			rc = bus_teardown_intr(adapter->pdev, irq->res,
			    irq->cookie);
			if (unlikely(rc != 0)) {
				device_printf(adapter->pdev, "failed to tear "
				    "down irq: %d\n", irq->vector);
			}
			irq->requested = 0;
		}

		if (irq->res != NULL) {
			ena_trace(ENA_INFO | ENA_IOQ,
			    "release resource irq: %d\n", irq->vector);
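			/*
			 * The interrupt handler for this vector has already
			 * been torn down above (if it was installed), so the
			 * bare IRQ resource can now be returned to the bus.
			 */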
21749b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 21759b8d05b8SZbigniew Bodek irq->vector, irq->res); 21769b8d05b8SZbigniew Bodek irq->res = NULL; 21773f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 21789b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent" 21799b8d05b8SZbigniew Bodek " while releasing res for irq: %d\n", 21809b8d05b8SZbigniew Bodek irq->vector); 21819b8d05b8SZbigniew Bodek } 21829b8d05b8SZbigniew Bodek } 21839b8d05b8SZbigniew Bodek } 21849b8d05b8SZbigniew Bodek } 21859b8d05b8SZbigniew Bodek 21869b8d05b8SZbigniew Bodek static void 21879b8d05b8SZbigniew Bodek ena_free_irqs(struct ena_adapter* adapter) 21889b8d05b8SZbigniew Bodek { 21899b8d05b8SZbigniew Bodek 21909b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 21919b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(adapter); 21929b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 21939b8d05b8SZbigniew Bodek } 21949b8d05b8SZbigniew Bodek 21959b8d05b8SZbigniew Bodek static void 21969b8d05b8SZbigniew Bodek ena_disable_msix(struct ena_adapter *adapter) 21979b8d05b8SZbigniew Bodek { 21989b8d05b8SZbigniew Bodek 21999b8d05b8SZbigniew Bodek pci_release_msi(adapter->pdev); 22009b8d05b8SZbigniew Bodek 22019b8d05b8SZbigniew Bodek adapter->msix_vecs = 0; 2202cd5d5804SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 22039b8d05b8SZbigniew Bodek adapter->msix_entries = NULL; 22049b8d05b8SZbigniew Bodek } 22059b8d05b8SZbigniew Bodek 22069b8d05b8SZbigniew Bodek static void 22079b8d05b8SZbigniew Bodek ena_unmask_all_io_irqs(struct ena_adapter *adapter) 22089b8d05b8SZbigniew Bodek { 22099b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 22109b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 22119b8d05b8SZbigniew Bodek uint16_t ena_qid; 22129b8d05b8SZbigniew Bodek int i; 22139b8d05b8SZbigniew Bodek 22149b8d05b8SZbigniew Bodek /* Unmask interrupts for all queues */ 22159b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 22169b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 22179b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 22189b8d05b8SZbigniew Bodek ena_com_update_intr_reg(&intr_reg, 0, 0, true); 22199b8d05b8SZbigniew Bodek ena_com_unmask_intr(io_cq, &intr_reg); 22209b8d05b8SZbigniew Bodek } 22219b8d05b8SZbigniew Bodek } 22229b8d05b8SZbigniew Bodek 22239b8d05b8SZbigniew Bodek /* Configure the Rx forwarding */ 22240bdffe59SMarcin Wojtas static int 22250bdffe59SMarcin Wojtas ena_rss_configure(struct ena_adapter *adapter) 22269b8d05b8SZbigniew Bodek { 22279b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 22289b8d05b8SZbigniew Bodek int rc; 22299b8d05b8SZbigniew Bodek 22309b8d05b8SZbigniew Bodek /* Set indirect table */ 22319b8d05b8SZbigniew Bodek rc = ena_com_indirect_table_set(ena_dev); 22320bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 22330bdffe59SMarcin Wojtas return (rc); 22349b8d05b8SZbigniew Bodek 22359b8d05b8SZbigniew Bodek /* Configure hash function (if supported) */ 22369b8d05b8SZbigniew Bodek rc = ena_com_set_hash_function(ena_dev); 22370bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 22380bdffe59SMarcin Wojtas return (rc); 22399b8d05b8SZbigniew Bodek 22409b8d05b8SZbigniew Bodek /* Configure hash inputs (if supported) */ 22419b8d05b8SZbigniew Bodek rc = ena_com_set_hash_ctrl(ena_dev); 22420bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 22430bdffe59SMarcin Wojtas return (rc); 22449b8d05b8SZbigniew Bodek 22450bdffe59SMarcin Wojtas return (0); 
static int
ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	if (likely(adapter->rss_support)) {
		rc = ena_rss_configure(adapter);
		if (rc != 0)
			return (rc);
	}

	rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
	if (unlikely(rc != 0))
		return (rc);

	ena_refill_all_rx_bufs(adapter);
	ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(adapter->hw_stats));

	return (0);
}

static int
ena_up(struct ena_adapter *adapter)
{
	int rc = 0;

	if (unlikely(device_is_attached(adapter->pdev) == 0)) {
		device_printf(adapter->pdev, "device is not attached!\n");
		return (ENXIO);
	}

	if (unlikely(!adapter->running)) {
		device_printf(adapter->pdev, "device is not running!\n");
		return (ENXIO);
	}

	if (!adapter->up) {
		device_printf(adapter->pdev, "device is going UP\n");

		/* setup interrupts for IO queues */
		ena_setup_io_intr(adapter);
		rc = ena_request_io_irq(adapter);
		if (unlikely(rc != 0)) {
			ena_trace(ENA_ALERT, "err_req_irq");
			goto err_req_irq;
		}

		/* allocate transmit descriptors */
		rc = ena_setup_all_tx_resources(adapter);
		if (unlikely(rc != 0)) {
			ena_trace(ENA_ALERT, "err_setup_tx");
			goto err_setup_tx;
		}

		/* allocate receive descriptors */
		rc = ena_setup_all_rx_resources(adapter);
		if (unlikely(rc != 0)) {
			ena_trace(ENA_ALERT, "err_setup_rx");
			goto err_setup_rx;
		}

		/* create IO queues for Rx & Tx */
		rc = ena_create_io_queues(adapter);
		if (unlikely(rc != 0)) {
			ena_trace(ENA_ALERT,
			    "create IO queues failed");
			goto err_io_que;
		}
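
		/*
		 * If the device has already reported link up, propagate it to
		 * the network stack before completing the bring-up.
		 */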
		if (unlikely(adapter->link_status))
			if_link_state_change(adapter->ifp, LINK_STATE_UP);

		rc = ena_up_complete(adapter);
		if (unlikely(rc != 0))
			goto err_up_complete;

		counter_u64_add(adapter->dev_stats.interface_up, 1);

		ena_update_hwassist(adapter);

		if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING,
		    IFF_DRV_OACTIVE);

		callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
		    ena_timer_service, (void *)adapter, 0);

		adapter->up = true;

		ena_unmask_all_io_irqs(adapter);
	}

	return (0);

err_up_complete:
	ena_destroy_all_io_queues(adapter);
err_io_que:
	ena_free_all_rx_resources(adapter);
err_setup_rx:
	ena_free_all_tx_resources(adapter);
err_setup_tx:
	ena_free_io_irq(adapter);
err_req_irq:
	return (rc);
}

static uint64_t
ena_get_counter(if_t ifp, ift_counter cnt)
{
	struct ena_adapter *adapter;
	struct ena_hw_stats *stats;

	adapter = if_getsoftc(ifp);
	stats = &adapter->hw_stats;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (counter_u64_fetch(stats->rx_packets));
	case IFCOUNTER_OPACKETS:
		return (counter_u64_fetch(stats->tx_packets));
	case IFCOUNTER_IBYTES:
		return (counter_u64_fetch(stats->rx_bytes));
	case IFCOUNTER_OBYTES:
		return (counter_u64_fetch(stats->tx_bytes));
	case IFCOUNTER_IQDROPS:
		return (counter_u64_fetch(stats->rx_drops));
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static int
ena_media_change(if_t ifp)
{
	/* Media Change is not supported by firmware */
	return (0);
}
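
/*
 * ifmedia status callback: under the global lock, report the link as an
 * active full-duplex Ethernet medium whenever the device has signalled
 * link up.
 */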
static void
ena_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	ena_trace(ENA_DBG, "enter");

	mtx_lock(&adapter->global_mtx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_status) {
		mtx_unlock(&adapter->global_mtx);
		ena_trace(ENA_INFO, "link_status = false");
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;

	mtx_unlock(&adapter->global_mtx);
}

static void
ena_init(void *arg)
{
	struct ena_adapter *adapter = (struct ena_adapter *)arg;

	if (!adapter->up) {
		sx_xlock(&adapter->ioctl_sx);
		ena_up(adapter);
		sx_unlock(&adapter->ioctl_sx);
	}
}
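
/*
 * ioctl handler: MTU, interface flag and capability changes that require
 * reconfiguration are performed by taking the interface down and back up
 * under the ioctl lock; everything else is passed to ether_ioctl().
 */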
static int
ena_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct ena_adapter *adapter;
	struct ifreq *ifr;
	int rc;

	adapter = ifp->if_softc;
	ifr = (struct ifreq *)data;

	/*
	 * Acquire the lock to prevent the up and down routines from running
	 * in parallel.
	 */
	rc = 0;
	switch (command) {
	case SIOCSIFMTU:
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;
		sx_xlock(&adapter->ioctl_sx);
		ena_down(adapter);

		ena_change_mtu(ifp, ifr->ifr_mtu);

		rc = ena_up(adapter);
		sx_unlock(&adapter->ioctl_sx);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if ((ifp->if_flags & (IFF_PROMISC |
				    IFF_ALLMULTI)) != 0) {
					device_printf(adapter->pdev,
					    "ioctl promisc/allmulti\n");
				}
			} else {
				sx_xlock(&adapter->ioctl_sx);
				rc = ena_up(adapter);
				sx_unlock(&adapter->ioctl_sx);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				sx_xlock(&adapter->ioctl_sx);
				ena_down(adapter);
				sx_unlock(&adapter->ioctl_sx);
			}
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;

	case SIOCSIFCAP:
	{
		int reinit = 0;

		if (ifr->ifr_reqcap != ifp->if_capenable) {
			ifp->if_capenable = ifr->ifr_reqcap;
			reinit = 1;
		}

		if ((reinit != 0) &&
		    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
			sx_xlock(&adapter->ioctl_sx);
			ena_down(adapter);
			rc = ena_up(adapter);
			sx_unlock(&adapter->ioctl_sx);
		}
	}

		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return (rc);
}
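
/*
 * Translate the offload capabilities advertised by the device into ifnet
 * capability flags (checksum offload, TSO, LRO, jumbo frames).
 */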
static int
ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
{
	int caps = 0;

	if ((feat->offload.tx &
	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
		caps |= IFCAP_TXCSUM;

	if ((feat->offload.tx &
	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
		caps |= IFCAP_TXCSUM_IPV6;

	if ((feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
		caps |= IFCAP_TSO4;

	if ((feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
		caps |= IFCAP_TSO6;

	if ((feat->offload.rx_supported &
	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
		caps |= IFCAP_RXCSUM;

	if ((feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
		caps |= IFCAP_RXCSUM_IPV6;

	caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;

	return (caps);
}

static void
ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
{

	host_info->supported_network_features[0] =
	    (uint32_t)if_getcapabilities(ifp);
}
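
/*
 * Recompute if_hwassist from the currently enabled interface capabilities
 * and the Tx offloads supported by the device.
 */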
static void
ena_update_hwassist(struct ena_adapter *adapter)
{
	if_t ifp = adapter->ifp;
	uint32_t feat = adapter->tx_offload_cap;
	int cap = if_getcapenable(ifp);
	int flags = 0;

	if_clearhwassist(ifp);

	if ((cap & IFCAP_TXCSUM) != 0) {
		if ((feat &
		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
			flags |= CSUM_IP;
		if ((feat &
		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
			flags |= CSUM_IP_UDP | CSUM_IP_TCP;
	}

	if ((cap & IFCAP_TXCSUM_IPV6) != 0)
		flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;

	if ((cap & IFCAP_TSO4) != 0)
		flags |= CSUM_IP_TSO;

	if ((cap & IFCAP_TSO6) != 0)
		flags |= CSUM_IP6_TSO;

	if_sethwassistbits(ifp, flags, 0);
}
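
/*
 * Allocate and configure the ifnet structure: register driver callbacks,
 * set the capabilities reported by the device, configure TSO limits and
 * ifmedia, and attach the Ethernet interface.
 */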
static int
ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
    struct ena_com_dev_get_features_ctx *feat)
{
	if_t ifp;
	int caps = 0;

	ifp = adapter->ifp = if_gethandle(IFT_ETHER);
	if (unlikely(ifp == NULL)) {
		ena_trace(ENA_ALERT, "can not allocate ifnet structure\n");
		return (ENXIO);
	}
	if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
	if_setdev(ifp, pdev);
	if_setsoftc(ifp, adapter);

	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, ena_init);
	if_settransmitfn(ifp, ena_mq_start);
	if_setqflushfn(ifp, ena_qflush);
	if_setioctlfn(ifp, ena_ioctl);
	if_setgetcounterfn(ifp, ena_get_counter);

	if_setsendqlen(ifp, adapter->tx_ring_size);
	if_setsendqready(ifp);
	if_setmtu(ifp, ETHERMTU);
	if_setbaudrate(ifp, 0);
	/* Zeroize capabilities... */
	if_setcapabilities(ifp, 0);
	if_setcapenable(ifp, 0);
	/* check hardware support */
	caps = ena_get_dev_offloads(feat);
	/* ... and set them */
	if_setcapabilitiesbit(ifp, caps, 0);

	/* TSO parameters */
	ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
	ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    ena_media_change, ena_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, adapter->mac_addr);

	return (0);
}

static void
ena_down(struct ena_adapter *adapter)
{
	int rc;

	if (adapter->up) {
		device_printf(adapter->pdev, "device is going DOWN\n");

		callout_drain(&adapter->timer_service);

		adapter->up = false;
		if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE,
		    IFF_DRV_RUNNING);

		ena_free_io_irq(adapter);

		if (adapter->trigger_reset) {
			rc = ena_com_dev_reset(adapter->ena_dev,
			    adapter->reset_reason);
			if (unlikely(rc != 0))
				device_printf(adapter->pdev,
				    "Device reset failed\n");
		}

		ena_destroy_all_io_queues(adapter);

		ena_free_all_tx_bufs(adapter);
		ena_free_all_rx_bufs(adapter);
		ena_free_all_tx_resources(adapter);
		ena_free_all_rx_resources(adapter);

		counter_u64_add(adapter->dev_stats.interface_down, 1);
	}
}
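
/*
 * Fill the Tx context with checksum and TSO offload metadata derived from
 * the mbuf's csum_flags and the parsed L3/L4 headers.
 */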
static void
ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf)
{
	struct ena_com_tx_meta *ena_meta;
	struct ether_vlan_header *eh;
	u32 mss;
	bool offload;
	uint16_t etype;
	int ehdrlen;
	struct ip *ip;
	int iphlen;
	struct tcphdr *th;

	offload = false;
	ena_meta = &ena_tx_ctx->ena_meta;
	mss = mbuf->m_pkthdr.tso_segsz;

	if (mss != 0)
		offload = true;

	if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0)
		offload = true;

	if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
		offload = true;

	if (!offload) {
		ena_tx_ctx->meta_valid = 0;
		return;
	}

	/* Determine where frame payload starts. */
	eh = mtod(mbuf, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	ip = (struct ip *)(mbuf->m_data + ehdrlen);
	iphlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + iphlen);

	if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		ena_tx_ctx->l3_csum_enable = 1;
	}
	if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		ena_tx_ctx->tso_enable = 1;
		ena_meta->l4_hdr_len = (th->th_off);
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
		if ((ip->ip_off & htons(IP_DF)) != 0)
			ena_tx_ctx->df = 1;
		break;
	case ETHERTYPE_IPV6:
		ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		/* FALLTHROUGH */
	default:
		break;
	}

	if (ip->ip_p == IPPROTO_TCP) {
		ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		if ((mbuf->m_pkthdr.csum_flags &
		    (CSUM_IP_TCP | CSUM_IP6_TCP)) != 0)
			ena_tx_ctx->l4_csum_enable = 1;
		else
			ena_tx_ctx->l4_csum_enable = 0;
	} else if (ip->ip_p == IPPROTO_UDP) {
		ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
		if ((mbuf->m_pkthdr.csum_flags &
		    (CSUM_IP_UDP | CSUM_IP6_UDP)) != 0)
			ena_tx_ctx->l4_csum_enable = 1;
		else
			ena_tx_ctx->l4_csum_enable = 0;
	} else {
		ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
		ena_tx_ctx->l4_csum_enable = 0;
	}

	ena_meta->mss = mss;
	ena_meta->l3_hdr_len = iphlen;
	ena_meta->l3_hdr_offset = ehdrlen;
	ena_tx_ctx->meta_valid = 1;
}
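
/*
 * If the mbuf chain has more fragments than the adapter's Tx SG list can
 * hold, collapse it into fewer, larger mbufs before mapping it for DMA.
 */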
static int
ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
{
	struct ena_adapter *adapter;
	struct mbuf *collapsed_mbuf;
	int num_frags;

	adapter = tx_ring->adapter;
	num_frags = ena_mbuf_count(*mbuf);

	/* One segment must be reserved for configuration descriptor. */
	if (num_frags < adapter->max_tx_sgl_size)
		return (0);
	counter_u64_add(tx_ring->tx_stats.collapse, 1);

	collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT,
	    adapter->max_tx_sgl_size - 1);
	if (unlikely(collapsed_mbuf == NULL)) {
		counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
		return (ENOMEM);
	}

	/* If mbuf was collapsed successfully, original mbuf is released.
	 */
	*mbuf = collapsed_mbuf;

	return (0);
}
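
/*
 * bus_dma callback used for LLQ mappings: it expects a single segment and
 * stores its address and length in the supplied ena_com_buf.
 */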
static void
ena_dmamap_llq(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct ena_com_buf *ena_buf = arg;

	if (unlikely(error != 0)) {
		ena_buf->paddr = 0;
		return;
	}

	KASSERT(nseg == 1, ("Invalid num of segments for LLQ dma"));

	ena_buf->paddr = segs->ds_addr;
	ena_buf->len = segs->ds_len;
}
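
/*
 * Map the mbuf chain for transmission. In LLQ mode the packet header is
 * pushed directly to device memory, so it is either pointed at within the
 * first mbuf or linearized into an intermediate buffer; the remaining data
 * is loaded into the Tx SG list via busdma.
 */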
static int
ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
    struct mbuf *mbuf, void **push_hdr, u16 *header_len)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_buf *ena_buf;
	bus_dma_segment_t segs[ENA_BUS_DMA_SEGS];
	uint32_t mbuf_head_len, frag_len;
	uint16_t push_len = 0;
	uint16_t delta = 0;
	int i, rc, nsegs;

	mbuf_head_len = mbuf->m_len;
	tx_info->mbuf = mbuf;
	ena_buf = tx_info->bufs;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/*
		 * When the device is in LLQ mode, the driver will copy
		 * the header into the device memory space.
		 * The ena_com layer assumes the header is in a linear
		 * memory space.
		 * This assumption might be wrong since part of the header
		 * can be in the fragmented buffers.
		 * First check if header fits in the mbuf. If not, copy it to
		 * separate buffer that will be holding linearized data.
		 */
		push_len = min_t(uint32_t, mbuf->m_pkthdr.len,
		    tx_ring->tx_max_header_size);
		*header_len = push_len;
		/* If header is in linear space, just point into mbuf's data. */
		if (likely(push_len <= mbuf_head_len)) {
			*push_hdr = mbuf->m_data;
		/*
		 * Otherwise, copy whole portion of header from multiple mbufs
		 * to intermediate buffer.
		 */
		} else {
			m_copydata(mbuf, 0, push_len,
			    tx_ring->push_buf_intermediate_buf);
			*push_hdr = tx_ring->push_buf_intermediate_buf;

			counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
			delta = push_len - mbuf_head_len;
		}

		ena_trace(ENA_DBG | ENA_TXPTH,
		    "mbuf: %p header_buf->vaddr: %p push_len: %d\n",
		    mbuf, *push_hdr, push_len);

		/*
		 * If the header was in linear memory space, map the rest of
		 * the data in the first mbuf of the chain for DMA.
		 */
		if (mbuf_head_len > push_len) {
			rc = bus_dmamap_load(adapter->tx_buf_tag,
			    tx_info->map_head,
			    mbuf->m_data + push_len, mbuf_head_len - push_len,
			    ena_dmamap_llq, ena_buf, BUS_DMA_NOWAIT);
			if (unlikely((rc != 0) || (ena_buf->paddr == 0)))
				goto single_dma_error;

			ena_buf++;
			tx_info->num_of_bufs++;

			tx_info->head_mapped = true;
		}
		mbuf = mbuf->m_next;
	} else {
		*push_hdr = NULL;
		/*
		 * header_len is just a hint for the device. Because FreeBSD is
		 * not giving us information about packet header length and it
		 * is not guaranteed that all packet headers will be in the 1st
		 * mbuf, setting header_len to 0 makes the device ignore this
		 * value and resolve the header on its own.
		 */
		*header_len = 0;
	}

	/*
	 * If the header is in non-linear space (delta > 0), then skip mbufs
	 * containing the header and map the last one containing both header
	 * and the packet data.
	 * The first segment is already counted in.
	 * If LLQ is not supported, the loop will be skipped.
	 */
	while (delta > 0) {
		frag_len = mbuf->m_len;

		/*
		 * If whole segment contains header just move to the
		 * next one and reduce delta.
		 */
		if (unlikely(delta >= frag_len)) {
			delta -= frag_len;
		} else {
			/*
			 * Map rest of the packet data that was contained in
			 * the mbuf.
			 */
			rc = bus_dmamap_load(adapter->tx_buf_tag,
			    tx_info->map_head, mbuf->m_data + delta,
			    frag_len - delta, ena_dmamap_llq, ena_buf,
			    BUS_DMA_NOWAIT);
			if (unlikely((rc != 0) || (ena_buf->paddr == 0)))
				goto single_dma_error;

			ena_buf++;
			tx_info->num_of_bufs++;
			tx_info->head_mapped = true;

			delta = 0;
		}

		mbuf = mbuf->m_next;
	}

	if (mbuf == NULL) {
		return (0);
	}

	/* Map rest of the mbufs */
	rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map_seg, mbuf,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely((rc != 0) || (nsegs == 0))) {
		ena_trace(ENA_WARNING,
		    "dmamap load failed! err: %d nsegs: %d", rc, nsegs);
		goto dma_error;
	}

	for (i = 0; i < nsegs; i++) {
		ena_buf->len = segs[i].ds_len;
		ena_buf->paddr = segs[i].ds_addr;
		ena_buf++;
	}
	tx_info->num_of_bufs += nsegs;
	tx_info->seg_mapped = true;

	return (0);

dma_error:
	if (tx_info->head_mapped == true)
		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head);
single_dma_error:
	counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
	tx_info->mbuf = NULL;
	return (rc);
}
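
/*
 * Transmit a single mbuf: collapse it if needed, map it for DMA, build the
 * Tx context (offloads, push header) and hand the descriptors to the device
 * through ena_com_prepare_tx(), stopping the ring when it runs out of space.
 */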
static int
ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
{
	struct ena_adapter *adapter;
	struct ena_tx_buffer *tx_info;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_com_dev *ena_dev;
	struct ena_com_io_sq* io_sq;
	void *push_hdr;
	uint16_t next_to_use;
	uint16_t req_id;
	uint16_t ena_qid;
	uint16_t header_len;
	int rc;
	int nb_hw_desc;

	ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
	adapter = tx_ring->que->adapter;
	ena_dev = adapter->ena_dev;
	io_sq = &ena_dev->io_sq_queues[ena_qid];

	rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
	if (unlikely(rc != 0)) {
		ena_trace(ENA_WARNING,
		    "Failed to collapse mbuf! err: %d", rc);
		return (rc);
	}

	ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes", (*mbuf)->m_pkthdr.len);

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_tx_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len);
	if (unlikely(rc != 0)) {
		ena_trace(ENA_WARNING, "Failed to map TX mbuf\n");
		return (rc);
	}
	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* Set flags and meta data */
	ena_tx_csum(&ena_tx_ctx, *mbuf);

	if (tx_ring->acum_pkts == DB_THRESHOLD ||
	    ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) {
		ena_trace(ENA_DBG | ENA_TXPTH,
		    "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
		    tx_ring->que->id);
		wmb();
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		counter_u64_add(tx_ring->tx_stats.doorbells, 1);
		tx_ring->acum_pkts = 0;
	}

	/* Prepare the packet's descriptors and send them to device */
	rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
	if (unlikely(rc != 0)) {
		if (likely(rc == ENA_COM_NO_MEM)) {
			ena_trace(ENA_DBG | ENA_TXPTH,
			    "tx ring[%d] is out of space\n", tx_ring->que->id);
		} else {
			device_printf(adapter->pdev,
			    "failed to prepare tx bufs\n");
		}
		counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
		goto dma_error;
	}

	counter_enter();
	counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
	counter_u64_add_protected(tx_ring->tx_stats.bytes,
	    (*mbuf)->m_pkthdr.len);

	counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
	counter_u64_add_protected(adapter->hw_stats.tx_bytes,
	    (*mbuf)->m_pkthdr.len);
	counter_exit();

	tx_info->tx_descs = nb_hw_desc;
	getbinuptime(&tx_info->timestamp);
	tx_info->print_once = true;

	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
	    tx_ring->ring_size);

	/*
	 * Stop the queue when no more space is available. The packet can have
	 * up to sgl_size + 2 descriptors: one for the meta descriptor and one
	 * for the header (if the header is larger than tx_max_header_size).
	 */
	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
	    adapter->max_tx_sgl_size + 2))) {
		ena_trace(ENA_DBG | ENA_TXPTH, "Stop queue %d\n",
		    tx_ring->que->id);

		tx_ring->running = false;
		counter_u64_add(tx_ring->tx_stats.queue_stop, 1);

		/*
		 * There is a rare condition where this function decides to
		 * stop the queue but meanwhile tx_cleanup() updates
		 * next_to_completion and terminates.
		 * The queue will remain stopped forever.
		 * To solve this issue this function performs mb(), checks
		 * the wakeup condition and wakes up the queue if needed.
		 */
		mb();

		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
		    ENA_TX_RESUME_THRESH)) {
			tx_ring->running = true;
			counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1);
		}
	}

	if (tx_info->head_mapped == true)
		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head,
		    BUS_DMASYNC_PREWRITE);
	if (tx_info->seg_mapped == true)
		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg,
		    BUS_DMASYNC_PREWRITE);

	return (0);

dma_error:
	tx_info->mbuf = NULL;
	if (tx_info->seg_mapped == true) {
		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_seg);
		tx_info->seg_mapped = false;
	}
	if (tx_info->head_mapped == true) {
		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head);
		tx_info->head_mapped = false;
	}

	return (rc);
}
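
/*
 * Drain the ring's buf_ring and transmit packets while the ring has room,
 * batching doorbell writes; if the ring was stopped, kick the cleanup task.
 */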
static void
ena_start_xmit(struct ena_ring *tx_ring)
{
	struct mbuf *mbuf;
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_io_sq* io_sq;
	int ena_qid;
	int ret = 0;

	if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0))
		return;

	if (unlikely(!adapter->link_status))
		return;

	ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
	io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];

	while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
		ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and"
		    " header csum flags %#jx",
		    mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags);

		if (unlikely(!tx_ring->running)) {
			drbr_putback(adapter->ifp, tx_ring->br, mbuf);
			break;
		}

		if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) {
			if (ret == ENA_COM_NO_MEM) {
				drbr_putback(adapter->ifp, tx_ring->br, mbuf);
			} else if (ret == ENA_COM_NO_SPACE) {
				drbr_putback(adapter->ifp, tx_ring->br, mbuf);
			} else {
				m_freem(mbuf);
				drbr_advance(adapter->ifp, tx_ring->br);
			}

			break;
		}

		drbr_advance(adapter->ifp, tx_ring->br);

		if (unlikely((if_getdrvflags(adapter->ifp) &
		    IFF_DRV_RUNNING) == 0))
			return;

		tx_ring->acum_pkts++;

		BPF_MTAP(adapter->ifp, mbuf);
	}

	if (likely(tx_ring->acum_pkts != 0)) {
		wmb();
		/* Trigger the dma engine */
		ena_com_write_sq_doorbell(io_sq);
		counter_u64_add(tx_ring->tx_stats.doorbells, 1);
		tx_ring->acum_pkts = 0;
	}

	if (unlikely(!tx_ring->running))
		taskqueue_enqueue(tx_ring->que->cleanup_tq,
		    &tx_ring->que->cleanup_task);
}
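
/*
 * Deferred transmit task: keep pushing queued packets out of the buf_ring
 * under the ring lock for as long as the ring is running and the interface
 * is up.
 */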
static void
ena_deferred_mq_start(void *arg, int pending)
{
	struct ena_ring *tx_ring = (struct ena_ring *)arg;
	struct ifnet *ifp = tx_ring->adapter->ifp;

	while (!drbr_empty(ifp, tx_ring->br) &&
	    tx_ring->running &&
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		ENA_RING_MTX_LOCK(tx_ring);
		ena_start_xmit(tx_ring);
		ENA_RING_MTX_UNLOCK(tx_ring);
	}
}

static int
ena_mq_start(if_t ifp, struct mbuf *m)
{
	struct ena_adapter *adapter = ifp->if_softc;
	struct ena_ring *tx_ring;
	int ret, is_drbr_empty;
	uint32_t i;

	if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0))
		return (ENODEV);

	/* Which queue to use */
	/*
	 * If everything is set up correctly, it should be the same bucket
	 * as the CPU we are currently on, which should improve performance.
	 */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		i = m->m_pkthdr.flowid % adapter->num_queues;
	} else {
		i = curcpu % adapter->num_queues;
	}
	tx_ring = &adapter->tx_ring[i];

	/* Check if drbr is empty before putting packet */
	is_drbr_empty = drbr_empty(ifp, tx_ring->br);
	ret = drbr_enqueue(ifp, tx_ring->br, m);
	if (unlikely(ret != 0)) {
		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
		return (ret);
	}

	if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) {
		ena_start_xmit(tx_ring);
		ENA_RING_MTX_UNLOCK(tx_ring);
	} else {
		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
	}

	return (0);
}

static void
ena_qflush(if_t ifp)
{
	struct ena_adapter *adapter = ifp->if_softc;
	struct ena_ring *tx_ring = adapter->tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; ++i, ++tx_ring)
		if (!drbr_empty(ifp, tx_ring->br)) {
			ENA_RING_MTX_LOCK(tx_ring);
			drbr_flush(ifp, tx_ring->br);
			ENA_RING_MTX_UNLOCK(tx_ring);
		}

	if_qflush(ifp);
}
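
/*
 * The number of IO queues is bounded by the number of CPUs, the Rx/Tx queue
 * limits reported by the device (or the LLQ limit in LLQ mode) and the
 * available MSI-X vectors, keeping one vector for the management interrupt.
 */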
static int
ena_calc_io_queue_num(struct ena_adapter *adapter,
    struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;

	/* Regular queues capabilities */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
		    &get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
		    max_queue_ext->max_rx_cq_num);

		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
		    &get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq fields for the tx SQ/CQ */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
	io_queue_num = min_t(int, io_queue_num, io_rx_num);
	io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
	io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
	io_queue_num = min_t(int, io_queue_num,
	    pci_msix_count(adapter->pdev) - 1);

	return (io_queue_num);
}

static int
ena_enable_wc(struct resource *res)
{
#if defined(__i386) || defined(__amd64)
	vm_offset_t va;
	vm_size_t len;
	int rc;

	va = (vm_offset_t)rman_get_virtual(res);
	len = rman_get_size(res);
	/* Enable write combining */
	rc = pmap_change_attr(va, len, PAT_WRITE_COMBINING);
	if (unlikely(rc != 0)) {
		ena_trace(ENA_ALERT, "pmap_change_attr failed, %d\n", rc);
		return (rc);
	}

	return (0);
#endif
	return (EOPNOTSUPP);
}
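
/*
 * Decide between LLQ (device placement) and host placement for Tx: if the
 * device supports LLQ, configure it, map the LLQ memory BAR and enable
 * write combining on it; if LLQ is unsupported or cannot be configured,
 * fall back to host mode.
 */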
static int
ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
    struct ena_admin_feature_llq_desc *llq,
    struct ena_llq_configurations *llq_default_configurations)
{
	struct ena_adapter *adapter = device_get_softc(pdev);
	int rc, rid;
	uint32_t llq_feature_mask;

	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
	if (!(ena_dev->supported_features & llq_feature_mask)) {
		device_printf(pdev,
		    "LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return (0);
	}

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc != 0)) {
		device_printf(pdev, "Failed to configure the device mode. "
		    "Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return (0);
	}

	/* Nothing to config, exit */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return (0);

	/* Try to allocate resources for LLQ bar */
	rid = PCIR_BAR(ENA_MEM_BAR);
	adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (unlikely(adapter->memory == NULL)) {
		device_printf(pdev, "unable to allocate LLQ bar resource. "
		    "Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return (0);
	}

	/* Enable write combining for better LLQ performance */
	rc = ena_enable_wc(adapter->memory);
	if (unlikely(rc != 0)) {
		device_printf(pdev, "failed to enable write combining.\n");
		return (rc);
	}

	/*
	 * Save virtual address of the device's memory region
	 * for the ena_com layer.
	 */
	ena_dev->mem_bar = rman_get_virtual(adapter->memory);

	return (0);
}
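
/*
 * Default LLQ configuration requested from the device: headers placed
 * inline, 128-byte ring entries and two descriptors before the header.
 */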
33554fa9e02dSMarcin Wojtas */ 33564fa9e02dSMarcin Wojtas ena_dev->mem_bar = rman_get_virtual(adapter->memory); 33574fa9e02dSMarcin Wojtas 33584fa9e02dSMarcin Wojtas return (0); 33594fa9e02dSMarcin Wojtas } 33604fa9e02dSMarcin Wojtas 33614fa9e02dSMarcin Wojtas static inline 33624fa9e02dSMarcin Wojtas void set_default_llq_configurations(struct ena_llq_configurations *llq_config) 33634fa9e02dSMarcin Wojtas { 33644fa9e02dSMarcin Wojtas llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 33654fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 33664fa9e02dSMarcin Wojtas llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 33674fa9e02dSMarcin Wojtas llq_config->llq_num_decs_before_header = 33684fa9e02dSMarcin Wojtas ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 33694fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size_value = 128; 33704fa9e02dSMarcin Wojtas } 33714fa9e02dSMarcin Wojtas 33724fa9e02dSMarcin Wojtas static int 33736064f289SMarcin Wojtas ena_calc_queue_size(struct ena_adapter *adapter, 33746064f289SMarcin Wojtas struct ena_calc_queue_size_ctx *ctx) 33759b8d05b8SZbigniew Bodek { 33764fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 33774fa9e02dSMarcin Wojtas struct ena_com_dev *ena_dev = ctx->ena_dev; 33786064f289SMarcin Wojtas uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; 33796064f289SMarcin Wojtas uint32_t rx_queue_size = adapter->rx_ring_size; 33809b8d05b8SZbigniew Bodek 33814fa9e02dSMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 33826064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 33836064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 33846064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 33856064f289SMarcin Wojtas max_queue_ext->max_rx_cq_depth); 33866064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 33876064f289SMarcin Wojtas max_queue_ext->max_rx_sq_depth); 33886064f289SMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 33896064f289SMarcin Wojtas max_queue_ext->max_tx_cq_depth); 33904fa9e02dSMarcin Wojtas 33914fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 33924fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 33934fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 33944fa9e02dSMarcin Wojtas llq->max_llq_depth); 33954fa9e02dSMarcin Wojtas else 33966064f289SMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 33976064f289SMarcin Wojtas max_queue_ext->max_tx_sq_depth); 33984fa9e02dSMarcin Wojtas 33996064f289SMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 34006064f289SMarcin Wojtas max_queue_ext->max_per_packet_rx_descs); 34016064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 34026064f289SMarcin Wojtas max_queue_ext->max_per_packet_tx_descs); 34036064f289SMarcin Wojtas } else { 34046064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 34056064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queues; 34066064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 34076064f289SMarcin Wojtas max_queues->max_cq_depth); 34086064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 34096064f289SMarcin Wojtas max_queues->max_sq_depth); 34104fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 34114fa9e02dSMarcin Wojtas max_queues->max_cq_depth); 34124fa9e02dSMarcin Wojtas 34134fa9e02dSMarcin Wojtas if 
(ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
			tx_queue_size = min_t(uint32_t, tx_queue_size,
			    llq->max_llq_depth);
		else
			tx_queue_size = min_t(uint32_t, tx_queue_size,
			    max_queues->max_sq_depth);

		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queues->max_packet_tx_descs);
	}

	/* round down to the nearest power of 2 */
	rx_queue_size = 1 << (fls(rx_queue_size) - 1);
	tx_queue_size = 1 << (fls(tx_queue_size) - 1);

	if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) {
		device_printf(ctx->pdev, "Invalid queue size\n");
		return (EFAULT);
	}

	ctx->rx_queue_size = rx_queue_size;
	ctx->tx_queue_size = tx_queue_size;

	return (0);
}

static int
ena_handle_updated_queues(struct ena_adapter *adapter,
    struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	device_t pdev = adapter->pdev;
	bool are_queues_changed = false;
	int io_queue_num, rc;

	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = get_feat_ctx;
	calc_queue_ctx.pdev = pdev;

	io_queue_num = ena_calc_io_queue_num(adapter, get_feat_ctx);
	rc = ena_calc_queue_size(adapter, &calc_queue_ctx);
	if (unlikely(rc != 0 || io_queue_num <= 0))
		return (EFAULT);

	if (adapter->tx_ring->buf_ring_size != adapter->buf_ring_size)
		are_queues_changed = true;

	if (unlikely(adapter->tx_ring_size > calc_queue_ctx.tx_queue_size ||
	    adapter->rx_ring_size > calc_queue_ctx.rx_queue_size)) {
		device_printf(pdev,
		    "Not enough resources to allocate requested queue sizes "
		    "(TX,RX)=(%d,%d), falling back to queue sizes "
		    "(TX,RX)=(%d,%d)\n",
		    adapter->tx_ring_size,
		    adapter->rx_ring_size,
		    calc_queue_ctx.tx_queue_size,
		    calc_queue_ctx.rx_queue_size);
		adapter->tx_ring_size = calc_queue_ctx.tx_queue_size;
adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 34766064f289SMarcin Wojtas adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 34776064f289SMarcin Wojtas adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 34786064f289SMarcin Wojtas are_queues_changed = true; 34796064f289SMarcin Wojtas } 34806064f289SMarcin Wojtas 34816064f289SMarcin Wojtas if (unlikely(adapter->num_queues > io_queue_num)) { 34826064f289SMarcin Wojtas device_printf(pdev, 34836064f289SMarcin Wojtas "Not enough resources to allocate %d queues, " 34846064f289SMarcin Wojtas "falling back to %d queues\n", 34856064f289SMarcin Wojtas adapter->num_queues, io_queue_num); 34866064f289SMarcin Wojtas adapter->num_queues = io_queue_num; 34876064f289SMarcin Wojtas if (adapter->rss_support) { 34886064f289SMarcin Wojtas ena_com_rss_destroy(ena_dev); 34896064f289SMarcin Wojtas rc = ena_rss_init_default(adapter); 34906064f289SMarcin Wojtas if (unlikely(rc != 0) && (rc != EOPNOTSUPP)) { 34916064f289SMarcin Wojtas device_printf(pdev, "Cannot init RSS rc: %d\n", 34926064f289SMarcin Wojtas rc); 34936064f289SMarcin Wojtas return (rc); 34946064f289SMarcin Wojtas } 34956064f289SMarcin Wojtas } 34966064f289SMarcin Wojtas are_queues_changed = true; 34976064f289SMarcin Wojtas } 34986064f289SMarcin Wojtas 34996064f289SMarcin Wojtas if (unlikely(are_queues_changed)) { 35006064f289SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 35016064f289SMarcin Wojtas ena_init_io_rings(adapter); 35026064f289SMarcin Wojtas } 35036064f289SMarcin Wojtas 35046064f289SMarcin Wojtas return (0); 35059b8d05b8SZbigniew Bodek } 35069b8d05b8SZbigniew Bodek 35070bdffe59SMarcin Wojtas static int 35080bdffe59SMarcin Wojtas ena_rss_init_default(struct ena_adapter *adapter) 35099b8d05b8SZbigniew Bodek { 35109b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 35119b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 35129b8d05b8SZbigniew Bodek int qid, rc, i; 35139b8d05b8SZbigniew Bodek 35149b8d05b8SZbigniew Bodek rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 35150bdffe59SMarcin Wojtas if (unlikely(rc != 0)) { 35164e8acd84SMarcin Wojtas device_printf(dev, "Cannot init indirect table\n"); 35177d2544e6SMarcin Wojtas return (rc); 35189b8d05b8SZbigniew Bodek } 35199b8d05b8SZbigniew Bodek 35209b8d05b8SZbigniew Bodek for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 35219b8d05b8SZbigniew Bodek qid = i % adapter->num_queues; 35229b8d05b8SZbigniew Bodek rc = ena_com_indirect_table_fill_entry(ena_dev, i, 35239b8d05b8SZbigniew Bodek ENA_IO_RXQ_IDX(qid)); 35240bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 35259b8d05b8SZbigniew Bodek device_printf(dev, "Cannot fill indirect table\n"); 35267d2544e6SMarcin Wojtas goto err_rss_destroy; 35279b8d05b8SZbigniew Bodek } 35289b8d05b8SZbigniew Bodek } 35299b8d05b8SZbigniew Bodek 35309b8d05b8SZbigniew Bodek rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 35319b8d05b8SZbigniew Bodek ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 35320bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 35339b8d05b8SZbigniew Bodek device_printf(dev, "Cannot fill hash function\n"); 35347d2544e6SMarcin Wojtas goto err_rss_destroy; 35359b8d05b8SZbigniew Bodek } 35369b8d05b8SZbigniew Bodek 35379b8d05b8SZbigniew Bodek rc = ena_com_set_default_hash_ctrl(ena_dev); 35380bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 35399b8d05b8SZbigniew Bodek device_printf(dev, "Cannot fill hash control\n"); 35407d2544e6SMarcin Wojtas goto err_rss_destroy; 
35419b8d05b8SZbigniew Bodek } 35429b8d05b8SZbigniew Bodek 35439b8d05b8SZbigniew Bodek return (0); 35449b8d05b8SZbigniew Bodek 35457d2544e6SMarcin Wojtas err_rss_destroy: 35469b8d05b8SZbigniew Bodek ena_com_rss_destroy(ena_dev); 35479b8d05b8SZbigniew Bodek return (rc); 35489b8d05b8SZbigniew Bodek } 35499b8d05b8SZbigniew Bodek 35509b8d05b8SZbigniew Bodek static void 35519b8d05b8SZbigniew Bodek ena_rss_init_default_deferred(void *arg) 35529b8d05b8SZbigniew Bodek { 35539b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 35549b8d05b8SZbigniew Bodek devclass_t dc; 35559b8d05b8SZbigniew Bodek int max; 35569b8d05b8SZbigniew Bodek int rc; 35579b8d05b8SZbigniew Bodek 35589b8d05b8SZbigniew Bodek dc = devclass_find("ena"); 35593f9ed7abSMarcin Wojtas if (unlikely(dc == NULL)) { 35604e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, "No devclass ena\n"); 35619b8d05b8SZbigniew Bodek return; 35629b8d05b8SZbigniew Bodek } 35639b8d05b8SZbigniew Bodek 35649b8d05b8SZbigniew Bodek max = devclass_get_maxunit(dc); 35659b8d05b8SZbigniew Bodek while (max-- >= 0) { 35669b8d05b8SZbigniew Bodek adapter = devclass_get_softc(dc, max); 35679b8d05b8SZbigniew Bodek if (adapter != NULL) { 35689b8d05b8SZbigniew Bodek rc = ena_rss_init_default(adapter); 35699b8d05b8SZbigniew Bodek adapter->rss_support = true; 35703f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 35719b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 35729b8d05b8SZbigniew Bodek "WARNING: RSS was not properly initialized," 35730bdffe59SMarcin Wojtas " it will affect bandwidth\n"); 35749b8d05b8SZbigniew Bodek adapter->rss_support = false; 35759b8d05b8SZbigniew Bodek } 35769b8d05b8SZbigniew Bodek } 35779b8d05b8SZbigniew Bodek } 35789b8d05b8SZbigniew Bodek } 35799b8d05b8SZbigniew Bodek SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL); 35809b8d05b8SZbigniew Bodek 35810bdffe59SMarcin Wojtas static void 3582*46021271SMarcin Wojtas ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) 35839b8d05b8SZbigniew Bodek { 35849b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info; 3585*46021271SMarcin Wojtas uintptr_t rid; 35869b8d05b8SZbigniew Bodek int rc; 35879b8d05b8SZbigniew Bodek 35889b8d05b8SZbigniew Bodek /* Allocate only the host info */ 35899b8d05b8SZbigniew Bodek rc = ena_com_allocate_host_info(ena_dev); 35903f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 35919b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, "Cannot allocate host info\n"); 35929b8d05b8SZbigniew Bodek return; 35939b8d05b8SZbigniew Bodek } 35949b8d05b8SZbigniew Bodek 35959b8d05b8SZbigniew Bodek host_info = ena_dev->host_attr.host_info; 35969b8d05b8SZbigniew Bodek 3597*46021271SMarcin Wojtas if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) 3598*46021271SMarcin Wojtas host_info->bdf = rid; 35999b8d05b8SZbigniew Bodek host_info->os_type = ENA_ADMIN_OS_FREEBSD; 36009b8d05b8SZbigniew Bodek host_info->kernel_ver = osreldate; 36019b8d05b8SZbigniew Bodek 36029b8d05b8SZbigniew Bodek sprintf(host_info->kernel_ver_str, "%d", osreldate); 36039b8d05b8SZbigniew Bodek host_info->os_dist = 0; 36049b8d05b8SZbigniew Bodek strncpy(host_info->os_dist_str, osrelease, 36059b8d05b8SZbigniew Bodek sizeof(host_info->os_dist_str) - 1); 36069b8d05b8SZbigniew Bodek 36079b8d05b8SZbigniew Bodek host_info->driver_version = 36089b8d05b8SZbigniew Bodek (DRV_MODULE_VER_MAJOR) | 36099b8d05b8SZbigniew Bodek (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 36109b8d05b8SZbigniew Bodek (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 36118ece6b25SMarcin Wojtas 
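	/*
	 * The three version components above are packed into the single
	 * driver_version word: the major number sits in the low bits and the
	 * minor/sub-minor numbers are shifted into the higher bits via the
	 * ENA_ADMIN_HOST_INFO_*_SHIFT constants from the admin interface
	 * headers.
	 */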
host_info->num_cpus = mp_ncpus; 36129b8d05b8SZbigniew Bodek 36139b8d05b8SZbigniew Bodek rc = ena_com_set_host_attributes(ena_dev); 36143f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 3615a195fab0SMarcin Wojtas if (rc == EOPNOTSUPP) 36169b8d05b8SZbigniew Bodek ena_trace(ENA_WARNING, "Cannot set host attributes\n"); 36179b8d05b8SZbigniew Bodek else 36189b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, "Cannot set host attributes\n"); 36199b8d05b8SZbigniew Bodek 36209b8d05b8SZbigniew Bodek goto err; 36219b8d05b8SZbigniew Bodek } 36229b8d05b8SZbigniew Bodek 36239b8d05b8SZbigniew Bodek return; 36249b8d05b8SZbigniew Bodek 36259b8d05b8SZbigniew Bodek err: 36269b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 36279b8d05b8SZbigniew Bodek } 36289b8d05b8SZbigniew Bodek 36299b8d05b8SZbigniew Bodek static int 36309b8d05b8SZbigniew Bodek ena_device_init(struct ena_adapter *adapter, device_t pdev, 36319b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 36329b8d05b8SZbigniew Bodek { 36339b8d05b8SZbigniew Bodek struct ena_com_dev* ena_dev = adapter->ena_dev; 36349b8d05b8SZbigniew Bodek bool readless_supported; 36359b8d05b8SZbigniew Bodek uint32_t aenq_groups; 36369b8d05b8SZbigniew Bodek int dma_width; 36379b8d05b8SZbigniew Bodek int rc; 36389b8d05b8SZbigniew Bodek 36399b8d05b8SZbigniew Bodek rc = ena_com_mmio_reg_read_request_init(ena_dev); 36403f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36419b8d05b8SZbigniew Bodek device_printf(pdev, "failed to init mmio read less\n"); 36420bdffe59SMarcin Wojtas return (rc); 36439b8d05b8SZbigniew Bodek } 36449b8d05b8SZbigniew Bodek 36459b8d05b8SZbigniew Bodek /* 36469b8d05b8SZbigniew Bodek * The PCIe configuration space revision id indicate if mmio reg 36479b8d05b8SZbigniew Bodek * read is disabled 36489b8d05b8SZbigniew Bodek */ 36499b8d05b8SZbigniew Bodek readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); 36509b8d05b8SZbigniew Bodek ena_com_set_mmio_read_mode(ena_dev, readless_supported); 36519b8d05b8SZbigniew Bodek 3652a195fab0SMarcin Wojtas rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 36533f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36549b8d05b8SZbigniew Bodek device_printf(pdev, "Can not reset device\n"); 36559b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36569b8d05b8SZbigniew Bodek } 36579b8d05b8SZbigniew Bodek 36589b8d05b8SZbigniew Bodek rc = ena_com_validate_version(ena_dev); 36593f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36609b8d05b8SZbigniew Bodek device_printf(pdev, "device version is too low\n"); 36619b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36629b8d05b8SZbigniew Bodek } 36639b8d05b8SZbigniew Bodek 36649b8d05b8SZbigniew Bodek dma_width = ena_com_get_dma_width(ena_dev); 36653f9ed7abSMarcin Wojtas if (unlikely(dma_width < 0)) { 36669b8d05b8SZbigniew Bodek device_printf(pdev, "Invalid dma width value %d", dma_width); 36679b8d05b8SZbigniew Bodek rc = dma_width; 36689b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36699b8d05b8SZbigniew Bodek } 36709b8d05b8SZbigniew Bodek adapter->dma_width = dma_width; 36719b8d05b8SZbigniew Bodek 36729b8d05b8SZbigniew Bodek /* ENA admin level init */ 367367ec48bbSMarcin Wojtas rc = ena_com_admin_init(ena_dev, &aenq_handlers); 36743f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36759b8d05b8SZbigniew Bodek device_printf(pdev, 36769b8d05b8SZbigniew Bodek "Can not initialize ena admin queue with device\n"); 36779b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36789b8d05b8SZbigniew Bodek } 36799b8d05b8SZbigniew Bodek 36809b8d05b8SZbigniew 
Bodek /* 36819b8d05b8SZbigniew Bodek * To enable the msix interrupts the driver needs to know the number 36829b8d05b8SZbigniew Bodek * of queues. So the driver uses polling mode to retrieve this 36839b8d05b8SZbigniew Bodek * information 36849b8d05b8SZbigniew Bodek */ 36859b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, true); 36869b8d05b8SZbigniew Bodek 3687*46021271SMarcin Wojtas ena_config_host_info(ena_dev, pdev); 36889b8d05b8SZbigniew Bodek 36899b8d05b8SZbigniew Bodek /* Get Device Attributes */ 36909b8d05b8SZbigniew Bodek rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 36913f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36929b8d05b8SZbigniew Bodek device_printf(pdev, 36939b8d05b8SZbigniew Bodek "Cannot get attribute for ena device rc: %d\n", rc); 36949b8d05b8SZbigniew Bodek goto err_admin_init; 36959b8d05b8SZbigniew Bodek } 36969b8d05b8SZbigniew Bodek 3697e6de9a83SMarcin Wojtas aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 3698e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_FATAL_ERROR) | 3699e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_WARNING) | 370040621d71SMarcin Wojtas BIT(ENA_ADMIN_NOTIFICATION) | 3701e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_KEEP_ALIVE); 37029b8d05b8SZbigniew Bodek 37039b8d05b8SZbigniew Bodek aenq_groups &= get_feat_ctx->aenq.supported_groups; 37049b8d05b8SZbigniew Bodek rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 37053f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37069b8d05b8SZbigniew Bodek device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc); 37079b8d05b8SZbigniew Bodek goto err_admin_init; 37089b8d05b8SZbigniew Bodek } 37099b8d05b8SZbigniew Bodek 37109b8d05b8SZbigniew Bodek *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 37119b8d05b8SZbigniew Bodek 37120bdffe59SMarcin Wojtas return (0); 37139b8d05b8SZbigniew Bodek 37149b8d05b8SZbigniew Bodek err_admin_init: 37159b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 37169b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 37179b8d05b8SZbigniew Bodek err_mmio_read_less: 37189b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 37199b8d05b8SZbigniew Bodek 37200bdffe59SMarcin Wojtas return (rc); 37219b8d05b8SZbigniew Bodek } 37229b8d05b8SZbigniew Bodek 37239b8d05b8SZbigniew Bodek static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, 37249b8d05b8SZbigniew Bodek int io_vectors) 37259b8d05b8SZbigniew Bodek { 37269b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 37279b8d05b8SZbigniew Bodek int rc; 37289b8d05b8SZbigniew Bodek 37299b8d05b8SZbigniew Bodek rc = ena_enable_msix(adapter); 37303f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37319b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Error with MSI-X enablement\n"); 37320bdffe59SMarcin Wojtas return (rc); 37339b8d05b8SZbigniew Bodek } 37349b8d05b8SZbigniew Bodek 37359b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(adapter); 37369b8d05b8SZbigniew Bodek 37379b8d05b8SZbigniew Bodek rc = ena_request_mgmnt_irq(adapter); 37383f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37399b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n"); 37409b8d05b8SZbigniew Bodek goto err_disable_msix; 37419b8d05b8SZbigniew Bodek } 37429b8d05b8SZbigniew Bodek 37439b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, false); 37449b8d05b8SZbigniew Bodek 37459b8d05b8SZbigniew Bodek ena_com_admin_aenq_enable(ena_dev); 37469b8d05b8SZbigniew Bodek 37470bdffe59SMarcin Wojtas return (0); 37489b8d05b8SZbigniew Bodek 37499b8d05b8SZbigniew Bodek 
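	/*
	 * Error path: the management interrupt could not be requested, so
	 * release the MSI-X vectors enabled earlier in this function before
	 * returning the error to the caller.
	 */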
err_disable_msix: 37509b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 37519b8d05b8SZbigniew Bodek 37520bdffe59SMarcin Wojtas return (rc); 37539b8d05b8SZbigniew Bodek } 37549b8d05b8SZbigniew Bodek 37559b8d05b8SZbigniew Bodek /* Function called on ENA_ADMIN_KEEP_ALIVE event */ 37569b8d05b8SZbigniew Bodek static void ena_keep_alive_wd(void *adapter_data, 37579b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 37589b8d05b8SZbigniew Bodek { 37599b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 376030217e2dSMarcin Wojtas struct ena_admin_aenq_keep_alive_desc *desc; 37619b8d05b8SZbigniew Bodek sbintime_t stime; 376230217e2dSMarcin Wojtas uint64_t rx_drops; 376330217e2dSMarcin Wojtas 376430217e2dSMarcin Wojtas desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 376530217e2dSMarcin Wojtas 376630217e2dSMarcin Wojtas rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 376730217e2dSMarcin Wojtas counter_u64_zero(adapter->hw_stats.rx_drops); 376830217e2dSMarcin Wojtas counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); 37699b8d05b8SZbigniew Bodek 37709b8d05b8SZbigniew Bodek stime = getsbinuptime(); 37719b8d05b8SZbigniew Bodek atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 37729b8d05b8SZbigniew Bodek } 37739b8d05b8SZbigniew Bodek 37749b8d05b8SZbigniew Bodek /* Check for keep alive expiration */ 37759b8d05b8SZbigniew Bodek static void check_for_missing_keep_alive(struct ena_adapter *adapter) 37769b8d05b8SZbigniew Bodek { 37779b8d05b8SZbigniew Bodek sbintime_t timestamp, time; 37789b8d05b8SZbigniew Bodek 37799b8d05b8SZbigniew Bodek if (adapter->wd_active == 0) 37809b8d05b8SZbigniew Bodek return; 37819b8d05b8SZbigniew Bodek 378240621d71SMarcin Wojtas if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 37839b8d05b8SZbigniew Bodek return; 37849b8d05b8SZbigniew Bodek 37859b8d05b8SZbigniew Bodek timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); 37869b8d05b8SZbigniew Bodek time = getsbinuptime() - timestamp; 37879b8d05b8SZbigniew Bodek if (unlikely(time > adapter->keep_alive_timeout)) { 37889b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 37899b8d05b8SZbigniew Bodek "Keep alive watchdog timeout.\n"); 37909b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.wd_expired, 1); 3791a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 37929b8d05b8SZbigniew Bodek adapter->trigger_reset = true; 37939b8d05b8SZbigniew Bodek } 37949b8d05b8SZbigniew Bodek } 37959b8d05b8SZbigniew Bodek 37969b8d05b8SZbigniew Bodek /* Check if admin queue is enabled */ 37979b8d05b8SZbigniew Bodek static void check_for_admin_com_state(struct ena_adapter *adapter) 37989b8d05b8SZbigniew Bodek { 37990bdffe59SMarcin Wojtas if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == 38000bdffe59SMarcin Wojtas false)) { 38019b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 38029b8d05b8SZbigniew Bodek "ENA admin queue is not in running state!\n"); 38039b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 3804a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 38059b8d05b8SZbigniew Bodek adapter->trigger_reset = true; 38069b8d05b8SZbigniew Bodek } 38079b8d05b8SZbigniew Bodek } 38089b8d05b8SZbigniew Bodek 380974dba3adSMarcin Wojtas static int 3810d12f7bfcSMarcin Wojtas check_for_rx_interrupt_queue(struct ena_adapter *adapter, 3811d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring) 3812d12f7bfcSMarcin Wojtas { 3813d12f7bfcSMarcin Wojtas if 
(likely(rx_ring->first_interrupt))
		return (0);

	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
		return (0);

	rx_ring->no_interrupt_event_cnt++;

	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
		device_printf(adapter->pdev, "Potential MSIX issue on Rx side "
		    "Queue = %d. Reset the device\n", rx_ring->qid);
		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
		adapter->trigger_reset = true;
		return (EIO);
	}

	return (0);
}

static int
check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
    struct ena_ring *tx_ring)
{
	struct bintime curtime, time;
	struct ena_tx_buffer *tx_buf;
	sbintime_t time_offset;
	uint32_t missed_tx = 0;
	int i, rc = 0;

	getbinuptime(&curtime);

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];

		if (bintime_isset(&tx_buf->timestamp) == 0)
			continue;

		time = curtime;
		bintime_sub(&time, &tx_buf->timestamp);
		time_offset = bttosbt(time);

		if (unlikely(!tx_ring->first_interrupt &&
		    time_offset > 2 * adapter->missing_tx_timeout)) {
			/*
			 * If the interrupt is still not received after the
			 * grace period, schedule a reset.
			 */
			device_printf(adapter->pdev,
			    "Potential MSIX issue on Tx side Queue = %d. "
			    "Reset the device\n", tx_ring->qid);
			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
			adapter->trigger_reset = true;
			return (EIO);
		}

		/* Check again if packet is still waiting */
		if (unlikely(time_offset > adapter->missing_tx_timeout)) {
			if (!tx_buf->print_once)
				ena_trace(ENA_WARNING, "Found a Tx that wasn't "
				    "completed on time, qid %d, index %d.\n",
				    tx_ring->qid, i);

			tx_buf->print_once = true;
			missed_tx++;
		}
	}

	if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
		device_printf(adapter->pdev,
		    "The number of lost tx completions is above the threshold "
		    "(%d > %d). Reset the device\n",
		    missed_tx, adapter->missing_tx_threshold);
		adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
		adapter->trigger_reset = true;
		rc = EIO;
	}

	counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);

	return (rc);
}

/*
 * Check for TX which were not completed on time.
 * Timeout is defined by "missing_tx_timeout".
 * Reset will be performed if number of uncompleted
 * transactions exceeds "missing_tx_threshold".
 */
static void
check_for_missing_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	int i, budget, rc;

	/* Make sure the driver isn't turning the device off in another process */
	rmb();

	if (!adapter->up)
		return;

	if (adapter->trigger_reset)
		return;

	if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	budget = adapter->missing_tx_max_queues;

	for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		rx_ring = &adapter->rx_ring[i];

		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
		if (unlikely(rc != 0))
			return;

		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
		if (unlikely(rc != 0))
			return;

		budget--;
		if (budget == 0) {
			i++;
			break;
		}
	}

	adapter->next_monitored_tx_qid = i % adapter->num_queues;
}

/* trigger rx cleanup after 2 consecutive detections */
#define EMPTY_RX_REFILL 2
/* For the rare case where the device runs out of Rx descriptors and the
 * msix handler failed to refill new Rx descriptors (due to a lack of memory
 * for example).
 * This case will lead to a deadlock:
 * The device won't send interrupts since all the new Rx packets will be dropped
 * The msix handler won't allocate new Rx descriptors so the device won't be
 * able to send new packets.
3954efe6ab18SMarcin Wojtas * 3955efe6ab18SMarcin Wojtas * When such a situation is detected - execute rx cleanup task in another thread 3956efe6ab18SMarcin Wojtas */ 3957efe6ab18SMarcin Wojtas static void 3958efe6ab18SMarcin Wojtas check_for_empty_rx_ring(struct ena_adapter *adapter) 3959efe6ab18SMarcin Wojtas { 3960efe6ab18SMarcin Wojtas struct ena_ring *rx_ring; 3961efe6ab18SMarcin Wojtas int i, refill_required; 3962efe6ab18SMarcin Wojtas 3963efe6ab18SMarcin Wojtas if (!adapter->up) 3964efe6ab18SMarcin Wojtas return; 3965efe6ab18SMarcin Wojtas 3966efe6ab18SMarcin Wojtas if (adapter->trigger_reset) 3967efe6ab18SMarcin Wojtas return; 3968efe6ab18SMarcin Wojtas 3969efe6ab18SMarcin Wojtas for (i = 0; i < adapter->num_queues; i++) { 3970efe6ab18SMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 3971efe6ab18SMarcin Wojtas 3972efe6ab18SMarcin Wojtas refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); 3973efe6ab18SMarcin Wojtas if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 3974efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue++; 3975efe6ab18SMarcin Wojtas 3976efe6ab18SMarcin Wojtas if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 3977efe6ab18SMarcin Wojtas counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 3978efe6ab18SMarcin Wojtas 1); 3979efe6ab18SMarcin Wojtas 3980efe6ab18SMarcin Wojtas device_printf(adapter->pdev, 3981efe6ab18SMarcin Wojtas "trigger refill for ring %d\n", i); 3982efe6ab18SMarcin Wojtas 39835cb9db07SMarcin Wojtas taskqueue_enqueue(rx_ring->que->cleanup_tq, 39845cb9db07SMarcin Wojtas &rx_ring->que->cleanup_task); 3985efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3986efe6ab18SMarcin Wojtas } 3987efe6ab18SMarcin Wojtas } else { 3988efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3989efe6ab18SMarcin Wojtas } 3990efe6ab18SMarcin Wojtas } 3991efe6ab18SMarcin Wojtas } 39929b8d05b8SZbigniew Bodek 399340621d71SMarcin Wojtas static void ena_update_hints(struct ena_adapter *adapter, 399440621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints) 399540621d71SMarcin Wojtas { 399640621d71SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 399740621d71SMarcin Wojtas 399840621d71SMarcin Wojtas if (hints->admin_completion_tx_timeout) 399940621d71SMarcin Wojtas ena_dev->admin_queue.completion_timeout = 400040621d71SMarcin Wojtas hints->admin_completion_tx_timeout * 1000; 400140621d71SMarcin Wojtas 400240621d71SMarcin Wojtas if (hints->mmio_read_timeout) 400340621d71SMarcin Wojtas /* convert to usec */ 400440621d71SMarcin Wojtas ena_dev->mmio_read.reg_read_to = 400540621d71SMarcin Wojtas hints->mmio_read_timeout * 1000; 400640621d71SMarcin Wojtas 400740621d71SMarcin Wojtas if (hints->missed_tx_completion_count_threshold_to_reset) 400840621d71SMarcin Wojtas adapter->missing_tx_threshold = 400940621d71SMarcin Wojtas hints->missed_tx_completion_count_threshold_to_reset; 401040621d71SMarcin Wojtas 401140621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout) { 401240621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout == 401340621d71SMarcin Wojtas ENA_HW_HINTS_NO_TIMEOUT) 401440621d71SMarcin Wojtas adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 401540621d71SMarcin Wojtas else 401640621d71SMarcin Wojtas adapter->missing_tx_timeout = 401740621d71SMarcin Wojtas SBT_1MS * hints->missing_tx_completion_timeout; 401840621d71SMarcin Wojtas } 401940621d71SMarcin Wojtas 402040621d71SMarcin Wojtas if (hints->driver_watchdog_timeout) { 402140621d71SMarcin Wojtas if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 402240621d71SMarcin 
Wojtas adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 402340621d71SMarcin Wojtas else 402440621d71SMarcin Wojtas adapter->keep_alive_timeout = 402540621d71SMarcin Wojtas SBT_1MS * hints->driver_watchdog_timeout; 402640621d71SMarcin Wojtas } 402740621d71SMarcin Wojtas } 402840621d71SMarcin Wojtas 40299b8d05b8SZbigniew Bodek static void 40309b8d05b8SZbigniew Bodek ena_timer_service(void *data) 40319b8d05b8SZbigniew Bodek { 40329b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)data; 40339b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info = 40349b8d05b8SZbigniew Bodek adapter->ena_dev->host_attr.host_info; 40359b8d05b8SZbigniew Bodek 40369b8d05b8SZbigniew Bodek check_for_missing_keep_alive(adapter); 40379b8d05b8SZbigniew Bodek 40389b8d05b8SZbigniew Bodek check_for_admin_com_state(adapter); 40399b8d05b8SZbigniew Bodek 4040d12f7bfcSMarcin Wojtas check_for_missing_completions(adapter); 40419b8d05b8SZbigniew Bodek 4042efe6ab18SMarcin Wojtas check_for_empty_rx_ring(adapter); 4043efe6ab18SMarcin Wojtas 40440bdffe59SMarcin Wojtas if (host_info != NULL) 40459b8d05b8SZbigniew Bodek ena_update_host_info(host_info, adapter->ifp); 40469b8d05b8SZbigniew Bodek 40479b8d05b8SZbigniew Bodek if (unlikely(adapter->trigger_reset)) { 40489b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Trigger reset is on\n"); 40499b8d05b8SZbigniew Bodek taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task); 40509b8d05b8SZbigniew Bodek return; 40519b8d05b8SZbigniew Bodek } 40529b8d05b8SZbigniew Bodek 40539b8d05b8SZbigniew Bodek /* 40549b8d05b8SZbigniew Bodek * Schedule another timeout one second from now. 40559b8d05b8SZbigniew Bodek */ 40569b8d05b8SZbigniew Bodek callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0); 40579b8d05b8SZbigniew Bodek } 40589b8d05b8SZbigniew Bodek 40599b8d05b8SZbigniew Bodek static void 40609b8d05b8SZbigniew Bodek ena_reset_task(void *arg, int pending) 40619b8d05b8SZbigniew Bodek { 40629b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx get_feat_ctx; 40639b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 40649b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 40659b8d05b8SZbigniew Bodek bool dev_up; 40669b8d05b8SZbigniew Bodek int rc; 40679b8d05b8SZbigniew Bodek 40689b8d05b8SZbigniew Bodek if (unlikely(!adapter->trigger_reset)) { 40699b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 40709b8d05b8SZbigniew Bodek "device reset scheduled but trigger_reset is off\n"); 40719b8d05b8SZbigniew Bodek return; 40729b8d05b8SZbigniew Bodek } 40739b8d05b8SZbigniew Bodek 40749b8d05b8SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 40759b8d05b8SZbigniew Bodek 40769b8d05b8SZbigniew Bodek callout_drain(&adapter->timer_service); 40779b8d05b8SZbigniew Bodek 40789b8d05b8SZbigniew Bodek dev_up = adapter->up; 40799b8d05b8SZbigniew Bodek 40809b8d05b8SZbigniew Bodek ena_com_set_admin_running_state(ena_dev, false); 40819b8d05b8SZbigniew Bodek ena_down(adapter); 4082a195fab0SMarcin Wojtas ena_free_mgmnt_irq(adapter); 40839b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 40849b8d05b8SZbigniew Bodek ena_com_abort_admin_commands(ena_dev); 40859b8d05b8SZbigniew Bodek ena_com_wait_for_abort_completion(ena_dev); 40869b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 40879b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 40889b8d05b8SZbigniew Bodek 4089a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 40909b8d05b8SZbigniew Bodek adapter->trigger_reset = false; 
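	/*
	 * The admin queue, management IRQ, MSI-X vectors and MMIO read
	 * request have all been torn down above; the reset state is cleared
	 * before the device is brought back up below.
	 */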
40919b8d05b8SZbigniew Bodek 40929b8d05b8SZbigniew Bodek /* Finished destroy part. Restart the device */ 40939b8d05b8SZbigniew Bodek rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, 40949b8d05b8SZbigniew Bodek &adapter->wd_active); 40953f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 40969b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 40979b8d05b8SZbigniew Bodek "ENA device init failed! (err: %d)\n", rc); 40989b8d05b8SZbigniew Bodek goto err_dev_free; 40999b8d05b8SZbigniew Bodek } 41009b8d05b8SZbigniew Bodek 41016064f289SMarcin Wojtas rc = ena_handle_updated_queues(adapter, &get_feat_ctx); 41026064f289SMarcin Wojtas if (unlikely(rc != 0)) 41036064f289SMarcin Wojtas goto err_dev_free; 41046064f289SMarcin Wojtas 41059b8d05b8SZbigniew Bodek rc = ena_enable_msix_and_set_admin_interrupts(adapter, 41069b8d05b8SZbigniew Bodek adapter->num_queues); 41073f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 41089b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Enable MSI-X failed\n"); 41099b8d05b8SZbigniew Bodek goto err_com_free; 41109b8d05b8SZbigniew Bodek } 41119b8d05b8SZbigniew Bodek 41129b8d05b8SZbigniew Bodek /* If the interface was up before the reset bring it up */ 41139b8d05b8SZbigniew Bodek if (dev_up) { 41149b8d05b8SZbigniew Bodek rc = ena_up(adapter); 41153f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 41169b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 41179b8d05b8SZbigniew Bodek "Failed to create I/O queues\n"); 41189b8d05b8SZbigniew Bodek goto err_msix_free; 41199b8d05b8SZbigniew Bodek } 41209b8d05b8SZbigniew Bodek } 41219b8d05b8SZbigniew Bodek 41229b8d05b8SZbigniew Bodek callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 41239b8d05b8SZbigniew Bodek ena_timer_service, (void *)adapter, 0); 41249b8d05b8SZbigniew Bodek 41259b8d05b8SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 41269b8d05b8SZbigniew Bodek 41279b8d05b8SZbigniew Bodek return; 41289b8d05b8SZbigniew Bodek 41299b8d05b8SZbigniew Bodek err_msix_free: 41309b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(adapter); 41319b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 41329b8d05b8SZbigniew Bodek err_com_free: 41339b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 41349b8d05b8SZbigniew Bodek err_dev_free: 41359b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "ENA reset failed!\n"); 41369b8d05b8SZbigniew Bodek adapter->running = false; 41379b8d05b8SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 41389b8d05b8SZbigniew Bodek } 41399b8d05b8SZbigniew Bodek 41409b8d05b8SZbigniew Bodek /** 41419b8d05b8SZbigniew Bodek * ena_attach - Device Initialization Routine 41429b8d05b8SZbigniew Bodek * @pdev: device information struct 41439b8d05b8SZbigniew Bodek * 41449b8d05b8SZbigniew Bodek * Returns 0 on success, otherwise on failure. 41459b8d05b8SZbigniew Bodek * 41469b8d05b8SZbigniew Bodek * ena_attach initializes an adapter identified by a device structure. 41479b8d05b8SZbigniew Bodek * The OS initialization, configuring of the adapter private structure, 41489b8d05b8SZbigniew Bodek * and a hardware reset occur. 
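 * The attach path, roughly: map the register BAR, bring up the admin queue
 * (ena_device_init), choose the Tx placement policy, size the I/O queues,
 * create the DMA tags and rings, enable MSI-X and finally register the
 * network interface.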
41499b8d05b8SZbigniew Bodek **/ 41509b8d05b8SZbigniew Bodek static int 41519b8d05b8SZbigniew Bodek ena_attach(device_t pdev) 41529b8d05b8SZbigniew Bodek { 41539b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx get_feat_ctx; 41544fa9e02dSMarcin Wojtas struct ena_llq_configurations llq_config; 41556064f289SMarcin Wojtas struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 41569b8d05b8SZbigniew Bodek static int version_printed; 41579b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 41589b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = NULL; 41594fa9e02dSMarcin Wojtas const char *queue_type_str; 41609b8d05b8SZbigniew Bodek int io_queue_num; 41614fa9e02dSMarcin Wojtas int rid, rc; 41624fa9e02dSMarcin Wojtas 41639b8d05b8SZbigniew Bodek adapter = device_get_softc(pdev); 41649b8d05b8SZbigniew Bodek adapter->pdev = pdev; 41659b8d05b8SZbigniew Bodek 41669b8d05b8SZbigniew Bodek mtx_init(&adapter->global_mtx, "ENA global mtx", NULL, MTX_DEF); 41679b8d05b8SZbigniew Bodek sx_init(&adapter->ioctl_sx, "ENA ioctl sx"); 41689b8d05b8SZbigniew Bodek 41699b8d05b8SZbigniew Bodek /* Set up the timer service */ 41709b8d05b8SZbigniew Bodek callout_init_mtx(&adapter->timer_service, &adapter->global_mtx, 0); 41719b8d05b8SZbigniew Bodek adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO; 41729b8d05b8SZbigniew Bodek adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO; 41739b8d05b8SZbigniew Bodek adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES; 41749b8d05b8SZbigniew Bodek adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD; 41759b8d05b8SZbigniew Bodek 41769b8d05b8SZbigniew Bodek if (version_printed++ == 0) 41779b8d05b8SZbigniew Bodek device_printf(pdev, "%s\n", ena_version); 41789b8d05b8SZbigniew Bodek 41799b8d05b8SZbigniew Bodek /* Allocate memory for ena_dev structure */ 4180cd5d5804SMarcin Wojtas ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, 4181cd5d5804SMarcin Wojtas M_WAITOK | M_ZERO); 41829b8d05b8SZbigniew Bodek 41839b8d05b8SZbigniew Bodek adapter->ena_dev = ena_dev; 41849b8d05b8SZbigniew Bodek ena_dev->dmadev = pdev; 41854fa9e02dSMarcin Wojtas 41864fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_REG_BAR); 41874fa9e02dSMarcin Wojtas adapter->memory = NULL; 41884fa9e02dSMarcin Wojtas adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 41894fa9e02dSMarcin Wojtas &rid, RF_ACTIVE); 41904fa9e02dSMarcin Wojtas if (unlikely(adapter->registers == NULL)) { 41914fa9e02dSMarcin Wojtas device_printf(pdev, 41924fa9e02dSMarcin Wojtas "unable to allocate bus resource: registers!\n"); 41934fa9e02dSMarcin Wojtas rc = ENOMEM; 41944fa9e02dSMarcin Wojtas goto err_dev_free; 41954fa9e02dSMarcin Wojtas } 41964fa9e02dSMarcin Wojtas 41979b8d05b8SZbigniew Bodek ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, 41989b8d05b8SZbigniew Bodek M_WAITOK | M_ZERO); 41999b8d05b8SZbigniew Bodek 42009b8d05b8SZbigniew Bodek /* Store register resources */ 42019b8d05b8SZbigniew Bodek ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = 42029b8d05b8SZbigniew Bodek rman_get_bustag(adapter->registers); 42039b8d05b8SZbigniew Bodek ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = 42049b8d05b8SZbigniew Bodek rman_get_bushandle(adapter->registers); 42059b8d05b8SZbigniew Bodek 42063f9ed7abSMarcin Wojtas if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) { 42079b8d05b8SZbigniew Bodek device_printf(pdev, "failed to pmap registers bar\n"); 42089b8d05b8SZbigniew Bodek rc = ENXIO; 4209cd5d5804SMarcin Wojtas goto err_bus_free; 42109b8d05b8SZbigniew Bodek } 42119b8d05b8SZbigniew Bodek 42129b8d05b8SZbigniew 
	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;

	/* Device initialization */
	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
	if (unlikely(rc != 0)) {
		device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
		rc = ENXIO;
		goto err_bus_free;
	}

	set_default_llq_configurations(&llq_config);

#if defined(__arm__) || defined(__aarch64__)
	/*
	 * Force LLQ disable, as the driver is not supporting WC enablement
	 * on the ARM architecture. Using LLQ without WC would affect
	 * performance in a negative way.
	 */
	ena_dev->supported_features &= ~(1 << ENA_ADMIN_LLQ);
#endif
	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
	    &llq_config);
	if (unlikely(rc != 0)) {
		device_printf(pdev, "failed to set placement policy\n");
		goto err_com_free;
	}

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		queue_type_str = "Regular";
	else
		queue_type_str = "Low Latency";
	device_printf(pdev, "Placement policy: %s\n", queue_type_str);

	adapter->keep_alive_timestamp = getsbinuptime();

	adapter->tx_offload_cap = get_feat_ctx.offload.tx;

	/* Make sure the interface is not marked as up */
	adapter->up = false;

	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
	    ETHER_ADDR_LEN);

	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
	calc_queue_ctx.pdev = pdev;

	/* calculate IO queue number to create */
	io_queue_num = ena_calc_io_queue_num(adapter, &get_feat_ctx);

	ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n",
	    io_queue_num);
	adapter->num_queues = io_queue_num;

	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
	/* Set the requested Rx ring size */
	adapter->rx_ring_size = ENA_DEFAULT_RING_SIZE;
	/* calculate ring sizes */
	rc = ena_calc_queue_size(adapter, &calc_queue_ctx);
	if (unlikely((rc != 0) || (io_queue_num <= 0))) {
		rc = EFAULT;
		goto err_com_free;
	}
42759b8d05b8SZbigniew Bodek 4276a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 4277a195fab0SMarcin Wojtas 42786064f289SMarcin Wojtas adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 42796064f289SMarcin Wojtas adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 42809b8d05b8SZbigniew Bodek 42816064f289SMarcin Wojtas adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 42826064f289SMarcin Wojtas adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 42836064f289SMarcin Wojtas 42846064f289SMarcin Wojtas adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; 42859b8d05b8SZbigniew Bodek 42869b8d05b8SZbigniew Bodek /* set up dma tags for rx and tx buffers */ 42879b8d05b8SZbigniew Bodek rc = ena_setup_tx_dma_tag(adapter); 42884e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 42894e8acd84SMarcin Wojtas device_printf(pdev, "Failed to create TX DMA tag\n"); 4290cd5d5804SMarcin Wojtas goto err_com_free; 42914e8acd84SMarcin Wojtas } 42929b8d05b8SZbigniew Bodek 42939b8d05b8SZbigniew Bodek rc = ena_setup_rx_dma_tag(adapter); 42944e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 42954e8acd84SMarcin Wojtas device_printf(pdev, "Failed to create RX DMA tag\n"); 4296cd5d5804SMarcin Wojtas goto err_tx_tag_free; 42974e8acd84SMarcin Wojtas } 42989b8d05b8SZbigniew Bodek 42999b8d05b8SZbigniew Bodek /* initialize rings basic information */ 43006064f289SMarcin Wojtas device_printf(pdev, 43016064f289SMarcin Wojtas "Creating %d io queues. Rx queue size: %d, Tx queue size: %d\n", 43026064f289SMarcin Wojtas io_queue_num, 43036064f289SMarcin Wojtas calc_queue_ctx.rx_queue_size, 43046064f289SMarcin Wojtas calc_queue_ctx.tx_queue_size); 4305cd5d5804SMarcin Wojtas ena_init_io_rings(adapter); 43069b8d05b8SZbigniew Bodek 43079b8d05b8SZbigniew Bodek rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); 43083f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 43099b8d05b8SZbigniew Bodek device_printf(pdev, 43109b8d05b8SZbigniew Bodek "Failed to enable and set the admin interrupts\n"); 4311c115a1e2SMarcin Wojtas goto err_io_free; 4312c115a1e2SMarcin Wojtas } 4313c115a1e2SMarcin Wojtas 4314c115a1e2SMarcin Wojtas /* setup network interface */ 4315c115a1e2SMarcin Wojtas rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx); 4316c115a1e2SMarcin Wojtas if (unlikely(rc != 0)) { 4317c115a1e2SMarcin Wojtas device_printf(pdev, "Error with network interface setup\n"); 4318c115a1e2SMarcin Wojtas goto err_msix_free; 43199b8d05b8SZbigniew Bodek } 43209b8d05b8SZbigniew Bodek 4321081169f2SZbigniew Bodek /* Initialize reset task queue */ 4322081169f2SZbigniew Bodek TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); 4323081169f2SZbigniew Bodek adapter->reset_tq = taskqueue_create("ena_reset_enqueue", 4324081169f2SZbigniew Bodek M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); 4325081169f2SZbigniew Bodek taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, 4326081169f2SZbigniew Bodek "%s rstq", device_get_nameunit(adapter->pdev)); 4327081169f2SZbigniew Bodek 43289b8d05b8SZbigniew Bodek /* Initialize statistics */ 43299b8d05b8SZbigniew Bodek ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, 43309b8d05b8SZbigniew Bodek sizeof(struct ena_stats_dev)); 433130217e2dSMarcin Wojtas ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, 433230217e2dSMarcin Wojtas sizeof(struct ena_hw_stats)); 43339b8d05b8SZbigniew Bodek ena_sysctl_add_nodes(adapter); 43349b8d05b8SZbigniew Bodek 43359b8d05b8SZbigniew Bodek /* Tell the stack that the interface is not active */ 
43369b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 43379b8d05b8SZbigniew Bodek 43389b8d05b8SZbigniew Bodek adapter->running = true; 43399b8d05b8SZbigniew Bodek return (0); 43409b8d05b8SZbigniew Bodek 4341c115a1e2SMarcin Wojtas err_msix_free: 4342c115a1e2SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); 4343c115a1e2SMarcin Wojtas ena_free_mgmnt_irq(adapter); 4344c115a1e2SMarcin Wojtas ena_disable_msix(adapter); 4345cd5d5804SMarcin Wojtas err_io_free: 43469b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(adapter); 43479b8d05b8SZbigniew Bodek ena_free_rx_dma_tag(adapter); 4348cd5d5804SMarcin Wojtas err_tx_tag_free: 43499b8d05b8SZbigniew Bodek ena_free_tx_dma_tag(adapter); 4350cd5d5804SMarcin Wojtas err_com_free: 43519b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 43529b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 4353cd5d5804SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 43549b8d05b8SZbigniew Bodek err_bus_free: 43559b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 43569b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 43574fa9e02dSMarcin Wojtas err_dev_free: 43584fa9e02dSMarcin Wojtas free(ena_dev, M_DEVBUF); 4359cd5d5804SMarcin Wojtas 43609b8d05b8SZbigniew Bodek return (rc); 43619b8d05b8SZbigniew Bodek } 43629b8d05b8SZbigniew Bodek 43639b8d05b8SZbigniew Bodek /** 43649b8d05b8SZbigniew Bodek * ena_detach - Device Removal Routine 43659b8d05b8SZbigniew Bodek * @pdev: device information struct 43669b8d05b8SZbigniew Bodek * 43679b8d05b8SZbigniew Bodek * ena_detach is called by the device subsystem to alert the driver 43689b8d05b8SZbigniew Bodek * that it should release a PCI device. 43699b8d05b8SZbigniew Bodek **/ 43709b8d05b8SZbigniew Bodek static int 43719b8d05b8SZbigniew Bodek ena_detach(device_t pdev) 43729b8d05b8SZbigniew Bodek { 43739b8d05b8SZbigniew Bodek struct ena_adapter *adapter = device_get_softc(pdev); 43749b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 43759b8d05b8SZbigniew Bodek int rc; 43769b8d05b8SZbigniew Bodek 43779b8d05b8SZbigniew Bodek /* Make sure VLANS are not using driver */ 43789b8d05b8SZbigniew Bodek if (adapter->ifp->if_vlantrunk != NULL) { 43799b8d05b8SZbigniew Bodek device_printf(adapter->pdev ,"VLAN is in use, detach first\n"); 43809b8d05b8SZbigniew Bodek return (EBUSY); 43819b8d05b8SZbigniew Bodek } 43829b8d05b8SZbigniew Bodek 43839151c55dSMarcin Wojtas ether_ifdetach(adapter->ifp); 43849151c55dSMarcin Wojtas 43859b8d05b8SZbigniew Bodek /* Free reset task and callout */ 43869b8d05b8SZbigniew Bodek callout_drain(&adapter->timer_service); 43879b8d05b8SZbigniew Bodek while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) 43889b8d05b8SZbigniew Bodek taskqueue_drain(adapter->reset_tq, &adapter->reset_task); 43899b8d05b8SZbigniew Bodek taskqueue_free(adapter->reset_tq); 43909b8d05b8SZbigniew Bodek 4391e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 43929b8d05b8SZbigniew Bodek ena_down(adapter); 4393e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 43949b8d05b8SZbigniew Bodek 43959b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(adapter); 43969b8d05b8SZbigniew Bodek 439730217e2dSMarcin Wojtas ena_free_counters((counter_u64_t *)&adapter->hw_stats, 439830217e2dSMarcin Wojtas sizeof(struct ena_hw_stats)); 43999b8d05b8SZbigniew Bodek ena_free_counters((counter_u64_t *)&adapter->dev_stats, 44009b8d05b8SZbigniew Bodek sizeof(struct ena_stats_dev)); 44019b8d05b8SZbigniew Bodek 44023f9ed7abSMarcin 
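	/* RSS state exists only if the deferred RSS init succeeded. */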
Wojtas if (likely(adapter->rss_support)) 44039b8d05b8SZbigniew Bodek ena_com_rss_destroy(ena_dev); 44049b8d05b8SZbigniew Bodek 44059b8d05b8SZbigniew Bodek rc = ena_free_rx_dma_tag(adapter); 44063f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 44079b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 44089b8d05b8SZbigniew Bodek "Unmapped RX DMA tag associations\n"); 44099b8d05b8SZbigniew Bodek 44109b8d05b8SZbigniew Bodek rc = ena_free_tx_dma_tag(adapter); 44113f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 44129b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 44139b8d05b8SZbigniew Bodek "Unmapped TX DMA tag associations\n"); 44149b8d05b8SZbigniew Bodek 44159b8d05b8SZbigniew Bodek /* Reset the device only if the device is running. */ 44169b8d05b8SZbigniew Bodek if (adapter->running) 4417a195fab0SMarcin Wojtas ena_com_dev_reset(ena_dev, adapter->reset_reason); 44189b8d05b8SZbigniew Bodek 44199b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 44209b8d05b8SZbigniew Bodek 44219b8d05b8SZbigniew Bodek ena_free_irqs(adapter); 44229b8d05b8SZbigniew Bodek 4423197f0284SMarcin Wojtas ena_com_abort_admin_commands(ena_dev); 4424197f0284SMarcin Wojtas 4425197f0284SMarcin Wojtas ena_com_wait_for_abort_completion(ena_dev); 4426197f0284SMarcin Wojtas 4427197f0284SMarcin Wojtas ena_com_admin_destroy(ena_dev); 4428197f0284SMarcin Wojtas 44299b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 44309b8d05b8SZbigniew Bodek 44319b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 44329b8d05b8SZbigniew Bodek 44339b8d05b8SZbigniew Bodek mtx_destroy(&adapter->global_mtx); 44349b8d05b8SZbigniew Bodek sx_destroy(&adapter->ioctl_sx); 44359b8d05b8SZbigniew Bodek 44369151c55dSMarcin Wojtas if_free(adapter->ifp); 44379151c55dSMarcin Wojtas 44389b8d05b8SZbigniew Bodek if (ena_dev->bus != NULL) 44399b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 44409b8d05b8SZbigniew Bodek 44419b8d05b8SZbigniew Bodek if (ena_dev != NULL) 44429b8d05b8SZbigniew Bodek free(ena_dev, M_DEVBUF); 44439b8d05b8SZbigniew Bodek 44449b8d05b8SZbigniew Bodek return (bus_generic_detach(pdev)); 44459b8d05b8SZbigniew Bodek } 44469b8d05b8SZbigniew Bodek 44479b8d05b8SZbigniew Bodek /****************************************************************************** 44489b8d05b8SZbigniew Bodek ******************************** AENQ Handlers ******************************* 44499b8d05b8SZbigniew Bodek *****************************************************************************/ 44509b8d05b8SZbigniew Bodek /** 44519b8d05b8SZbigniew Bodek * ena_update_on_link_change: 44529b8d05b8SZbigniew Bodek * Notify the network interface about the change in link status 44539b8d05b8SZbigniew Bodek **/ 44549b8d05b8SZbigniew Bodek static void 44559b8d05b8SZbigniew Bodek ena_update_on_link_change(void *adapter_data, 44569b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 44579b8d05b8SZbigniew Bodek { 44589b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 44599b8d05b8SZbigniew Bodek struct ena_admin_aenq_link_change_desc *aenq_desc; 44609b8d05b8SZbigniew Bodek int status; 44619b8d05b8SZbigniew Bodek if_t ifp; 44629b8d05b8SZbigniew Bodek 44639b8d05b8SZbigniew Bodek aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 44649b8d05b8SZbigniew Bodek ifp = adapter->ifp; 44659b8d05b8SZbigniew Bodek status = aenq_desc->flags & 44669b8d05b8SZbigniew Bodek ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; 44679b8d05b8SZbigniew Bodek 44689b8d05b8SZbigniew Bodek if (status != 0) { 44699b8d05b8SZbigniew Bodek 
device_printf(adapter->pdev, "link is UP\n"); 44709b8d05b8SZbigniew Bodek if_link_state_change(ifp, LINK_STATE_UP); 44719b8d05b8SZbigniew Bodek } else if (status == 0) { 44729b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "link is DOWN\n"); 44739b8d05b8SZbigniew Bodek if_link_state_change(ifp, LINK_STATE_DOWN); 44749b8d05b8SZbigniew Bodek } else { 44759b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "invalid value recvd\n"); 44769b8d05b8SZbigniew Bodek BUG(); 44779b8d05b8SZbigniew Bodek } 44789b8d05b8SZbigniew Bodek 44799b8d05b8SZbigniew Bodek adapter->link_status = status; 44809b8d05b8SZbigniew Bodek } 44819b8d05b8SZbigniew Bodek 448240621d71SMarcin Wojtas static void ena_notification(void *adapter_data, 448340621d71SMarcin Wojtas struct ena_admin_aenq_entry *aenq_e) 448440621d71SMarcin Wojtas { 448540621d71SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 448640621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints; 448740621d71SMarcin Wojtas 448840621d71SMarcin Wojtas ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, 448940621d71SMarcin Wojtas "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, 449040621d71SMarcin Wojtas ENA_ADMIN_NOTIFICATION); 449140621d71SMarcin Wojtas 449240621d71SMarcin Wojtas switch (aenq_e->aenq_common_desc.syndrom) { 449340621d71SMarcin Wojtas case ENA_ADMIN_UPDATE_HINTS: 449440621d71SMarcin Wojtas hints = 449540621d71SMarcin Wojtas (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); 449640621d71SMarcin Wojtas ena_update_hints(adapter, hints); 449740621d71SMarcin Wojtas break; 449840621d71SMarcin Wojtas default: 449940621d71SMarcin Wojtas device_printf(adapter->pdev, 450040621d71SMarcin Wojtas "Invalid aenq notification link state %d\n", 450140621d71SMarcin Wojtas aenq_e->aenq_common_desc.syndrom); 450240621d71SMarcin Wojtas } 450340621d71SMarcin Wojtas } 450440621d71SMarcin Wojtas 45059b8d05b8SZbigniew Bodek /** 45069b8d05b8SZbigniew Bodek * This handler will called for unknown event group or unimplemented handlers 45079b8d05b8SZbigniew Bodek **/ 45089b8d05b8SZbigniew Bodek static void 4509e6de9a83SMarcin Wojtas unimplemented_aenq_handler(void *adapter_data, 45109b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 45119b8d05b8SZbigniew Bodek { 4512e6de9a83SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 4513e6de9a83SMarcin Wojtas 4514e6de9a83SMarcin Wojtas device_printf(adapter->pdev, 4515e6de9a83SMarcin Wojtas "Unknown event was received or event with unimplemented handler\n"); 45169b8d05b8SZbigniew Bodek } 45179b8d05b8SZbigniew Bodek 45189b8d05b8SZbigniew Bodek static struct ena_aenq_handlers aenq_handlers = { 45199b8d05b8SZbigniew Bodek .handlers = { 45209b8d05b8SZbigniew Bodek [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 452140621d71SMarcin Wojtas [ENA_ADMIN_NOTIFICATION] = ena_notification, 45229b8d05b8SZbigniew Bodek [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, 45239b8d05b8SZbigniew Bodek }, 45249b8d05b8SZbigniew Bodek .unimplemented_handler = unimplemented_aenq_handler 45259b8d05b8SZbigniew Bodek }; 45269b8d05b8SZbigniew Bodek 45279b8d05b8SZbigniew Bodek /********************************************************************* 45289b8d05b8SZbigniew Bodek * FreeBSD Device Interface Entry Points 45299b8d05b8SZbigniew Bodek *********************************************************************/ 45309b8d05b8SZbigniew Bodek 45319b8d05b8SZbigniew Bodek static device_method_t ena_methods[] = { 45329b8d05b8SZbigniew Bodek /* Device 
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ena_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ena_probe),
	DEVMETHOD(device_attach, ena_attach),
	DEVMETHOD(device_detach, ena_detach),
	DEVMETHOD_END
};

static driver_t ena_driver = {
	"ena", ena_methods, sizeof(struct ena_adapter),
};

devclass_t ena_devclass;
DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
    nitems(ena_vendor_info_array) - 1);
MODULE_DEPEND(ena, pci, 1, 1, 1);
MODULE_DEPEND(ena, ether, 1, 1, 1);

/*********************************************************************/
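/*
 * DRIVER_MODULE() hooks the driver into the PCI bus, and MODULE_PNP_INFO()
 * exports the vendor/device ID table (all but its final terminating entry,
 * hence the "- 1") so that devmatch(8) can load the module automatically
 * when a matching adapter is found.  The module, typically built as
 * if_ena.ko, can also be loaded manually:
 *
 *	# kldload if_ena
 */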