19b8d05b8SZbigniew Bodek /*- 29b8d05b8SZbigniew Bodek * BSD LICENSE 39b8d05b8SZbigniew Bodek * 49d0073e4SMarcin Wojtas * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. 59b8d05b8SZbigniew Bodek * All rights reserved. 69b8d05b8SZbigniew Bodek * 79b8d05b8SZbigniew Bodek * Redistribution and use in source and binary forms, with or without 89b8d05b8SZbigniew Bodek * modification, are permitted provided that the following conditions 99b8d05b8SZbigniew Bodek * are met: 109b8d05b8SZbigniew Bodek * 119b8d05b8SZbigniew Bodek * 1. Redistributions of source code must retain the above copyright 129b8d05b8SZbigniew Bodek * notice, this list of conditions and the following disclaimer. 139b8d05b8SZbigniew Bodek * 149b8d05b8SZbigniew Bodek * 2. Redistributions in binary form must reproduce the above copyright 159b8d05b8SZbigniew Bodek * notice, this list of conditions and the following disclaimer in the 169b8d05b8SZbigniew Bodek * documentation and/or other materials provided with the distribution. 179b8d05b8SZbigniew Bodek * 189b8d05b8SZbigniew Bodek * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 199b8d05b8SZbigniew Bodek * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 209b8d05b8SZbigniew Bodek * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 219b8d05b8SZbigniew Bodek * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 229b8d05b8SZbigniew Bodek * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 239b8d05b8SZbigniew Bodek * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 249b8d05b8SZbigniew Bodek * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 259b8d05b8SZbigniew Bodek * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 269b8d05b8SZbigniew Bodek * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 279b8d05b8SZbigniew Bodek * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 289b8d05b8SZbigniew Bodek * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 299b8d05b8SZbigniew Bodek */ 309b8d05b8SZbigniew Bodek #include <sys/cdefs.h> 319b8d05b8SZbigniew Bodek __FBSDID("$FreeBSD$"); 329b8d05b8SZbigniew Bodek 339b8d05b8SZbigniew Bodek #include <sys/param.h> 349b8d05b8SZbigniew Bodek #include <sys/systm.h> 359b8d05b8SZbigniew Bodek #include <sys/bus.h> 369b8d05b8SZbigniew Bodek #include <sys/endian.h> 379b8d05b8SZbigniew Bodek #include <sys/kernel.h> 389b8d05b8SZbigniew Bodek #include <sys/kthread.h> 399b8d05b8SZbigniew Bodek #include <sys/malloc.h> 409b8d05b8SZbigniew Bodek #include <sys/mbuf.h> 419b8d05b8SZbigniew Bodek #include <sys/module.h> 429b8d05b8SZbigniew Bodek #include <sys/rman.h> 439b8d05b8SZbigniew Bodek #include <sys/smp.h> 449b8d05b8SZbigniew Bodek #include <sys/socket.h> 459b8d05b8SZbigniew Bodek #include <sys/sockio.h> 469b8d05b8SZbigniew Bodek #include <sys/sysctl.h> 479b8d05b8SZbigniew Bodek #include <sys/taskqueue.h> 489b8d05b8SZbigniew Bodek #include <sys/time.h> 499b8d05b8SZbigniew Bodek #include <sys/eventhandler.h> 509b8d05b8SZbigniew Bodek 519b8d05b8SZbigniew Bodek #include <machine/bus.h> 529b8d05b8SZbigniew Bodek #include <machine/resource.h> 539b8d05b8SZbigniew Bodek #include <machine/in_cksum.h> 549b8d05b8SZbigniew Bodek 559b8d05b8SZbigniew Bodek #include <net/bpf.h> 569b8d05b8SZbigniew Bodek #include <net/ethernet.h> 579b8d05b8SZbigniew Bodek #include <net/if.h> 589b8d05b8SZbigniew Bodek #include <net/if_var.h> 599b8d05b8SZbigniew Bodek #include <net/if_arp.h> 609b8d05b8SZbigniew Bodek #include <net/if_dl.h> 619b8d05b8SZbigniew Bodek #include <net/if_media.h> 629b8d05b8SZbigniew Bodek #include <net/if_types.h> 639b8d05b8SZbigniew Bodek #include <net/if_vlan_var.h> 649b8d05b8SZbigniew Bodek 659b8d05b8SZbigniew Bodek #include <netinet/in_systm.h> 669b8d05b8SZbigniew Bodek #include <netinet/in.h> 679b8d05b8SZbigniew Bodek #include <netinet/if_ether.h> 689b8d05b8SZbigniew Bodek #include <netinet/ip.h> 699b8d05b8SZbigniew Bodek #include <netinet/ip6.h> 709b8d05b8SZbigniew Bodek #include <netinet/tcp.h> 719b8d05b8SZbigniew Bodek #include <netinet/udp.h> 729b8d05b8SZbigniew Bodek 739b8d05b8SZbigniew Bodek #include <dev/pci/pcivar.h> 749b8d05b8SZbigniew Bodek #include <dev/pci/pcireg.h> 759b8d05b8SZbigniew Bodek 764fa9e02dSMarcin Wojtas #include <vm/vm.h> 774fa9e02dSMarcin Wojtas #include <vm/pmap.h> 784fa9e02dSMarcin Wojtas 799b8d05b8SZbigniew Bodek #include "ena.h" 809b8d05b8SZbigniew Bodek #include "ena_sysctl.h" 819b8d05b8SZbigniew Bodek 829b8d05b8SZbigniew Bodek /********************************************************* 839b8d05b8SZbigniew Bodek * Function prototypes 849b8d05b8SZbigniew Bodek *********************************************************/ 859b8d05b8SZbigniew Bodek static int ena_probe(device_t); 869b8d05b8SZbigniew Bodek static void ena_intr_msix_mgmnt(void *); 879b8d05b8SZbigniew Bodek static void ena_free_pci_resources(struct ena_adapter *); 889b8d05b8SZbigniew Bodek static int ena_change_mtu(if_t, int); 899b8d05b8SZbigniew Bodek static inline void ena_alloc_counters(counter_u64_t *, int); 909b8d05b8SZbigniew Bodek static inline void ena_free_counters(counter_u64_t *, int); 919b8d05b8SZbigniew Bodek static inline void ena_reset_counters(counter_u64_t *, int); 929b8d05b8SZbigniew Bodek static void ena_init_io_rings_common(struct ena_adapter *, 939b8d05b8SZbigniew Bodek struct ena_ring *, uint16_t); 94cd5d5804SMarcin Wojtas static void ena_init_io_rings(struct ena_adapter *); 959b8d05b8SZbigniew Bodek static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int); 969b8d05b8SZbigniew Bodek static void ena_free_all_io_rings_resources(struct ena_adapter *); 979b8d05b8SZbigniew Bodek static int ena_setup_tx_dma_tag(struct ena_adapter *); 989b8d05b8SZbigniew Bodek static int ena_free_tx_dma_tag(struct ena_adapter *); 999b8d05b8SZbigniew Bodek static int ena_setup_rx_dma_tag(struct ena_adapter *); 1009b8d05b8SZbigniew Bodek static int ena_free_rx_dma_tag(struct ena_adapter *); 1019b8d05b8SZbigniew Bodek static int ena_setup_tx_resources(struct ena_adapter *, int); 1029b8d05b8SZbigniew Bodek static void ena_free_tx_resources(struct ena_adapter *, int); 1039b8d05b8SZbigniew Bodek static int ena_setup_all_tx_resources(struct ena_adapter *); 1049b8d05b8SZbigniew Bodek static void ena_free_all_tx_resources(struct ena_adapter *); 10543fefd16SMarcin Wojtas static inline int validate_rx_req_id(struct ena_ring *, uint16_t); 1069b8d05b8SZbigniew Bodek static int ena_setup_rx_resources(struct ena_adapter *, unsigned int); 1079b8d05b8SZbigniew Bodek static void ena_free_rx_resources(struct ena_adapter *, unsigned int); 1089b8d05b8SZbigniew Bodek static int ena_setup_all_rx_resources(struct ena_adapter *); 1099b8d05b8SZbigniew Bodek static void ena_free_all_rx_resources(struct ena_adapter *); 1109b8d05b8SZbigniew Bodek static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *, 1119b8d05b8SZbigniew Bodek struct ena_rx_buffer *); 1129b8d05b8SZbigniew Bodek static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *, 1139b8d05b8SZbigniew Bodek struct ena_rx_buffer *); 1149b8d05b8SZbigniew Bodek static int ena_refill_rx_bufs(struct ena_ring *, uint32_t); 1159b8d05b8SZbigniew Bodek static void ena_free_rx_bufs(struct ena_adapter *, unsigned int); 1169b8d05b8SZbigniew Bodek static void ena_refill_all_rx_bufs(struct ena_adapter *); 1179b8d05b8SZbigniew Bodek static void ena_free_all_rx_bufs(struct ena_adapter *); 1189b8d05b8SZbigniew Bodek static void ena_free_tx_bufs(struct ena_adapter *, unsigned int); 1199b8d05b8SZbigniew Bodek static void ena_free_all_tx_bufs(struct ena_adapter *); 1209b8d05b8SZbigniew Bodek static void ena_destroy_all_tx_queues(struct ena_adapter *); 1219b8d05b8SZbigniew Bodek static void ena_destroy_all_rx_queues(struct ena_adapter *); 1229b8d05b8SZbigniew Bodek static void ena_destroy_all_io_queues(struct ena_adapter *); 1239b8d05b8SZbigniew Bodek static int ena_create_io_queues(struct ena_adapter *); 1249b8d05b8SZbigniew Bodek static int ena_tx_cleanup(struct ena_ring *); 1259b8d05b8SZbigniew Bodek static int ena_rx_cleanup(struct ena_ring *); 126fceb9387SMarcin Wojtas static inline int validate_tx_req_id(struct ena_ring *, uint16_t); 1279b8d05b8SZbigniew Bodek static void ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *, 1289b8d05b8SZbigniew Bodek struct mbuf *); 1299b8d05b8SZbigniew Bodek static struct mbuf* ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *, 1309b8d05b8SZbigniew Bodek struct ena_com_rx_ctx *, uint16_t *); 1319b8d05b8SZbigniew Bodek static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *, 1329b8d05b8SZbigniew Bodek struct mbuf *); 1335cb9db07SMarcin Wojtas static void ena_cleanup(void *arg, int pending); 1345cb9db07SMarcin Wojtas static int ena_handle_msix(void *); 1359b8d05b8SZbigniew Bodek static int ena_enable_msix(struct ena_adapter *); 1369b8d05b8SZbigniew Bodek static void ena_setup_mgmnt_intr(struct ena_adapter *); 13777958fcdSMarcin Wojtas static int ena_setup_io_intr(struct ena_adapter *); 1389b8d05b8SZbigniew Bodek static int ena_request_mgmnt_irq(struct ena_adapter *); 1399b8d05b8SZbigniew Bodek static int ena_request_io_irq(struct ena_adapter *); 1409b8d05b8SZbigniew Bodek static void ena_free_mgmnt_irq(struct ena_adapter *); 1419b8d05b8SZbigniew Bodek static void ena_free_io_irq(struct ena_adapter *); 1429b8d05b8SZbigniew Bodek static void ena_free_irqs(struct ena_adapter*); 1439b8d05b8SZbigniew Bodek static void ena_disable_msix(struct ena_adapter *); 1449b8d05b8SZbigniew Bodek static void ena_unmask_all_io_irqs(struct ena_adapter *); 1459b8d05b8SZbigniew Bodek static int ena_rss_configure(struct ena_adapter *); 1469b8d05b8SZbigniew Bodek static int ena_up_complete(struct ena_adapter *); 1479b8d05b8SZbigniew Bodek static int ena_up(struct ena_adapter *); 1489b8d05b8SZbigniew Bodek static void ena_down(struct ena_adapter *); 1499b8d05b8SZbigniew Bodek static uint64_t ena_get_counter(if_t, ift_counter); 1509b8d05b8SZbigniew Bodek static int ena_media_change(if_t); 1519b8d05b8SZbigniew Bodek static void ena_media_status(if_t, struct ifmediareq *); 1529b8d05b8SZbigniew Bodek static void ena_init(void *); 1539b8d05b8SZbigniew Bodek static int ena_ioctl(if_t, u_long, caddr_t); 1549b8d05b8SZbigniew Bodek static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *); 1559b8d05b8SZbigniew Bodek static void ena_update_host_info(struct ena_admin_host_info *, if_t); 1569b8d05b8SZbigniew Bodek static void ena_update_hwassist(struct ena_adapter *); 1579b8d05b8SZbigniew Bodek static int ena_setup_ifnet(device_t, struct ena_adapter *, 1589b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *); 1599b8d05b8SZbigniew Bodek static void ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *); 1601b069f1cSZbigniew Bodek static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, 1611b069f1cSZbigniew Bodek struct mbuf **mbuf); 1624fa9e02dSMarcin Wojtas static void ena_dmamap_llq(void *, bus_dma_segment_t *, int, int); 1631e9fb899SZbigniew Bodek static int ena_xmit_mbuf(struct ena_ring *, struct mbuf **); 1649b8d05b8SZbigniew Bodek static void ena_start_xmit(struct ena_ring *); 1659b8d05b8SZbigniew Bodek static int ena_mq_start(if_t, struct mbuf *); 1669b8d05b8SZbigniew Bodek static void ena_deferred_mq_start(void *, int); 1679b8d05b8SZbigniew Bodek static void ena_qflush(if_t); 1684fa9e02dSMarcin Wojtas static int ena_enable_wc(struct resource *); 1694fa9e02dSMarcin Wojtas static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *, 1704fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *); 1719b8d05b8SZbigniew Bodek static int ena_calc_io_queue_num(struct ena_adapter *, 1729b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *); 1736064f289SMarcin Wojtas static int ena_calc_queue_size(struct ena_adapter *, 1746064f289SMarcin Wojtas struct ena_calc_queue_size_ctx *); 1756064f289SMarcin Wojtas static int ena_handle_updated_queues(struct ena_adapter *, 1766064f289SMarcin Wojtas struct ena_com_dev_get_features_ctx *); 1779b8d05b8SZbigniew Bodek static int ena_rss_init_default(struct ena_adapter *); 1789b8d05b8SZbigniew Bodek static void ena_rss_init_default_deferred(void *); 17946021271SMarcin Wojtas static void ena_config_host_info(struct ena_com_dev *, device_t); 1809b8d05b8SZbigniew Bodek static int ena_attach(device_t); 1819b8d05b8SZbigniew Bodek static int ena_detach(device_t); 1829b8d05b8SZbigniew Bodek static int ena_device_init(struct ena_adapter *, device_t, 1839b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *, int *); 1849b8d05b8SZbigniew Bodek static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *, 1859b8d05b8SZbigniew Bodek int); 1869b8d05b8SZbigniew Bodek static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *); 1879b8d05b8SZbigniew Bodek static void unimplemented_aenq_handler(void *, 1889b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *); 1899b8d05b8SZbigniew Bodek static void ena_timer_service(void *); 1909b8d05b8SZbigniew Bodek 1919b8d05b8SZbigniew Bodek static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION; 1929b8d05b8SZbigniew Bodek 1939b8d05b8SZbigniew Bodek static ena_vendor_info_t ena_vendor_info_array[] = { 1949b8d05b8SZbigniew Bodek { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0}, 1959b8d05b8SZbigniew Bodek { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0}, 1969b8d05b8SZbigniew Bodek { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0}, 1979b8d05b8SZbigniew Bodek { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0}, 1989b8d05b8SZbigniew Bodek /* Last entry */ 1999b8d05b8SZbigniew Bodek { 0, 0, 0 } 2009b8d05b8SZbigniew Bodek }; 2019b8d05b8SZbigniew Bodek 2029b8d05b8SZbigniew Bodek /* 2039b8d05b8SZbigniew Bodek * Contains pointers to event handlers, e.g. link state chage. 2049b8d05b8SZbigniew Bodek */ 2059b8d05b8SZbigniew Bodek static struct ena_aenq_handlers aenq_handlers; 2069b8d05b8SZbigniew Bodek 2079b8d05b8SZbigniew Bodek void 2089b8d05b8SZbigniew Bodek ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2099b8d05b8SZbigniew Bodek { 2100bdffe59SMarcin Wojtas if (error != 0) 2119b8d05b8SZbigniew Bodek return; 2129b8d05b8SZbigniew Bodek *(bus_addr_t *) arg = segs[0].ds_addr; 2139b8d05b8SZbigniew Bodek } 2149b8d05b8SZbigniew Bodek 2159b8d05b8SZbigniew Bodek int 2169b8d05b8SZbigniew Bodek ena_dma_alloc(device_t dmadev, bus_size_t size, 2179b8d05b8SZbigniew Bodek ena_mem_handle_t *dma , int mapflags) 2189b8d05b8SZbigniew Bodek { 2199b8d05b8SZbigniew Bodek struct ena_adapter* adapter = device_get_softc(dmadev); 2200bdffe59SMarcin Wojtas uint32_t maxsize; 2210bdffe59SMarcin Wojtas uint64_t dma_space_addr; 2229b8d05b8SZbigniew Bodek int error; 2239b8d05b8SZbigniew Bodek 2240bdffe59SMarcin Wojtas maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; 2250bdffe59SMarcin Wojtas 2260bdffe59SMarcin Wojtas dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width); 2273f9ed7abSMarcin Wojtas if (unlikely(dma_space_addr == 0)) 2289b8d05b8SZbigniew Bodek dma_space_addr = BUS_SPACE_MAXADDR; 2290bdffe59SMarcin Wojtas 2309b8d05b8SZbigniew Bodek error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */ 2319b8d05b8SZbigniew Bodek 8, 0, /* alignment, bounds */ 2328a573700SZbigniew Bodek dma_space_addr, /* lowaddr of exclusion window */ 2338a573700SZbigniew Bodek BUS_SPACE_MAXADDR,/* highaddr of exclusion window */ 2349b8d05b8SZbigniew Bodek NULL, NULL, /* filter, filterarg */ 2359b8d05b8SZbigniew Bodek maxsize, /* maxsize */ 2369b8d05b8SZbigniew Bodek 1, /* nsegments */ 2379b8d05b8SZbigniew Bodek maxsize, /* maxsegsize */ 2389b8d05b8SZbigniew Bodek BUS_DMA_ALLOCNOW, /* flags */ 2399b8d05b8SZbigniew Bodek NULL, /* lockfunc */ 2409b8d05b8SZbigniew Bodek NULL, /* lockarg */ 2419b8d05b8SZbigniew Bodek &dma->tag); 2423f9ed7abSMarcin Wojtas if (unlikely(error != 0)) { 2434e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, "bus_dma_tag_create failed: %d\n", error); 2449b8d05b8SZbigniew Bodek goto fail_tag; 2459b8d05b8SZbigniew Bodek } 2469b8d05b8SZbigniew Bodek 2479b8d05b8SZbigniew Bodek error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr, 2489b8d05b8SZbigniew Bodek BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); 2493f9ed7abSMarcin Wojtas if (unlikely(error != 0)) { 2504e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n", 2514e8acd84SMarcin Wojtas (uintmax_t)size, error); 2529b8d05b8SZbigniew Bodek goto fail_map_create; 2539b8d05b8SZbigniew Bodek } 2549b8d05b8SZbigniew Bodek 2559b8d05b8SZbigniew Bodek dma->paddr = 0; 2569b8d05b8SZbigniew Bodek error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, 2579b8d05b8SZbigniew Bodek size, ena_dmamap_callback, &dma->paddr, mapflags); 2583f9ed7abSMarcin Wojtas if (unlikely((error != 0) || (dma->paddr == 0))) { 2594e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error); 2609b8d05b8SZbigniew Bodek goto fail_map_load; 2619b8d05b8SZbigniew Bodek } 2629b8d05b8SZbigniew Bodek 263e8073738SMarcin Wojtas bus_dmamap_sync(dma->tag, dma->map, 264e8073738SMarcin Wojtas BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 265e8073738SMarcin Wojtas 2669b8d05b8SZbigniew Bodek return (0); 2679b8d05b8SZbigniew Bodek 2689b8d05b8SZbigniew Bodek fail_map_load: 2699b8d05b8SZbigniew Bodek bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 2707d2544e6SMarcin Wojtas fail_map_create: 2719b8d05b8SZbigniew Bodek bus_dma_tag_destroy(dma->tag); 2729b8d05b8SZbigniew Bodek fail_tag: 2739b8d05b8SZbigniew Bodek dma->tag = NULL; 2745b14f92eSMarcin Wojtas dma->vaddr = NULL; 2755b14f92eSMarcin Wojtas dma->paddr = 0; 2769b8d05b8SZbigniew Bodek 2779b8d05b8SZbigniew Bodek return (error); 2789b8d05b8SZbigniew Bodek } 2799b8d05b8SZbigniew Bodek 2809b8d05b8SZbigniew Bodek static void 2819b8d05b8SZbigniew Bodek ena_free_pci_resources(struct ena_adapter *adapter) 2829b8d05b8SZbigniew Bodek { 2839b8d05b8SZbigniew Bodek device_t pdev = adapter->pdev; 2849b8d05b8SZbigniew Bodek 2859b8d05b8SZbigniew Bodek if (adapter->memory != NULL) { 2869b8d05b8SZbigniew Bodek bus_release_resource(pdev, SYS_RES_MEMORY, 2879b8d05b8SZbigniew Bodek PCIR_BAR(ENA_MEM_BAR), adapter->memory); 2889b8d05b8SZbigniew Bodek } 2899b8d05b8SZbigniew Bodek 2909b8d05b8SZbigniew Bodek if (adapter->registers != NULL) { 2919b8d05b8SZbigniew Bodek bus_release_resource(pdev, SYS_RES_MEMORY, 2929b8d05b8SZbigniew Bodek PCIR_BAR(ENA_REG_BAR), adapter->registers); 2939b8d05b8SZbigniew Bodek } 2949b8d05b8SZbigniew Bodek } 2959b8d05b8SZbigniew Bodek 2969b8d05b8SZbigniew Bodek static int 2979b8d05b8SZbigniew Bodek ena_probe(device_t dev) 2989b8d05b8SZbigniew Bodek { 2999b8d05b8SZbigniew Bodek ena_vendor_info_t *ent; 3009b8d05b8SZbigniew Bodek char adapter_name[60]; 3019b8d05b8SZbigniew Bodek uint16_t pci_vendor_id = 0; 3029b8d05b8SZbigniew Bodek uint16_t pci_device_id = 0; 3039b8d05b8SZbigniew Bodek 3049b8d05b8SZbigniew Bodek pci_vendor_id = pci_get_vendor(dev); 3059b8d05b8SZbigniew Bodek pci_device_id = pci_get_device(dev); 3069b8d05b8SZbigniew Bodek 3079b8d05b8SZbigniew Bodek ent = ena_vendor_info_array; 3089b8d05b8SZbigniew Bodek while (ent->vendor_id != 0) { 3099b8d05b8SZbigniew Bodek if ((pci_vendor_id == ent->vendor_id) && 3109b8d05b8SZbigniew Bodek (pci_device_id == ent->device_id)) { 31130425f93SMarcin Wojtas ena_trace(ENA_DBG, "vendor=%x device=%x\n", 3129b8d05b8SZbigniew Bodek pci_vendor_id, pci_device_id); 3139b8d05b8SZbigniew Bodek 3149b8d05b8SZbigniew Bodek sprintf(adapter_name, DEVICE_DESC); 3159b8d05b8SZbigniew Bodek device_set_desc_copy(dev, adapter_name); 3169b8d05b8SZbigniew Bodek return (BUS_PROBE_DEFAULT); 3179b8d05b8SZbigniew Bodek } 3189b8d05b8SZbigniew Bodek 3199b8d05b8SZbigniew Bodek ent++; 3209b8d05b8SZbigniew Bodek 3219b8d05b8SZbigniew Bodek } 3229b8d05b8SZbigniew Bodek 3239b8d05b8SZbigniew Bodek return (ENXIO); 3249b8d05b8SZbigniew Bodek } 3259b8d05b8SZbigniew Bodek 3269b8d05b8SZbigniew Bodek static int 3279b8d05b8SZbigniew Bodek ena_change_mtu(if_t ifp, int new_mtu) 3289b8d05b8SZbigniew Bodek { 3299b8d05b8SZbigniew Bodek struct ena_adapter *adapter = if_getsoftc(ifp); 3303cfadb28SMarcin Wojtas int rc; 3319b8d05b8SZbigniew Bodek 3323cfadb28SMarcin Wojtas if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) { 3339b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Invalid MTU setting. " 3343cfadb28SMarcin Wojtas "new_mtu: %d max mtu: %d min mtu: %d\n", 3353cfadb28SMarcin Wojtas new_mtu, adapter->max_mtu, ENA_MIN_MTU); 3363cfadb28SMarcin Wojtas return (EINVAL); 3379b8d05b8SZbigniew Bodek } 3389b8d05b8SZbigniew Bodek 3399b8d05b8SZbigniew Bodek rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); 3403cfadb28SMarcin Wojtas if (likely(rc == 0)) { 3413cfadb28SMarcin Wojtas ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu); 3423cfadb28SMarcin Wojtas if_setmtu(ifp, new_mtu); 3433cfadb28SMarcin Wojtas } else { 3443cfadb28SMarcin Wojtas device_printf(adapter->pdev, "Failed to set MTU to %d\n", 3453cfadb28SMarcin Wojtas new_mtu); 3463cfadb28SMarcin Wojtas } 3479b8d05b8SZbigniew Bodek 3483cfadb28SMarcin Wojtas return (rc); 3499b8d05b8SZbigniew Bodek } 3509b8d05b8SZbigniew Bodek 3519b8d05b8SZbigniew Bodek static inline void 3529b8d05b8SZbigniew Bodek ena_alloc_counters(counter_u64_t *begin, int size) 3539b8d05b8SZbigniew Bodek { 3549b8d05b8SZbigniew Bodek counter_u64_t *end = (counter_u64_t *)((char *)begin + size); 3559b8d05b8SZbigniew Bodek 3569b8d05b8SZbigniew Bodek for (; begin < end; ++begin) 3579b8d05b8SZbigniew Bodek *begin = counter_u64_alloc(M_WAITOK); 3589b8d05b8SZbigniew Bodek } 3599b8d05b8SZbigniew Bodek 3609b8d05b8SZbigniew Bodek static inline void 3619b8d05b8SZbigniew Bodek ena_free_counters(counter_u64_t *begin, int size) 3629b8d05b8SZbigniew Bodek { 3639b8d05b8SZbigniew Bodek counter_u64_t *end = (counter_u64_t *)((char *)begin + size); 3649b8d05b8SZbigniew Bodek 3659b8d05b8SZbigniew Bodek for (; begin < end; ++begin) 3669b8d05b8SZbigniew Bodek counter_u64_free(*begin); 3679b8d05b8SZbigniew Bodek } 3689b8d05b8SZbigniew Bodek 3699b8d05b8SZbigniew Bodek static inline void 3709b8d05b8SZbigniew Bodek ena_reset_counters(counter_u64_t *begin, int size) 3719b8d05b8SZbigniew Bodek { 3729b8d05b8SZbigniew Bodek counter_u64_t *end = (counter_u64_t *)((char *)begin + size); 3739b8d05b8SZbigniew Bodek 3749b8d05b8SZbigniew Bodek for (; begin < end; ++begin) 3759b8d05b8SZbigniew Bodek counter_u64_zero(*begin); 3769b8d05b8SZbigniew Bodek } 3779b8d05b8SZbigniew Bodek 3789b8d05b8SZbigniew Bodek static void 3799b8d05b8SZbigniew Bodek ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, 3809b8d05b8SZbigniew Bodek uint16_t qid) 3819b8d05b8SZbigniew Bodek { 3829b8d05b8SZbigniew Bodek 3839b8d05b8SZbigniew Bodek ring->qid = qid; 3849b8d05b8SZbigniew Bodek ring->adapter = adapter; 3859b8d05b8SZbigniew Bodek ring->ena_dev = adapter->ena_dev; 386d12f7bfcSMarcin Wojtas ring->first_interrupt = false; 387d12f7bfcSMarcin Wojtas ring->no_interrupt_event_cnt = 0; 3889b8d05b8SZbigniew Bodek } 3899b8d05b8SZbigniew Bodek 390cd5d5804SMarcin Wojtas static void 3919b8d05b8SZbigniew Bodek ena_init_io_rings(struct ena_adapter *adapter) 3929b8d05b8SZbigniew Bodek { 3939b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev; 3949b8d05b8SZbigniew Bodek struct ena_ring *txr, *rxr; 3959b8d05b8SZbigniew Bodek struct ena_que *que; 3969b8d05b8SZbigniew Bodek int i; 3979b8d05b8SZbigniew Bodek 3989b8d05b8SZbigniew Bodek ena_dev = adapter->ena_dev; 3999b8d05b8SZbigniew Bodek 4009b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 4019b8d05b8SZbigniew Bodek txr = &adapter->tx_ring[i]; 4029b8d05b8SZbigniew Bodek rxr = &adapter->rx_ring[i]; 4039b8d05b8SZbigniew Bodek 4049b8d05b8SZbigniew Bodek /* TX/RX common ring state */ 4059b8d05b8SZbigniew Bodek ena_init_io_rings_common(adapter, txr, i); 4069b8d05b8SZbigniew Bodek ena_init_io_rings_common(adapter, rxr, i); 4079b8d05b8SZbigniew Bodek 4089b8d05b8SZbigniew Bodek /* TX specific ring state */ 4099b8d05b8SZbigniew Bodek txr->ring_size = adapter->tx_ring_size; 4109b8d05b8SZbigniew Bodek txr->tx_max_header_size = ena_dev->tx_max_header_size; 4119b8d05b8SZbigniew Bodek txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; 4129b8d05b8SZbigniew Bodek txr->smoothed_interval = 4139b8d05b8SZbigniew Bodek ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); 4149b8d05b8SZbigniew Bodek 4159b8d05b8SZbigniew Bodek /* Allocate a buf ring */ 4166064f289SMarcin Wojtas txr->buf_ring_size = adapter->buf_ring_size; 4176064f289SMarcin Wojtas txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, 4189b8d05b8SZbigniew Bodek M_WAITOK, &txr->ring_mtx); 4199b8d05b8SZbigniew Bodek 4209b8d05b8SZbigniew Bodek /* Alloc TX statistics. */ 4219b8d05b8SZbigniew Bodek ena_alloc_counters((counter_u64_t *)&txr->tx_stats, 4229b8d05b8SZbigniew Bodek sizeof(txr->tx_stats)); 4239b8d05b8SZbigniew Bodek 4249b8d05b8SZbigniew Bodek /* RX specific ring state */ 4259b8d05b8SZbigniew Bodek rxr->ring_size = adapter->rx_ring_size; 4269b8d05b8SZbigniew Bodek rxr->smoothed_interval = 4279b8d05b8SZbigniew Bodek ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); 4289b8d05b8SZbigniew Bodek 4299b8d05b8SZbigniew Bodek /* Alloc RX statistics. */ 4309b8d05b8SZbigniew Bodek ena_alloc_counters((counter_u64_t *)&rxr->rx_stats, 4319b8d05b8SZbigniew Bodek sizeof(rxr->rx_stats)); 4329b8d05b8SZbigniew Bodek 4339b8d05b8SZbigniew Bodek /* Initialize locks */ 4349b8d05b8SZbigniew Bodek snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)", 4359b8d05b8SZbigniew Bodek device_get_nameunit(adapter->pdev), i); 4369b8d05b8SZbigniew Bodek snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)", 4379b8d05b8SZbigniew Bodek device_get_nameunit(adapter->pdev), i); 4389b8d05b8SZbigniew Bodek 4399b8d05b8SZbigniew Bodek mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF); 4409b8d05b8SZbigniew Bodek 4419b8d05b8SZbigniew Bodek que = &adapter->que[i]; 4429b8d05b8SZbigniew Bodek que->adapter = adapter; 4439b8d05b8SZbigniew Bodek que->id = i; 4449b8d05b8SZbigniew Bodek que->tx_ring = txr; 4459b8d05b8SZbigniew Bodek que->rx_ring = rxr; 4469b8d05b8SZbigniew Bodek 4479b8d05b8SZbigniew Bodek txr->que = que; 4489b8d05b8SZbigniew Bodek rxr->que = que; 449efe6ab18SMarcin Wojtas 450efe6ab18SMarcin Wojtas rxr->empty_rx_queue = 0; 4519b8d05b8SZbigniew Bodek } 4529b8d05b8SZbigniew Bodek } 4539b8d05b8SZbigniew Bodek 4549b8d05b8SZbigniew Bodek static void 4559b8d05b8SZbigniew Bodek ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid) 4569b8d05b8SZbigniew Bodek { 4579b8d05b8SZbigniew Bodek struct ena_ring *txr = &adapter->tx_ring[qid]; 4589b8d05b8SZbigniew Bodek struct ena_ring *rxr = &adapter->rx_ring[qid]; 4599b8d05b8SZbigniew Bodek 4609b8d05b8SZbigniew Bodek ena_free_counters((counter_u64_t *)&txr->tx_stats, 4619b8d05b8SZbigniew Bodek sizeof(txr->tx_stats)); 4629b8d05b8SZbigniew Bodek ena_free_counters((counter_u64_t *)&rxr->rx_stats, 4639b8d05b8SZbigniew Bodek sizeof(rxr->rx_stats)); 4649b8d05b8SZbigniew Bodek 4657d2544e6SMarcin Wojtas ENA_RING_MTX_LOCK(txr); 4667d2544e6SMarcin Wojtas drbr_free(txr->br, M_DEVBUF); 4677d2544e6SMarcin Wojtas ENA_RING_MTX_UNLOCK(txr); 4687d2544e6SMarcin Wojtas 4699b8d05b8SZbigniew Bodek mtx_destroy(&txr->ring_mtx); 4709b8d05b8SZbigniew Bodek } 4719b8d05b8SZbigniew Bodek 4729b8d05b8SZbigniew Bodek static void 4739b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(struct ena_adapter *adapter) 4749b8d05b8SZbigniew Bodek { 4759b8d05b8SZbigniew Bodek int i; 4769b8d05b8SZbigniew Bodek 4779b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) 4789b8d05b8SZbigniew Bodek ena_free_io_ring_resources(adapter, i); 4799b8d05b8SZbigniew Bodek 4809b8d05b8SZbigniew Bodek } 4819b8d05b8SZbigniew Bodek 4829b8d05b8SZbigniew Bodek static int 4839b8d05b8SZbigniew Bodek ena_setup_tx_dma_tag(struct ena_adapter *adapter) 4849b8d05b8SZbigniew Bodek { 4859b8d05b8SZbigniew Bodek int ret; 4869b8d05b8SZbigniew Bodek 4879b8d05b8SZbigniew Bodek /* Create DMA tag for Tx buffers */ 4889b8d05b8SZbigniew Bodek ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), 4899b8d05b8SZbigniew Bodek 1, 0, /* alignment, bounds */ 4908a573700SZbigniew Bodek ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ 4918a573700SZbigniew Bodek BUS_SPACE_MAXADDR, /* highaddr of excl window */ 4929b8d05b8SZbigniew Bodek NULL, NULL, /* filter, filterarg */ 4939b8d05b8SZbigniew Bodek ENA_TSO_MAXSIZE, /* maxsize */ 4948a573700SZbigniew Bodek adapter->max_tx_sgl_size - 1, /* nsegments */ 4959b8d05b8SZbigniew Bodek ENA_TSO_MAXSIZE, /* maxsegsize */ 4969b8d05b8SZbigniew Bodek 0, /* flags */ 4979b8d05b8SZbigniew Bodek NULL, /* lockfunc */ 4989b8d05b8SZbigniew Bodek NULL, /* lockfuncarg */ 4999b8d05b8SZbigniew Bodek &adapter->tx_buf_tag); 5009b8d05b8SZbigniew Bodek 5019b8d05b8SZbigniew Bodek return (ret); 5029b8d05b8SZbigniew Bodek } 5039b8d05b8SZbigniew Bodek 5049b8d05b8SZbigniew Bodek static int 5059b8d05b8SZbigniew Bodek ena_free_tx_dma_tag(struct ena_adapter *adapter) 5069b8d05b8SZbigniew Bodek { 5079b8d05b8SZbigniew Bodek int ret; 5089b8d05b8SZbigniew Bodek 5099b8d05b8SZbigniew Bodek ret = bus_dma_tag_destroy(adapter->tx_buf_tag); 5109b8d05b8SZbigniew Bodek 5113f9ed7abSMarcin Wojtas if (likely(ret == 0)) 5129b8d05b8SZbigniew Bodek adapter->tx_buf_tag = NULL; 5139b8d05b8SZbigniew Bodek 5149b8d05b8SZbigniew Bodek return (ret); 5159b8d05b8SZbigniew Bodek } 5169b8d05b8SZbigniew Bodek 5179b8d05b8SZbigniew Bodek static int 5189b8d05b8SZbigniew Bodek ena_setup_rx_dma_tag(struct ena_adapter *adapter) 5199b8d05b8SZbigniew Bodek { 5209b8d05b8SZbigniew Bodek int ret; 5219b8d05b8SZbigniew Bodek 5229b8d05b8SZbigniew Bodek /* Create DMA tag for Rx buffers*/ 5239b8d05b8SZbigniew Bodek ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */ 5249b8d05b8SZbigniew Bodek 1, 0, /* alignment, bounds */ 5258a573700SZbigniew Bodek ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ 5268a573700SZbigniew Bodek BUS_SPACE_MAXADDR, /* highaddr of excl window */ 5279b8d05b8SZbigniew Bodek NULL, NULL, /* filter, filterarg */ 5289b8d05b8SZbigniew Bodek MJUM16BYTES, /* maxsize */ 5294727bda6SMarcin Wojtas adapter->max_rx_sgl_size, /* nsegments */ 5309b8d05b8SZbigniew Bodek MJUM16BYTES, /* maxsegsize */ 5319b8d05b8SZbigniew Bodek 0, /* flags */ 5329b8d05b8SZbigniew Bodek NULL, /* lockfunc */ 5339b8d05b8SZbigniew Bodek NULL, /* lockarg */ 5349b8d05b8SZbigniew Bodek &adapter->rx_buf_tag); 5359b8d05b8SZbigniew Bodek 5369b8d05b8SZbigniew Bodek return (ret); 5379b8d05b8SZbigniew Bodek } 5389b8d05b8SZbigniew Bodek 5399b8d05b8SZbigniew Bodek static int 5409b8d05b8SZbigniew Bodek ena_free_rx_dma_tag(struct ena_adapter *adapter) 5419b8d05b8SZbigniew Bodek { 5429b8d05b8SZbigniew Bodek int ret; 5439b8d05b8SZbigniew Bodek 5449b8d05b8SZbigniew Bodek ret = bus_dma_tag_destroy(adapter->rx_buf_tag); 5459b8d05b8SZbigniew Bodek 5463f9ed7abSMarcin Wojtas if (likely(ret == 0)) 5479b8d05b8SZbigniew Bodek adapter->rx_buf_tag = NULL; 5489b8d05b8SZbigniew Bodek 5499b8d05b8SZbigniew Bodek return (ret); 5509b8d05b8SZbigniew Bodek } 5519b8d05b8SZbigniew Bodek 5529b8d05b8SZbigniew Bodek /** 5539b8d05b8SZbigniew Bodek * ena_setup_tx_resources - allocate Tx resources (Descriptors) 5549b8d05b8SZbigniew Bodek * @adapter: network interface device structure 5559b8d05b8SZbigniew Bodek * @qid: queue index 5569b8d05b8SZbigniew Bodek * 5579b8d05b8SZbigniew Bodek * Returns 0 on success, otherwise on failure. 5589b8d05b8SZbigniew Bodek **/ 5599b8d05b8SZbigniew Bodek static int 5609b8d05b8SZbigniew Bodek ena_setup_tx_resources(struct ena_adapter *adapter, int qid) 5619b8d05b8SZbigniew Bodek { 5629b8d05b8SZbigniew Bodek struct ena_que *que = &adapter->que[qid]; 5639b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = que->tx_ring; 5649b8d05b8SZbigniew Bodek int size, i, err; 5659b8d05b8SZbigniew Bodek 5669b8d05b8SZbigniew Bodek size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; 5679b8d05b8SZbigniew Bodek 5689b8d05b8SZbigniew Bodek tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 5693f9ed7abSMarcin Wojtas if (unlikely(tx_ring->tx_buffer_info == NULL)) 5707d2544e6SMarcin Wojtas return (ENOMEM); 5719b8d05b8SZbigniew Bodek 5729b8d05b8SZbigniew Bodek size = sizeof(uint16_t) * tx_ring->ring_size; 5739b8d05b8SZbigniew Bodek tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 5743f9ed7abSMarcin Wojtas if (unlikely(tx_ring->free_tx_ids == NULL)) 5757d2544e6SMarcin Wojtas goto err_buf_info_free; 5769b8d05b8SZbigniew Bodek 5774fa9e02dSMarcin Wojtas size = tx_ring->tx_max_header_size; 5784fa9e02dSMarcin Wojtas tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF, 5794fa9e02dSMarcin Wojtas M_NOWAIT | M_ZERO); 5804fa9e02dSMarcin Wojtas if (unlikely(tx_ring->push_buf_intermediate_buf == NULL)) 5814fa9e02dSMarcin Wojtas goto err_tx_ids_free; 5824fa9e02dSMarcin Wojtas 5839b8d05b8SZbigniew Bodek /* Req id stack for TX OOO completions */ 5849b8d05b8SZbigniew Bodek for (i = 0; i < tx_ring->ring_size; i++) 5859b8d05b8SZbigniew Bodek tx_ring->free_tx_ids[i] = i; 5869b8d05b8SZbigniew Bodek 5879b8d05b8SZbigniew Bodek /* Reset TX statistics. */ 5889b8d05b8SZbigniew Bodek ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats, 5899b8d05b8SZbigniew Bodek sizeof(tx_ring->tx_stats)); 5909b8d05b8SZbigniew Bodek 5919b8d05b8SZbigniew Bodek tx_ring->next_to_use = 0; 5929b8d05b8SZbigniew Bodek tx_ring->next_to_clean = 0; 593af66d7d0SMarcin Wojtas tx_ring->acum_pkts = 0; 5949b8d05b8SZbigniew Bodek 5959b8d05b8SZbigniew Bodek /* Make sure that drbr is empty */ 596b38cf613SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 5979b8d05b8SZbigniew Bodek drbr_flush(adapter->ifp, tx_ring->br); 598b38cf613SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 5999b8d05b8SZbigniew Bodek 6009b8d05b8SZbigniew Bodek /* ... and create the buffer DMA maps */ 6019b8d05b8SZbigniew Bodek for (i = 0; i < tx_ring->ring_size; i++) { 6029b8d05b8SZbigniew Bodek err = bus_dmamap_create(adapter->tx_buf_tag, 0, 6034fa9e02dSMarcin Wojtas &tx_ring->tx_buffer_info[i].map_head); 6043f9ed7abSMarcin Wojtas if (unlikely(err != 0)) { 6054e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, 6064fa9e02dSMarcin Wojtas "Unable to create Tx DMA map_head for buffer %d\n", 6074fa9e02dSMarcin Wojtas i); 6087d2544e6SMarcin Wojtas goto err_buf_info_unmap; 6099b8d05b8SZbigniew Bodek } 6104fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].seg_mapped = false; 6114fa9e02dSMarcin Wojtas 6124fa9e02dSMarcin Wojtas err = bus_dmamap_create(adapter->tx_buf_tag, 0, 6134fa9e02dSMarcin Wojtas &tx_ring->tx_buffer_info[i].map_seg); 6144fa9e02dSMarcin Wojtas if (unlikely(err != 0)) { 6154fa9e02dSMarcin Wojtas ena_trace(ENA_ALERT, 6164fa9e02dSMarcin Wojtas "Unable to create Tx DMA map_seg for buffer %d\n", 6174fa9e02dSMarcin Wojtas i); 6184fa9e02dSMarcin Wojtas goto err_buf_info_head_unmap; 6194fa9e02dSMarcin Wojtas } 6204fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].head_mapped = false; 6219b8d05b8SZbigniew Bodek } 6229b8d05b8SZbigniew Bodek 6239b8d05b8SZbigniew Bodek /* Allocate taskqueues */ 6249b8d05b8SZbigniew Bodek TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring); 6259b8d05b8SZbigniew Bodek tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT, 6269b8d05b8SZbigniew Bodek taskqueue_thread_enqueue, &tx_ring->enqueue_tq); 6273f9ed7abSMarcin Wojtas if (unlikely(tx_ring->enqueue_tq == NULL)) { 6284e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, 6299b8d05b8SZbigniew Bodek "Unable to create taskqueue for enqueue task\n"); 6309b8d05b8SZbigniew Bodek i = tx_ring->ring_size; 6317d2544e6SMarcin Wojtas goto err_buf_info_unmap; 6329b8d05b8SZbigniew Bodek } 6339b8d05b8SZbigniew Bodek 6345cb9db07SMarcin Wojtas tx_ring->running = true; 6355cb9db07SMarcin Wojtas 6369b8d05b8SZbigniew Bodek taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, 6379b8d05b8SZbigniew Bodek "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu); 6389b8d05b8SZbigniew Bodek 6399b8d05b8SZbigniew Bodek return (0); 6409b8d05b8SZbigniew Bodek 6414fa9e02dSMarcin Wojtas err_buf_info_head_unmap: 6424fa9e02dSMarcin Wojtas bus_dmamap_destroy(adapter->tx_buf_tag, 6434fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].map_head); 6447d2544e6SMarcin Wojtas err_buf_info_unmap: 6459b8d05b8SZbigniew Bodek while (i--) { 6469b8d05b8SZbigniew Bodek bus_dmamap_destroy(adapter->tx_buf_tag, 6474fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].map_head); 6484fa9e02dSMarcin Wojtas bus_dmamap_destroy(adapter->tx_buf_tag, 6494fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].map_seg); 6509b8d05b8SZbigniew Bodek } 6514fa9e02dSMarcin Wojtas free(tx_ring->push_buf_intermediate_buf, M_DEVBUF); 6524fa9e02dSMarcin Wojtas err_tx_ids_free: 653cd5d5804SMarcin Wojtas free(tx_ring->free_tx_ids, M_DEVBUF); 6547d2544e6SMarcin Wojtas tx_ring->free_tx_ids = NULL; 6557d2544e6SMarcin Wojtas err_buf_info_free: 656cd5d5804SMarcin Wojtas free(tx_ring->tx_buffer_info, M_DEVBUF); 6577d2544e6SMarcin Wojtas tx_ring->tx_buffer_info = NULL; 6587d2544e6SMarcin Wojtas 6599b8d05b8SZbigniew Bodek return (ENOMEM); 6609b8d05b8SZbigniew Bodek } 6619b8d05b8SZbigniew Bodek 6629b8d05b8SZbigniew Bodek /** 6639b8d05b8SZbigniew Bodek * ena_free_tx_resources - Free Tx Resources per Queue 6649b8d05b8SZbigniew Bodek * @adapter: network interface device structure 6659b8d05b8SZbigniew Bodek * @qid: queue index 6669b8d05b8SZbigniew Bodek * 6679b8d05b8SZbigniew Bodek * Free all transmit software resources 6689b8d05b8SZbigniew Bodek **/ 6699b8d05b8SZbigniew Bodek static void 6709b8d05b8SZbigniew Bodek ena_free_tx_resources(struct ena_adapter *adapter, int qid) 6719b8d05b8SZbigniew Bodek { 6729b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 6739b8d05b8SZbigniew Bodek 6749b8d05b8SZbigniew Bodek while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, 6759b8d05b8SZbigniew Bodek NULL)) 6769b8d05b8SZbigniew Bodek taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 6779b8d05b8SZbigniew Bodek 6789b8d05b8SZbigniew Bodek taskqueue_free(tx_ring->enqueue_tq); 6799b8d05b8SZbigniew Bodek 680b38cf613SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 6819b8d05b8SZbigniew Bodek /* Flush buffer ring, */ 6829b8d05b8SZbigniew Bodek drbr_flush(adapter->ifp, tx_ring->br); 6839b8d05b8SZbigniew Bodek 6849b8d05b8SZbigniew Bodek /* Free buffer DMA maps, */ 6859b8d05b8SZbigniew Bodek for (int i = 0; i < tx_ring->ring_size; i++) { 6864fa9e02dSMarcin Wojtas if (tx_ring->tx_buffer_info[i].head_mapped == true) { 687e8073738SMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, 6884fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].map_head, 6894fa9e02dSMarcin Wojtas BUS_DMASYNC_POSTWRITE); 6909b8d05b8SZbigniew Bodek bus_dmamap_unload(adapter->tx_buf_tag, 6914fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].map_head); 6924fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].head_mapped = false; 6934fa9e02dSMarcin Wojtas } 6949b8d05b8SZbigniew Bodek bus_dmamap_destroy(adapter->tx_buf_tag, 6954fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].map_head); 6964fa9e02dSMarcin Wojtas 6974fa9e02dSMarcin Wojtas if (tx_ring->tx_buffer_info[i].seg_mapped == true) { 6984fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, 6994fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].map_seg, 7004fa9e02dSMarcin Wojtas BUS_DMASYNC_POSTWRITE); 7014fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 7024fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].map_seg); 7034fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].seg_mapped = false; 7044fa9e02dSMarcin Wojtas } 7054fa9e02dSMarcin Wojtas bus_dmamap_destroy(adapter->tx_buf_tag, 7064fa9e02dSMarcin Wojtas tx_ring->tx_buffer_info[i].map_seg); 7074fa9e02dSMarcin Wojtas 708e8073738SMarcin Wojtas m_freem(tx_ring->tx_buffer_info[i].mbuf); 709e8073738SMarcin Wojtas tx_ring->tx_buffer_info[i].mbuf = NULL; 7109b8d05b8SZbigniew Bodek } 711416e8864SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 7129b8d05b8SZbigniew Bodek 7139b8d05b8SZbigniew Bodek /* And free allocated memory. */ 714cd5d5804SMarcin Wojtas free(tx_ring->tx_buffer_info, M_DEVBUF); 7159b8d05b8SZbigniew Bodek tx_ring->tx_buffer_info = NULL; 7169b8d05b8SZbigniew Bodek 717cd5d5804SMarcin Wojtas free(tx_ring->free_tx_ids, M_DEVBUF); 7189b8d05b8SZbigniew Bodek tx_ring->free_tx_ids = NULL; 7194fa9e02dSMarcin Wojtas 7204fa9e02dSMarcin Wojtas ENA_MEM_FREE(adapter->ena_dev->dmadev, 7214fa9e02dSMarcin Wojtas tx_ring->push_buf_intermediate_buf); 7224fa9e02dSMarcin Wojtas tx_ring->push_buf_intermediate_buf = NULL; 7239b8d05b8SZbigniew Bodek } 7249b8d05b8SZbigniew Bodek 7259b8d05b8SZbigniew Bodek /** 7269b8d05b8SZbigniew Bodek * ena_setup_all_tx_resources - allocate all queues Tx resources 7279b8d05b8SZbigniew Bodek * @adapter: network interface device structure 7289b8d05b8SZbigniew Bodek * 7299b8d05b8SZbigniew Bodek * Returns 0 on success, otherwise on failure. 7309b8d05b8SZbigniew Bodek **/ 7319b8d05b8SZbigniew Bodek static int 7329b8d05b8SZbigniew Bodek ena_setup_all_tx_resources(struct ena_adapter *adapter) 7339b8d05b8SZbigniew Bodek { 7349b8d05b8SZbigniew Bodek int i, rc; 7359b8d05b8SZbigniew Bodek 7369b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 7379b8d05b8SZbigniew Bodek rc = ena_setup_tx_resources(adapter, i); 7380bdffe59SMarcin Wojtas if (rc != 0) { 7399b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 7409b8d05b8SZbigniew Bodek "Allocation for Tx Queue %u failed\n", i); 7419b8d05b8SZbigniew Bodek goto err_setup_tx; 7429b8d05b8SZbigniew Bodek } 7437d2544e6SMarcin Wojtas } 7449b8d05b8SZbigniew Bodek 7459b8d05b8SZbigniew Bodek return (0); 7469b8d05b8SZbigniew Bodek 7479b8d05b8SZbigniew Bodek err_setup_tx: 7489b8d05b8SZbigniew Bodek /* Rewind the index freeing the rings as we go */ 7499b8d05b8SZbigniew Bodek while (i--) 7509b8d05b8SZbigniew Bodek ena_free_tx_resources(adapter, i); 7519b8d05b8SZbigniew Bodek return (rc); 7529b8d05b8SZbigniew Bodek } 7539b8d05b8SZbigniew Bodek 7549b8d05b8SZbigniew Bodek /** 7559b8d05b8SZbigniew Bodek * ena_free_all_tx_resources - Free Tx Resources for All Queues 7569b8d05b8SZbigniew Bodek * @adapter: network interface device structure 7579b8d05b8SZbigniew Bodek * 7589b8d05b8SZbigniew Bodek * Free all transmit software resources 7599b8d05b8SZbigniew Bodek **/ 7609b8d05b8SZbigniew Bodek static void 7619b8d05b8SZbigniew Bodek ena_free_all_tx_resources(struct ena_adapter *adapter) 7629b8d05b8SZbigniew Bodek { 7639b8d05b8SZbigniew Bodek int i; 7649b8d05b8SZbigniew Bodek 7659b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) 7669b8d05b8SZbigniew Bodek ena_free_tx_resources(adapter, i); 7679b8d05b8SZbigniew Bodek } 7689b8d05b8SZbigniew Bodek 76943fefd16SMarcin Wojtas static inline int 77043fefd16SMarcin Wojtas validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) 77143fefd16SMarcin Wojtas { 77243fefd16SMarcin Wojtas if (likely(req_id < rx_ring->ring_size)) 77343fefd16SMarcin Wojtas return (0); 77443fefd16SMarcin Wojtas 77543fefd16SMarcin Wojtas device_printf(rx_ring->adapter->pdev, "Invalid rx req_id: %hu\n", 77643fefd16SMarcin Wojtas req_id); 77743fefd16SMarcin Wojtas counter_u64_add(rx_ring->rx_stats.bad_req_id, 1); 77843fefd16SMarcin Wojtas 77943fefd16SMarcin Wojtas /* Trigger device reset */ 780858659f7SMarcin Wojtas if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, rx_ring->adapter))) { 78143fefd16SMarcin Wojtas rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; 782fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, rx_ring->adapter); 783858659f7SMarcin Wojtas } 78443fefd16SMarcin Wojtas 78543fefd16SMarcin Wojtas return (EFAULT); 78643fefd16SMarcin Wojtas } 78743fefd16SMarcin Wojtas 7889b8d05b8SZbigniew Bodek /** 7899b8d05b8SZbigniew Bodek * ena_setup_rx_resources - allocate Rx resources (Descriptors) 7909b8d05b8SZbigniew Bodek * @adapter: network interface device structure 7919b8d05b8SZbigniew Bodek * @qid: queue index 7929b8d05b8SZbigniew Bodek * 7939b8d05b8SZbigniew Bodek * Returns 0 on success, otherwise on failure. 7949b8d05b8SZbigniew Bodek **/ 7959b8d05b8SZbigniew Bodek static int 7969b8d05b8SZbigniew Bodek ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid) 7979b8d05b8SZbigniew Bodek { 7989b8d05b8SZbigniew Bodek struct ena_que *que = &adapter->que[qid]; 7999b8d05b8SZbigniew Bodek struct ena_ring *rx_ring = que->rx_ring; 8009b8d05b8SZbigniew Bodek int size, err, i; 8019b8d05b8SZbigniew Bodek 8029b8d05b8SZbigniew Bodek size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size; 8039b8d05b8SZbigniew Bodek 8049b8d05b8SZbigniew Bodek /* 8059b8d05b8SZbigniew Bodek * Alloc extra element so in rx path 8069b8d05b8SZbigniew Bodek * we can always prefetch rx_info + 1 8079b8d05b8SZbigniew Bodek */ 8089b8d05b8SZbigniew Bodek size += sizeof(struct ena_rx_buffer); 8099b8d05b8SZbigniew Bodek 810cd5d5804SMarcin Wojtas rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO); 8119b8d05b8SZbigniew Bodek 81243fefd16SMarcin Wojtas size = sizeof(uint16_t) * rx_ring->ring_size; 81343fefd16SMarcin Wojtas rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK); 81443fefd16SMarcin Wojtas 81543fefd16SMarcin Wojtas for (i = 0; i < rx_ring->ring_size; i++) 81643fefd16SMarcin Wojtas rx_ring->free_rx_ids[i] = i; 81743fefd16SMarcin Wojtas 8189b8d05b8SZbigniew Bodek /* Reset RX statistics. */ 8199b8d05b8SZbigniew Bodek ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats, 8209b8d05b8SZbigniew Bodek sizeof(rx_ring->rx_stats)); 8219b8d05b8SZbigniew Bodek 8229b8d05b8SZbigniew Bodek rx_ring->next_to_clean = 0; 8239b8d05b8SZbigniew Bodek rx_ring->next_to_use = 0; 8249b8d05b8SZbigniew Bodek 8259b8d05b8SZbigniew Bodek /* ... and create the buffer DMA maps */ 8269b8d05b8SZbigniew Bodek for (i = 0; i < rx_ring->ring_size; i++) { 8279b8d05b8SZbigniew Bodek err = bus_dmamap_create(adapter->rx_buf_tag, 0, 8289b8d05b8SZbigniew Bodek &(rx_ring->rx_buffer_info[i].map)); 8299b8d05b8SZbigniew Bodek if (err != 0) { 8304e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, 8319b8d05b8SZbigniew Bodek "Unable to create Rx DMA map for buffer %d\n", i); 8327d2544e6SMarcin Wojtas goto err_buf_info_unmap; 8339b8d05b8SZbigniew Bodek } 8349b8d05b8SZbigniew Bodek } 8359b8d05b8SZbigniew Bodek 8369b8d05b8SZbigniew Bodek /* Create LRO for the ring */ 8370bdffe59SMarcin Wojtas if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) { 8389b8d05b8SZbigniew Bodek int err = tcp_lro_init(&rx_ring->lro); 8390bdffe59SMarcin Wojtas if (err != 0) { 8409b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 8419b8d05b8SZbigniew Bodek "LRO[%d] Initialization failed!\n", qid); 8429b8d05b8SZbigniew Bodek } else { 8439b8d05b8SZbigniew Bodek ena_trace(ENA_INFO, 8449b8d05b8SZbigniew Bodek "RX Soft LRO[%d] Initialized\n", qid); 8459b8d05b8SZbigniew Bodek rx_ring->lro.ifp = adapter->ifp; 8469b8d05b8SZbigniew Bodek } 8479b8d05b8SZbigniew Bodek } 8489b8d05b8SZbigniew Bodek 8499b8d05b8SZbigniew Bodek return (0); 8509b8d05b8SZbigniew Bodek 8517d2544e6SMarcin Wojtas err_buf_info_unmap: 8529b8d05b8SZbigniew Bodek while (i--) { 8539b8d05b8SZbigniew Bodek bus_dmamap_destroy(adapter->rx_buf_tag, 8549b8d05b8SZbigniew Bodek rx_ring->rx_buffer_info[i].map); 8559b8d05b8SZbigniew Bodek } 8569b8d05b8SZbigniew Bodek 85743fefd16SMarcin Wojtas free(rx_ring->free_rx_ids, M_DEVBUF); 85843fefd16SMarcin Wojtas rx_ring->free_rx_ids = NULL; 859cd5d5804SMarcin Wojtas free(rx_ring->rx_buffer_info, M_DEVBUF); 8609b8d05b8SZbigniew Bodek rx_ring->rx_buffer_info = NULL; 8619b8d05b8SZbigniew Bodek return (ENOMEM); 8629b8d05b8SZbigniew Bodek } 8639b8d05b8SZbigniew Bodek 8649b8d05b8SZbigniew Bodek /** 8659b8d05b8SZbigniew Bodek * ena_free_rx_resources - Free Rx Resources 8669b8d05b8SZbigniew Bodek * @adapter: network interface device structure 8679b8d05b8SZbigniew Bodek * @qid: queue index 8689b8d05b8SZbigniew Bodek * 8699b8d05b8SZbigniew Bodek * Free all receive software resources 8709b8d05b8SZbigniew Bodek **/ 8719b8d05b8SZbigniew Bodek static void 8729b8d05b8SZbigniew Bodek ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid) 8739b8d05b8SZbigniew Bodek { 8749b8d05b8SZbigniew Bodek struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 8759b8d05b8SZbigniew Bodek 8769b8d05b8SZbigniew Bodek /* Free buffer DMA maps, */ 8779b8d05b8SZbigniew Bodek for (int i = 0; i < rx_ring->ring_size; i++) { 878e8073738SMarcin Wojtas bus_dmamap_sync(adapter->rx_buf_tag, 879e8073738SMarcin Wojtas rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD); 8809b8d05b8SZbigniew Bodek m_freem(rx_ring->rx_buffer_info[i].mbuf); 8819b8d05b8SZbigniew Bodek rx_ring->rx_buffer_info[i].mbuf = NULL; 8829b8d05b8SZbigniew Bodek bus_dmamap_unload(adapter->rx_buf_tag, 8839b8d05b8SZbigniew Bodek rx_ring->rx_buffer_info[i].map); 8849b8d05b8SZbigniew Bodek bus_dmamap_destroy(adapter->rx_buf_tag, 8859b8d05b8SZbigniew Bodek rx_ring->rx_buffer_info[i].map); 8869b8d05b8SZbigniew Bodek } 8879b8d05b8SZbigniew Bodek 8889b8d05b8SZbigniew Bodek /* free LRO resources, */ 8899b8d05b8SZbigniew Bodek tcp_lro_free(&rx_ring->lro); 8909b8d05b8SZbigniew Bodek 8919b8d05b8SZbigniew Bodek /* free allocated memory */ 892cd5d5804SMarcin Wojtas free(rx_ring->rx_buffer_info, M_DEVBUF); 8939b8d05b8SZbigniew Bodek rx_ring->rx_buffer_info = NULL; 8949b8d05b8SZbigniew Bodek 89543fefd16SMarcin Wojtas free(rx_ring->free_rx_ids, M_DEVBUF); 89643fefd16SMarcin Wojtas rx_ring->free_rx_ids = NULL; 8979b8d05b8SZbigniew Bodek } 8989b8d05b8SZbigniew Bodek 8999b8d05b8SZbigniew Bodek /** 9009b8d05b8SZbigniew Bodek * ena_setup_all_rx_resources - allocate all queues Rx resources 9019b8d05b8SZbigniew Bodek * @adapter: network interface device structure 9029b8d05b8SZbigniew Bodek * 9039b8d05b8SZbigniew Bodek * Returns 0 on success, otherwise on failure. 9049b8d05b8SZbigniew Bodek **/ 9059b8d05b8SZbigniew Bodek static int 9069b8d05b8SZbigniew Bodek ena_setup_all_rx_resources(struct ena_adapter *adapter) 9079b8d05b8SZbigniew Bodek { 9089b8d05b8SZbigniew Bodek int i, rc = 0; 9099b8d05b8SZbigniew Bodek 9109b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 9119b8d05b8SZbigniew Bodek rc = ena_setup_rx_resources(adapter, i); 9120bdffe59SMarcin Wojtas if (rc != 0) { 9139b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 9149b8d05b8SZbigniew Bodek "Allocation for Rx Queue %u failed\n", i); 9159b8d05b8SZbigniew Bodek goto err_setup_rx; 9169b8d05b8SZbigniew Bodek } 9177d2544e6SMarcin Wojtas } 9189b8d05b8SZbigniew Bodek return (0); 9199b8d05b8SZbigniew Bodek 9209b8d05b8SZbigniew Bodek err_setup_rx: 9219b8d05b8SZbigniew Bodek /* rewind the index freeing the rings as we go */ 9229b8d05b8SZbigniew Bodek while (i--) 9239b8d05b8SZbigniew Bodek ena_free_rx_resources(adapter, i); 9249b8d05b8SZbigniew Bodek return (rc); 9259b8d05b8SZbigniew Bodek } 9269b8d05b8SZbigniew Bodek 9279b8d05b8SZbigniew Bodek /** 9289b8d05b8SZbigniew Bodek * ena_free_all_rx_resources - Free Rx resources for all queues 9299b8d05b8SZbigniew Bodek * @adapter: network interface device structure 9309b8d05b8SZbigniew Bodek * 9319b8d05b8SZbigniew Bodek * Free all receive software resources 9329b8d05b8SZbigniew Bodek **/ 9339b8d05b8SZbigniew Bodek static void 9349b8d05b8SZbigniew Bodek ena_free_all_rx_resources(struct ena_adapter *adapter) 9359b8d05b8SZbigniew Bodek { 9369b8d05b8SZbigniew Bodek int i; 9379b8d05b8SZbigniew Bodek 9389b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) 9399b8d05b8SZbigniew Bodek ena_free_rx_resources(adapter, i); 9409b8d05b8SZbigniew Bodek } 9419b8d05b8SZbigniew Bodek 9429b8d05b8SZbigniew Bodek static inline int 9439b8d05b8SZbigniew Bodek ena_alloc_rx_mbuf(struct ena_adapter *adapter, 9449b8d05b8SZbigniew Bodek struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) 9459b8d05b8SZbigniew Bodek { 9469b8d05b8SZbigniew Bodek struct ena_com_buf *ena_buf; 9479b8d05b8SZbigniew Bodek bus_dma_segment_t segs[1]; 9489b8d05b8SZbigniew Bodek int nsegs, error; 9494727bda6SMarcin Wojtas int mlen; 9509b8d05b8SZbigniew Bodek 9519b8d05b8SZbigniew Bodek /* if previous allocated frag is not used */ 9523f9ed7abSMarcin Wojtas if (unlikely(rx_info->mbuf != NULL)) 9539b8d05b8SZbigniew Bodek return (0); 9549b8d05b8SZbigniew Bodek 9559b8d05b8SZbigniew Bodek /* Get mbuf using UMA allocator */ 9569b8d05b8SZbigniew Bodek rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES); 9579b8d05b8SZbigniew Bodek 9583f9ed7abSMarcin Wojtas if (unlikely(rx_info->mbuf == NULL)) { 9594727bda6SMarcin Wojtas counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1); 9604727bda6SMarcin Wojtas rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 9614727bda6SMarcin Wojtas if (unlikely(rx_info->mbuf == NULL)) { 9629b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); 9639b8d05b8SZbigniew Bodek return (ENOMEM); 9649b8d05b8SZbigniew Bodek } 9654727bda6SMarcin Wojtas mlen = MCLBYTES; 9664727bda6SMarcin Wojtas } else { 9674727bda6SMarcin Wojtas mlen = MJUM16BYTES; 9684727bda6SMarcin Wojtas } 9699b8d05b8SZbigniew Bodek /* Set mbuf length*/ 9704727bda6SMarcin Wojtas rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen; 9719b8d05b8SZbigniew Bodek 9729b8d05b8SZbigniew Bodek /* Map packets for DMA */ 9739b8d05b8SZbigniew Bodek ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, 97430425f93SMarcin Wojtas "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n", 9759b8d05b8SZbigniew Bodek adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len); 9769b8d05b8SZbigniew Bodek error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, 9779b8d05b8SZbigniew Bodek rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 9783f9ed7abSMarcin Wojtas if (unlikely((error != 0) || (nsegs != 1))) { 9794e8acd84SMarcin Wojtas ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, " 9809b8d05b8SZbigniew Bodek "nsegs: %d\n", error, nsegs); 9819b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); 9829b8d05b8SZbigniew Bodek goto exit; 9839b8d05b8SZbigniew Bodek 9849b8d05b8SZbigniew Bodek } 9859b8d05b8SZbigniew Bodek 9869b8d05b8SZbigniew Bodek bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); 9879b8d05b8SZbigniew Bodek 9889b8d05b8SZbigniew Bodek ena_buf = &rx_info->ena_buf; 9899b8d05b8SZbigniew Bodek ena_buf->paddr = segs[0].ds_addr; 9904727bda6SMarcin Wojtas ena_buf->len = mlen; 9919b8d05b8SZbigniew Bodek 9929b8d05b8SZbigniew Bodek ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, 9939b8d05b8SZbigniew Bodek "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n", 9949b8d05b8SZbigniew Bodek rx_info->mbuf, rx_info,ena_buf->len, (uintmax_t)ena_buf->paddr); 9959b8d05b8SZbigniew Bodek 9969b8d05b8SZbigniew Bodek return (0); 9979b8d05b8SZbigniew Bodek 9989b8d05b8SZbigniew Bodek exit: 9999b8d05b8SZbigniew Bodek m_freem(rx_info->mbuf); 10009b8d05b8SZbigniew Bodek rx_info->mbuf = NULL; 10019b8d05b8SZbigniew Bodek return (EFAULT); 10029b8d05b8SZbigniew Bodek } 10039b8d05b8SZbigniew Bodek 10049b8d05b8SZbigniew Bodek static void 10059b8d05b8SZbigniew Bodek ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, 10069b8d05b8SZbigniew Bodek struct ena_rx_buffer *rx_info) 10079b8d05b8SZbigniew Bodek { 10089b8d05b8SZbigniew Bodek 10094e8acd84SMarcin Wojtas if (rx_info->mbuf == NULL) { 10104e8acd84SMarcin Wojtas ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n"); 10119b8d05b8SZbigniew Bodek return; 10124e8acd84SMarcin Wojtas } 10139b8d05b8SZbigniew Bodek 1014e8073738SMarcin Wojtas bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 1015e8073738SMarcin Wojtas BUS_DMASYNC_POSTREAD); 10169b8d05b8SZbigniew Bodek bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map); 10179b8d05b8SZbigniew Bodek m_freem(rx_info->mbuf); 10189b8d05b8SZbigniew Bodek rx_info->mbuf = NULL; 10199b8d05b8SZbigniew Bodek } 10209b8d05b8SZbigniew Bodek 10219b8d05b8SZbigniew Bodek /** 10229b8d05b8SZbigniew Bodek * ena_refill_rx_bufs - Refills ring with descriptors 10239b8d05b8SZbigniew Bodek * @rx_ring: the ring which we want to feed with free descriptors 10249b8d05b8SZbigniew Bodek * @num: number of descriptors to refill 10259b8d05b8SZbigniew Bodek * Refills the ring with newly allocated DMA-mapped mbufs for receiving 10269b8d05b8SZbigniew Bodek **/ 10279b8d05b8SZbigniew Bodek static int 10289b8d05b8SZbigniew Bodek ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) 10299b8d05b8SZbigniew Bodek { 10309b8d05b8SZbigniew Bodek struct ena_adapter *adapter = rx_ring->adapter; 103143fefd16SMarcin Wojtas uint16_t next_to_use, req_id; 10329b8d05b8SZbigniew Bodek uint32_t i; 10339b8d05b8SZbigniew Bodek int rc; 10349b8d05b8SZbigniew Bodek 103530425f93SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n", 10369b8d05b8SZbigniew Bodek rx_ring->qid); 10379b8d05b8SZbigniew Bodek 10389b8d05b8SZbigniew Bodek next_to_use = rx_ring->next_to_use; 10399b8d05b8SZbigniew Bodek 10409b8d05b8SZbigniew Bodek for (i = 0; i < num; i++) { 104143fefd16SMarcin Wojtas struct ena_rx_buffer *rx_info; 104243fefd16SMarcin Wojtas 10439b8d05b8SZbigniew Bodek ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, 104430425f93SMarcin Wojtas "RX buffer - next to use: %d\n", next_to_use); 10459b8d05b8SZbigniew Bodek 104643fefd16SMarcin Wojtas req_id = rx_ring->free_rx_ids[next_to_use]; 104743fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 10489b8d05b8SZbigniew Bodek 10499b8d05b8SZbigniew Bodek rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); 10503f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 10514e8acd84SMarcin Wojtas ena_trace(ENA_WARNING, 10524e8acd84SMarcin Wojtas "failed to alloc buffer for rx queue %d\n", 10534e8acd84SMarcin Wojtas rx_ring->qid); 10549b8d05b8SZbigniew Bodek break; 10559b8d05b8SZbigniew Bodek } 10569b8d05b8SZbigniew Bodek rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, 105743fefd16SMarcin Wojtas &rx_info->ena_buf, req_id); 10580bdffe59SMarcin Wojtas if (unlikely(rc != 0)) { 10594e8acd84SMarcin Wojtas ena_trace(ENA_WARNING, 10609b8d05b8SZbigniew Bodek "failed to add buffer for rx queue %d\n", 10619b8d05b8SZbigniew Bodek rx_ring->qid); 10629b8d05b8SZbigniew Bodek break; 10639b8d05b8SZbigniew Bodek } 10649b8d05b8SZbigniew Bodek next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 10659b8d05b8SZbigniew Bodek rx_ring->ring_size); 10669b8d05b8SZbigniew Bodek } 10679b8d05b8SZbigniew Bodek 10683f9ed7abSMarcin Wojtas if (unlikely(i < num)) { 10699b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.refil_partial, 1); 10704e8acd84SMarcin Wojtas ena_trace(ENA_WARNING, 10714e8acd84SMarcin Wojtas "refilled rx qid %d with only %d mbufs (from %d)\n", 10724e8acd84SMarcin Wojtas rx_ring->qid, i, num); 10739b8d05b8SZbigniew Bodek } 10749b8d05b8SZbigniew Bodek 10753f9ed7abSMarcin Wojtas if (likely(i != 0)) { 10769b8d05b8SZbigniew Bodek wmb(); 10779b8d05b8SZbigniew Bodek ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 10789b8d05b8SZbigniew Bodek } 10799b8d05b8SZbigniew Bodek rx_ring->next_to_use = next_to_use; 10809b8d05b8SZbigniew Bodek return (i); 10819b8d05b8SZbigniew Bodek } 10829b8d05b8SZbigniew Bodek 10839b8d05b8SZbigniew Bodek static void 10849b8d05b8SZbigniew Bodek ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid) 10859b8d05b8SZbigniew Bodek { 10869b8d05b8SZbigniew Bodek struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 10879b8d05b8SZbigniew Bodek unsigned int i; 10889b8d05b8SZbigniew Bodek 10899b8d05b8SZbigniew Bodek for (i = 0; i < rx_ring->ring_size; i++) { 10909b8d05b8SZbigniew Bodek struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; 10919b8d05b8SZbigniew Bodek 10920bdffe59SMarcin Wojtas if (rx_info->mbuf != NULL) 10939b8d05b8SZbigniew Bodek ena_free_rx_mbuf(adapter, rx_ring, rx_info); 10949b8d05b8SZbigniew Bodek } 10959b8d05b8SZbigniew Bodek } 10969b8d05b8SZbigniew Bodek 10979b8d05b8SZbigniew Bodek /** 10989b8d05b8SZbigniew Bodek * ena_refill_all_rx_bufs - allocate all queues Rx buffers 10999b8d05b8SZbigniew Bodek * @adapter: network interface device structure 11009b8d05b8SZbigniew Bodek * 11019b8d05b8SZbigniew Bodek */ 11029b8d05b8SZbigniew Bodek static void 11039b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(struct ena_adapter *adapter) 11049b8d05b8SZbigniew Bodek { 11059b8d05b8SZbigniew Bodek struct ena_ring *rx_ring; 11069b8d05b8SZbigniew Bodek int i, rc, bufs_num; 11079b8d05b8SZbigniew Bodek 11089b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 11099b8d05b8SZbigniew Bodek rx_ring = &adapter->rx_ring[i]; 11109b8d05b8SZbigniew Bodek bufs_num = rx_ring->ring_size - 1; 11119b8d05b8SZbigniew Bodek rc = ena_refill_rx_bufs(rx_ring, bufs_num); 11129b8d05b8SZbigniew Bodek 11139b8d05b8SZbigniew Bodek if (unlikely(rc != bufs_num)) 11144e8acd84SMarcin Wojtas ena_trace(ENA_WARNING, "refilling Queue %d failed. " 11154e8acd84SMarcin Wojtas "Allocated %d buffers from: %d\n", i, rc, bufs_num); 11169b8d05b8SZbigniew Bodek } 11179b8d05b8SZbigniew Bodek } 11189b8d05b8SZbigniew Bodek 11199b8d05b8SZbigniew Bodek static void 11209b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(struct ena_adapter *adapter) 11219b8d05b8SZbigniew Bodek { 11229b8d05b8SZbigniew Bodek int i; 11239b8d05b8SZbigniew Bodek 11249b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) 11259b8d05b8SZbigniew Bodek ena_free_rx_bufs(adapter, i); 11269b8d05b8SZbigniew Bodek } 11279b8d05b8SZbigniew Bodek 11289b8d05b8SZbigniew Bodek /** 11299b8d05b8SZbigniew Bodek * ena_free_tx_bufs - Free Tx Buffers per Queue 11309b8d05b8SZbigniew Bodek * @adapter: network interface device structure 11319b8d05b8SZbigniew Bodek * @qid: queue index 11329b8d05b8SZbigniew Bodek **/ 11339b8d05b8SZbigniew Bodek static void 11349b8d05b8SZbigniew Bodek ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) 11359b8d05b8SZbigniew Bodek { 11364e8acd84SMarcin Wojtas bool print_once = true; 11379b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 11389b8d05b8SZbigniew Bodek 1139416e8864SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 11409b8d05b8SZbigniew Bodek for (int i = 0; i < tx_ring->ring_size; i++) { 11419b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 11429b8d05b8SZbigniew Bodek 11439b8d05b8SZbigniew Bodek if (tx_info->mbuf == NULL) 11449b8d05b8SZbigniew Bodek continue; 11459b8d05b8SZbigniew Bodek 11464e8acd84SMarcin Wojtas if (print_once) { 11474e8acd84SMarcin Wojtas device_printf(adapter->pdev, 114830425f93SMarcin Wojtas "free uncompleted tx mbuf qid %d idx 0x%x\n", 11494e8acd84SMarcin Wojtas qid, i); 11504e8acd84SMarcin Wojtas print_once = false; 11514e8acd84SMarcin Wojtas } else { 11524e8acd84SMarcin Wojtas ena_trace(ENA_DBG, 115330425f93SMarcin Wojtas "free uncompleted tx mbuf qid %d idx 0x%x\n", 11544e8acd84SMarcin Wojtas qid, i); 11554e8acd84SMarcin Wojtas } 11569b8d05b8SZbigniew Bodek 11574fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) { 11584fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, 1159e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 11604fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 11614fa9e02dSMarcin Wojtas tx_info->map_head); 11624fa9e02dSMarcin Wojtas tx_info->head_mapped = false; 11634fa9e02dSMarcin Wojtas } 11644fa9e02dSMarcin Wojtas 11654fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) { 11664fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, 11674fa9e02dSMarcin Wojtas BUS_DMASYNC_POSTWRITE); 11684fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 11694fa9e02dSMarcin Wojtas tx_info->map_seg); 11704fa9e02dSMarcin Wojtas tx_info->seg_mapped = false; 11714fa9e02dSMarcin Wojtas } 11724fa9e02dSMarcin Wojtas 11739b8d05b8SZbigniew Bodek m_free(tx_info->mbuf); 11749b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 11759b8d05b8SZbigniew Bodek } 1176416e8864SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 11779b8d05b8SZbigniew Bodek } 11789b8d05b8SZbigniew Bodek 11799b8d05b8SZbigniew Bodek static void 11809b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(struct ena_adapter *adapter) 11819b8d05b8SZbigniew Bodek { 11829b8d05b8SZbigniew Bodek 11839b8d05b8SZbigniew Bodek for (int i = 0; i < adapter->num_queues; i++) 11849b8d05b8SZbigniew Bodek ena_free_tx_bufs(adapter, i); 11859b8d05b8SZbigniew Bodek } 11869b8d05b8SZbigniew Bodek 11879b8d05b8SZbigniew Bodek static void 11889b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(struct ena_adapter *adapter) 11899b8d05b8SZbigniew Bodek { 11909b8d05b8SZbigniew Bodek uint16_t ena_qid; 11919b8d05b8SZbigniew Bodek int i; 11929b8d05b8SZbigniew Bodek 11939b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 11949b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 11959b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 11969b8d05b8SZbigniew Bodek } 11979b8d05b8SZbigniew Bodek } 11989b8d05b8SZbigniew Bodek 11999b8d05b8SZbigniew Bodek static void 12009b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(struct ena_adapter *adapter) 12019b8d05b8SZbigniew Bodek { 12029b8d05b8SZbigniew Bodek uint16_t ena_qid; 12039b8d05b8SZbigniew Bodek int i; 12049b8d05b8SZbigniew Bodek 12059b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 12069b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 12079b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 12089b8d05b8SZbigniew Bodek } 12099b8d05b8SZbigniew Bodek } 12109b8d05b8SZbigniew Bodek 12119b8d05b8SZbigniew Bodek static void 12129b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(struct ena_adapter *adapter) 12139b8d05b8SZbigniew Bodek { 12145cb9db07SMarcin Wojtas struct ena_que *queue; 12155cb9db07SMarcin Wojtas int i; 12165cb9db07SMarcin Wojtas 12175cb9db07SMarcin Wojtas for (i = 0; i < adapter->num_queues; i++) { 12185cb9db07SMarcin Wojtas queue = &adapter->que[i]; 12195cb9db07SMarcin Wojtas while (taskqueue_cancel(queue->cleanup_tq, 12205cb9db07SMarcin Wojtas &queue->cleanup_task, NULL)) 12215cb9db07SMarcin Wojtas taskqueue_drain(queue->cleanup_tq, 12225cb9db07SMarcin Wojtas &queue->cleanup_task); 12235cb9db07SMarcin Wojtas taskqueue_free(queue->cleanup_tq); 12245cb9db07SMarcin Wojtas } 12255cb9db07SMarcin Wojtas 12269b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(adapter); 12279b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(adapter); 12289b8d05b8SZbigniew Bodek } 12299b8d05b8SZbigniew Bodek 1230fceb9387SMarcin Wojtas static inline int 12319b8d05b8SZbigniew Bodek validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id) 12329b8d05b8SZbigniew Bodek { 12334e8acd84SMarcin Wojtas struct ena_adapter *adapter = tx_ring->adapter; 12349b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = NULL; 12359b8d05b8SZbigniew Bodek 12369b8d05b8SZbigniew Bodek if (likely(req_id < tx_ring->ring_size)) { 12379b8d05b8SZbigniew Bodek tx_info = &tx_ring->tx_buffer_info[req_id]; 12380bdffe59SMarcin Wojtas if (tx_info->mbuf != NULL) 12390bdffe59SMarcin Wojtas return (0); 12404e8acd84SMarcin Wojtas device_printf(adapter->pdev, 12414e8acd84SMarcin Wojtas "tx_info doesn't have valid mbuf\n"); 12424e306999SMarcin Wojtas } 12434e8acd84SMarcin Wojtas 12444e306999SMarcin Wojtas device_printf(adapter->pdev, "Invalid req_id: %hu\n", req_id); 12459b8d05b8SZbigniew Bodek counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); 12469b8d05b8SZbigniew Bodek 1247858659f7SMarcin Wojtas /* Trigger device reset */ 1248858659f7SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 1249858659f7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 1250858659f7SMarcin Wojtas 12519b8d05b8SZbigniew Bodek return (EFAULT); 12529b8d05b8SZbigniew Bodek } 12539b8d05b8SZbigniew Bodek 12549b8d05b8SZbigniew Bodek static int 12559b8d05b8SZbigniew Bodek ena_create_io_queues(struct ena_adapter *adapter) 12569b8d05b8SZbigniew Bodek { 12579b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 12589b8d05b8SZbigniew Bodek struct ena_com_create_io_ctx ctx; 12599b8d05b8SZbigniew Bodek struct ena_ring *ring; 12605cb9db07SMarcin Wojtas struct ena_que *queue; 12619b8d05b8SZbigniew Bodek uint16_t ena_qid; 12629b8d05b8SZbigniew Bodek uint32_t msix_vector; 12639b8d05b8SZbigniew Bodek int rc, i; 12649b8d05b8SZbigniew Bodek 12659b8d05b8SZbigniew Bodek /* Create TX queues */ 12669b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 12679b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 12689b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 12699b8d05b8SZbigniew Bodek ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 12709b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 12719b8d05b8SZbigniew Bodek ctx.queue_size = adapter->tx_ring_size; 12729b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 12739b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 12749b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 12750bdffe59SMarcin Wojtas if (rc != 0) { 12769b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 12779b8d05b8SZbigniew Bodek "Failed to create io TX queue #%d rc: %d\n", i, rc); 12789b8d05b8SZbigniew Bodek goto err_tx; 12799b8d05b8SZbigniew Bodek } 12809b8d05b8SZbigniew Bodek ring = &adapter->tx_ring[i]; 12819b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 12829b8d05b8SZbigniew Bodek &ring->ena_com_io_sq, 12839b8d05b8SZbigniew Bodek &ring->ena_com_io_cq); 12840bdffe59SMarcin Wojtas if (rc != 0) { 12859b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 12869b8d05b8SZbigniew Bodek "Failed to get TX queue handlers. TX queue num" 12879b8d05b8SZbigniew Bodek " %d rc: %d\n", i, rc); 12889b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 12899b8d05b8SZbigniew Bodek goto err_tx; 12909b8d05b8SZbigniew Bodek } 12919b8d05b8SZbigniew Bodek } 12929b8d05b8SZbigniew Bodek 12939b8d05b8SZbigniew Bodek /* Create RX queues */ 12949b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 12959b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 12969b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 12979b8d05b8SZbigniew Bodek ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 12989b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 12999b8d05b8SZbigniew Bodek ctx.queue_size = adapter->rx_ring_size; 13009b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 13019b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 13029b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 13033f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 13049b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 13059b8d05b8SZbigniew Bodek "Failed to create io RX queue[%d] rc: %d\n", i, rc); 13069b8d05b8SZbigniew Bodek goto err_rx; 13079b8d05b8SZbigniew Bodek } 13089b8d05b8SZbigniew Bodek 13099b8d05b8SZbigniew Bodek ring = &adapter->rx_ring[i]; 13109b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 13119b8d05b8SZbigniew Bodek &ring->ena_com_io_sq, 13129b8d05b8SZbigniew Bodek &ring->ena_com_io_cq); 13133f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 13149b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 13159b8d05b8SZbigniew Bodek "Failed to get RX queue handlers. RX queue num" 13169b8d05b8SZbigniew Bodek " %d rc: %d\n", i, rc); 13179b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 13189b8d05b8SZbigniew Bodek goto err_rx; 13199b8d05b8SZbigniew Bodek } 13209b8d05b8SZbigniew Bodek } 13219b8d05b8SZbigniew Bodek 13225cb9db07SMarcin Wojtas for (i = 0; i < adapter->num_queues; i++) { 13235cb9db07SMarcin Wojtas queue = &adapter->que[i]; 13245cb9db07SMarcin Wojtas 13255cb9db07SMarcin Wojtas TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 13265cb9db07SMarcin Wojtas queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 13275cb9db07SMarcin Wojtas M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 13285cb9db07SMarcin Wojtas 13295cb9db07SMarcin Wojtas taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET, 13305cb9db07SMarcin Wojtas "%s queue %d cleanup", 13315cb9db07SMarcin Wojtas device_get_nameunit(adapter->pdev), i); 13325cb9db07SMarcin Wojtas } 13335cb9db07SMarcin Wojtas 13349b8d05b8SZbigniew Bodek return (0); 13359b8d05b8SZbigniew Bodek 13369b8d05b8SZbigniew Bodek err_rx: 13379b8d05b8SZbigniew Bodek while (i--) 13389b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 13399b8d05b8SZbigniew Bodek i = adapter->num_queues; 13409b8d05b8SZbigniew Bodek err_tx: 13419b8d05b8SZbigniew Bodek while (i--) 13429b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 13439b8d05b8SZbigniew Bodek 13449b8d05b8SZbigniew Bodek return (ENXIO); 13459b8d05b8SZbigniew Bodek } 13469b8d05b8SZbigniew Bodek 13479b8d05b8SZbigniew Bodek /** 13489b8d05b8SZbigniew Bodek * ena_tx_cleanup - clear sent packets and corresponding descriptors 13499b8d05b8SZbigniew Bodek * @tx_ring: ring for which we want to clean packets 13509b8d05b8SZbigniew Bodek * 13519b8d05b8SZbigniew Bodek * Once packets are sent, we ask the device in a loop for no longer used 13529b8d05b8SZbigniew Bodek * descriptors. We find the related mbuf chain in a map (index in an array) 13539b8d05b8SZbigniew Bodek * and free it, then update ring state. 13549b8d05b8SZbigniew Bodek * This is performed in "endless" loop, updating ring pointers every 13559b8d05b8SZbigniew Bodek * TX_COMMIT. The first check of free descriptor is performed before the actual 13569b8d05b8SZbigniew Bodek * loop, then repeated at the loop end. 13579b8d05b8SZbigniew Bodek **/ 13589b8d05b8SZbigniew Bodek static int 13599b8d05b8SZbigniew Bodek ena_tx_cleanup(struct ena_ring *tx_ring) 13609b8d05b8SZbigniew Bodek { 13619b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 13629b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 13639b8d05b8SZbigniew Bodek uint16_t next_to_clean; 13649b8d05b8SZbigniew Bodek uint16_t req_id; 13659b8d05b8SZbigniew Bodek uint16_t ena_qid; 13669b8d05b8SZbigniew Bodek unsigned int total_done = 0; 13679b8d05b8SZbigniew Bodek int rc; 13689b8d05b8SZbigniew Bodek int commit = TX_COMMIT; 13699b8d05b8SZbigniew Bodek int budget = TX_BUDGET; 13709b8d05b8SZbigniew Bodek int work_done; 13715cb9db07SMarcin Wojtas bool above_thresh; 13729b8d05b8SZbigniew Bodek 13739b8d05b8SZbigniew Bodek adapter = tx_ring->que->adapter; 13749b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 13759b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 13769b8d05b8SZbigniew Bodek next_to_clean = tx_ring->next_to_clean; 13779b8d05b8SZbigniew Bodek 13789b8d05b8SZbigniew Bodek do { 13799b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info; 13809b8d05b8SZbigniew Bodek struct mbuf *mbuf; 13819b8d05b8SZbigniew Bodek 13829b8d05b8SZbigniew Bodek rc = ena_com_tx_comp_req_id_get(io_cq, &req_id); 13833f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 13849b8d05b8SZbigniew Bodek break; 13859b8d05b8SZbigniew Bodek 13869b8d05b8SZbigniew Bodek rc = validate_tx_req_id(tx_ring, req_id); 13873f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 13889b8d05b8SZbigniew Bodek break; 13899b8d05b8SZbigniew Bodek 13909b8d05b8SZbigniew Bodek tx_info = &tx_ring->tx_buffer_info[req_id]; 13919b8d05b8SZbigniew Bodek 13929b8d05b8SZbigniew Bodek mbuf = tx_info->mbuf; 13939b8d05b8SZbigniew Bodek 13949b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 13959b8d05b8SZbigniew Bodek bintime_clear(&tx_info->timestamp); 13969b8d05b8SZbigniew Bodek 13979b8d05b8SZbigniew Bodek /* Map is no longer required */ 13984fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) { 13994fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, 1400e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 14014fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 14024fa9e02dSMarcin Wojtas tx_info->map_head); 14034fa9e02dSMarcin Wojtas tx_info->head_mapped = false; 14044fa9e02dSMarcin Wojtas } 14054fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) { 14064fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, 14074fa9e02dSMarcin Wojtas BUS_DMASYNC_POSTWRITE); 14084fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, 14094fa9e02dSMarcin Wojtas tx_info->map_seg); 14104fa9e02dSMarcin Wojtas tx_info->seg_mapped = false; 14119b8d05b8SZbigniew Bodek } 14129b8d05b8SZbigniew Bodek 141330425f93SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed\n", 14144e8acd84SMarcin Wojtas tx_ring->qid, mbuf); 14154e8acd84SMarcin Wojtas 14169b8d05b8SZbigniew Bodek m_freem(mbuf); 14179b8d05b8SZbigniew Bodek 14189b8d05b8SZbigniew Bodek total_done += tx_info->tx_descs; 14199b8d05b8SZbigniew Bodek 14209b8d05b8SZbigniew Bodek tx_ring->free_tx_ids[next_to_clean] = req_id; 14219b8d05b8SZbigniew Bodek next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, 14229b8d05b8SZbigniew Bodek tx_ring->ring_size); 14239b8d05b8SZbigniew Bodek 14243f9ed7abSMarcin Wojtas if (unlikely(--commit == 0)) { 14259b8d05b8SZbigniew Bodek commit = TX_COMMIT; 14269b8d05b8SZbigniew Bodek /* update ring state every TX_COMMIT descriptor */ 14279b8d05b8SZbigniew Bodek tx_ring->next_to_clean = next_to_clean; 14280bdffe59SMarcin Wojtas ena_com_comp_ack( 14290bdffe59SMarcin Wojtas &adapter->ena_dev->io_sq_queues[ena_qid], 14300bdffe59SMarcin Wojtas total_done); 14319b8d05b8SZbigniew Bodek ena_com_update_dev_comp_head(io_cq); 14329b8d05b8SZbigniew Bodek total_done = 0; 14339b8d05b8SZbigniew Bodek } 14343f9ed7abSMarcin Wojtas } while (likely(--budget)); 14359b8d05b8SZbigniew Bodek 14369b8d05b8SZbigniew Bodek work_done = TX_BUDGET - budget; 14379b8d05b8SZbigniew Bodek 143830425f93SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d\n", 14394e8acd84SMarcin Wojtas tx_ring->qid, work_done); 14404e8acd84SMarcin Wojtas 14419b8d05b8SZbigniew Bodek /* If there is still something to commit update ring state */ 14423f9ed7abSMarcin Wojtas if (likely(commit != TX_COMMIT)) { 14439b8d05b8SZbigniew Bodek tx_ring->next_to_clean = next_to_clean; 14440bdffe59SMarcin Wojtas ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], 14450bdffe59SMarcin Wojtas total_done); 14469b8d05b8SZbigniew Bodek ena_com_update_dev_comp_head(io_cq); 14479b8d05b8SZbigniew Bodek } 14489b8d05b8SZbigniew Bodek 14495cb9db07SMarcin Wojtas /* 14505cb9db07SMarcin Wojtas * Need to make the rings circular update visible to 14515cb9db07SMarcin Wojtas * ena_xmit_mbuf() before checking for tx_ring->running. 14525cb9db07SMarcin Wojtas */ 14535cb9db07SMarcin Wojtas mb(); 14545cb9db07SMarcin Wojtas 14555cb9db07SMarcin Wojtas above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 14565cb9db07SMarcin Wojtas ENA_TX_RESUME_THRESH); 14575cb9db07SMarcin Wojtas if (unlikely(!tx_ring->running && above_thresh)) { 14585cb9db07SMarcin Wojtas ENA_RING_MTX_LOCK(tx_ring); 14595cb9db07SMarcin Wojtas above_thresh = 14605cb9db07SMarcin Wojtas ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 14615cb9db07SMarcin Wojtas ENA_TX_RESUME_THRESH); 14625cb9db07SMarcin Wojtas if (!tx_ring->running && above_thresh) { 14635cb9db07SMarcin Wojtas tx_ring->running = true; 14645cb9db07SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); 14655cb9db07SMarcin Wojtas taskqueue_enqueue(tx_ring->enqueue_tq, 14665cb9db07SMarcin Wojtas &tx_ring->enqueue_task); 14675cb9db07SMarcin Wojtas } 14685cb9db07SMarcin Wojtas ENA_RING_MTX_UNLOCK(tx_ring); 14695cb9db07SMarcin Wojtas } 14709b8d05b8SZbigniew Bodek 14719b8d05b8SZbigniew Bodek return (work_done); 14729b8d05b8SZbigniew Bodek } 14739b8d05b8SZbigniew Bodek 14749b8d05b8SZbigniew Bodek static void 14759b8d05b8SZbigniew Bodek ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, 14769b8d05b8SZbigniew Bodek struct mbuf *mbuf) 14779b8d05b8SZbigniew Bodek { 14789b8d05b8SZbigniew Bodek struct ena_adapter *adapter = rx_ring->adapter; 14799b8d05b8SZbigniew Bodek 1480fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { 14819b8d05b8SZbigniew Bodek mbuf->m_pkthdr.flowid = ena_rx_ctx->hash; 14829b8d05b8SZbigniew Bodek 14839b8d05b8SZbigniew Bodek if (ena_rx_ctx->frag && 1484bfea0e93SMarcin Wojtas (ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN)) { 14859b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); 14869b8d05b8SZbigniew Bodek return; 14879b8d05b8SZbigniew Bodek } 14889b8d05b8SZbigniew Bodek 14899b8d05b8SZbigniew Bodek switch (ena_rx_ctx->l3_proto) { 14909b8d05b8SZbigniew Bodek case ENA_ETH_IO_L3_PROTO_IPV4: 14919b8d05b8SZbigniew Bodek switch (ena_rx_ctx->l4_proto) { 14929b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_TCP: 14939b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4); 14949b8d05b8SZbigniew Bodek break; 14959b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_UDP: 14969b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4); 14979b8d05b8SZbigniew Bodek break; 14989b8d05b8SZbigniew Bodek default: 14999b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4); 15009b8d05b8SZbigniew Bodek } 15019b8d05b8SZbigniew Bodek break; 15029b8d05b8SZbigniew Bodek case ENA_ETH_IO_L3_PROTO_IPV6: 15039b8d05b8SZbigniew Bodek switch (ena_rx_ctx->l4_proto) { 15049b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_TCP: 15059b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6); 15069b8d05b8SZbigniew Bodek break; 15079b8d05b8SZbigniew Bodek case ENA_ETH_IO_L4_PROTO_UDP: 15089b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6); 15099b8d05b8SZbigniew Bodek break; 15109b8d05b8SZbigniew Bodek default: 15119b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6); 15129b8d05b8SZbigniew Bodek } 15139b8d05b8SZbigniew Bodek break; 15149b8d05b8SZbigniew Bodek case ENA_ETH_IO_L3_PROTO_UNKNOWN: 15159b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); 15169b8d05b8SZbigniew Bodek break; 15179b8d05b8SZbigniew Bodek default: 15189b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH); 15199b8d05b8SZbigniew Bodek } 15209b8d05b8SZbigniew Bodek } else { 15219b8d05b8SZbigniew Bodek mbuf->m_pkthdr.flowid = rx_ring->qid; 15229b8d05b8SZbigniew Bodek M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE); 15239b8d05b8SZbigniew Bodek } 15249b8d05b8SZbigniew Bodek } 15259b8d05b8SZbigniew Bodek 15269b8d05b8SZbigniew Bodek /** 15279b8d05b8SZbigniew Bodek * ena_rx_mbuf - assemble mbuf from descriptors 15289b8d05b8SZbigniew Bodek * @rx_ring: ring for which we want to clean packets 15299b8d05b8SZbigniew Bodek * @ena_bufs: buffer info 15309b8d05b8SZbigniew Bodek * @ena_rx_ctx: metadata for this packet(s) 153143fefd16SMarcin Wojtas * @next_to_clean: ring pointer, will be updated only upon success 15329b8d05b8SZbigniew Bodek * 15339b8d05b8SZbigniew Bodek **/ 15349b8d05b8SZbigniew Bodek static struct mbuf* 15359b8d05b8SZbigniew Bodek ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, 15369b8d05b8SZbigniew Bodek struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean) 15379b8d05b8SZbigniew Bodek { 15389b8d05b8SZbigniew Bodek struct mbuf *mbuf; 15399b8d05b8SZbigniew Bodek struct ena_rx_buffer *rx_info; 15409b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 15419b8d05b8SZbigniew Bodek unsigned int descs = ena_rx_ctx->descs; 1542c51a229cSMarcin Wojtas int rc; 154343fefd16SMarcin Wojtas uint16_t ntc, len, req_id, buf = 0; 15449b8d05b8SZbigniew Bodek 154543fefd16SMarcin Wojtas ntc = *next_to_clean; 15469b8d05b8SZbigniew Bodek adapter = rx_ring->adapter; 15479b8d05b8SZbigniew Bodek 154843fefd16SMarcin Wojtas len = ena_bufs[buf].len; 154943fefd16SMarcin Wojtas req_id = ena_bufs[buf].req_id; 1550c51a229cSMarcin Wojtas rc = validate_rx_req_id(rx_ring, req_id); 1551c51a229cSMarcin Wojtas if (unlikely(rc != 0)) 1552c51a229cSMarcin Wojtas return (NULL); 1553c51a229cSMarcin Wojtas 155443fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 15551d65b4c0SMarcin Wojtas if (unlikely(rx_info->mbuf == NULL)) { 15561d65b4c0SMarcin Wojtas device_printf(adapter->pdev, "NULL mbuf in rx_info"); 15571d65b4c0SMarcin Wojtas return (NULL); 15581d65b4c0SMarcin Wojtas } 155943fefd16SMarcin Wojtas 156030425f93SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx\n", 15619b8d05b8SZbigniew Bodek rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr); 15629b8d05b8SZbigniew Bodek 1563e8073738SMarcin Wojtas bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 1564e8073738SMarcin Wojtas BUS_DMASYNC_POSTREAD); 15659b8d05b8SZbigniew Bodek mbuf = rx_info->mbuf; 15669b8d05b8SZbigniew Bodek mbuf->m_flags |= M_PKTHDR; 15679b8d05b8SZbigniew Bodek mbuf->m_pkthdr.len = len; 15689b8d05b8SZbigniew Bodek mbuf->m_len = len; 15699b8d05b8SZbigniew Bodek mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp; 15709b8d05b8SZbigniew Bodek 15719b8d05b8SZbigniew Bodek /* Fill mbuf with hash key and it's interpretation for optimization */ 15729b8d05b8SZbigniew Bodek ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf); 15739b8d05b8SZbigniew Bodek 157430425f93SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d\n", 15759b8d05b8SZbigniew Bodek mbuf, mbuf->m_flags, mbuf->m_pkthdr.len); 15769b8d05b8SZbigniew Bodek 15779b8d05b8SZbigniew Bodek /* DMA address is not needed anymore, unmap it */ 15789b8d05b8SZbigniew Bodek bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); 15799b8d05b8SZbigniew Bodek 15809b8d05b8SZbigniew Bodek rx_info->mbuf = NULL; 158143fefd16SMarcin Wojtas rx_ring->free_rx_ids[ntc] = req_id; 158243fefd16SMarcin Wojtas ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); 15839b8d05b8SZbigniew Bodek 15849b8d05b8SZbigniew Bodek /* 15859b8d05b8SZbigniew Bodek * While we have more than 1 descriptors for one rcvd packet, append 15869b8d05b8SZbigniew Bodek * other mbufs to the main one 15879b8d05b8SZbigniew Bodek */ 15889b8d05b8SZbigniew Bodek while (--descs) { 158943fefd16SMarcin Wojtas ++buf; 159043fefd16SMarcin Wojtas len = ena_bufs[buf].len; 159143fefd16SMarcin Wojtas req_id = ena_bufs[buf].req_id; 1592c51a229cSMarcin Wojtas rc = validate_rx_req_id(rx_ring, req_id); 1593c51a229cSMarcin Wojtas if (unlikely(rc != 0)) { 1594c51a229cSMarcin Wojtas /* 1595c51a229cSMarcin Wojtas * If the req_id is invalid, then the device will be 1596c51a229cSMarcin Wojtas * reset. In that case we must free all mbufs that 1597c51a229cSMarcin Wojtas * were already gathered. 1598c51a229cSMarcin Wojtas */ 1599c51a229cSMarcin Wojtas m_freem(mbuf); 1600c51a229cSMarcin Wojtas return (NULL); 1601c51a229cSMarcin Wojtas } 160243fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 16039b8d05b8SZbigniew Bodek 160443fefd16SMarcin Wojtas if (unlikely(rx_info->mbuf == NULL)) { 160543fefd16SMarcin Wojtas device_printf(adapter->pdev, "NULL mbuf in rx_info"); 160643fefd16SMarcin Wojtas /* 160743fefd16SMarcin Wojtas * If one of the required mbufs was not allocated yet, 160843fefd16SMarcin Wojtas * we can break there. 160943fefd16SMarcin Wojtas * All earlier used descriptors will be reallocated 161043fefd16SMarcin Wojtas * later and not used mbufs can be reused. 161143fefd16SMarcin Wojtas * The next_to_clean pointer will not be updated in case 161243fefd16SMarcin Wojtas * of an error, so caller should advance it manually 161343fefd16SMarcin Wojtas * in error handling routine to keep it up to date 161443fefd16SMarcin Wojtas * with hw ring. 161543fefd16SMarcin Wojtas */ 161643fefd16SMarcin Wojtas m_freem(mbuf); 161743fefd16SMarcin Wojtas return (NULL); 161843fefd16SMarcin Wojtas } 161943fefd16SMarcin Wojtas 1620e8073738SMarcin Wojtas bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, 1621e8073738SMarcin Wojtas BUS_DMASYNC_POSTREAD); 16223f9ed7abSMarcin Wojtas if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) { 16239b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); 162430425f93SMarcin Wojtas ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p\n", 16259b8d05b8SZbigniew Bodek mbuf); 16269b8d05b8SZbigniew Bodek } 16274e8acd84SMarcin Wojtas 16284e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, 162930425f93SMarcin Wojtas "rx mbuf updated. len %d\n", mbuf->m_pkthdr.len); 16304e8acd84SMarcin Wojtas 16319b8d05b8SZbigniew Bodek /* Free already appended mbuf, it won't be useful anymore */ 16329b8d05b8SZbigniew Bodek bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map); 16339b8d05b8SZbigniew Bodek m_freem(rx_info->mbuf); 16349b8d05b8SZbigniew Bodek rx_info->mbuf = NULL; 16359b8d05b8SZbigniew Bodek 163643fefd16SMarcin Wojtas rx_ring->free_rx_ids[ntc] = req_id; 163743fefd16SMarcin Wojtas ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size); 16389b8d05b8SZbigniew Bodek } 16399b8d05b8SZbigniew Bodek 164043fefd16SMarcin Wojtas *next_to_clean = ntc; 164143fefd16SMarcin Wojtas 16429b8d05b8SZbigniew Bodek return (mbuf); 16439b8d05b8SZbigniew Bodek } 16449b8d05b8SZbigniew Bodek 16459b8d05b8SZbigniew Bodek /** 16469b8d05b8SZbigniew Bodek * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum 16479b8d05b8SZbigniew Bodek **/ 16489b8d05b8SZbigniew Bodek static inline void 16499b8d05b8SZbigniew Bodek ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, 16509b8d05b8SZbigniew Bodek struct mbuf *mbuf) 16519b8d05b8SZbigniew Bodek { 16529b8d05b8SZbigniew Bodek 16539b8d05b8SZbigniew Bodek /* if IP and error */ 16543f9ed7abSMarcin Wojtas if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && 16553f9ed7abSMarcin Wojtas ena_rx_ctx->l3_csum_err)) { 16569b8d05b8SZbigniew Bodek /* ipv4 checksum error */ 16579b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags = 0; 16589b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.bad_csum, 1); 165930425f93SMarcin Wojtas ena_trace(ENA_DBG, "RX IPv4 header checksum error\n"); 16609b8d05b8SZbigniew Bodek return; 16619b8d05b8SZbigniew Bodek } 16629b8d05b8SZbigniew Bodek 16639b8d05b8SZbigniew Bodek /* if TCP/UDP */ 16649b8d05b8SZbigniew Bodek if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || 16659b8d05b8SZbigniew Bodek (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) { 16669b8d05b8SZbigniew Bodek if (ena_rx_ctx->l4_csum_err) { 16679b8d05b8SZbigniew Bodek /* TCP/UDP checksum error */ 16689b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags = 0; 16699b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.bad_csum, 1); 167030425f93SMarcin Wojtas ena_trace(ENA_DBG, "RX L4 checksum error\n"); 16719b8d05b8SZbigniew Bodek } else { 16729b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 16739b8d05b8SZbigniew Bodek mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 16749b8d05b8SZbigniew Bodek } 16759b8d05b8SZbigniew Bodek } 16769b8d05b8SZbigniew Bodek } 16779b8d05b8SZbigniew Bodek 16789b8d05b8SZbigniew Bodek /** 16799b8d05b8SZbigniew Bodek * ena_rx_cleanup - handle rx irq 16809b8d05b8SZbigniew Bodek * @arg: ring for which irq is being handled 16819b8d05b8SZbigniew Bodek **/ 16829b8d05b8SZbigniew Bodek static int 16839b8d05b8SZbigniew Bodek ena_rx_cleanup(struct ena_ring *rx_ring) 16849b8d05b8SZbigniew Bodek { 16859b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 16869b8d05b8SZbigniew Bodek struct mbuf *mbuf; 16879b8d05b8SZbigniew Bodek struct ena_com_rx_ctx ena_rx_ctx; 16889b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 16899b8d05b8SZbigniew Bodek struct ena_com_io_sq* io_sq; 16909b8d05b8SZbigniew Bodek if_t ifp; 16919b8d05b8SZbigniew Bodek uint16_t ena_qid; 16929b8d05b8SZbigniew Bodek uint16_t next_to_clean; 16939b8d05b8SZbigniew Bodek uint32_t refill_required; 16949b8d05b8SZbigniew Bodek uint32_t refill_threshold; 16959b8d05b8SZbigniew Bodek uint32_t do_if_input = 0; 16969b8d05b8SZbigniew Bodek unsigned int qid; 169743fefd16SMarcin Wojtas int rc, i; 16989b8d05b8SZbigniew Bodek int budget = RX_BUDGET; 16999b8d05b8SZbigniew Bodek 17009b8d05b8SZbigniew Bodek adapter = rx_ring->que->adapter; 17019b8d05b8SZbigniew Bodek ifp = adapter->ifp; 17029b8d05b8SZbigniew Bodek qid = rx_ring->que->id; 17039b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(qid); 17049b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 17059b8d05b8SZbigniew Bodek io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; 17069b8d05b8SZbigniew Bodek next_to_clean = rx_ring->next_to_clean; 17079b8d05b8SZbigniew Bodek 170830425f93SMarcin Wojtas ena_trace(ENA_DBG, "rx: qid %d\n", qid); 17094e8acd84SMarcin Wojtas 17109b8d05b8SZbigniew Bodek do { 17119b8d05b8SZbigniew Bodek ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 17129b8d05b8SZbigniew Bodek ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size; 17139b8d05b8SZbigniew Bodek ena_rx_ctx.descs = 0; 1714e8073738SMarcin Wojtas bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, 1715e8073738SMarcin Wojtas io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD); 17169b8d05b8SZbigniew Bodek rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx); 17179b8d05b8SZbigniew Bodek 17180bdffe59SMarcin Wojtas if (unlikely(rc != 0)) 17199b8d05b8SZbigniew Bodek goto error; 17209b8d05b8SZbigniew Bodek 17219b8d05b8SZbigniew Bodek if (unlikely(ena_rx_ctx.descs == 0)) 17229b8d05b8SZbigniew Bodek break; 17239b8d05b8SZbigniew Bodek 17244e8acd84SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. " 172530425f93SMarcin Wojtas "descs #: %d l3 proto %d l4 proto %d hash: %x\n", 17264e8acd84SMarcin Wojtas rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, 17274e8acd84SMarcin Wojtas ena_rx_ctx.l4_proto, ena_rx_ctx.hash); 17284e8acd84SMarcin Wojtas 17299b8d05b8SZbigniew Bodek /* Receive mbuf from the ring */ 17309b8d05b8SZbigniew Bodek mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs, 17319b8d05b8SZbigniew Bodek &ena_rx_ctx, &next_to_clean); 1732e8073738SMarcin Wojtas bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, 1733e8073738SMarcin Wojtas io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD); 17349b8d05b8SZbigniew Bodek /* Exit if we failed to retrieve a buffer */ 17350bdffe59SMarcin Wojtas if (unlikely(mbuf == NULL)) { 173643fefd16SMarcin Wojtas for (i = 0; i < ena_rx_ctx.descs; ++i) { 173743fefd16SMarcin Wojtas rx_ring->free_rx_ids[next_to_clean] = 173843fefd16SMarcin Wojtas rx_ring->ena_bufs[i].req_id; 173943fefd16SMarcin Wojtas next_to_clean = 174043fefd16SMarcin Wojtas ENA_RX_RING_IDX_NEXT(next_to_clean, 174143fefd16SMarcin Wojtas rx_ring->ring_size); 174243fefd16SMarcin Wojtas 174343fefd16SMarcin Wojtas } 17449b8d05b8SZbigniew Bodek break; 17459b8d05b8SZbigniew Bodek } 17469b8d05b8SZbigniew Bodek 17470bdffe59SMarcin Wojtas if (((ifp->if_capenable & IFCAP_RXCSUM) != 0) || 17480bdffe59SMarcin Wojtas ((ifp->if_capenable & IFCAP_RXCSUM_IPV6) != 0)) { 17499b8d05b8SZbigniew Bodek ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf); 17509b8d05b8SZbigniew Bodek } 17519b8d05b8SZbigniew Bodek 175230217e2dSMarcin Wojtas counter_enter(); 175330217e2dSMarcin Wojtas counter_u64_add_protected(rx_ring->rx_stats.bytes, 175430217e2dSMarcin Wojtas mbuf->m_pkthdr.len); 175530217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.rx_bytes, 175630217e2dSMarcin Wojtas mbuf->m_pkthdr.len); 175730217e2dSMarcin Wojtas counter_exit(); 17589b8d05b8SZbigniew Bodek /* 17599b8d05b8SZbigniew Bodek * LRO is only for IP/TCP packets and TCP checksum of the packet 17609b8d05b8SZbigniew Bodek * should be computed by hardware. 17619b8d05b8SZbigniew Bodek */ 17629b8d05b8SZbigniew Bodek do_if_input = 1; 17630bdffe59SMarcin Wojtas if (((ifp->if_capenable & IFCAP_LRO) != 0) && 17640bdffe59SMarcin Wojtas ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && 17650bdffe59SMarcin Wojtas (ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP)) { 17669b8d05b8SZbigniew Bodek /* 17679b8d05b8SZbigniew Bodek * Send to the stack if: 17689b8d05b8SZbigniew Bodek * - LRO not enabled, or 17699b8d05b8SZbigniew Bodek * - no LRO resources, or 17709b8d05b8SZbigniew Bodek * - lro enqueue fails 17719b8d05b8SZbigniew Bodek */ 17720bdffe59SMarcin Wojtas if ((rx_ring->lro.lro_cnt != 0) && 17730bdffe59SMarcin Wojtas (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)) 17749b8d05b8SZbigniew Bodek do_if_input = 0; 17759b8d05b8SZbigniew Bodek } 17760bdffe59SMarcin Wojtas if (do_if_input != 0) { 17770bdffe59SMarcin Wojtas ena_trace(ENA_DBG | ENA_RXPTH, 177830425f93SMarcin Wojtas "calling if_input() with mbuf %p\n", mbuf); 17799b8d05b8SZbigniew Bodek (*ifp->if_input)(ifp, mbuf); 17809b8d05b8SZbigniew Bodek } 17819b8d05b8SZbigniew Bodek 178230217e2dSMarcin Wojtas counter_enter(); 178330217e2dSMarcin Wojtas counter_u64_add_protected(rx_ring->rx_stats.cnt, 1); 178430217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.rx_packets, 1); 178530217e2dSMarcin Wojtas counter_exit(); 17869b8d05b8SZbigniew Bodek } while (--budget); 17879b8d05b8SZbigniew Bodek 17889b8d05b8SZbigniew Bodek rx_ring->next_to_clean = next_to_clean; 17899b8d05b8SZbigniew Bodek 1790a195fab0SMarcin Wojtas refill_required = ena_com_free_desc(io_sq); 179182f5a792SMarcin Wojtas refill_threshold = min_t(int, 179282f5a792SMarcin Wojtas rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, 179382f5a792SMarcin Wojtas ENA_RX_REFILL_THRESH_PACKET); 17949b8d05b8SZbigniew Bodek 17959b8d05b8SZbigniew Bodek if (refill_required > refill_threshold) { 17969b8d05b8SZbigniew Bodek ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 17979b8d05b8SZbigniew Bodek ena_refill_rx_bufs(rx_ring, refill_required); 17989b8d05b8SZbigniew Bodek } 17999b8d05b8SZbigniew Bodek 18009b8d05b8SZbigniew Bodek tcp_lro_flush_all(&rx_ring->lro); 18019b8d05b8SZbigniew Bodek 18029b8d05b8SZbigniew Bodek return (RX_BUDGET - budget); 18039b8d05b8SZbigniew Bodek 18049b8d05b8SZbigniew Bodek error: 18059b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1); 1806c9b099ecSMarcin Wojtas 1807c9b099ecSMarcin Wojtas /* Too many desc from the device. Trigger reset */ 1808858659f7SMarcin Wojtas if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 1809c9b099ecSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; 1810fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 1811858659f7SMarcin Wojtas } 1812c9b099ecSMarcin Wojtas 1813c9b099ecSMarcin Wojtas return (0); 18149b8d05b8SZbigniew Bodek } 18159b8d05b8SZbigniew Bodek 18169b8d05b8SZbigniew Bodek /********************************************************************* 18179b8d05b8SZbigniew Bodek * 18189b8d05b8SZbigniew Bodek * MSIX & Interrupt Service routine 18199b8d05b8SZbigniew Bodek * 18209b8d05b8SZbigniew Bodek **********************************************************************/ 18219b8d05b8SZbigniew Bodek 18229b8d05b8SZbigniew Bodek /** 18239b8d05b8SZbigniew Bodek * ena_handle_msix - MSIX Interrupt Handler for admin/async queue 18249b8d05b8SZbigniew Bodek * @arg: interrupt number 18259b8d05b8SZbigniew Bodek **/ 18269b8d05b8SZbigniew Bodek static void 18279b8d05b8SZbigniew Bodek ena_intr_msix_mgmnt(void *arg) 18289b8d05b8SZbigniew Bodek { 18299b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 18309b8d05b8SZbigniew Bodek 18319b8d05b8SZbigniew Bodek ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 1832fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))) 18339b8d05b8SZbigniew Bodek ena_com_aenq_intr_handler(adapter->ena_dev, arg); 18349b8d05b8SZbigniew Bodek } 18359b8d05b8SZbigniew Bodek 18369b8d05b8SZbigniew Bodek static void 18375cb9db07SMarcin Wojtas ena_cleanup(void *arg, int pending) 18389b8d05b8SZbigniew Bodek { 18399b8d05b8SZbigniew Bodek struct ena_que *que = arg; 18409b8d05b8SZbigniew Bodek struct ena_adapter *adapter = que->adapter; 18419b8d05b8SZbigniew Bodek if_t ifp = adapter->ifp; 18429b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 18439b8d05b8SZbigniew Bodek struct ena_ring *rx_ring; 18449b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 18459b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 18469b8d05b8SZbigniew Bodek int qid, ena_qid; 18479b8d05b8SZbigniew Bodek int txc, rxc, i; 18489b8d05b8SZbigniew Bodek 18493f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 18509b8d05b8SZbigniew Bodek return; 18519b8d05b8SZbigniew Bodek 185230425f93SMarcin Wojtas ena_trace(ENA_DBG, "MSI-X TX/RX routine\n"); 18539b8d05b8SZbigniew Bodek 18549b8d05b8SZbigniew Bodek tx_ring = que->tx_ring; 18559b8d05b8SZbigniew Bodek rx_ring = que->rx_ring; 18569b8d05b8SZbigniew Bodek qid = que->id; 18579b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(qid); 18589b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 18599b8d05b8SZbigniew Bodek 1860d12f7bfcSMarcin Wojtas tx_ring->first_interrupt = true; 1861d12f7bfcSMarcin Wojtas rx_ring->first_interrupt = true; 1862d12f7bfcSMarcin Wojtas 18639b8d05b8SZbigniew Bodek for (i = 0; i < CLEAN_BUDGET; ++i) { 18649b8d05b8SZbigniew Bodek rxc = ena_rx_cleanup(rx_ring); 18659b8d05b8SZbigniew Bodek txc = ena_tx_cleanup(tx_ring); 18669b8d05b8SZbigniew Bodek 18673f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 18689b8d05b8SZbigniew Bodek return; 18699b8d05b8SZbigniew Bodek 18700bdffe59SMarcin Wojtas if ((txc != TX_BUDGET) && (rxc != RX_BUDGET)) 18719b8d05b8SZbigniew Bodek break; 18729b8d05b8SZbigniew Bodek } 18739b8d05b8SZbigniew Bodek 18749b8d05b8SZbigniew Bodek /* Signal that work is done and unmask interrupt */ 18759b8d05b8SZbigniew Bodek ena_com_update_intr_reg(&intr_reg, 18769b8d05b8SZbigniew Bodek RX_IRQ_INTERVAL, 18779b8d05b8SZbigniew Bodek TX_IRQ_INTERVAL, 18789b8d05b8SZbigniew Bodek true); 18799b8d05b8SZbigniew Bodek ena_com_unmask_intr(io_cq, &intr_reg); 18809b8d05b8SZbigniew Bodek } 18819b8d05b8SZbigniew Bodek 18825cb9db07SMarcin Wojtas /** 18835cb9db07SMarcin Wojtas * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx 18845cb9db07SMarcin Wojtas * @arg: queue 18855cb9db07SMarcin Wojtas **/ 18865cb9db07SMarcin Wojtas static int 18875cb9db07SMarcin Wojtas ena_handle_msix(void *arg) 18885cb9db07SMarcin Wojtas { 18895cb9db07SMarcin Wojtas struct ena_que *queue = arg; 18905cb9db07SMarcin Wojtas struct ena_adapter *adapter = queue->adapter; 18915cb9db07SMarcin Wojtas if_t ifp = adapter->ifp; 18925cb9db07SMarcin Wojtas 18935cb9db07SMarcin Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 18945cb9db07SMarcin Wojtas return (FILTER_STRAY); 18955cb9db07SMarcin Wojtas 18965cb9db07SMarcin Wojtas taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); 18975cb9db07SMarcin Wojtas 18985cb9db07SMarcin Wojtas return (FILTER_HANDLED); 18995cb9db07SMarcin Wojtas } 19005cb9db07SMarcin Wojtas 19019b8d05b8SZbigniew Bodek static int 19029b8d05b8SZbigniew Bodek ena_enable_msix(struct ena_adapter *adapter) 19039b8d05b8SZbigniew Bodek { 19049b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 19058805021aSMarcin Wojtas int msix_vecs, msix_req; 19068805021aSMarcin Wojtas int i, rc = 0; 19079b8d05b8SZbigniew Bodek 1908fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 1909fd43fd2aSMarcin Wojtas device_printf(dev, "Error, MSI-X is already enabled\n"); 1910fd43fd2aSMarcin Wojtas return (EINVAL); 1911fd43fd2aSMarcin Wojtas } 1912fd43fd2aSMarcin Wojtas 19139b8d05b8SZbigniew Bodek /* Reserved the max msix vectors we might need */ 19149b8d05b8SZbigniew Bodek msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues); 19159b8d05b8SZbigniew Bodek 1916cd5d5804SMarcin Wojtas adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1917cd5d5804SMarcin Wojtas M_DEVBUF, M_WAITOK | M_ZERO); 1918cd5d5804SMarcin Wojtas 191930425f93SMarcin Wojtas ena_trace(ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); 19209b8d05b8SZbigniew Bodek 19219b8d05b8SZbigniew Bodek for (i = 0; i < msix_vecs; i++) { 19229b8d05b8SZbigniew Bodek adapter->msix_entries[i].entry = i; 19239b8d05b8SZbigniew Bodek /* Vectors must start from 1 */ 19249b8d05b8SZbigniew Bodek adapter->msix_entries[i].vector = i + 1; 19259b8d05b8SZbigniew Bodek } 19269b8d05b8SZbigniew Bodek 19278805021aSMarcin Wojtas msix_req = msix_vecs; 19289b8d05b8SZbigniew Bodek rc = pci_alloc_msix(dev, &msix_vecs); 19293f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 19309b8d05b8SZbigniew Bodek device_printf(dev, 19319b8d05b8SZbigniew Bodek "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc); 19327d2544e6SMarcin Wojtas 19339b8d05b8SZbigniew Bodek rc = ENOSPC; 19347d2544e6SMarcin Wojtas goto err_msix_free; 19359b8d05b8SZbigniew Bodek } 19369b8d05b8SZbigniew Bodek 19378805021aSMarcin Wojtas if (msix_vecs != msix_req) { 19382b5b60feSMarcin Wojtas if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 19392b5b60feSMarcin Wojtas device_printf(dev, 19402b5b60feSMarcin Wojtas "Not enough number of MSI-x allocated: %d\n", 19412b5b60feSMarcin Wojtas msix_vecs); 19422b5b60feSMarcin Wojtas pci_release_msi(dev); 19432b5b60feSMarcin Wojtas rc = ENOSPC; 19442b5b60feSMarcin Wojtas goto err_msix_free; 19452b5b60feSMarcin Wojtas } 19468805021aSMarcin Wojtas device_printf(dev, "Enable only %d MSI-x (out of %d), reduce " 19478805021aSMarcin Wojtas "the number of queues\n", msix_vecs, msix_req); 19488805021aSMarcin Wojtas adapter->num_queues = msix_vecs - ENA_ADMIN_MSIX_VEC; 19498805021aSMarcin Wojtas } 19508805021aSMarcin Wojtas 19519b8d05b8SZbigniew Bodek adapter->msix_vecs = msix_vecs; 1952fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 19539b8d05b8SZbigniew Bodek 19547d2544e6SMarcin Wojtas return (0); 19557d2544e6SMarcin Wojtas 19567d2544e6SMarcin Wojtas err_msix_free: 19577d2544e6SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 19587d2544e6SMarcin Wojtas adapter->msix_entries = NULL; 19597d2544e6SMarcin Wojtas 19609b8d05b8SZbigniew Bodek return (rc); 19619b8d05b8SZbigniew Bodek } 19629b8d05b8SZbigniew Bodek 19639b8d05b8SZbigniew Bodek static void 19649b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(struct ena_adapter *adapter) 19659b8d05b8SZbigniew Bodek { 19669b8d05b8SZbigniew Bodek 19679b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, 19689b8d05b8SZbigniew Bodek ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", 19699b8d05b8SZbigniew Bodek device_get_nameunit(adapter->pdev)); 19709b8d05b8SZbigniew Bodek /* 19719b8d05b8SZbigniew Bodek * Handler is NULL on purpose, it will be set 19729b8d05b8SZbigniew Bodek * when mgmnt interrupt is acquired 19739b8d05b8SZbigniew Bodek */ 19749b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 19759b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 19769b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 19779b8d05b8SZbigniew Bodek adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; 19789b8d05b8SZbigniew Bodek } 19799b8d05b8SZbigniew Bodek 198077958fcdSMarcin Wojtas static int 19819b8d05b8SZbigniew Bodek ena_setup_io_intr(struct ena_adapter *adapter) 19829b8d05b8SZbigniew Bodek { 19839b8d05b8SZbigniew Bodek static int last_bind_cpu = -1; 19849b8d05b8SZbigniew Bodek int irq_idx; 19859b8d05b8SZbigniew Bodek 198677958fcdSMarcin Wojtas if (adapter->msix_entries == NULL) 198777958fcdSMarcin Wojtas return (EINVAL); 198877958fcdSMarcin Wojtas 19899b8d05b8SZbigniew Bodek for (int i = 0; i < adapter->num_queues; i++) { 19909b8d05b8SZbigniew Bodek irq_idx = ENA_IO_IRQ_IDX(i); 19919b8d05b8SZbigniew Bodek 19929b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 19939b8d05b8SZbigniew Bodek "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); 19949b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].handler = ena_handle_msix; 19959b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].data = &adapter->que[i]; 19969b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].vector = 19979b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector; 19989b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n", 19999b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector); 2000277f11c4SMarcin Wojtas 20019b8d05b8SZbigniew Bodek /* 2002277f11c4SMarcin Wojtas * We want to bind rings to the corresponding cpu 20039b8d05b8SZbigniew Bodek * using something similar to the RSS round-robin technique. 20049b8d05b8SZbigniew Bodek */ 20053f9ed7abSMarcin Wojtas if (unlikely(last_bind_cpu < 0)) 20069b8d05b8SZbigniew Bodek last_bind_cpu = CPU_FIRST(); 20079b8d05b8SZbigniew Bodek adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 20089b8d05b8SZbigniew Bodek last_bind_cpu; 20099b8d05b8SZbigniew Bodek last_bind_cpu = CPU_NEXT(last_bind_cpu); 20109b8d05b8SZbigniew Bodek } 201177958fcdSMarcin Wojtas 201277958fcdSMarcin Wojtas return (0); 20139b8d05b8SZbigniew Bodek } 20149b8d05b8SZbigniew Bodek 20159b8d05b8SZbigniew Bodek static int 20169b8d05b8SZbigniew Bodek ena_request_mgmnt_irq(struct ena_adapter *adapter) 20179b8d05b8SZbigniew Bodek { 20189b8d05b8SZbigniew Bodek struct ena_irq *irq; 20199b8d05b8SZbigniew Bodek unsigned long flags; 20209b8d05b8SZbigniew Bodek int rc, rcc; 20219b8d05b8SZbigniew Bodek 20229b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 20239b8d05b8SZbigniew Bodek 20249b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 20259b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 20269b8d05b8SZbigniew Bodek &irq->vector, flags); 20279b8d05b8SZbigniew Bodek 20283f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 20299b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "could not allocate " 20309b8d05b8SZbigniew Bodek "irq vector: %d\n", irq->vector); 20317d2544e6SMarcin Wojtas return (ENXIO); 20329b8d05b8SZbigniew Bodek } 20339b8d05b8SZbigniew Bodek 20340bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 20350bdffe59SMarcin Wojtas INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, 20360bdffe59SMarcin Wojtas irq->data, &irq->cookie); 20373f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 20389b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "failed to register " 20399b8d05b8SZbigniew Bodek "interrupt handler for irq %ju: %d\n", 20409b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 20417d2544e6SMarcin Wojtas goto err_res_free; 20429b8d05b8SZbigniew Bodek } 20439b8d05b8SZbigniew Bodek irq->requested = true; 20449b8d05b8SZbigniew Bodek 20459b8d05b8SZbigniew Bodek return (rc); 20469b8d05b8SZbigniew Bodek 20477d2544e6SMarcin Wojtas err_res_free: 20484e8acd84SMarcin Wojtas ena_trace(ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n", 20497d2544e6SMarcin Wojtas irq->vector); 20509b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 20519b8d05b8SZbigniew Bodek irq->vector, irq->res); 20523f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 20539b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent while " 20549b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 20559b8d05b8SZbigniew Bodek irq->res = NULL; 20569b8d05b8SZbigniew Bodek 20579b8d05b8SZbigniew Bodek return (rc); 20589b8d05b8SZbigniew Bodek } 20599b8d05b8SZbigniew Bodek 20609b8d05b8SZbigniew Bodek static int 20619b8d05b8SZbigniew Bodek ena_request_io_irq(struct ena_adapter *adapter) 20629b8d05b8SZbigniew Bodek { 20639b8d05b8SZbigniew Bodek struct ena_irq *irq; 20649b8d05b8SZbigniew Bodek unsigned long flags = 0; 20659b8d05b8SZbigniew Bodek int rc = 0, i, rcc; 20669b8d05b8SZbigniew Bodek 2067fd43fd2aSMarcin Wojtas if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) { 20684e8acd84SMarcin Wojtas device_printf(adapter->pdev, 20694e8acd84SMarcin Wojtas "failed to request I/O IRQ: MSI-X is not enabled\n"); 20709b8d05b8SZbigniew Bodek return (EINVAL); 20719b8d05b8SZbigniew Bodek } else { 20729b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 20739b8d05b8SZbigniew Bodek } 20749b8d05b8SZbigniew Bodek 20759b8d05b8SZbigniew Bodek for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 20769b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 20779b8d05b8SZbigniew Bodek 20783f9ed7abSMarcin Wojtas if (unlikely(irq->requested)) 20799b8d05b8SZbigniew Bodek continue; 20809b8d05b8SZbigniew Bodek 20819b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 20829b8d05b8SZbigniew Bodek &irq->vector, flags); 20833f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 2084469a8407SMarcin Wojtas rc = ENOMEM; 20859b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "could not allocate " 20869b8d05b8SZbigniew Bodek "irq vector: %d\n", irq->vector); 20879b8d05b8SZbigniew Bodek goto err; 20889b8d05b8SZbigniew Bodek } 20899b8d05b8SZbigniew Bodek 20900bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 20915cb9db07SMarcin Wojtas INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, 20925cb9db07SMarcin Wojtas irq->data, &irq->cookie); 20933f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 20949b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "failed to register " 20959b8d05b8SZbigniew Bodek "interrupt handler for irq %ju: %d\n", 20969b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 20979b8d05b8SZbigniew Bodek goto err; 20989b8d05b8SZbigniew Bodek } 20999b8d05b8SZbigniew Bodek irq->requested = true; 21009b8d05b8SZbigniew Bodek 21014e8acd84SMarcin Wojtas ena_trace(ENA_INFO, "queue %d - cpu %d\n", 21029b8d05b8SZbigniew Bodek i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); 21039b8d05b8SZbigniew Bodek } 21049b8d05b8SZbigniew Bodek 21059b8d05b8SZbigniew Bodek return (rc); 21069b8d05b8SZbigniew Bodek 21079b8d05b8SZbigniew Bodek err: 21089b8d05b8SZbigniew Bodek 21099b8d05b8SZbigniew Bodek for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { 21109b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 21119b8d05b8SZbigniew Bodek rcc = 0; 21129b8d05b8SZbigniew Bodek 21139b8d05b8SZbigniew Bodek /* Once we entered err: section and irq->requested is true we 21149b8d05b8SZbigniew Bodek free both intr and resources */ 21150bdffe59SMarcin Wojtas if (irq->requested) 21169b8d05b8SZbigniew Bodek rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 21173f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 21189b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "could not release" 21199b8d05b8SZbigniew Bodek " irq: %d, error: %d\n", irq->vector, rcc); 21209b8d05b8SZbigniew Bodek 21219b8d05b8SZbigniew Bodek /* If we entred err: section without irq->requested set we know 21229b8d05b8SZbigniew Bodek it was bus_alloc_resource_any() that needs cleanup, provided 21239b8d05b8SZbigniew Bodek res is not NULL. In case res is NULL no work in needed in 21249b8d05b8SZbigniew Bodek this iteration */ 21259b8d05b8SZbigniew Bodek rcc = 0; 21269b8d05b8SZbigniew Bodek if (irq->res != NULL) { 21279b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 21289b8d05b8SZbigniew Bodek irq->vector, irq->res); 21299b8d05b8SZbigniew Bodek } 21303f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 21319b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent while " 21329b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 21339b8d05b8SZbigniew Bodek irq->requested = false; 21349b8d05b8SZbigniew Bodek irq->res = NULL; 21359b8d05b8SZbigniew Bodek } 21369b8d05b8SZbigniew Bodek 21379b8d05b8SZbigniew Bodek return (rc); 21389b8d05b8SZbigniew Bodek } 21399b8d05b8SZbigniew Bodek 21409b8d05b8SZbigniew Bodek static void 21419b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(struct ena_adapter *adapter) 21429b8d05b8SZbigniew Bodek { 21439b8d05b8SZbigniew Bodek struct ena_irq *irq; 21449b8d05b8SZbigniew Bodek int rc; 21459b8d05b8SZbigniew Bodek 21469b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 21479b8d05b8SZbigniew Bodek if (irq->requested) { 21489b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n", 21499b8d05b8SZbigniew Bodek irq->vector); 21509b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 21513f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 21529b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "failed to tear " 21539b8d05b8SZbigniew Bodek "down irq: %d\n", irq->vector); 21549b8d05b8SZbigniew Bodek irq->requested = 0; 21559b8d05b8SZbigniew Bodek } 21569b8d05b8SZbigniew Bodek 21579b8d05b8SZbigniew Bodek if (irq->res != NULL) { 21589b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n", 21599b8d05b8SZbigniew Bodek irq->vector); 21609b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 21619b8d05b8SZbigniew Bodek irq->vector, irq->res); 21629b8d05b8SZbigniew Bodek irq->res = NULL; 21633f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 21649b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent while " 21659b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 21669b8d05b8SZbigniew Bodek } 21679b8d05b8SZbigniew Bodek } 21689b8d05b8SZbigniew Bodek 21699b8d05b8SZbigniew Bodek static void 21709b8d05b8SZbigniew Bodek ena_free_io_irq(struct ena_adapter *adapter) 21719b8d05b8SZbigniew Bodek { 21729b8d05b8SZbigniew Bodek struct ena_irq *irq; 21739b8d05b8SZbigniew Bodek int rc; 21749b8d05b8SZbigniew Bodek 21759b8d05b8SZbigniew Bodek for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 21769b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 21779b8d05b8SZbigniew Bodek if (irq->requested) { 21789b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n", 21799b8d05b8SZbigniew Bodek irq->vector); 21809b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, 21819b8d05b8SZbigniew Bodek irq->cookie); 21823f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 21839b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "failed to tear " 21849b8d05b8SZbigniew Bodek "down irq: %d\n", irq->vector); 21859b8d05b8SZbigniew Bodek } 21869b8d05b8SZbigniew Bodek irq->requested = 0; 21879b8d05b8SZbigniew Bodek } 21889b8d05b8SZbigniew Bodek 21899b8d05b8SZbigniew Bodek if (irq->res != NULL) { 21909b8d05b8SZbigniew Bodek ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n", 21919b8d05b8SZbigniew Bodek irq->vector); 21929b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 21939b8d05b8SZbigniew Bodek irq->vector, irq->res); 21949b8d05b8SZbigniew Bodek irq->res = NULL; 21953f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 21969b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "dev has no parent" 21979b8d05b8SZbigniew Bodek " while releasing res for irq: %d\n", 21989b8d05b8SZbigniew Bodek irq->vector); 21999b8d05b8SZbigniew Bodek } 22009b8d05b8SZbigniew Bodek } 22019b8d05b8SZbigniew Bodek } 22029b8d05b8SZbigniew Bodek } 22039b8d05b8SZbigniew Bodek 22049b8d05b8SZbigniew Bodek static void 22059b8d05b8SZbigniew Bodek ena_free_irqs(struct ena_adapter* adapter) 22069b8d05b8SZbigniew Bodek { 22079b8d05b8SZbigniew Bodek 22089b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 22099b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(adapter); 22109b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 22119b8d05b8SZbigniew Bodek } 22129b8d05b8SZbigniew Bodek 22139b8d05b8SZbigniew Bodek static void 22149b8d05b8SZbigniew Bodek ena_disable_msix(struct ena_adapter *adapter) 22159b8d05b8SZbigniew Bodek { 22169b8d05b8SZbigniew Bodek 2217fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 2218fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 22199b8d05b8SZbigniew Bodek pci_release_msi(adapter->pdev); 2220fd43fd2aSMarcin Wojtas } 22219b8d05b8SZbigniew Bodek 22229b8d05b8SZbigniew Bodek adapter->msix_vecs = 0; 2223fd43fd2aSMarcin Wojtas if (adapter->msix_entries != NULL) 2224cd5d5804SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 22259b8d05b8SZbigniew Bodek adapter->msix_entries = NULL; 22269b8d05b8SZbigniew Bodek } 22279b8d05b8SZbigniew Bodek 22289b8d05b8SZbigniew Bodek static void 22299b8d05b8SZbigniew Bodek ena_unmask_all_io_irqs(struct ena_adapter *adapter) 22309b8d05b8SZbigniew Bodek { 22319b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 22329b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 22339b8d05b8SZbigniew Bodek uint16_t ena_qid; 22349b8d05b8SZbigniew Bodek int i; 22359b8d05b8SZbigniew Bodek 22369b8d05b8SZbigniew Bodek /* Unmask interrupts for all queues */ 22379b8d05b8SZbigniew Bodek for (i = 0; i < adapter->num_queues; i++) { 22389b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 22399b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 22409b8d05b8SZbigniew Bodek ena_com_update_intr_reg(&intr_reg, 0, 0, true); 22419b8d05b8SZbigniew Bodek ena_com_unmask_intr(io_cq, &intr_reg); 22429b8d05b8SZbigniew Bodek } 22439b8d05b8SZbigniew Bodek } 22449b8d05b8SZbigniew Bodek 22459b8d05b8SZbigniew Bodek /* Configure the Rx forwarding */ 22460bdffe59SMarcin Wojtas static int 22470bdffe59SMarcin Wojtas ena_rss_configure(struct ena_adapter *adapter) 22489b8d05b8SZbigniew Bodek { 22499b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 22509b8d05b8SZbigniew Bodek int rc; 22519b8d05b8SZbigniew Bodek 22529b8d05b8SZbigniew Bodek /* Set indirect table */ 22539b8d05b8SZbigniew Bodek rc = ena_com_indirect_table_set(ena_dev); 22540bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 22550bdffe59SMarcin Wojtas return (rc); 22569b8d05b8SZbigniew Bodek 22579b8d05b8SZbigniew Bodek /* Configure hash function (if supported) */ 22589b8d05b8SZbigniew Bodek rc = ena_com_set_hash_function(ena_dev); 22590bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 22600bdffe59SMarcin Wojtas return (rc); 22619b8d05b8SZbigniew Bodek 22629b8d05b8SZbigniew Bodek /* Configure hash inputs (if supported) */ 22639b8d05b8SZbigniew Bodek rc = ena_com_set_hash_ctrl(ena_dev); 22640bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) 22650bdffe59SMarcin Wojtas return (rc); 22669b8d05b8SZbigniew Bodek 22670bdffe59SMarcin Wojtas return (0); 22689b8d05b8SZbigniew Bodek } 22699b8d05b8SZbigniew Bodek 22709b8d05b8SZbigniew Bodek static int 22719b8d05b8SZbigniew Bodek ena_up_complete(struct ena_adapter *adapter) 22729b8d05b8SZbigniew Bodek { 22739b8d05b8SZbigniew Bodek int rc; 22749b8d05b8SZbigniew Bodek 2275fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { 22769b8d05b8SZbigniew Bodek rc = ena_rss_configure(adapter); 22770bdffe59SMarcin Wojtas if (rc != 0) 22789b8d05b8SZbigniew Bodek return (rc); 22799b8d05b8SZbigniew Bodek } 22809b8d05b8SZbigniew Bodek 22817d2544e6SMarcin Wojtas rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu); 22823f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 22837d2544e6SMarcin Wojtas return (rc); 22847d2544e6SMarcin Wojtas 22859b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(adapter); 228630217e2dSMarcin Wojtas ena_reset_counters((counter_u64_t *)&adapter->hw_stats, 228730217e2dSMarcin Wojtas sizeof(adapter->hw_stats)); 22889b8d05b8SZbigniew Bodek 22899b8d05b8SZbigniew Bodek return (0); 22909b8d05b8SZbigniew Bodek } 22919b8d05b8SZbigniew Bodek 22929b8d05b8SZbigniew Bodek static int 22939b8d05b8SZbigniew Bodek ena_up(struct ena_adapter *adapter) 22949b8d05b8SZbigniew Bodek { 22959b8d05b8SZbigniew Bodek int rc = 0; 22969b8d05b8SZbigniew Bodek 22973f9ed7abSMarcin Wojtas if (unlikely(device_is_attached(adapter->pdev) == 0)) { 22989b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "device is not attached!\n"); 22999b8d05b8SZbigniew Bodek return (ENXIO); 23009b8d05b8SZbigniew Bodek } 23019b8d05b8SZbigniew Bodek 2302fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { 23039b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "device is going UP\n"); 23049b8d05b8SZbigniew Bodek 23059b8d05b8SZbigniew Bodek /* setup interrupts for IO queues */ 230677958fcdSMarcin Wojtas rc = ena_setup_io_intr(adapter); 230777958fcdSMarcin Wojtas if (unlikely(rc != 0)) { 230877958fcdSMarcin Wojtas ena_trace(ENA_ALERT, "error setting up IO interrupt\n"); 230977958fcdSMarcin Wojtas goto error; 231077958fcdSMarcin Wojtas } 23119b8d05b8SZbigniew Bodek rc = ena_request_io_irq(adapter); 23123f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 231330425f93SMarcin Wojtas ena_trace(ENA_ALERT, "err_req_irq\n"); 231477958fcdSMarcin Wojtas goto error; 23159b8d05b8SZbigniew Bodek } 23169b8d05b8SZbigniew Bodek 23179b8d05b8SZbigniew Bodek /* allocate transmit descriptors */ 23189b8d05b8SZbigniew Bodek rc = ena_setup_all_tx_resources(adapter); 23193f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 232030425f93SMarcin Wojtas ena_trace(ENA_ALERT, "err_setup_tx\n"); 23219b8d05b8SZbigniew Bodek goto err_setup_tx; 23229b8d05b8SZbigniew Bodek } 23239b8d05b8SZbigniew Bodek 23249b8d05b8SZbigniew Bodek /* allocate receive descriptors */ 23259b8d05b8SZbigniew Bodek rc = ena_setup_all_rx_resources(adapter); 23263f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 232730425f93SMarcin Wojtas ena_trace(ENA_ALERT, "err_setup_rx\n"); 23289b8d05b8SZbigniew Bodek goto err_setup_rx; 23299b8d05b8SZbigniew Bodek } 23309b8d05b8SZbigniew Bodek 23319b8d05b8SZbigniew Bodek /* create IO queues for Rx & Tx */ 23329b8d05b8SZbigniew Bodek rc = ena_create_io_queues(adapter); 23333f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 23349b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, 233530425f93SMarcin Wojtas "create IO queues failed\n"); 23369b8d05b8SZbigniew Bodek goto err_io_que; 23379b8d05b8SZbigniew Bodek } 23389b8d05b8SZbigniew Bodek 2339fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 23409b8d05b8SZbigniew Bodek if_link_state_change(adapter->ifp, LINK_STATE_UP); 23419b8d05b8SZbigniew Bodek 23429b8d05b8SZbigniew Bodek rc = ena_up_complete(adapter); 23433f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 23449b8d05b8SZbigniew Bodek goto err_up_complete; 23459b8d05b8SZbigniew Bodek 23469b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_up, 1); 23479b8d05b8SZbigniew Bodek 23489b8d05b8SZbigniew Bodek ena_update_hwassist(adapter); 23499b8d05b8SZbigniew Bodek 23509b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, 23519b8d05b8SZbigniew Bodek IFF_DRV_OACTIVE); 23529b8d05b8SZbigniew Bodek 2353*24392281SMarcin Wojtas /* Activate timer service only if the device is running. 2354*24392281SMarcin Wojtas * If this flag is not set, it means that the driver is being 2355*24392281SMarcin Wojtas * reset and timer service will be activated afterwards. 2356*24392281SMarcin Wojtas */ 2357*24392281SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) { 2358*24392281SMarcin Wojtas callout_reset_sbt(&adapter->timer_service, SBT_1S, 2359*24392281SMarcin Wojtas SBT_1S, ena_timer_service, (void *)adapter, 0); 2360*24392281SMarcin Wojtas } 23619b8d05b8SZbigniew Bodek 2362fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); 236393471047SZbigniew Bodek 236493471047SZbigniew Bodek ena_unmask_all_io_irqs(adapter); 23659b8d05b8SZbigniew Bodek } 23669b8d05b8SZbigniew Bodek 23679b8d05b8SZbigniew Bodek return (0); 23689b8d05b8SZbigniew Bodek 23699b8d05b8SZbigniew Bodek err_up_complete: 23709b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 23719b8d05b8SZbigniew Bodek err_io_que: 23729b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 23739b8d05b8SZbigniew Bodek err_setup_rx: 23749b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 23759b8d05b8SZbigniew Bodek err_setup_tx: 23769b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 237777958fcdSMarcin Wojtas error: 23789b8d05b8SZbigniew Bodek return (rc); 23799b8d05b8SZbigniew Bodek } 23809b8d05b8SZbigniew Bodek 23819b8d05b8SZbigniew Bodek static uint64_t 23829b8d05b8SZbigniew Bodek ena_get_counter(if_t ifp, ift_counter cnt) 23839b8d05b8SZbigniew Bodek { 23849b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 23859b8d05b8SZbigniew Bodek struct ena_hw_stats *stats; 23869b8d05b8SZbigniew Bodek 23879b8d05b8SZbigniew Bodek adapter = if_getsoftc(ifp); 23889b8d05b8SZbigniew Bodek stats = &adapter->hw_stats; 23899b8d05b8SZbigniew Bodek 23909b8d05b8SZbigniew Bodek switch (cnt) { 23919b8d05b8SZbigniew Bodek case IFCOUNTER_IPACKETS: 239230217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_packets)); 23939b8d05b8SZbigniew Bodek case IFCOUNTER_OPACKETS: 239430217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_packets)); 23959b8d05b8SZbigniew Bodek case IFCOUNTER_IBYTES: 239630217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_bytes)); 23979b8d05b8SZbigniew Bodek case IFCOUNTER_OBYTES: 239830217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_bytes)); 23999b8d05b8SZbigniew Bodek case IFCOUNTER_IQDROPS: 240030217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_drops)); 24019b8d05b8SZbigniew Bodek default: 24029b8d05b8SZbigniew Bodek return (if_get_counter_default(ifp, cnt)); 24039b8d05b8SZbigniew Bodek } 24049b8d05b8SZbigniew Bodek } 24059b8d05b8SZbigniew Bodek 24069b8d05b8SZbigniew Bodek static int 24079b8d05b8SZbigniew Bodek ena_media_change(if_t ifp) 24089b8d05b8SZbigniew Bodek { 24099b8d05b8SZbigniew Bodek /* Media Change is not supported by firmware */ 24109b8d05b8SZbigniew Bodek return (0); 24119b8d05b8SZbigniew Bodek } 24129b8d05b8SZbigniew Bodek 24139b8d05b8SZbigniew Bodek static void 24149b8d05b8SZbigniew Bodek ena_media_status(if_t ifp, struct ifmediareq *ifmr) 24159b8d05b8SZbigniew Bodek { 24169b8d05b8SZbigniew Bodek struct ena_adapter *adapter = if_getsoftc(ifp); 241730425f93SMarcin Wojtas ena_trace(ENA_DBG, "enter\n"); 24189b8d05b8SZbigniew Bodek 24195a990212SMarcin Wojtas mtx_lock(&adapter->global_mtx); 24209b8d05b8SZbigniew Bodek 24219b8d05b8SZbigniew Bodek ifmr->ifm_status = IFM_AVALID; 24229b8d05b8SZbigniew Bodek ifmr->ifm_active = IFM_ETHER; 24239b8d05b8SZbigniew Bodek 2424fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) { 24255a990212SMarcin Wojtas mtx_unlock(&adapter->global_mtx); 2426fd43fd2aSMarcin Wojtas ena_trace(ENA_INFO, "Link is down\n"); 24279b8d05b8SZbigniew Bodek return; 24289b8d05b8SZbigniew Bodek } 24299b8d05b8SZbigniew Bodek 24309b8d05b8SZbigniew Bodek ifmr->ifm_status |= IFM_ACTIVE; 2431b8ca5dbeSMarcin Wojtas ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX; 24329b8d05b8SZbigniew Bodek 24335a990212SMarcin Wojtas mtx_unlock(&adapter->global_mtx); 24349b8d05b8SZbigniew Bodek } 24359b8d05b8SZbigniew Bodek 24369b8d05b8SZbigniew Bodek static void 24379b8d05b8SZbigniew Bodek ena_init(void *arg) 24389b8d05b8SZbigniew Bodek { 24399b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 24409b8d05b8SZbigniew Bodek 2441fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { 24423d3a90f9SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 24439b8d05b8SZbigniew Bodek ena_up(adapter); 24443d3a90f9SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 24453d3a90f9SZbigniew Bodek } 24469b8d05b8SZbigniew Bodek } 24479b8d05b8SZbigniew Bodek 24489b8d05b8SZbigniew Bodek static int 24499b8d05b8SZbigniew Bodek ena_ioctl(if_t ifp, u_long command, caddr_t data) 24509b8d05b8SZbigniew Bodek { 24519b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 24529b8d05b8SZbigniew Bodek struct ifreq *ifr; 24539b8d05b8SZbigniew Bodek int rc; 24549b8d05b8SZbigniew Bodek 24559b8d05b8SZbigniew Bodek adapter = ifp->if_softc; 24569b8d05b8SZbigniew Bodek ifr = (struct ifreq *)data; 24579b8d05b8SZbigniew Bodek 24589b8d05b8SZbigniew Bodek /* 24599b8d05b8SZbigniew Bodek * Acquiring lock to prevent from running up and down routines parallel. 24609b8d05b8SZbigniew Bodek */ 24619b8d05b8SZbigniew Bodek rc = 0; 24629b8d05b8SZbigniew Bodek switch (command) { 24639b8d05b8SZbigniew Bodek case SIOCSIFMTU: 2464dbf2eb54SMarcin Wojtas if (ifp->if_mtu == ifr->ifr_mtu) 2465dbf2eb54SMarcin Wojtas break; 2466e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 24679b8d05b8SZbigniew Bodek ena_down(adapter); 24689b8d05b8SZbigniew Bodek 24699b8d05b8SZbigniew Bodek ena_change_mtu(ifp, ifr->ifr_mtu); 24709b8d05b8SZbigniew Bodek 24719b8d05b8SZbigniew Bodek rc = ena_up(adapter); 2472e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 24739b8d05b8SZbigniew Bodek break; 24749b8d05b8SZbigniew Bodek 24759b8d05b8SZbigniew Bodek case SIOCSIFFLAGS: 24760bdffe59SMarcin Wojtas if ((ifp->if_flags & IFF_UP) != 0) { 24770bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 24780bdffe59SMarcin Wojtas if ((ifp->if_flags & (IFF_PROMISC | 24790bdffe59SMarcin Wojtas IFF_ALLMULTI)) != 0) { 24809b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 24819b8d05b8SZbigniew Bodek "ioctl promisc/allmulti\n"); 24829b8d05b8SZbigniew Bodek } 24839b8d05b8SZbigniew Bodek } else { 2484e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 24859b8d05b8SZbigniew Bodek rc = ena_up(adapter); 2486e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 24879b8d05b8SZbigniew Bodek } 24889b8d05b8SZbigniew Bodek } else { 24890bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 2490e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 24919b8d05b8SZbigniew Bodek ena_down(adapter); 2492e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 2493e67c6554SZbigniew Bodek } 24949b8d05b8SZbigniew Bodek } 24959b8d05b8SZbigniew Bodek break; 24969b8d05b8SZbigniew Bodek 24979b8d05b8SZbigniew Bodek case SIOCADDMULTI: 24989b8d05b8SZbigniew Bodek case SIOCDELMULTI: 24999b8d05b8SZbigniew Bodek break; 25009b8d05b8SZbigniew Bodek 25019b8d05b8SZbigniew Bodek case SIOCSIFMEDIA: 25029b8d05b8SZbigniew Bodek case SIOCGIFMEDIA: 25039b8d05b8SZbigniew Bodek rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 25049b8d05b8SZbigniew Bodek break; 25059b8d05b8SZbigniew Bodek 25069b8d05b8SZbigniew Bodek case SIOCSIFCAP: 25079b8d05b8SZbigniew Bodek { 25089b8d05b8SZbigniew Bodek int reinit = 0; 25099b8d05b8SZbigniew Bodek 25109b8d05b8SZbigniew Bodek if (ifr->ifr_reqcap != ifp->if_capenable) { 25119b8d05b8SZbigniew Bodek ifp->if_capenable = ifr->ifr_reqcap; 25129b8d05b8SZbigniew Bodek reinit = 1; 25139b8d05b8SZbigniew Bodek } 25149b8d05b8SZbigniew Bodek 25150bdffe59SMarcin Wojtas if ((reinit != 0) && 25160bdffe59SMarcin Wojtas ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { 2517e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 25189b8d05b8SZbigniew Bodek ena_down(adapter); 25199b8d05b8SZbigniew Bodek rc = ena_up(adapter); 2520e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 25219b8d05b8SZbigniew Bodek } 25229b8d05b8SZbigniew Bodek } 25239b8d05b8SZbigniew Bodek 25249b8d05b8SZbigniew Bodek break; 25259b8d05b8SZbigniew Bodek default: 25269b8d05b8SZbigniew Bodek rc = ether_ioctl(ifp, command, data); 25279b8d05b8SZbigniew Bodek break; 25289b8d05b8SZbigniew Bodek } 25299b8d05b8SZbigniew Bodek 25309b8d05b8SZbigniew Bodek return (rc); 25319b8d05b8SZbigniew Bodek } 25329b8d05b8SZbigniew Bodek 25339b8d05b8SZbigniew Bodek static int 25349b8d05b8SZbigniew Bodek ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) 25359b8d05b8SZbigniew Bodek { 25369b8d05b8SZbigniew Bodek int caps = 0; 25379b8d05b8SZbigniew Bodek 25380bdffe59SMarcin Wojtas if ((feat->offload.tx & 25399b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 25409b8d05b8SZbigniew Bodek ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | 25410bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) 25429b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM; 25439b8d05b8SZbigniew Bodek 25440bdffe59SMarcin Wojtas if ((feat->offload.tx & 25459b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | 25460bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) 25479b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM_IPV6; 25489b8d05b8SZbigniew Bodek 25490bdffe59SMarcin Wojtas if ((feat->offload.tx & 25500bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) 25519b8d05b8SZbigniew Bodek caps |= IFCAP_TSO4; 25529b8d05b8SZbigniew Bodek 25530bdffe59SMarcin Wojtas if ((feat->offload.tx & 25540bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) 25559b8d05b8SZbigniew Bodek caps |= IFCAP_TSO6; 25569b8d05b8SZbigniew Bodek 25570bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 25589b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | 25590bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) 25609b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM; 25619b8d05b8SZbigniew Bodek 25620bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 25630bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) 25649b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM_IPV6; 25659b8d05b8SZbigniew Bodek 25669b8d05b8SZbigniew Bodek caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; 25679b8d05b8SZbigniew Bodek 25689b8d05b8SZbigniew Bodek return (caps); 25699b8d05b8SZbigniew Bodek } 25709b8d05b8SZbigniew Bodek 25719b8d05b8SZbigniew Bodek static void 25729b8d05b8SZbigniew Bodek ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) 25739b8d05b8SZbigniew Bodek { 25749b8d05b8SZbigniew Bodek 25759b8d05b8SZbigniew Bodek host_info->supported_network_features[0] = 25769b8d05b8SZbigniew Bodek (uint32_t)if_getcapabilities(ifp); 25779b8d05b8SZbigniew Bodek } 25789b8d05b8SZbigniew Bodek 25799b8d05b8SZbigniew Bodek static void 25809b8d05b8SZbigniew Bodek ena_update_hwassist(struct ena_adapter *adapter) 25819b8d05b8SZbigniew Bodek { 25829b8d05b8SZbigniew Bodek if_t ifp = adapter->ifp; 25839b8d05b8SZbigniew Bodek uint32_t feat = adapter->tx_offload_cap; 25849b8d05b8SZbigniew Bodek int cap = if_getcapenable(ifp); 25859b8d05b8SZbigniew Bodek int flags = 0; 25869b8d05b8SZbigniew Bodek 25879b8d05b8SZbigniew Bodek if_clearhwassist(ifp); 25889b8d05b8SZbigniew Bodek 25890bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM) != 0) { 25900bdffe59SMarcin Wojtas if ((feat & 25910bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) 25929b8d05b8SZbigniew Bodek flags |= CSUM_IP; 25930bdffe59SMarcin Wojtas if ((feat & 25949b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 25950bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) 25969b8d05b8SZbigniew Bodek flags |= CSUM_IP_UDP | CSUM_IP_TCP; 25979b8d05b8SZbigniew Bodek } 25989b8d05b8SZbigniew Bodek 25990bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM_IPV6) != 0) 26009b8d05b8SZbigniew Bodek flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; 26019b8d05b8SZbigniew Bodek 26020bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO4) != 0) 26039b8d05b8SZbigniew Bodek flags |= CSUM_IP_TSO; 26049b8d05b8SZbigniew Bodek 26050bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO6) != 0) 26069b8d05b8SZbigniew Bodek flags |= CSUM_IP6_TSO; 26079b8d05b8SZbigniew Bodek 26089b8d05b8SZbigniew Bodek if_sethwassistbits(ifp, flags, 0); 26099b8d05b8SZbigniew Bodek } 26109b8d05b8SZbigniew Bodek 26119b8d05b8SZbigniew Bodek static int 26129b8d05b8SZbigniew Bodek ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, 26139b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *feat) 26149b8d05b8SZbigniew Bodek { 26159b8d05b8SZbigniew Bodek if_t ifp; 26169b8d05b8SZbigniew Bodek int caps = 0; 26179b8d05b8SZbigniew Bodek 26189b8d05b8SZbigniew Bodek ifp = adapter->ifp = if_gethandle(IFT_ETHER); 26193f9ed7abSMarcin Wojtas if (unlikely(ifp == NULL)) { 26204e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, "can not allocate ifnet structure\n"); 26219b8d05b8SZbigniew Bodek return (ENXIO); 26229b8d05b8SZbigniew Bodek } 26239b8d05b8SZbigniew Bodek if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); 26249b8d05b8SZbigniew Bodek if_setdev(ifp, pdev); 26259b8d05b8SZbigniew Bodek if_setsoftc(ifp, adapter); 26269b8d05b8SZbigniew Bodek 26279b8d05b8SZbigniew Bodek if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 26289b8d05b8SZbigniew Bodek if_setinitfn(ifp, ena_init); 26299b8d05b8SZbigniew Bodek if_settransmitfn(ifp, ena_mq_start); 26309b8d05b8SZbigniew Bodek if_setqflushfn(ifp, ena_qflush); 26319b8d05b8SZbigniew Bodek if_setioctlfn(ifp, ena_ioctl); 26329b8d05b8SZbigniew Bodek if_setgetcounterfn(ifp, ena_get_counter); 26339b8d05b8SZbigniew Bodek 26349b8d05b8SZbigniew Bodek if_setsendqlen(ifp, adapter->tx_ring_size); 26359b8d05b8SZbigniew Bodek if_setsendqready(ifp); 26369b8d05b8SZbigniew Bodek if_setmtu(ifp, ETHERMTU); 26379b8d05b8SZbigniew Bodek if_setbaudrate(ifp, 0); 26389b8d05b8SZbigniew Bodek /* Zeroize capabilities... */ 26399b8d05b8SZbigniew Bodek if_setcapabilities(ifp, 0); 26409b8d05b8SZbigniew Bodek if_setcapenable(ifp, 0); 26419b8d05b8SZbigniew Bodek /* check hardware support */ 26429b8d05b8SZbigniew Bodek caps = ena_get_dev_offloads(feat); 26439b8d05b8SZbigniew Bodek /* ... and set them */ 26449b8d05b8SZbigniew Bodek if_setcapabilitiesbit(ifp, caps, 0); 26459b8d05b8SZbigniew Bodek 26469b8d05b8SZbigniew Bodek /* TSO parameters */ 26478a573700SZbigniew Bodek ifp->if_hw_tsomax = ENA_TSO_MAXSIZE - 26488a573700SZbigniew Bodek (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 26498a573700SZbigniew Bodek ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1; 26508a573700SZbigniew Bodek ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE; 26519b8d05b8SZbigniew Bodek 26529b8d05b8SZbigniew Bodek if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 26539b8d05b8SZbigniew Bodek if_setcapenable(ifp, if_getcapabilities(ifp)); 26549b8d05b8SZbigniew Bodek 26559b8d05b8SZbigniew Bodek /* 26569b8d05b8SZbigniew Bodek * Specify the media types supported by this adapter and register 26579b8d05b8SZbigniew Bodek * callbacks to update media and link information 26589b8d05b8SZbigniew Bodek */ 26599b8d05b8SZbigniew Bodek ifmedia_init(&adapter->media, IFM_IMASK, 26609b8d05b8SZbigniew Bodek ena_media_change, ena_media_status); 26619b8d05b8SZbigniew Bodek ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 26629b8d05b8SZbigniew Bodek ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 26639b8d05b8SZbigniew Bodek 26649b8d05b8SZbigniew Bodek ether_ifattach(ifp, adapter->mac_addr); 26659b8d05b8SZbigniew Bodek 26669b8d05b8SZbigniew Bodek return (0); 26679b8d05b8SZbigniew Bodek } 26689b8d05b8SZbigniew Bodek 26699b8d05b8SZbigniew Bodek static void 26709b8d05b8SZbigniew Bodek ena_down(struct ena_adapter *adapter) 26719b8d05b8SZbigniew Bodek { 2672a195fab0SMarcin Wojtas int rc; 26739b8d05b8SZbigniew Bodek 2674fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { 26759b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "device is going DOWN\n"); 26769b8d05b8SZbigniew Bodek 26779b8d05b8SZbigniew Bodek callout_drain(&adapter->timer_service); 26789b8d05b8SZbigniew Bodek 2679fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); 26809b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, 26819b8d05b8SZbigniew Bodek IFF_DRV_RUNNING); 26829b8d05b8SZbigniew Bodek 26839b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 26849b8d05b8SZbigniew Bodek 2685fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { 2686a195fab0SMarcin Wojtas rc = ena_com_dev_reset(adapter->ena_dev, 2687a195fab0SMarcin Wojtas adapter->reset_reason); 26883f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 2689a195fab0SMarcin Wojtas device_printf(adapter->pdev, 2690a195fab0SMarcin Wojtas "Device reset failed\n"); 2691a195fab0SMarcin Wojtas } 2692a195fab0SMarcin Wojtas 26939b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 26949b8d05b8SZbigniew Bodek 26959b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(adapter); 26969b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(adapter); 26979b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 26989b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 26999b8d05b8SZbigniew Bodek 27009b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_down, 1); 27019b8d05b8SZbigniew Bodek } 27029b8d05b8SZbigniew Bodek } 27039b8d05b8SZbigniew Bodek 27049b8d05b8SZbigniew Bodek static void 27059b8d05b8SZbigniew Bodek ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf) 27069b8d05b8SZbigniew Bodek { 27079b8d05b8SZbigniew Bodek struct ena_com_tx_meta *ena_meta; 27089b8d05b8SZbigniew Bodek struct ether_vlan_header *eh; 2709a870eab2SMarcin Wojtas struct mbuf *mbuf_next; 27109b8d05b8SZbigniew Bodek u32 mss; 27119b8d05b8SZbigniew Bodek bool offload; 27129b8d05b8SZbigniew Bodek uint16_t etype; 27139b8d05b8SZbigniew Bodek int ehdrlen; 27149b8d05b8SZbigniew Bodek struct ip *ip; 27159b8d05b8SZbigniew Bodek int iphlen; 27169b8d05b8SZbigniew Bodek struct tcphdr *th; 2717a870eab2SMarcin Wojtas int offset; 27189b8d05b8SZbigniew Bodek 27199b8d05b8SZbigniew Bodek offload = false; 27209b8d05b8SZbigniew Bodek ena_meta = &ena_tx_ctx->ena_meta; 27219b8d05b8SZbigniew Bodek mss = mbuf->m_pkthdr.tso_segsz; 27229b8d05b8SZbigniew Bodek 27239b8d05b8SZbigniew Bodek if (mss != 0) 27249b8d05b8SZbigniew Bodek offload = true; 27259b8d05b8SZbigniew Bodek 27269b8d05b8SZbigniew Bodek if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) 27279b8d05b8SZbigniew Bodek offload = true; 27289b8d05b8SZbigniew Bodek 27299b8d05b8SZbigniew Bodek if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0) 27309b8d05b8SZbigniew Bodek offload = true; 27319b8d05b8SZbigniew Bodek 27320bdffe59SMarcin Wojtas if (!offload) { 27339b8d05b8SZbigniew Bodek ena_tx_ctx->meta_valid = 0; 27349b8d05b8SZbigniew Bodek return; 27359b8d05b8SZbigniew Bodek } 27369b8d05b8SZbigniew Bodek 27379b8d05b8SZbigniew Bodek /* Determine where frame payload starts. */ 27389b8d05b8SZbigniew Bodek eh = mtod(mbuf, struct ether_vlan_header *); 27399b8d05b8SZbigniew Bodek if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 27409b8d05b8SZbigniew Bodek etype = ntohs(eh->evl_proto); 27419b8d05b8SZbigniew Bodek ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 27429b8d05b8SZbigniew Bodek } else { 27439b8d05b8SZbigniew Bodek etype = ntohs(eh->evl_encap_proto); 27449b8d05b8SZbigniew Bodek ehdrlen = ETHER_HDR_LEN; 27459b8d05b8SZbigniew Bodek } 27469b8d05b8SZbigniew Bodek 2747a870eab2SMarcin Wojtas mbuf_next = m_getptr(mbuf, ehdrlen, &offset); 2748a870eab2SMarcin Wojtas ip = (struct ip *)(mtodo(mbuf_next, offset)); 27499b8d05b8SZbigniew Bodek iphlen = ip->ip_hl << 2; 2750a870eab2SMarcin Wojtas 2751a870eab2SMarcin Wojtas mbuf_next = m_getptr(mbuf, iphlen + ehdrlen, &offset); 2752a870eab2SMarcin Wojtas th = (struct tcphdr *)(mtodo(mbuf_next, offset)); 27539b8d05b8SZbigniew Bodek 27549b8d05b8SZbigniew Bodek if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) { 27559b8d05b8SZbigniew Bodek ena_tx_ctx->l3_csum_enable = 1; 27569b8d05b8SZbigniew Bodek } 27579b8d05b8SZbigniew Bodek if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 27589b8d05b8SZbigniew Bodek ena_tx_ctx->tso_enable = 1; 27599b8d05b8SZbigniew Bodek ena_meta->l4_hdr_len = (th->th_off); 27609b8d05b8SZbigniew Bodek } 27619b8d05b8SZbigniew Bodek 27629b8d05b8SZbigniew Bodek switch (etype) { 27639b8d05b8SZbigniew Bodek case ETHERTYPE_IP: 27649b8d05b8SZbigniew Bodek ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 2765cd433385SMarcin Wojtas if ((ip->ip_off & htons(IP_DF)) != 0) 27669b8d05b8SZbigniew Bodek ena_tx_ctx->df = 1; 27679b8d05b8SZbigniew Bodek break; 27689b8d05b8SZbigniew Bodek case ETHERTYPE_IPV6: 27699b8d05b8SZbigniew Bodek ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 27709b8d05b8SZbigniew Bodek 27719b8d05b8SZbigniew Bodek default: 27729b8d05b8SZbigniew Bodek break; 27739b8d05b8SZbigniew Bodek } 27749b8d05b8SZbigniew Bodek 27759b8d05b8SZbigniew Bodek if (ip->ip_p == IPPROTO_TCP) { 27769b8d05b8SZbigniew Bodek ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 27770bdffe59SMarcin Wojtas if ((mbuf->m_pkthdr.csum_flags & 27780bdffe59SMarcin Wojtas (CSUM_IP_TCP | CSUM_IP6_TCP)) != 0) 27799b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 1; 27809b8d05b8SZbigniew Bodek else 27819b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 0; 27829b8d05b8SZbigniew Bodek } else if (ip->ip_p == IPPROTO_UDP) { 27839b8d05b8SZbigniew Bodek ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 27840bdffe59SMarcin Wojtas if ((mbuf->m_pkthdr.csum_flags & 27850bdffe59SMarcin Wojtas (CSUM_IP_UDP | CSUM_IP6_UDP)) != 0) 27869b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 1; 27879b8d05b8SZbigniew Bodek else 27889b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 0; 27899b8d05b8SZbigniew Bodek } else { 27909b8d05b8SZbigniew Bodek ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 27919b8d05b8SZbigniew Bodek ena_tx_ctx->l4_csum_enable = 0; 27929b8d05b8SZbigniew Bodek } 27939b8d05b8SZbigniew Bodek 27949b8d05b8SZbigniew Bodek ena_meta->mss = mss; 27959b8d05b8SZbigniew Bodek ena_meta->l3_hdr_len = iphlen; 27969b8d05b8SZbigniew Bodek ena_meta->l3_hdr_offset = ehdrlen; 27979b8d05b8SZbigniew Bodek ena_tx_ctx->meta_valid = 1; 27989b8d05b8SZbigniew Bodek } 27999b8d05b8SZbigniew Bodek 28009b8d05b8SZbigniew Bodek static int 28011b069f1cSZbigniew Bodek ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) 28021e9fb899SZbigniew Bodek { 28031e9fb899SZbigniew Bodek struct ena_adapter *adapter; 28041b069f1cSZbigniew Bodek struct mbuf *collapsed_mbuf; 28051e9fb899SZbigniew Bodek int num_frags; 28061e9fb899SZbigniew Bodek 28071e9fb899SZbigniew Bodek adapter = tx_ring->adapter; 28081e9fb899SZbigniew Bodek num_frags = ena_mbuf_count(*mbuf); 28091e9fb899SZbigniew Bodek 28101e9fb899SZbigniew Bodek /* One segment must be reserved for configuration descriptor. */ 28111e9fb899SZbigniew Bodek if (num_frags < adapter->max_tx_sgl_size) 28121e9fb899SZbigniew Bodek return (0); 28131b069f1cSZbigniew Bodek counter_u64_add(tx_ring->tx_stats.collapse, 1); 28141e9fb899SZbigniew Bodek 28151b069f1cSZbigniew Bodek collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT, 28161b069f1cSZbigniew Bodek adapter->max_tx_sgl_size - 1); 28173f9ed7abSMarcin Wojtas if (unlikely(collapsed_mbuf == NULL)) { 28181b069f1cSZbigniew Bodek counter_u64_add(tx_ring->tx_stats.collapse_err, 1); 28191e9fb899SZbigniew Bodek return (ENOMEM); 28201e9fb899SZbigniew Bodek } 28211e9fb899SZbigniew Bodek 28221b069f1cSZbigniew Bodek /* If mbuf was collapsed succesfully, original mbuf is released. */ 28231b069f1cSZbigniew Bodek *mbuf = collapsed_mbuf; 28241e9fb899SZbigniew Bodek 28251e9fb899SZbigniew Bodek return (0); 28261e9fb899SZbigniew Bodek } 28271e9fb899SZbigniew Bodek 28284fa9e02dSMarcin Wojtas static void 28294fa9e02dSMarcin Wojtas ena_dmamap_llq(void *arg, bus_dma_segment_t *segs, int nseg, int error) 28304fa9e02dSMarcin Wojtas { 28314fa9e02dSMarcin Wojtas struct ena_com_buf *ena_buf = arg; 28324fa9e02dSMarcin Wojtas 28334fa9e02dSMarcin Wojtas if (unlikely(error != 0)) { 28344fa9e02dSMarcin Wojtas ena_buf->paddr = 0; 28354fa9e02dSMarcin Wojtas return; 28364fa9e02dSMarcin Wojtas } 28374fa9e02dSMarcin Wojtas 28384fa9e02dSMarcin Wojtas KASSERT(nseg == 1, ("Invalid num of segments for LLQ dma")); 28394fa9e02dSMarcin Wojtas 28404fa9e02dSMarcin Wojtas ena_buf->paddr = segs->ds_addr; 28414fa9e02dSMarcin Wojtas ena_buf->len = segs->ds_len; 28424fa9e02dSMarcin Wojtas } 28434fa9e02dSMarcin Wojtas 28444fa9e02dSMarcin Wojtas static int 28454fa9e02dSMarcin Wojtas ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, 28464fa9e02dSMarcin Wojtas struct mbuf *mbuf, void **push_hdr, u16 *header_len) 28474fa9e02dSMarcin Wojtas { 28484fa9e02dSMarcin Wojtas struct ena_adapter *adapter = tx_ring->adapter; 28494fa9e02dSMarcin Wojtas struct ena_com_buf *ena_buf; 28504fa9e02dSMarcin Wojtas bus_dma_segment_t segs[ENA_BUS_DMA_SEGS]; 28514fa9e02dSMarcin Wojtas uint32_t mbuf_head_len, frag_len; 28524fa9e02dSMarcin Wojtas uint16_t push_len = 0; 28534fa9e02dSMarcin Wojtas uint16_t delta = 0; 28544fa9e02dSMarcin Wojtas int i, rc, nsegs; 28554fa9e02dSMarcin Wojtas 28564fa9e02dSMarcin Wojtas mbuf_head_len = mbuf->m_len; 28574fa9e02dSMarcin Wojtas tx_info->mbuf = mbuf; 28584fa9e02dSMarcin Wojtas ena_buf = tx_info->bufs; 28594fa9e02dSMarcin Wojtas 28604fa9e02dSMarcin Wojtas if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 28614fa9e02dSMarcin Wojtas /* 28624fa9e02dSMarcin Wojtas * When the device is LLQ mode, the driver will copy 28634fa9e02dSMarcin Wojtas * the header into the device memory space. 28644fa9e02dSMarcin Wojtas * the ena_com layer assumes the header is in a linear 28654fa9e02dSMarcin Wojtas * memory space. 28664fa9e02dSMarcin Wojtas * This assumption might be wrong since part of the header 28674fa9e02dSMarcin Wojtas * can be in the fragmented buffers. 28684fa9e02dSMarcin Wojtas * First check if header fits in the mbuf. If not, copy it to 28694fa9e02dSMarcin Wojtas * separate buffer that will be holding linearized data. 28704fa9e02dSMarcin Wojtas */ 28714fa9e02dSMarcin Wojtas push_len = min_t(uint32_t, mbuf->m_pkthdr.len, 28724fa9e02dSMarcin Wojtas tx_ring->tx_max_header_size); 28734fa9e02dSMarcin Wojtas *header_len = push_len; 28744fa9e02dSMarcin Wojtas /* If header is in linear space, just point into mbuf's data. */ 28754fa9e02dSMarcin Wojtas if (likely(push_len <= mbuf_head_len)) { 28764fa9e02dSMarcin Wojtas *push_hdr = mbuf->m_data; 28774fa9e02dSMarcin Wojtas /* 28784fa9e02dSMarcin Wojtas * Otherwise, copy whole portion of header from multiple mbufs 28794fa9e02dSMarcin Wojtas * to intermediate buffer. 28804fa9e02dSMarcin Wojtas */ 28814fa9e02dSMarcin Wojtas } else { 28824fa9e02dSMarcin Wojtas m_copydata(mbuf, 0, push_len, 28834fa9e02dSMarcin Wojtas tx_ring->push_buf_intermediate_buf); 28844fa9e02dSMarcin Wojtas *push_hdr = tx_ring->push_buf_intermediate_buf; 28854fa9e02dSMarcin Wojtas 28864fa9e02dSMarcin Wojtas counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1); 28874fa9e02dSMarcin Wojtas delta = push_len - mbuf_head_len; 28884fa9e02dSMarcin Wojtas } 28894fa9e02dSMarcin Wojtas 28904fa9e02dSMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, 28914fa9e02dSMarcin Wojtas "mbuf: %p header_buf->vaddr: %p push_len: %d\n", 28924fa9e02dSMarcin Wojtas mbuf, *push_hdr, push_len); 28934fa9e02dSMarcin Wojtas 28944fa9e02dSMarcin Wojtas /* 28954fa9e02dSMarcin Wojtas * If header was in linear memory space, map for the dma rest of the data 28964fa9e02dSMarcin Wojtas * in the first mbuf of the mbuf chain. 28974fa9e02dSMarcin Wojtas */ 28984fa9e02dSMarcin Wojtas if (mbuf_head_len > push_len) { 28994fa9e02dSMarcin Wojtas rc = bus_dmamap_load(adapter->tx_buf_tag, 29004fa9e02dSMarcin Wojtas tx_info->map_head, 29014fa9e02dSMarcin Wojtas mbuf->m_data + push_len, mbuf_head_len - push_len, 29024fa9e02dSMarcin Wojtas ena_dmamap_llq, ena_buf, BUS_DMA_NOWAIT); 29034fa9e02dSMarcin Wojtas if (unlikely((rc != 0) || (ena_buf->paddr == 0))) 29044fa9e02dSMarcin Wojtas goto single_dma_error; 29054fa9e02dSMarcin Wojtas 29064fa9e02dSMarcin Wojtas ena_buf++; 29074fa9e02dSMarcin Wojtas tx_info->num_of_bufs++; 29084fa9e02dSMarcin Wojtas 29094fa9e02dSMarcin Wojtas tx_info->head_mapped = true; 29104fa9e02dSMarcin Wojtas } 29114fa9e02dSMarcin Wojtas mbuf = mbuf->m_next; 29124fa9e02dSMarcin Wojtas } else { 29134fa9e02dSMarcin Wojtas *push_hdr = NULL; 29144fa9e02dSMarcin Wojtas /* 29154fa9e02dSMarcin Wojtas * header_len is just a hint for the device. Because FreeBSD is not 29164fa9e02dSMarcin Wojtas * giving us information about packet header length and it is not 29174fa9e02dSMarcin Wojtas * guaranteed that all packet headers will be in the 1st mbuf, setting 29184fa9e02dSMarcin Wojtas * header_len to 0 is making the device ignore this value and resolve 29194fa9e02dSMarcin Wojtas * header on it's own. 29204fa9e02dSMarcin Wojtas */ 29214fa9e02dSMarcin Wojtas *header_len = 0; 29224fa9e02dSMarcin Wojtas } 29234fa9e02dSMarcin Wojtas 29244fa9e02dSMarcin Wojtas /* 29254fa9e02dSMarcin Wojtas * If header is in non linear space (delta > 0), then skip mbufs 29264fa9e02dSMarcin Wojtas * containing header and map the last one containing both header and the 29274fa9e02dSMarcin Wojtas * packet data. 29284fa9e02dSMarcin Wojtas * The first segment is already counted in. 29294fa9e02dSMarcin Wojtas * If LLQ is not supported, the loop will be skipped. 29304fa9e02dSMarcin Wojtas */ 29314fa9e02dSMarcin Wojtas while (delta > 0) { 29324fa9e02dSMarcin Wojtas frag_len = mbuf->m_len; 29334fa9e02dSMarcin Wojtas 29344fa9e02dSMarcin Wojtas /* 29354fa9e02dSMarcin Wojtas * If whole segment contains header just move to the 29364fa9e02dSMarcin Wojtas * next one and reduce delta. 29374fa9e02dSMarcin Wojtas */ 29384fa9e02dSMarcin Wojtas if (unlikely(delta >= frag_len)) { 29394fa9e02dSMarcin Wojtas delta -= frag_len; 29404fa9e02dSMarcin Wojtas } else { 29414fa9e02dSMarcin Wojtas /* 29424fa9e02dSMarcin Wojtas * Map rest of the packet data that was contained in 29434fa9e02dSMarcin Wojtas * the mbuf. 29444fa9e02dSMarcin Wojtas */ 29454fa9e02dSMarcin Wojtas rc = bus_dmamap_load(adapter->tx_buf_tag, 29464fa9e02dSMarcin Wojtas tx_info->map_head, mbuf->m_data + delta, 29474fa9e02dSMarcin Wojtas frag_len - delta, ena_dmamap_llq, ena_buf, 29484fa9e02dSMarcin Wojtas BUS_DMA_NOWAIT); 29494fa9e02dSMarcin Wojtas if (unlikely((rc != 0) || (ena_buf->paddr == 0))) 29504fa9e02dSMarcin Wojtas goto single_dma_error; 29514fa9e02dSMarcin Wojtas 29524fa9e02dSMarcin Wojtas ena_buf++; 29534fa9e02dSMarcin Wojtas tx_info->num_of_bufs++; 29544fa9e02dSMarcin Wojtas tx_info->head_mapped = true; 29554fa9e02dSMarcin Wojtas 29564fa9e02dSMarcin Wojtas delta = 0; 29574fa9e02dSMarcin Wojtas } 29584fa9e02dSMarcin Wojtas 29594fa9e02dSMarcin Wojtas mbuf = mbuf->m_next; 29604fa9e02dSMarcin Wojtas } 29614fa9e02dSMarcin Wojtas 29624fa9e02dSMarcin Wojtas if (mbuf == NULL) { 29634fa9e02dSMarcin Wojtas return (0); 29644fa9e02dSMarcin Wojtas } 29654fa9e02dSMarcin Wojtas 29664fa9e02dSMarcin Wojtas /* Map rest of the mbufs */ 29674fa9e02dSMarcin Wojtas rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map_seg, mbuf, 29684fa9e02dSMarcin Wojtas segs, &nsegs, BUS_DMA_NOWAIT); 29694fa9e02dSMarcin Wojtas if (unlikely((rc != 0) || (nsegs == 0))) { 29704fa9e02dSMarcin Wojtas ena_trace(ENA_WARNING, 297130425f93SMarcin Wojtas "dmamap load failed! err: %d nsegs: %d\n", rc, nsegs); 29724fa9e02dSMarcin Wojtas goto dma_error; 29734fa9e02dSMarcin Wojtas } 29744fa9e02dSMarcin Wojtas 29754fa9e02dSMarcin Wojtas for (i = 0; i < nsegs; i++) { 29764fa9e02dSMarcin Wojtas ena_buf->len = segs[i].ds_len; 29774fa9e02dSMarcin Wojtas ena_buf->paddr = segs[i].ds_addr; 29784fa9e02dSMarcin Wojtas ena_buf++; 29794fa9e02dSMarcin Wojtas } 29804fa9e02dSMarcin Wojtas tx_info->num_of_bufs += nsegs; 29814fa9e02dSMarcin Wojtas tx_info->seg_mapped = true; 29824fa9e02dSMarcin Wojtas 29834fa9e02dSMarcin Wojtas return (0); 29844fa9e02dSMarcin Wojtas 29854fa9e02dSMarcin Wojtas dma_error: 29864fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) 29874fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head); 29884fa9e02dSMarcin Wojtas single_dma_error: 29894fa9e02dSMarcin Wojtas counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1); 29904fa9e02dSMarcin Wojtas tx_info->mbuf = NULL; 29914fa9e02dSMarcin Wojtas return (rc); 29924fa9e02dSMarcin Wojtas } 29934fa9e02dSMarcin Wojtas 29941e9fb899SZbigniew Bodek static int 29951e9fb899SZbigniew Bodek ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) 29969b8d05b8SZbigniew Bodek { 29979b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 29989b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info; 29999b8d05b8SZbigniew Bodek struct ena_com_tx_ctx ena_tx_ctx; 30009b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev; 30019b8d05b8SZbigniew Bodek struct ena_com_io_sq* io_sq; 30029b8d05b8SZbigniew Bodek void *push_hdr; 30039b8d05b8SZbigniew Bodek uint16_t next_to_use; 30049b8d05b8SZbigniew Bodek uint16_t req_id; 30059b8d05b8SZbigniew Bodek uint16_t ena_qid; 30064fa9e02dSMarcin Wojtas uint16_t header_len; 30074fa9e02dSMarcin Wojtas int rc; 30081e9fb899SZbigniew Bodek int nb_hw_desc; 30099b8d05b8SZbigniew Bodek 30109b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 30119b8d05b8SZbigniew Bodek adapter = tx_ring->que->adapter; 30129b8d05b8SZbigniew Bodek ena_dev = adapter->ena_dev; 301390f4da8bSMarcin Wojtas io_sq = &ena_dev->io_sq_queues[ena_qid]; 30149b8d05b8SZbigniew Bodek 30151b069f1cSZbigniew Bodek rc = ena_check_and_collapse_mbuf(tx_ring, mbuf); 30163f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 30171e9fb899SZbigniew Bodek ena_trace(ENA_WARNING, 301830425f93SMarcin Wojtas "Failed to collapse mbuf! err: %d\n", rc); 30191e9fb899SZbigniew Bodek return (rc); 30209b8d05b8SZbigniew Bodek } 30219b8d05b8SZbigniew Bodek 302230425f93SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len); 30234fa9e02dSMarcin Wojtas 30249b8d05b8SZbigniew Bodek next_to_use = tx_ring->next_to_use; 30259b8d05b8SZbigniew Bodek req_id = tx_ring->free_tx_ids[next_to_use]; 30269b8d05b8SZbigniew Bodek tx_info = &tx_ring->tx_buffer_info[req_id]; 30279b8d05b8SZbigniew Bodek tx_info->num_of_bufs = 0; 30289b8d05b8SZbigniew Bodek 30294fa9e02dSMarcin Wojtas rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len); 30304fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 30314fa9e02dSMarcin Wojtas ena_trace(ENA_WARNING, "Failed to map TX mbuf\n"); 30324fa9e02dSMarcin Wojtas return (rc); 30339b8d05b8SZbigniew Bodek } 30349b8d05b8SZbigniew Bodek memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 30359b8d05b8SZbigniew Bodek ena_tx_ctx.ena_bufs = tx_info->bufs; 30369b8d05b8SZbigniew Bodek ena_tx_ctx.push_header = push_hdr; 30379b8d05b8SZbigniew Bodek ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 30389b8d05b8SZbigniew Bodek ena_tx_ctx.req_id = req_id; 30399b8d05b8SZbigniew Bodek ena_tx_ctx.header_len = header_len; 30409b8d05b8SZbigniew Bodek 30419b8d05b8SZbigniew Bodek /* Set flags and meta data */ 30421e9fb899SZbigniew Bodek ena_tx_csum(&ena_tx_ctx, *mbuf); 3043af66d7d0SMarcin Wojtas 3044af66d7d0SMarcin Wojtas if (tx_ring->acum_pkts == DB_THRESHOLD || 3045af66d7d0SMarcin Wojtas ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) { 3046af66d7d0SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, 3047af66d7d0SMarcin Wojtas "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", 3048af66d7d0SMarcin Wojtas tx_ring->que->id); 3049af66d7d0SMarcin Wojtas wmb(); 3050af66d7d0SMarcin Wojtas ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 3051af66d7d0SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.doorbells, 1); 3052af66d7d0SMarcin Wojtas tx_ring->acum_pkts = 0; 3053af66d7d0SMarcin Wojtas } 3054af66d7d0SMarcin Wojtas 30559b8d05b8SZbigniew Bodek /* Prepare the packet's descriptors and send them to device */ 30569b8d05b8SZbigniew Bodek rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc); 30573f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 3058e3cecf70SMarcin Wojtas if (likely(rc == ENA_COM_NO_MEM)) { 3059e3cecf70SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, 3060e3cecf70SMarcin Wojtas "tx ring[%d] if out of space\n", tx_ring->que->id); 3061e3cecf70SMarcin Wojtas } else { 3062e3cecf70SMarcin Wojtas device_printf(adapter->pdev, 3063e3cecf70SMarcin Wojtas "failed to prepare tx bufs\n"); 3064e3cecf70SMarcin Wojtas } 30650052f3b5SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1); 30669b8d05b8SZbigniew Bodek goto dma_error; 30679b8d05b8SZbigniew Bodek } 30689b8d05b8SZbigniew Bodek 30699b8d05b8SZbigniew Bodek counter_enter(); 30709b8d05b8SZbigniew Bodek counter_u64_add_protected(tx_ring->tx_stats.cnt, 1); 30710bdffe59SMarcin Wojtas counter_u64_add_protected(tx_ring->tx_stats.bytes, 30720bdffe59SMarcin Wojtas (*mbuf)->m_pkthdr.len); 307330217e2dSMarcin Wojtas 307430217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.tx_packets, 1); 307530217e2dSMarcin Wojtas counter_u64_add_protected(adapter->hw_stats.tx_bytes, 307630217e2dSMarcin Wojtas (*mbuf)->m_pkthdr.len); 30779b8d05b8SZbigniew Bodek counter_exit(); 30789b8d05b8SZbigniew Bodek 30799b8d05b8SZbigniew Bodek tx_info->tx_descs = nb_hw_desc; 30809b8d05b8SZbigniew Bodek getbinuptime(&tx_info->timestamp); 30819b8d05b8SZbigniew Bodek tx_info->print_once = true; 30829b8d05b8SZbigniew Bodek 30839b8d05b8SZbigniew Bodek tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 30849b8d05b8SZbigniew Bodek tx_ring->ring_size); 30859b8d05b8SZbigniew Bodek 30865cb9db07SMarcin Wojtas /* stop the queue when no more space available, the packet can have up 30875cb9db07SMarcin Wojtas * to sgl_size + 2. one for the meta descriptor and one for header 30885cb9db07SMarcin Wojtas * (if the header is larger than tx_max_header_size). 30895cb9db07SMarcin Wojtas */ 30905cb9db07SMarcin Wojtas if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 30915cb9db07SMarcin Wojtas adapter->max_tx_sgl_size + 2))) { 30925cb9db07SMarcin Wojtas ena_trace(ENA_DBG | ENA_TXPTH, "Stop queue %d\n", 30935cb9db07SMarcin Wojtas tx_ring->que->id); 30945cb9db07SMarcin Wojtas 30955cb9db07SMarcin Wojtas tx_ring->running = false; 30965cb9db07SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.queue_stop, 1); 30975cb9db07SMarcin Wojtas 30985cb9db07SMarcin Wojtas /* There is a rare condition where this function decides to 30995cb9db07SMarcin Wojtas * stop the queue but meanwhile tx_cleanup() updates 31005cb9db07SMarcin Wojtas * next_to_completion and terminates. 31015cb9db07SMarcin Wojtas * The queue will remain stopped forever. 31025cb9db07SMarcin Wojtas * To solve this issue this function performs mb(), checks 31035cb9db07SMarcin Wojtas * the wakeup condition and wakes up the queue if needed. 31045cb9db07SMarcin Wojtas */ 31055cb9db07SMarcin Wojtas mb(); 31065cb9db07SMarcin Wojtas 31075cb9db07SMarcin Wojtas if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 31085cb9db07SMarcin Wojtas ENA_TX_RESUME_THRESH)) { 31095cb9db07SMarcin Wojtas tx_ring->running = true; 31105cb9db07SMarcin Wojtas counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1); 31115cb9db07SMarcin Wojtas } 31125cb9db07SMarcin Wojtas } 31135cb9db07SMarcin Wojtas 31144fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) 31154fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, 31164fa9e02dSMarcin Wojtas BUS_DMASYNC_PREWRITE); 31174fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) 31184fa9e02dSMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, 31190bdffe59SMarcin Wojtas BUS_DMASYNC_PREWRITE); 31209b8d05b8SZbigniew Bodek 31219b8d05b8SZbigniew Bodek return (0); 31229b8d05b8SZbigniew Bodek 31239b8d05b8SZbigniew Bodek dma_error: 31249b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 31254fa9e02dSMarcin Wojtas if (tx_info->seg_mapped == true) { 31264fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_seg); 31274fa9e02dSMarcin Wojtas tx_info->seg_mapped = false; 31284fa9e02dSMarcin Wojtas } 31294fa9e02dSMarcin Wojtas if (tx_info->head_mapped == true) { 31304fa9e02dSMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head); 31314fa9e02dSMarcin Wojtas tx_info->head_mapped = false; 31324fa9e02dSMarcin Wojtas } 31339b8d05b8SZbigniew Bodek 31349b8d05b8SZbigniew Bodek return (rc); 31359b8d05b8SZbigniew Bodek } 31369b8d05b8SZbigniew Bodek 31379b8d05b8SZbigniew Bodek static void 31389b8d05b8SZbigniew Bodek ena_start_xmit(struct ena_ring *tx_ring) 31399b8d05b8SZbigniew Bodek { 31409b8d05b8SZbigniew Bodek struct mbuf *mbuf; 31419b8d05b8SZbigniew Bodek struct ena_adapter *adapter = tx_ring->adapter; 31429b8d05b8SZbigniew Bodek struct ena_com_io_sq* io_sq; 31439b8d05b8SZbigniew Bodek int ena_qid; 31449b8d05b8SZbigniew Bodek int ret = 0; 31459b8d05b8SZbigniew Bodek 31463f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) 31479b8d05b8SZbigniew Bodek return; 31489b8d05b8SZbigniew Bodek 3149fd43fd2aSMarcin Wojtas if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))) 31509b8d05b8SZbigniew Bodek return; 31519b8d05b8SZbigniew Bodek 31529b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id); 31539b8d05b8SZbigniew Bodek io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; 31549b8d05b8SZbigniew Bodek 31559b8d05b8SZbigniew Bodek while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) { 31569b8d05b8SZbigniew Bodek ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and" 315730425f93SMarcin Wojtas " header csum flags %#jx\n", 31584e8acd84SMarcin Wojtas mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags); 31599b8d05b8SZbigniew Bodek 31605cb9db07SMarcin Wojtas if (unlikely(!tx_ring->running)) { 31615cb9db07SMarcin Wojtas drbr_putback(adapter->ifp, tx_ring->br, mbuf); 31625cb9db07SMarcin Wojtas break; 31635cb9db07SMarcin Wojtas } 31649b8d05b8SZbigniew Bodek 31653f9ed7abSMarcin Wojtas if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) { 31669b8d05b8SZbigniew Bodek if (ret == ENA_COM_NO_MEM) { 31679b8d05b8SZbigniew Bodek drbr_putback(adapter->ifp, tx_ring->br, mbuf); 31689b8d05b8SZbigniew Bodek } else if (ret == ENA_COM_NO_SPACE) { 31699b8d05b8SZbigniew Bodek drbr_putback(adapter->ifp, tx_ring->br, mbuf); 31709b8d05b8SZbigniew Bodek } else { 31719b8d05b8SZbigniew Bodek m_freem(mbuf); 31729b8d05b8SZbigniew Bodek drbr_advance(adapter->ifp, tx_ring->br); 31739b8d05b8SZbigniew Bodek } 31749b8d05b8SZbigniew Bodek 31759b8d05b8SZbigniew Bodek break; 31769b8d05b8SZbigniew Bodek } 31779b8d05b8SZbigniew Bodek 3178b4b29032SZbigniew Bodek drbr_advance(adapter->ifp, tx_ring->br); 3179b4b29032SZbigniew Bodek 31803f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(adapter->ifp) & 31813f9ed7abSMarcin Wojtas IFF_DRV_RUNNING) == 0)) 31829b8d05b8SZbigniew Bodek return; 31839b8d05b8SZbigniew Bodek 3184af66d7d0SMarcin Wojtas tx_ring->acum_pkts++; 31859b8d05b8SZbigniew Bodek 31869b8d05b8SZbigniew Bodek BPF_MTAP(adapter->ifp, mbuf); 3187af66d7d0SMarcin Wojtas } 31889b8d05b8SZbigniew Bodek 3189af66d7d0SMarcin Wojtas if (likely(tx_ring->acum_pkts != 0)) { 31909b8d05b8SZbigniew Bodek wmb(); 31919b8d05b8SZbigniew Bodek /* Trigger the dma engine */ 31929b8d05b8SZbigniew Bodek ena_com_write_sq_doorbell(io_sq); 31939b8d05b8SZbigniew Bodek counter_u64_add(tx_ring->tx_stats.doorbells, 1); 3194af66d7d0SMarcin Wojtas tx_ring->acum_pkts = 0; 31959b8d05b8SZbigniew Bodek } 31969b8d05b8SZbigniew Bodek 31975cb9db07SMarcin Wojtas if (unlikely(!tx_ring->running)) 31985cb9db07SMarcin Wojtas taskqueue_enqueue(tx_ring->que->cleanup_tq, 31995cb9db07SMarcin Wojtas &tx_ring->que->cleanup_task); 32009b8d05b8SZbigniew Bodek } 32019b8d05b8SZbigniew Bodek 32029b8d05b8SZbigniew Bodek static void 32039b8d05b8SZbigniew Bodek ena_deferred_mq_start(void *arg, int pending) 32049b8d05b8SZbigniew Bodek { 32059b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = (struct ena_ring *)arg; 32069b8d05b8SZbigniew Bodek struct ifnet *ifp = tx_ring->adapter->ifp; 32079b8d05b8SZbigniew Bodek 32080bdffe59SMarcin Wojtas while (!drbr_empty(ifp, tx_ring->br) && 32095cb9db07SMarcin Wojtas tx_ring->running && 32100bdffe59SMarcin Wojtas (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 32119b8d05b8SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 32129b8d05b8SZbigniew Bodek ena_start_xmit(tx_ring); 32139b8d05b8SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 32149b8d05b8SZbigniew Bodek } 32159b8d05b8SZbigniew Bodek } 32169b8d05b8SZbigniew Bodek 32179b8d05b8SZbigniew Bodek static int 32189b8d05b8SZbigniew Bodek ena_mq_start(if_t ifp, struct mbuf *m) 32199b8d05b8SZbigniew Bodek { 32209b8d05b8SZbigniew Bodek struct ena_adapter *adapter = ifp->if_softc; 32219b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 32229b8d05b8SZbigniew Bodek int ret, is_drbr_empty; 32239b8d05b8SZbigniew Bodek uint32_t i; 32249b8d05b8SZbigniew Bodek 32253f9ed7abSMarcin Wojtas if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)) 32269b8d05b8SZbigniew Bodek return (ENODEV); 32279b8d05b8SZbigniew Bodek 32289b8d05b8SZbigniew Bodek /* Which queue to use */ 32299b8d05b8SZbigniew Bodek /* 32309b8d05b8SZbigniew Bodek * If everything is setup correctly, it should be the 32319b8d05b8SZbigniew Bodek * same bucket that the current CPU we're on is. 32329b8d05b8SZbigniew Bodek * It should improve performance. 32339b8d05b8SZbigniew Bodek */ 32349b8d05b8SZbigniew Bodek if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 32359b8d05b8SZbigniew Bodek i = m->m_pkthdr.flowid % adapter->num_queues; 32369b8d05b8SZbigniew Bodek } else { 32379b8d05b8SZbigniew Bodek i = curcpu % adapter->num_queues; 32389b8d05b8SZbigniew Bodek } 32399b8d05b8SZbigniew Bodek tx_ring = &adapter->tx_ring[i]; 32409b8d05b8SZbigniew Bodek 32419b8d05b8SZbigniew Bodek /* Check if drbr is empty before putting packet */ 32429b8d05b8SZbigniew Bodek is_drbr_empty = drbr_empty(ifp, tx_ring->br); 32439b8d05b8SZbigniew Bodek ret = drbr_enqueue(ifp, tx_ring->br, m); 32443f9ed7abSMarcin Wojtas if (unlikely(ret != 0)) { 32459b8d05b8SZbigniew Bodek taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 32469b8d05b8SZbigniew Bodek return (ret); 32479b8d05b8SZbigniew Bodek } 32489b8d05b8SZbigniew Bodek 324967ec48bbSMarcin Wojtas if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) { 32509b8d05b8SZbigniew Bodek ena_start_xmit(tx_ring); 32519b8d05b8SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 32529b8d05b8SZbigniew Bodek } else { 32539b8d05b8SZbigniew Bodek taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); 32549b8d05b8SZbigniew Bodek } 32559b8d05b8SZbigniew Bodek 32569b8d05b8SZbigniew Bodek return (0); 32579b8d05b8SZbigniew Bodek } 32589b8d05b8SZbigniew Bodek 32599b8d05b8SZbigniew Bodek static void 32609b8d05b8SZbigniew Bodek ena_qflush(if_t ifp) 32619b8d05b8SZbigniew Bodek { 32629b8d05b8SZbigniew Bodek struct ena_adapter *adapter = ifp->if_softc; 32639b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = adapter->tx_ring; 32649b8d05b8SZbigniew Bodek int i; 32659b8d05b8SZbigniew Bodek 32669b8d05b8SZbigniew Bodek for(i = 0; i < adapter->num_queues; ++i, ++tx_ring) 32670bdffe59SMarcin Wojtas if (!drbr_empty(ifp, tx_ring->br)) { 32689b8d05b8SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 32699b8d05b8SZbigniew Bodek drbr_flush(ifp, tx_ring->br); 32709b8d05b8SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 32719b8d05b8SZbigniew Bodek } 32729b8d05b8SZbigniew Bodek 32739b8d05b8SZbigniew Bodek if_qflush(ifp); 32749b8d05b8SZbigniew Bodek } 32759b8d05b8SZbigniew Bodek 32760bdffe59SMarcin Wojtas static int 32770bdffe59SMarcin Wojtas ena_calc_io_queue_num(struct ena_adapter *adapter, 32789b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx) 32799b8d05b8SZbigniew Bodek { 32806064f289SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 32814fa9e02dSMarcin Wojtas int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num; 32829b8d05b8SZbigniew Bodek 32836064f289SMarcin Wojtas /* Regular queues capabilities */ 32846064f289SMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 32856064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 32866064f289SMarcin Wojtas &get_feat_ctx->max_queue_ext.max_queue_ext; 32874fa9e02dSMarcin Wojtas io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, 32884fa9e02dSMarcin Wojtas max_queue_ext->max_rx_cq_num); 32896064f289SMarcin Wojtas 32904fa9e02dSMarcin Wojtas io_tx_sq_num = max_queue_ext->max_tx_sq_num; 32914fa9e02dSMarcin Wojtas io_tx_cq_num = max_queue_ext->max_tx_cq_num; 32926064f289SMarcin Wojtas } else { 32936064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 32946064f289SMarcin Wojtas &get_feat_ctx->max_queues; 32954fa9e02dSMarcin Wojtas io_tx_sq_num = max_queues->max_sq_num; 32964fa9e02dSMarcin Wojtas io_tx_cq_num = max_queues->max_cq_num; 32974fa9e02dSMarcin Wojtas io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); 32986064f289SMarcin Wojtas } 32999b8d05b8SZbigniew Bodek 33004fa9e02dSMarcin Wojtas /* In case of LLQ use the llq fields for the tx SQ/CQ */ 33014fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 33024fa9e02dSMarcin Wojtas io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 33034fa9e02dSMarcin Wojtas 33049b8d05b8SZbigniew Bodek io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); 33054fa9e02dSMarcin Wojtas io_queue_num = min_t(int, io_queue_num, io_rx_num); 33064fa9e02dSMarcin Wojtas io_queue_num = min_t(int, io_queue_num, io_tx_sq_num); 33074fa9e02dSMarcin Wojtas io_queue_num = min_t(int, io_queue_num, io_tx_cq_num); 33089b8d05b8SZbigniew Bodek /* 1 IRQ for for mgmnt and 1 IRQ for each TX/RX pair */ 33099b8d05b8SZbigniew Bodek io_queue_num = min_t(int, io_queue_num, 33109b8d05b8SZbigniew Bodek pci_msix_count(adapter->pdev) - 1); 33119b8d05b8SZbigniew Bodek 33120bdffe59SMarcin Wojtas return (io_queue_num); 33139b8d05b8SZbigniew Bodek } 33149b8d05b8SZbigniew Bodek 33150bdffe59SMarcin Wojtas static int 33164fa9e02dSMarcin Wojtas ena_enable_wc(struct resource *res) 33174fa9e02dSMarcin Wojtas { 3318472d4784SMarcin Wojtas #if defined(__i386) || defined(__amd64) || defined(__aarch64__) 33194fa9e02dSMarcin Wojtas vm_offset_t va; 33204fa9e02dSMarcin Wojtas vm_size_t len; 33214fa9e02dSMarcin Wojtas int rc; 33224fa9e02dSMarcin Wojtas 33234fa9e02dSMarcin Wojtas va = (vm_offset_t)rman_get_virtual(res); 33244fa9e02dSMarcin Wojtas len = rman_get_size(res); 33254fa9e02dSMarcin Wojtas /* Enable write combining */ 3326472d4784SMarcin Wojtas rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING); 33274fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 33284fa9e02dSMarcin Wojtas ena_trace(ENA_ALERT, "pmap_change_attr failed, %d\n", rc); 33294fa9e02dSMarcin Wojtas return (rc); 33304fa9e02dSMarcin Wojtas } 33314fa9e02dSMarcin Wojtas 33324fa9e02dSMarcin Wojtas return (0); 33334fa9e02dSMarcin Wojtas #endif 33344fa9e02dSMarcin Wojtas return (EOPNOTSUPP); 33354fa9e02dSMarcin Wojtas } 33364fa9e02dSMarcin Wojtas 33374fa9e02dSMarcin Wojtas static int 33384fa9e02dSMarcin Wojtas ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, 33394fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq, 33404fa9e02dSMarcin Wojtas struct ena_llq_configurations *llq_default_configurations) 33414fa9e02dSMarcin Wojtas { 33424fa9e02dSMarcin Wojtas struct ena_adapter *adapter = device_get_softc(pdev); 33434fa9e02dSMarcin Wojtas int rc, rid; 33444fa9e02dSMarcin Wojtas uint32_t llq_feature_mask; 33454fa9e02dSMarcin Wojtas 33464fa9e02dSMarcin Wojtas llq_feature_mask = 1 << ENA_ADMIN_LLQ; 33474fa9e02dSMarcin Wojtas if (!(ena_dev->supported_features & llq_feature_mask)) { 33484fa9e02dSMarcin Wojtas device_printf(pdev, 33494fa9e02dSMarcin Wojtas "LLQ is not supported. Fallback to host mode policy.\n"); 33504fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 33514fa9e02dSMarcin Wojtas return (0); 33524fa9e02dSMarcin Wojtas } 33534fa9e02dSMarcin Wojtas 33544fa9e02dSMarcin Wojtas rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 33554fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 33564fa9e02dSMarcin Wojtas device_printf(pdev, "Failed to configure the device mode. " 33574fa9e02dSMarcin Wojtas "Fallback to host mode policy.\n"); 33584fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 33594fa9e02dSMarcin Wojtas return (0); 33604fa9e02dSMarcin Wojtas } 33614fa9e02dSMarcin Wojtas 33624fa9e02dSMarcin Wojtas /* Nothing to config, exit */ 33634fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 33644fa9e02dSMarcin Wojtas return (0); 33654fa9e02dSMarcin Wojtas 33664fa9e02dSMarcin Wojtas /* Try to allocate resources for LLQ bar */ 33674fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_MEM_BAR); 33684fa9e02dSMarcin Wojtas adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 33694fa9e02dSMarcin Wojtas &rid, RF_ACTIVE); 33704fa9e02dSMarcin Wojtas if (unlikely(adapter->memory == NULL)) { 33714fa9e02dSMarcin Wojtas device_printf(pdev, "unable to allocate LLQ bar resource. " 33724fa9e02dSMarcin Wojtas "Fallback to host mode policy.\n"); 33734fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 33744fa9e02dSMarcin Wojtas return (0); 33754fa9e02dSMarcin Wojtas } 33764fa9e02dSMarcin Wojtas 33774fa9e02dSMarcin Wojtas /* Enable write combining for better LLQ performance */ 33784fa9e02dSMarcin Wojtas rc = ena_enable_wc(adapter->memory); 33794fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 33804fa9e02dSMarcin Wojtas device_printf(pdev, "failed to enable write combining.\n"); 33814fa9e02dSMarcin Wojtas return (rc); 33824fa9e02dSMarcin Wojtas } 33834fa9e02dSMarcin Wojtas 33844fa9e02dSMarcin Wojtas /* 33854fa9e02dSMarcin Wojtas * Save virtual address of the device's memory region 33864fa9e02dSMarcin Wojtas * for the ena_com layer. 33874fa9e02dSMarcin Wojtas */ 33884fa9e02dSMarcin Wojtas ena_dev->mem_bar = rman_get_virtual(adapter->memory); 33894fa9e02dSMarcin Wojtas 33904fa9e02dSMarcin Wojtas return (0); 33914fa9e02dSMarcin Wojtas } 33924fa9e02dSMarcin Wojtas 33934fa9e02dSMarcin Wojtas static inline 33944fa9e02dSMarcin Wojtas void set_default_llq_configurations(struct ena_llq_configurations *llq_config) 33954fa9e02dSMarcin Wojtas { 33964fa9e02dSMarcin Wojtas llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 33974fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 33984fa9e02dSMarcin Wojtas llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 33994fa9e02dSMarcin Wojtas llq_config->llq_num_decs_before_header = 34004fa9e02dSMarcin Wojtas ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 34014fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size_value = 128; 34024fa9e02dSMarcin Wojtas } 34034fa9e02dSMarcin Wojtas 34044fa9e02dSMarcin Wojtas static int 34056064f289SMarcin Wojtas ena_calc_queue_size(struct ena_adapter *adapter, 34066064f289SMarcin Wojtas struct ena_calc_queue_size_ctx *ctx) 34079b8d05b8SZbigniew Bodek { 34084fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 34094fa9e02dSMarcin Wojtas struct ena_com_dev *ena_dev = ctx->ena_dev; 34106064f289SMarcin Wojtas uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; 34116064f289SMarcin Wojtas uint32_t rx_queue_size = adapter->rx_ring_size; 34129b8d05b8SZbigniew Bodek 34134fa9e02dSMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 34146064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 34156064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 34166064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 34176064f289SMarcin Wojtas max_queue_ext->max_rx_cq_depth); 34186064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 34196064f289SMarcin Wojtas max_queue_ext->max_rx_sq_depth); 34206064f289SMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 34216064f289SMarcin Wojtas max_queue_ext->max_tx_cq_depth); 34224fa9e02dSMarcin Wojtas 34234fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 34244fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 34254fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 34264fa9e02dSMarcin Wojtas llq->max_llq_depth); 34274fa9e02dSMarcin Wojtas else 34286064f289SMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 34296064f289SMarcin Wojtas max_queue_ext->max_tx_sq_depth); 34304fa9e02dSMarcin Wojtas 34316064f289SMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 34326064f289SMarcin Wojtas max_queue_ext->max_per_packet_rx_descs); 34336064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 34346064f289SMarcin Wojtas max_queue_ext->max_per_packet_tx_descs); 34356064f289SMarcin Wojtas } else { 34366064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 34376064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queues; 34386064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 34396064f289SMarcin Wojtas max_queues->max_cq_depth); 34406064f289SMarcin Wojtas rx_queue_size = min_t(uint32_t, rx_queue_size, 34416064f289SMarcin Wojtas max_queues->max_sq_depth); 34424fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 34434fa9e02dSMarcin Wojtas max_queues->max_cq_depth); 34444fa9e02dSMarcin Wojtas 34454fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 34464fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 34474fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 34484fa9e02dSMarcin Wojtas llq->max_llq_depth); 34494fa9e02dSMarcin Wojtas else 34504fa9e02dSMarcin Wojtas tx_queue_size = min_t(uint32_t, tx_queue_size, 34514fa9e02dSMarcin Wojtas max_queues->max_sq_depth); 34524fa9e02dSMarcin Wojtas 34536064f289SMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 34546064f289SMarcin Wojtas max_queues->max_packet_tx_descs); 34556064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 34566064f289SMarcin Wojtas max_queues->max_packet_rx_descs); 34576064f289SMarcin Wojtas } 34589b8d05b8SZbigniew Bodek 34599b8d05b8SZbigniew Bodek /* round down to the nearest power of 2 */ 34606064f289SMarcin Wojtas rx_queue_size = 1 << (fls(rx_queue_size) - 1); 34616064f289SMarcin Wojtas tx_queue_size = 1 << (fls(tx_queue_size) - 1); 34626064f289SMarcin Wojtas 34636064f289SMarcin Wojtas if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) { 34646064f289SMarcin Wojtas device_printf(ctx->pdev, "Invalid queue size\n"); 34656064f289SMarcin Wojtas return (EFAULT); 34669b8d05b8SZbigniew Bodek } 34679b8d05b8SZbigniew Bodek 34686064f289SMarcin Wojtas ctx->rx_queue_size = rx_queue_size; 34696064f289SMarcin Wojtas ctx->tx_queue_size = tx_queue_size; 34706064f289SMarcin Wojtas 34716064f289SMarcin Wojtas return (0); 34729b8d05b8SZbigniew Bodek } 34739b8d05b8SZbigniew Bodek 34746064f289SMarcin Wojtas static int 34756064f289SMarcin Wojtas ena_handle_updated_queues(struct ena_adapter *adapter, 34766064f289SMarcin Wojtas struct ena_com_dev_get_features_ctx *get_feat_ctx) 34776064f289SMarcin Wojtas { 34786064f289SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 34796064f289SMarcin Wojtas struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 34806064f289SMarcin Wojtas device_t pdev = adapter->pdev; 34816064f289SMarcin Wojtas bool are_queues_changed = false; 34826064f289SMarcin Wojtas int io_queue_num, rc; 34839b8d05b8SZbigniew Bodek 34846064f289SMarcin Wojtas calc_queue_ctx.ena_dev = ena_dev; 34856064f289SMarcin Wojtas calc_queue_ctx.get_feat_ctx = get_feat_ctx; 34866064f289SMarcin Wojtas calc_queue_ctx.pdev = pdev; 34876064f289SMarcin Wojtas 34886064f289SMarcin Wojtas io_queue_num = ena_calc_io_queue_num(adapter, get_feat_ctx); 34896064f289SMarcin Wojtas rc = ena_calc_queue_size(adapter, &calc_queue_ctx); 34906064f289SMarcin Wojtas if (unlikely(rc != 0 || io_queue_num <= 0)) 34916064f289SMarcin Wojtas return EFAULT; 34926064f289SMarcin Wojtas 34936064f289SMarcin Wojtas if (adapter->tx_ring->buf_ring_size != adapter->buf_ring_size) 34946064f289SMarcin Wojtas are_queues_changed = true; 34956064f289SMarcin Wojtas 34966064f289SMarcin Wojtas if (unlikely(adapter->tx_ring_size > calc_queue_ctx.tx_queue_size || 34976064f289SMarcin Wojtas adapter->rx_ring_size > calc_queue_ctx.rx_queue_size)) { 34986064f289SMarcin Wojtas device_printf(pdev, 34996064f289SMarcin Wojtas "Not enough resources to allocate requested queue sizes " 35006064f289SMarcin Wojtas "(TX,RX)=(%d,%d), falling back to queue sizes " 35016064f289SMarcin Wojtas "(TX,RX)=(%d,%d)\n", 35026064f289SMarcin Wojtas adapter->tx_ring_size, 35036064f289SMarcin Wojtas adapter->rx_ring_size, 35046064f289SMarcin Wojtas calc_queue_ctx.tx_queue_size, 35056064f289SMarcin Wojtas calc_queue_ctx.rx_queue_size); 35066064f289SMarcin Wojtas adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 35076064f289SMarcin Wojtas adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 35086064f289SMarcin Wojtas adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 35096064f289SMarcin Wojtas adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 35106064f289SMarcin Wojtas are_queues_changed = true; 35116064f289SMarcin Wojtas } 35126064f289SMarcin Wojtas 35136064f289SMarcin Wojtas if (unlikely(adapter->num_queues > io_queue_num)) { 35146064f289SMarcin Wojtas device_printf(pdev, 35156064f289SMarcin Wojtas "Not enough resources to allocate %d queues, " 35166064f289SMarcin Wojtas "falling back to %d queues\n", 35176064f289SMarcin Wojtas adapter->num_queues, io_queue_num); 35186064f289SMarcin Wojtas adapter->num_queues = io_queue_num; 3519fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)) { 35206064f289SMarcin Wojtas ena_com_rss_destroy(ena_dev); 35216064f289SMarcin Wojtas rc = ena_rss_init_default(adapter); 35226064f289SMarcin Wojtas if (unlikely(rc != 0) && (rc != EOPNOTSUPP)) { 35236064f289SMarcin Wojtas device_printf(pdev, "Cannot init RSS rc: %d\n", 35246064f289SMarcin Wojtas rc); 35256064f289SMarcin Wojtas return (rc); 35266064f289SMarcin Wojtas } 35276064f289SMarcin Wojtas } 35286064f289SMarcin Wojtas are_queues_changed = true; 35296064f289SMarcin Wojtas } 35306064f289SMarcin Wojtas 35316064f289SMarcin Wojtas if (unlikely(are_queues_changed)) { 35326064f289SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 35336064f289SMarcin Wojtas ena_init_io_rings(adapter); 35346064f289SMarcin Wojtas } 35356064f289SMarcin Wojtas 35366064f289SMarcin Wojtas return (0); 35379b8d05b8SZbigniew Bodek } 35389b8d05b8SZbigniew Bodek 35390bdffe59SMarcin Wojtas static int 35400bdffe59SMarcin Wojtas ena_rss_init_default(struct ena_adapter *adapter) 35419b8d05b8SZbigniew Bodek { 35429b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 35439b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 35449b8d05b8SZbigniew Bodek int qid, rc, i; 35459b8d05b8SZbigniew Bodek 35469b8d05b8SZbigniew Bodek rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 35470bdffe59SMarcin Wojtas if (unlikely(rc != 0)) { 35484e8acd84SMarcin Wojtas device_printf(dev, "Cannot init indirect table\n"); 35497d2544e6SMarcin Wojtas return (rc); 35509b8d05b8SZbigniew Bodek } 35519b8d05b8SZbigniew Bodek 35529b8d05b8SZbigniew Bodek for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 35539b8d05b8SZbigniew Bodek qid = i % adapter->num_queues; 35549b8d05b8SZbigniew Bodek rc = ena_com_indirect_table_fill_entry(ena_dev, i, 35559b8d05b8SZbigniew Bodek ENA_IO_RXQ_IDX(qid)); 35560bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 35579b8d05b8SZbigniew Bodek device_printf(dev, "Cannot fill indirect table\n"); 35587d2544e6SMarcin Wojtas goto err_rss_destroy; 35599b8d05b8SZbigniew Bodek } 35609b8d05b8SZbigniew Bodek } 35619b8d05b8SZbigniew Bodek 35629b8d05b8SZbigniew Bodek rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 35639b8d05b8SZbigniew Bodek ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 35640bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 35659b8d05b8SZbigniew Bodek device_printf(dev, "Cannot fill hash function\n"); 35667d2544e6SMarcin Wojtas goto err_rss_destroy; 35679b8d05b8SZbigniew Bodek } 35689b8d05b8SZbigniew Bodek 35699b8d05b8SZbigniew Bodek rc = ena_com_set_default_hash_ctrl(ena_dev); 35700bdffe59SMarcin Wojtas if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { 35719b8d05b8SZbigniew Bodek device_printf(dev, "Cannot fill hash control\n"); 35727d2544e6SMarcin Wojtas goto err_rss_destroy; 35739b8d05b8SZbigniew Bodek } 35749b8d05b8SZbigniew Bodek 35759b8d05b8SZbigniew Bodek return (0); 35769b8d05b8SZbigniew Bodek 35777d2544e6SMarcin Wojtas err_rss_destroy: 35789b8d05b8SZbigniew Bodek ena_com_rss_destroy(ena_dev); 35799b8d05b8SZbigniew Bodek return (rc); 35809b8d05b8SZbigniew Bodek } 35819b8d05b8SZbigniew Bodek 35829b8d05b8SZbigniew Bodek static void 35839b8d05b8SZbigniew Bodek ena_rss_init_default_deferred(void *arg) 35849b8d05b8SZbigniew Bodek { 35859b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 35869b8d05b8SZbigniew Bodek devclass_t dc; 35879b8d05b8SZbigniew Bodek int max; 35889b8d05b8SZbigniew Bodek int rc; 35899b8d05b8SZbigniew Bodek 35909b8d05b8SZbigniew Bodek dc = devclass_find("ena"); 35913f9ed7abSMarcin Wojtas if (unlikely(dc == NULL)) { 35924e8acd84SMarcin Wojtas ena_trace(ENA_ALERT, "No devclass ena\n"); 35939b8d05b8SZbigniew Bodek return; 35949b8d05b8SZbigniew Bodek } 35959b8d05b8SZbigniew Bodek 35969b8d05b8SZbigniew Bodek max = devclass_get_maxunit(dc); 35979b8d05b8SZbigniew Bodek while (max-- >= 0) { 35989b8d05b8SZbigniew Bodek adapter = devclass_get_softc(dc, max); 35999b8d05b8SZbigniew Bodek if (adapter != NULL) { 36009b8d05b8SZbigniew Bodek rc = ena_rss_init_default(adapter); 3601fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter); 36023f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36039b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 36049b8d05b8SZbigniew Bodek "WARNING: RSS was not properly initialized," 36050bdffe59SMarcin Wojtas " it will affect bandwidth\n"); 3606fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter); 36079b8d05b8SZbigniew Bodek } 36089b8d05b8SZbigniew Bodek } 36099b8d05b8SZbigniew Bodek } 36109b8d05b8SZbigniew Bodek } 36119b8d05b8SZbigniew Bodek SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL); 36129b8d05b8SZbigniew Bodek 36130bdffe59SMarcin Wojtas static void 361446021271SMarcin Wojtas ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) 36159b8d05b8SZbigniew Bodek { 36169b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info; 361746021271SMarcin Wojtas uintptr_t rid; 36189b8d05b8SZbigniew Bodek int rc; 36199b8d05b8SZbigniew Bodek 36209b8d05b8SZbigniew Bodek /* Allocate only the host info */ 36219b8d05b8SZbigniew Bodek rc = ena_com_allocate_host_info(ena_dev); 36223f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36239b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, "Cannot allocate host info\n"); 36249b8d05b8SZbigniew Bodek return; 36259b8d05b8SZbigniew Bodek } 36269b8d05b8SZbigniew Bodek 36279b8d05b8SZbigniew Bodek host_info = ena_dev->host_attr.host_info; 36289b8d05b8SZbigniew Bodek 362946021271SMarcin Wojtas if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) 363046021271SMarcin Wojtas host_info->bdf = rid; 36319b8d05b8SZbigniew Bodek host_info->os_type = ENA_ADMIN_OS_FREEBSD; 36329b8d05b8SZbigniew Bodek host_info->kernel_ver = osreldate; 36339b8d05b8SZbigniew Bodek 36349b8d05b8SZbigniew Bodek sprintf(host_info->kernel_ver_str, "%d", osreldate); 36359b8d05b8SZbigniew Bodek host_info->os_dist = 0; 36369b8d05b8SZbigniew Bodek strncpy(host_info->os_dist_str, osrelease, 36379b8d05b8SZbigniew Bodek sizeof(host_info->os_dist_str) - 1); 36389b8d05b8SZbigniew Bodek 36399b8d05b8SZbigniew Bodek host_info->driver_version = 36409b8d05b8SZbigniew Bodek (DRV_MODULE_VER_MAJOR) | 36419b8d05b8SZbigniew Bodek (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 36429b8d05b8SZbigniew Bodek (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 36438ece6b25SMarcin Wojtas host_info->num_cpus = mp_ncpus; 36449b8d05b8SZbigniew Bodek 36459b8d05b8SZbigniew Bodek rc = ena_com_set_host_attributes(ena_dev); 36463f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 3647a195fab0SMarcin Wojtas if (rc == EOPNOTSUPP) 36489b8d05b8SZbigniew Bodek ena_trace(ENA_WARNING, "Cannot set host attributes\n"); 36499b8d05b8SZbigniew Bodek else 36509b8d05b8SZbigniew Bodek ena_trace(ENA_ALERT, "Cannot set host attributes\n"); 36519b8d05b8SZbigniew Bodek 36529b8d05b8SZbigniew Bodek goto err; 36539b8d05b8SZbigniew Bodek } 36549b8d05b8SZbigniew Bodek 36559b8d05b8SZbigniew Bodek return; 36569b8d05b8SZbigniew Bodek 36579b8d05b8SZbigniew Bodek err: 36589b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 36599b8d05b8SZbigniew Bodek } 36609b8d05b8SZbigniew Bodek 36619b8d05b8SZbigniew Bodek static int 36629b8d05b8SZbigniew Bodek ena_device_init(struct ena_adapter *adapter, device_t pdev, 36639b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 36649b8d05b8SZbigniew Bodek { 36659b8d05b8SZbigniew Bodek struct ena_com_dev* ena_dev = adapter->ena_dev; 36669b8d05b8SZbigniew Bodek bool readless_supported; 36679b8d05b8SZbigniew Bodek uint32_t aenq_groups; 36689b8d05b8SZbigniew Bodek int dma_width; 36699b8d05b8SZbigniew Bodek int rc; 36709b8d05b8SZbigniew Bodek 36719b8d05b8SZbigniew Bodek rc = ena_com_mmio_reg_read_request_init(ena_dev); 36723f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36739b8d05b8SZbigniew Bodek device_printf(pdev, "failed to init mmio read less\n"); 36740bdffe59SMarcin Wojtas return (rc); 36759b8d05b8SZbigniew Bodek } 36769b8d05b8SZbigniew Bodek 36779b8d05b8SZbigniew Bodek /* 36789b8d05b8SZbigniew Bodek * The PCIe configuration space revision id indicate if mmio reg 36799b8d05b8SZbigniew Bodek * read is disabled 36809b8d05b8SZbigniew Bodek */ 36819b8d05b8SZbigniew Bodek readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); 36829b8d05b8SZbigniew Bodek ena_com_set_mmio_read_mode(ena_dev, readless_supported); 36839b8d05b8SZbigniew Bodek 3684a195fab0SMarcin Wojtas rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 36853f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36869b8d05b8SZbigniew Bodek device_printf(pdev, "Can not reset device\n"); 36879b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36889b8d05b8SZbigniew Bodek } 36899b8d05b8SZbigniew Bodek 36909b8d05b8SZbigniew Bodek rc = ena_com_validate_version(ena_dev); 36913f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36929b8d05b8SZbigniew Bodek device_printf(pdev, "device version is too low\n"); 36939b8d05b8SZbigniew Bodek goto err_mmio_read_less; 36949b8d05b8SZbigniew Bodek } 36959b8d05b8SZbigniew Bodek 36969b8d05b8SZbigniew Bodek dma_width = ena_com_get_dma_width(ena_dev); 36973f9ed7abSMarcin Wojtas if (unlikely(dma_width < 0)) { 36989b8d05b8SZbigniew Bodek device_printf(pdev, "Invalid dma width value %d", dma_width); 36999b8d05b8SZbigniew Bodek rc = dma_width; 37009b8d05b8SZbigniew Bodek goto err_mmio_read_less; 37019b8d05b8SZbigniew Bodek } 37029b8d05b8SZbigniew Bodek adapter->dma_width = dma_width; 37039b8d05b8SZbigniew Bodek 37049b8d05b8SZbigniew Bodek /* ENA admin level init */ 370567ec48bbSMarcin Wojtas rc = ena_com_admin_init(ena_dev, &aenq_handlers); 37063f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37079b8d05b8SZbigniew Bodek device_printf(pdev, 37089b8d05b8SZbigniew Bodek "Can not initialize ena admin queue with device\n"); 37099b8d05b8SZbigniew Bodek goto err_mmio_read_less; 37109b8d05b8SZbigniew Bodek } 37119b8d05b8SZbigniew Bodek 37129b8d05b8SZbigniew Bodek /* 37139b8d05b8SZbigniew Bodek * To enable the msix interrupts the driver needs to know the number 37149b8d05b8SZbigniew Bodek * of queues. So the driver uses polling mode to retrieve this 37159b8d05b8SZbigniew Bodek * information 37169b8d05b8SZbigniew Bodek */ 37179b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, true); 37189b8d05b8SZbigniew Bodek 371946021271SMarcin Wojtas ena_config_host_info(ena_dev, pdev); 37209b8d05b8SZbigniew Bodek 37219b8d05b8SZbigniew Bodek /* Get Device Attributes */ 37229b8d05b8SZbigniew Bodek rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 37233f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37249b8d05b8SZbigniew Bodek device_printf(pdev, 37259b8d05b8SZbigniew Bodek "Cannot get attribute for ena device rc: %d\n", rc); 37269b8d05b8SZbigniew Bodek goto err_admin_init; 37279b8d05b8SZbigniew Bodek } 37289b8d05b8SZbigniew Bodek 3729e6de9a83SMarcin Wojtas aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 3730e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_FATAL_ERROR) | 3731e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_WARNING) | 373240621d71SMarcin Wojtas BIT(ENA_ADMIN_NOTIFICATION) | 3733e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_KEEP_ALIVE); 37349b8d05b8SZbigniew Bodek 37359b8d05b8SZbigniew Bodek aenq_groups &= get_feat_ctx->aenq.supported_groups; 37369b8d05b8SZbigniew Bodek rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 37373f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37389b8d05b8SZbigniew Bodek device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc); 37399b8d05b8SZbigniew Bodek goto err_admin_init; 37409b8d05b8SZbigniew Bodek } 37419b8d05b8SZbigniew Bodek 37429b8d05b8SZbigniew Bodek *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 37439b8d05b8SZbigniew Bodek 37440bdffe59SMarcin Wojtas return (0); 37459b8d05b8SZbigniew Bodek 37469b8d05b8SZbigniew Bodek err_admin_init: 37479b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 37489b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 37499b8d05b8SZbigniew Bodek err_mmio_read_less: 37509b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 37519b8d05b8SZbigniew Bodek 37520bdffe59SMarcin Wojtas return (rc); 37539b8d05b8SZbigniew Bodek } 37549b8d05b8SZbigniew Bodek 37559b8d05b8SZbigniew Bodek static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, 37569b8d05b8SZbigniew Bodek int io_vectors) 37579b8d05b8SZbigniew Bodek { 37589b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 37599b8d05b8SZbigniew Bodek int rc; 37609b8d05b8SZbigniew Bodek 37619b8d05b8SZbigniew Bodek rc = ena_enable_msix(adapter); 37623f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37639b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Error with MSI-X enablement\n"); 37640bdffe59SMarcin Wojtas return (rc); 37659b8d05b8SZbigniew Bodek } 37669b8d05b8SZbigniew Bodek 37679b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(adapter); 37689b8d05b8SZbigniew Bodek 37699b8d05b8SZbigniew Bodek rc = ena_request_mgmnt_irq(adapter); 37703f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37719b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n"); 37729b8d05b8SZbigniew Bodek goto err_disable_msix; 37739b8d05b8SZbigniew Bodek } 37749b8d05b8SZbigniew Bodek 37759b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, false); 37769b8d05b8SZbigniew Bodek 37779b8d05b8SZbigniew Bodek ena_com_admin_aenq_enable(ena_dev); 37789b8d05b8SZbigniew Bodek 37790bdffe59SMarcin Wojtas return (0); 37809b8d05b8SZbigniew Bodek 37819b8d05b8SZbigniew Bodek err_disable_msix: 37829b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 37839b8d05b8SZbigniew Bodek 37840bdffe59SMarcin Wojtas return (rc); 37859b8d05b8SZbigniew Bodek } 37869b8d05b8SZbigniew Bodek 37879b8d05b8SZbigniew Bodek /* Function called on ENA_ADMIN_KEEP_ALIVE event */ 37889b8d05b8SZbigniew Bodek static void ena_keep_alive_wd(void *adapter_data, 37899b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 37909b8d05b8SZbigniew Bodek { 37919b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 379230217e2dSMarcin Wojtas struct ena_admin_aenq_keep_alive_desc *desc; 37939b8d05b8SZbigniew Bodek sbintime_t stime; 379430217e2dSMarcin Wojtas uint64_t rx_drops; 379530217e2dSMarcin Wojtas 379630217e2dSMarcin Wojtas desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 379730217e2dSMarcin Wojtas 379830217e2dSMarcin Wojtas rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 379930217e2dSMarcin Wojtas counter_u64_zero(adapter->hw_stats.rx_drops); 380030217e2dSMarcin Wojtas counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); 38019b8d05b8SZbigniew Bodek 38029b8d05b8SZbigniew Bodek stime = getsbinuptime(); 38039b8d05b8SZbigniew Bodek atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 38049b8d05b8SZbigniew Bodek } 38059b8d05b8SZbigniew Bodek 38069b8d05b8SZbigniew Bodek /* Check for keep alive expiration */ 38079b8d05b8SZbigniew Bodek static void check_for_missing_keep_alive(struct ena_adapter *adapter) 38089b8d05b8SZbigniew Bodek { 38099b8d05b8SZbigniew Bodek sbintime_t timestamp, time; 38109b8d05b8SZbigniew Bodek 38119b8d05b8SZbigniew Bodek if (adapter->wd_active == 0) 38129b8d05b8SZbigniew Bodek return; 38139b8d05b8SZbigniew Bodek 381440621d71SMarcin Wojtas if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 38159b8d05b8SZbigniew Bodek return; 38169b8d05b8SZbigniew Bodek 38179b8d05b8SZbigniew Bodek timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); 38189b8d05b8SZbigniew Bodek time = getsbinuptime() - timestamp; 38199b8d05b8SZbigniew Bodek if (unlikely(time > adapter->keep_alive_timeout)) { 38209b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 38219b8d05b8SZbigniew Bodek "Keep alive watchdog timeout.\n"); 38229b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.wd_expired, 1); 3823858659f7SMarcin Wojtas if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 3824a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 3825fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 38269b8d05b8SZbigniew Bodek } 38279b8d05b8SZbigniew Bodek } 3828858659f7SMarcin Wojtas } 38299b8d05b8SZbigniew Bodek 38309b8d05b8SZbigniew Bodek /* Check if admin queue is enabled */ 38319b8d05b8SZbigniew Bodek static void check_for_admin_com_state(struct ena_adapter *adapter) 38329b8d05b8SZbigniew Bodek { 38330bdffe59SMarcin Wojtas if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == 38340bdffe59SMarcin Wojtas false)) { 38359b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 38369b8d05b8SZbigniew Bodek "ENA admin queue is not in running state!\n"); 38379b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 3838858659f7SMarcin Wojtas if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 3839a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 3840fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 38419b8d05b8SZbigniew Bodek } 38429b8d05b8SZbigniew Bodek } 3843858659f7SMarcin Wojtas } 38449b8d05b8SZbigniew Bodek 384574dba3adSMarcin Wojtas static int 3846d12f7bfcSMarcin Wojtas check_for_rx_interrupt_queue(struct ena_adapter *adapter, 3847d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring) 3848d12f7bfcSMarcin Wojtas { 3849d12f7bfcSMarcin Wojtas if (likely(rx_ring->first_interrupt)) 3850d12f7bfcSMarcin Wojtas return (0); 3851d12f7bfcSMarcin Wojtas 3852d12f7bfcSMarcin Wojtas if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 3853d12f7bfcSMarcin Wojtas return (0); 3854d12f7bfcSMarcin Wojtas 3855d12f7bfcSMarcin Wojtas rx_ring->no_interrupt_event_cnt++; 3856d12f7bfcSMarcin Wojtas 3857d12f7bfcSMarcin Wojtas if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { 3858d12f7bfcSMarcin Wojtas device_printf(adapter->pdev, "Potential MSIX issue on Rx side " 3859d12f7bfcSMarcin Wojtas "Queue = %d. Reset the device\n", rx_ring->qid); 3860858659f7SMarcin Wojtas if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 3861d12f7bfcSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; 3862fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 3863858659f7SMarcin Wojtas } 3864d12f7bfcSMarcin Wojtas return (EIO); 3865d12f7bfcSMarcin Wojtas } 3866d12f7bfcSMarcin Wojtas 3867d12f7bfcSMarcin Wojtas return (0); 3868d12f7bfcSMarcin Wojtas } 3869d12f7bfcSMarcin Wojtas 3870d12f7bfcSMarcin Wojtas static int 3871d12f7bfcSMarcin Wojtas check_missing_comp_in_tx_queue(struct ena_adapter *adapter, 387274dba3adSMarcin Wojtas struct ena_ring *tx_ring) 387374dba3adSMarcin Wojtas { 387474dba3adSMarcin Wojtas struct bintime curtime, time; 387574dba3adSMarcin Wojtas struct ena_tx_buffer *tx_buf; 3876d12f7bfcSMarcin Wojtas sbintime_t time_offset; 387774dba3adSMarcin Wojtas uint32_t missed_tx = 0; 3878d12f7bfcSMarcin Wojtas int i, rc = 0; 387974dba3adSMarcin Wojtas 388074dba3adSMarcin Wojtas getbinuptime(&curtime); 388174dba3adSMarcin Wojtas 388274dba3adSMarcin Wojtas for (i = 0; i < tx_ring->ring_size; i++) { 388374dba3adSMarcin Wojtas tx_buf = &tx_ring->tx_buffer_info[i]; 388474dba3adSMarcin Wojtas 38850bdffe59SMarcin Wojtas if (bintime_isset(&tx_buf->timestamp) == 0) 388674dba3adSMarcin Wojtas continue; 388774dba3adSMarcin Wojtas 388874dba3adSMarcin Wojtas time = curtime; 388974dba3adSMarcin Wojtas bintime_sub(&time, &tx_buf->timestamp); 3890d12f7bfcSMarcin Wojtas time_offset = bttosbt(time); 3891d12f7bfcSMarcin Wojtas 3892d12f7bfcSMarcin Wojtas if (unlikely(!tx_ring->first_interrupt && 3893d12f7bfcSMarcin Wojtas time_offset > 2 * adapter->missing_tx_timeout)) { 3894d12f7bfcSMarcin Wojtas /* 3895d12f7bfcSMarcin Wojtas * If after graceful period interrupt is still not 3896d12f7bfcSMarcin Wojtas * received, we schedule a reset. 3897d12f7bfcSMarcin Wojtas */ 3898d12f7bfcSMarcin Wojtas device_printf(adapter->pdev, 3899d12f7bfcSMarcin Wojtas "Potential MSIX issue on Tx side Queue = %d. " 3900d12f7bfcSMarcin Wojtas "Reset the device\n", tx_ring->qid); 3901858659f7SMarcin Wojtas if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, 3902858659f7SMarcin Wojtas adapter))) { 3903858659f7SMarcin Wojtas adapter->reset_reason = 3904858659f7SMarcin Wojtas ENA_REGS_RESET_MISS_INTERRUPT; 3905858659f7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, 3906858659f7SMarcin Wojtas adapter); 3907858659f7SMarcin Wojtas } 3908d12f7bfcSMarcin Wojtas return (EIO); 3909d12f7bfcSMarcin Wojtas } 391074dba3adSMarcin Wojtas 391174dba3adSMarcin Wojtas /* Check again if packet is still waiting */ 3912d12f7bfcSMarcin Wojtas if (unlikely(time_offset > adapter->missing_tx_timeout)) { 391374dba3adSMarcin Wojtas 391474dba3adSMarcin Wojtas if (!tx_buf->print_once) 391574dba3adSMarcin Wojtas ena_trace(ENA_WARNING, "Found a Tx that wasn't " 391674dba3adSMarcin Wojtas "completed on time, qid %d, index %d.\n", 391774dba3adSMarcin Wojtas tx_ring->qid, i); 391874dba3adSMarcin Wojtas 391974dba3adSMarcin Wojtas tx_buf->print_once = true; 392074dba3adSMarcin Wojtas missed_tx++; 3921d12f7bfcSMarcin Wojtas } 3922d12f7bfcSMarcin Wojtas } 392374dba3adSMarcin Wojtas 3924d12f7bfcSMarcin Wojtas if (unlikely(missed_tx > adapter->missing_tx_threshold)) { 392574dba3adSMarcin Wojtas device_printf(adapter->pdev, 3926d12f7bfcSMarcin Wojtas "The number of lost tx completion is above the threshold " 3927d12f7bfcSMarcin Wojtas "(%d > %d). Reset the device\n", 39284e8acd84SMarcin Wojtas missed_tx, adapter->missing_tx_threshold); 3929858659f7SMarcin Wojtas if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 3930d12f7bfcSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 3931fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 3932858659f7SMarcin Wojtas } 3933d12f7bfcSMarcin Wojtas rc = EIO; 393474dba3adSMarcin Wojtas } 393574dba3adSMarcin Wojtas 3936d12f7bfcSMarcin Wojtas counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx); 3937d12f7bfcSMarcin Wojtas 3938d12f7bfcSMarcin Wojtas return (rc); 393974dba3adSMarcin Wojtas } 394074dba3adSMarcin Wojtas 39419b8d05b8SZbigniew Bodek /* 39429b8d05b8SZbigniew Bodek * Check for TX which were not completed on time. 39439b8d05b8SZbigniew Bodek * Timeout is defined by "missing_tx_timeout". 39449b8d05b8SZbigniew Bodek * Reset will be performed if number of incompleted 39459b8d05b8SZbigniew Bodek * transactions exceeds "missing_tx_threshold". 39469b8d05b8SZbigniew Bodek */ 39470bdffe59SMarcin Wojtas static void 3948d12f7bfcSMarcin Wojtas check_for_missing_completions(struct ena_adapter *adapter) 39499b8d05b8SZbigniew Bodek { 39509b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 3951d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring; 395274dba3adSMarcin Wojtas int i, budget, rc; 39539b8d05b8SZbigniew Bodek 39549b8d05b8SZbigniew Bodek /* Make sure the driver doesn't turn the device in other process */ 39559b8d05b8SZbigniew Bodek rmb(); 39569b8d05b8SZbigniew Bodek 3957fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 39589b8d05b8SZbigniew Bodek return; 39599b8d05b8SZbigniew Bodek 3960fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 39619b8d05b8SZbigniew Bodek return; 39629b8d05b8SZbigniew Bodek 396340621d71SMarcin Wojtas if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT) 39649b8d05b8SZbigniew Bodek return; 39659b8d05b8SZbigniew Bodek 39669b8d05b8SZbigniew Bodek budget = adapter->missing_tx_max_queues; 39679b8d05b8SZbigniew Bodek 39689b8d05b8SZbigniew Bodek for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) { 39699b8d05b8SZbigniew Bodek tx_ring = &adapter->tx_ring[i]; 3970d12f7bfcSMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 39719b8d05b8SZbigniew Bodek 3972d12f7bfcSMarcin Wojtas rc = check_missing_comp_in_tx_queue(adapter, tx_ring); 3973d12f7bfcSMarcin Wojtas if (unlikely(rc != 0)) 3974d12f7bfcSMarcin Wojtas return; 3975d12f7bfcSMarcin Wojtas 3976d12f7bfcSMarcin Wojtas rc = check_for_rx_interrupt_queue(adapter, rx_ring); 39770bdffe59SMarcin Wojtas if (unlikely(rc != 0)) 39789b8d05b8SZbigniew Bodek return; 39799b8d05b8SZbigniew Bodek 39809b8d05b8SZbigniew Bodek budget--; 3981cd5d5804SMarcin Wojtas if (budget == 0) { 39829b8d05b8SZbigniew Bodek i++; 39839b8d05b8SZbigniew Bodek break; 39849b8d05b8SZbigniew Bodek } 39859b8d05b8SZbigniew Bodek } 39869b8d05b8SZbigniew Bodek 39879b8d05b8SZbigniew Bodek adapter->next_monitored_tx_qid = i % adapter->num_queues; 39889b8d05b8SZbigniew Bodek } 39899b8d05b8SZbigniew Bodek 39905cb9db07SMarcin Wojtas /* trigger rx cleanup after 2 consecutive detections */ 3991efe6ab18SMarcin Wojtas #define EMPTY_RX_REFILL 2 3992efe6ab18SMarcin Wojtas /* For the rare case where the device runs out of Rx descriptors and the 3993efe6ab18SMarcin Wojtas * msix handler failed to refill new Rx descriptors (due to a lack of memory 3994efe6ab18SMarcin Wojtas * for example). 3995efe6ab18SMarcin Wojtas * This case will lead to a deadlock: 3996efe6ab18SMarcin Wojtas * The device won't send interrupts since all the new Rx packets will be dropped 3997efe6ab18SMarcin Wojtas * The msix handler won't allocate new Rx descriptors so the device won't be 3998efe6ab18SMarcin Wojtas * able to send new packets. 3999efe6ab18SMarcin Wojtas * 4000efe6ab18SMarcin Wojtas * When such a situation is detected - execute rx cleanup task in another thread 4001efe6ab18SMarcin Wojtas */ 4002efe6ab18SMarcin Wojtas static void 4003efe6ab18SMarcin Wojtas check_for_empty_rx_ring(struct ena_adapter *adapter) 4004efe6ab18SMarcin Wojtas { 4005efe6ab18SMarcin Wojtas struct ena_ring *rx_ring; 4006efe6ab18SMarcin Wojtas int i, refill_required; 4007efe6ab18SMarcin Wojtas 4008fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 4009efe6ab18SMarcin Wojtas return; 4010efe6ab18SMarcin Wojtas 4011fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 4012efe6ab18SMarcin Wojtas return; 4013efe6ab18SMarcin Wojtas 4014efe6ab18SMarcin Wojtas for (i = 0; i < adapter->num_queues; i++) { 4015efe6ab18SMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 4016efe6ab18SMarcin Wojtas 4017efe6ab18SMarcin Wojtas refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); 4018efe6ab18SMarcin Wojtas if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 4019efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue++; 4020efe6ab18SMarcin Wojtas 4021efe6ab18SMarcin Wojtas if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 4022efe6ab18SMarcin Wojtas counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 4023efe6ab18SMarcin Wojtas 1); 4024efe6ab18SMarcin Wojtas 4025efe6ab18SMarcin Wojtas device_printf(adapter->pdev, 4026efe6ab18SMarcin Wojtas "trigger refill for ring %d\n", i); 4027efe6ab18SMarcin Wojtas 40285cb9db07SMarcin Wojtas taskqueue_enqueue(rx_ring->que->cleanup_tq, 40295cb9db07SMarcin Wojtas &rx_ring->que->cleanup_task); 4030efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 4031efe6ab18SMarcin Wojtas } 4032efe6ab18SMarcin Wojtas } else { 4033efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 4034efe6ab18SMarcin Wojtas } 4035efe6ab18SMarcin Wojtas } 4036efe6ab18SMarcin Wojtas } 40379b8d05b8SZbigniew Bodek 403840621d71SMarcin Wojtas static void ena_update_hints(struct ena_adapter *adapter, 403940621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints) 404040621d71SMarcin Wojtas { 404140621d71SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 404240621d71SMarcin Wojtas 404340621d71SMarcin Wojtas if (hints->admin_completion_tx_timeout) 404440621d71SMarcin Wojtas ena_dev->admin_queue.completion_timeout = 404540621d71SMarcin Wojtas hints->admin_completion_tx_timeout * 1000; 404640621d71SMarcin Wojtas 404740621d71SMarcin Wojtas if (hints->mmio_read_timeout) 404840621d71SMarcin Wojtas /* convert to usec */ 404940621d71SMarcin Wojtas ena_dev->mmio_read.reg_read_to = 405040621d71SMarcin Wojtas hints->mmio_read_timeout * 1000; 405140621d71SMarcin Wojtas 405240621d71SMarcin Wojtas if (hints->missed_tx_completion_count_threshold_to_reset) 405340621d71SMarcin Wojtas adapter->missing_tx_threshold = 405440621d71SMarcin Wojtas hints->missed_tx_completion_count_threshold_to_reset; 405540621d71SMarcin Wojtas 405640621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout) { 405740621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout == 405840621d71SMarcin Wojtas ENA_HW_HINTS_NO_TIMEOUT) 405940621d71SMarcin Wojtas adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 406040621d71SMarcin Wojtas else 406140621d71SMarcin Wojtas adapter->missing_tx_timeout = 406240621d71SMarcin Wojtas SBT_1MS * hints->missing_tx_completion_timeout; 406340621d71SMarcin Wojtas } 406440621d71SMarcin Wojtas 406540621d71SMarcin Wojtas if (hints->driver_watchdog_timeout) { 406640621d71SMarcin Wojtas if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 406740621d71SMarcin Wojtas adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 406840621d71SMarcin Wojtas else 406940621d71SMarcin Wojtas adapter->keep_alive_timeout = 407040621d71SMarcin Wojtas SBT_1MS * hints->driver_watchdog_timeout; 407140621d71SMarcin Wojtas } 407240621d71SMarcin Wojtas } 407340621d71SMarcin Wojtas 40749b8d05b8SZbigniew Bodek static void 40759b8d05b8SZbigniew Bodek ena_timer_service(void *data) 40769b8d05b8SZbigniew Bodek { 40779b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)data; 40789b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info = 40799b8d05b8SZbigniew Bodek adapter->ena_dev->host_attr.host_info; 40809b8d05b8SZbigniew Bodek 40819b8d05b8SZbigniew Bodek check_for_missing_keep_alive(adapter); 40829b8d05b8SZbigniew Bodek 40839b8d05b8SZbigniew Bodek check_for_admin_com_state(adapter); 40849b8d05b8SZbigniew Bodek 4085d12f7bfcSMarcin Wojtas check_for_missing_completions(adapter); 40869b8d05b8SZbigniew Bodek 4087efe6ab18SMarcin Wojtas check_for_empty_rx_ring(adapter); 4088efe6ab18SMarcin Wojtas 40890bdffe59SMarcin Wojtas if (host_info != NULL) 40909b8d05b8SZbigniew Bodek ena_update_host_info(host_info, adapter->ifp); 40919b8d05b8SZbigniew Bodek 4092fd43fd2aSMarcin Wojtas if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 40939b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "Trigger reset is on\n"); 40949b8d05b8SZbigniew Bodek taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task); 40959b8d05b8SZbigniew Bodek return; 40969b8d05b8SZbigniew Bodek } 40979b8d05b8SZbigniew Bodek 40989b8d05b8SZbigniew Bodek /* 40999b8d05b8SZbigniew Bodek * Schedule another timeout one second from now. 41009b8d05b8SZbigniew Bodek */ 41019b8d05b8SZbigniew Bodek callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0); 41029b8d05b8SZbigniew Bodek } 41039b8d05b8SZbigniew Bodek 41049b8d05b8SZbigniew Bodek static void 410532f63fa7SMarcin Wojtas ena_destroy_device(struct ena_adapter *adapter, bool graceful) 41069b8d05b8SZbigniew Bodek { 410732f63fa7SMarcin Wojtas if_t ifp = adapter->ifp; 41089b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 41099b8d05b8SZbigniew Bodek bool dev_up; 411032f63fa7SMarcin Wojtas 411132f63fa7SMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) 411232f63fa7SMarcin Wojtas return; 411332f63fa7SMarcin Wojtas 411432f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_DOWN); 411532f63fa7SMarcin Wojtas 411632f63fa7SMarcin Wojtas callout_drain(&adapter->timer_service); 411732f63fa7SMarcin Wojtas 411832f63fa7SMarcin Wojtas dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 411932f63fa7SMarcin Wojtas if (dev_up) 412032f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 412132f63fa7SMarcin Wojtas else 412232f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 412332f63fa7SMarcin Wojtas 412432f63fa7SMarcin Wojtas if (!graceful) 412532f63fa7SMarcin Wojtas ena_com_set_admin_running_state(ena_dev, false); 412632f63fa7SMarcin Wojtas 412732f63fa7SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 412832f63fa7SMarcin Wojtas ena_down(adapter); 412932f63fa7SMarcin Wojtas 413032f63fa7SMarcin Wojtas /* 413132f63fa7SMarcin Wojtas * Stop the device from sending AENQ events (if the device was up, and 413232f63fa7SMarcin Wojtas * the trigger reset was on, ena_down already performs device reset) 413332f63fa7SMarcin Wojtas */ 413432f63fa7SMarcin Wojtas if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up)) 413532f63fa7SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 413632f63fa7SMarcin Wojtas 413732f63fa7SMarcin Wojtas ena_free_mgmnt_irq(adapter); 413832f63fa7SMarcin Wojtas 413932f63fa7SMarcin Wojtas ena_disable_msix(adapter); 414032f63fa7SMarcin Wojtas 414132f63fa7SMarcin Wojtas ena_com_abort_admin_commands(ena_dev); 414232f63fa7SMarcin Wojtas 414332f63fa7SMarcin Wojtas ena_com_wait_for_abort_completion(ena_dev); 414432f63fa7SMarcin Wojtas 414532f63fa7SMarcin Wojtas ena_com_admin_destroy(ena_dev); 414632f63fa7SMarcin Wojtas 414732f63fa7SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 414832f63fa7SMarcin Wojtas 414932f63fa7SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 415032f63fa7SMarcin Wojtas 415132f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 415232f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 415332f63fa7SMarcin Wojtas } 415432f63fa7SMarcin Wojtas 415532f63fa7SMarcin Wojtas static int 415632f63fa7SMarcin Wojtas ena_device_validate_params(struct ena_adapter *adapter, 415732f63fa7SMarcin Wojtas struct ena_com_dev_get_features_ctx *get_feat_ctx) 415832f63fa7SMarcin Wojtas { 415932f63fa7SMarcin Wojtas 416032f63fa7SMarcin Wojtas if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, 416132f63fa7SMarcin Wojtas ETHER_ADDR_LEN) != 0) { 416232f63fa7SMarcin Wojtas device_printf(adapter->pdev, 416332f63fa7SMarcin Wojtas "Error, mac address are different\n"); 416432f63fa7SMarcin Wojtas return (EINVAL); 416532f63fa7SMarcin Wojtas } 416632f63fa7SMarcin Wojtas 416732f63fa7SMarcin Wojtas if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { 416832f63fa7SMarcin Wojtas device_printf(adapter->pdev, 416932f63fa7SMarcin Wojtas "Error, device max mtu is smaller than ifp MTU\n"); 417032f63fa7SMarcin Wojtas return (EINVAL); 417132f63fa7SMarcin Wojtas } 417232f63fa7SMarcin Wojtas 417332f63fa7SMarcin Wojtas return 0; 417432f63fa7SMarcin Wojtas } 417532f63fa7SMarcin Wojtas 417632f63fa7SMarcin Wojtas static int 417732f63fa7SMarcin Wojtas ena_restore_device(struct ena_adapter *adapter) 417832f63fa7SMarcin Wojtas { 417932f63fa7SMarcin Wojtas struct ena_com_dev_get_features_ctx get_feat_ctx; 418032f63fa7SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 418132f63fa7SMarcin Wojtas if_t ifp = adapter->ifp; 418232f63fa7SMarcin Wojtas device_t dev = adapter->pdev; 418332f63fa7SMarcin Wojtas int wd_active; 41849b8d05b8SZbigniew Bodek int rc; 41859b8d05b8SZbigniew Bodek 418632f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 418732f63fa7SMarcin Wojtas 418832f63fa7SMarcin Wojtas rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active); 418932f63fa7SMarcin Wojtas if (rc != 0) { 419032f63fa7SMarcin Wojtas device_printf(dev, "Cannot initialize device\n"); 419132f63fa7SMarcin Wojtas goto err; 419232f63fa7SMarcin Wojtas } 419332f63fa7SMarcin Wojtas /* 419432f63fa7SMarcin Wojtas * Only enable WD if it was enabled before reset, so it won't override 419532f63fa7SMarcin Wojtas * value set by the user by the sysctl. 419632f63fa7SMarcin Wojtas */ 419732f63fa7SMarcin Wojtas if (adapter->wd_active != 0) 419832f63fa7SMarcin Wojtas adapter->wd_active = wd_active; 419932f63fa7SMarcin Wojtas 420032f63fa7SMarcin Wojtas rc = ena_device_validate_params(adapter, &get_feat_ctx); 420132f63fa7SMarcin Wojtas if (rc != 0) { 420232f63fa7SMarcin Wojtas device_printf(dev, "Validation of device parameters failed\n"); 420332f63fa7SMarcin Wojtas goto err_device_destroy; 420432f63fa7SMarcin Wojtas } 420532f63fa7SMarcin Wojtas 420632f63fa7SMarcin Wojtas rc = ena_handle_updated_queues(adapter, &get_feat_ctx); 420732f63fa7SMarcin Wojtas if (rc != 0) 420832f63fa7SMarcin Wojtas goto err_device_destroy; 420932f63fa7SMarcin Wojtas 421032f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 421132f63fa7SMarcin Wojtas /* Make sure we don't have a race with AENQ Links state handler */ 421232f63fa7SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 421332f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_UP); 421432f63fa7SMarcin Wojtas 421532f63fa7SMarcin Wojtas rc = ena_enable_msix_and_set_admin_interrupts(adapter, 421632f63fa7SMarcin Wojtas adapter->num_queues); 421732f63fa7SMarcin Wojtas if (rc != 0) { 421832f63fa7SMarcin Wojtas device_printf(dev, "Enable MSI-X failed\n"); 421932f63fa7SMarcin Wojtas goto err_device_destroy; 422032f63fa7SMarcin Wojtas } 422132f63fa7SMarcin Wojtas 422232f63fa7SMarcin Wojtas /* If the interface was up before the reset bring it up */ 422332f63fa7SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { 422432f63fa7SMarcin Wojtas rc = ena_up(adapter); 422532f63fa7SMarcin Wojtas if (rc != 0) { 422632f63fa7SMarcin Wojtas device_printf(dev, "Failed to create I/O queues\n"); 422732f63fa7SMarcin Wojtas goto err_disable_msix; 422832f63fa7SMarcin Wojtas } 422932f63fa7SMarcin Wojtas } 423032f63fa7SMarcin Wojtas 4231*24392281SMarcin Wojtas /* Indicate that device is running again and ready to work */ 423232f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 4233*24392281SMarcin Wojtas 4234*24392281SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { 4235*24392281SMarcin Wojtas /* 4236*24392281SMarcin Wojtas * As the AENQ handlers weren't executed during reset because 4237*24392281SMarcin Wojtas * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the 4238*24392281SMarcin Wojtas * timestamp must be updated again That will prevent next reset 4239*24392281SMarcin Wojtas * caused by missing keep alive. 4240*24392281SMarcin Wojtas */ 4241*24392281SMarcin Wojtas adapter->keep_alive_timestamp = getsbinuptime(); 424232f63fa7SMarcin Wojtas callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 424332f63fa7SMarcin Wojtas ena_timer_service, (void *)adapter, 0); 4244*24392281SMarcin Wojtas } 424532f63fa7SMarcin Wojtas 424632f63fa7SMarcin Wojtas device_printf(dev, 424732f63fa7SMarcin Wojtas "Device reset completed successfully, Driver info: %s\n", ena_version); 424832f63fa7SMarcin Wojtas 424932f63fa7SMarcin Wojtas return (rc); 425032f63fa7SMarcin Wojtas 425132f63fa7SMarcin Wojtas err_disable_msix: 425232f63fa7SMarcin Wojtas ena_free_mgmnt_irq(adapter); 425332f63fa7SMarcin Wojtas ena_disable_msix(adapter); 425432f63fa7SMarcin Wojtas err_device_destroy: 425532f63fa7SMarcin Wojtas ena_com_abort_admin_commands(ena_dev); 425632f63fa7SMarcin Wojtas ena_com_wait_for_abort_completion(ena_dev); 425732f63fa7SMarcin Wojtas ena_com_admin_destroy(ena_dev); 425832f63fa7SMarcin Wojtas ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); 425932f63fa7SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 426032f63fa7SMarcin Wojtas err: 426132f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 426232f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 426332f63fa7SMarcin Wojtas device_printf(dev, "Reset attempt failed. Can not reset the device\n"); 426432f63fa7SMarcin Wojtas 426532f63fa7SMarcin Wojtas return (rc); 426632f63fa7SMarcin Wojtas } 426732f63fa7SMarcin Wojtas 426832f63fa7SMarcin Wojtas static void 426932f63fa7SMarcin Wojtas ena_reset_task(void *arg, int pending) 427032f63fa7SMarcin Wojtas { 427132f63fa7SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)arg; 427232f63fa7SMarcin Wojtas 4273fd43fd2aSMarcin Wojtas if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 42749b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 42759b8d05b8SZbigniew Bodek "device reset scheduled but trigger_reset is off\n"); 42769b8d05b8SZbigniew Bodek return; 42779b8d05b8SZbigniew Bodek } 42789b8d05b8SZbigniew Bodek 42799b8d05b8SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 428032f63fa7SMarcin Wojtas ena_destroy_device(adapter, false); 428132f63fa7SMarcin Wojtas ena_restore_device(adapter); 42829b8d05b8SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 42839b8d05b8SZbigniew Bodek } 42849b8d05b8SZbigniew Bodek 42859b8d05b8SZbigniew Bodek /** 42869b8d05b8SZbigniew Bodek * ena_attach - Device Initialization Routine 42879b8d05b8SZbigniew Bodek * @pdev: device information struct 42889b8d05b8SZbigniew Bodek * 42899b8d05b8SZbigniew Bodek * Returns 0 on success, otherwise on failure. 42909b8d05b8SZbigniew Bodek * 42919b8d05b8SZbigniew Bodek * ena_attach initializes an adapter identified by a device structure. 42929b8d05b8SZbigniew Bodek * The OS initialization, configuring of the adapter private structure, 42939b8d05b8SZbigniew Bodek * and a hardware reset occur. 42949b8d05b8SZbigniew Bodek **/ 42959b8d05b8SZbigniew Bodek static int 42969b8d05b8SZbigniew Bodek ena_attach(device_t pdev) 42979b8d05b8SZbigniew Bodek { 42989b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx get_feat_ctx; 42994fa9e02dSMarcin Wojtas struct ena_llq_configurations llq_config; 43006064f289SMarcin Wojtas struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 43019b8d05b8SZbigniew Bodek static int version_printed; 43029b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 43039b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = NULL; 43044fa9e02dSMarcin Wojtas const char *queue_type_str; 43059b8d05b8SZbigniew Bodek int io_queue_num; 43064fa9e02dSMarcin Wojtas int rid, rc; 43074fa9e02dSMarcin Wojtas 43089b8d05b8SZbigniew Bodek adapter = device_get_softc(pdev); 43099b8d05b8SZbigniew Bodek adapter->pdev = pdev; 43109b8d05b8SZbigniew Bodek 43119b8d05b8SZbigniew Bodek mtx_init(&adapter->global_mtx, "ENA global mtx", NULL, MTX_DEF); 43129b8d05b8SZbigniew Bodek sx_init(&adapter->ioctl_sx, "ENA ioctl sx"); 43139b8d05b8SZbigniew Bodek 43149b8d05b8SZbigniew Bodek /* Set up the timer service */ 43159b8d05b8SZbigniew Bodek callout_init_mtx(&adapter->timer_service, &adapter->global_mtx, 0); 43169b8d05b8SZbigniew Bodek adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO; 43179b8d05b8SZbigniew Bodek adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO; 43189b8d05b8SZbigniew Bodek adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES; 43199b8d05b8SZbigniew Bodek adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD; 43209b8d05b8SZbigniew Bodek 43219b8d05b8SZbigniew Bodek if (version_printed++ == 0) 43229b8d05b8SZbigniew Bodek device_printf(pdev, "%s\n", ena_version); 43239b8d05b8SZbigniew Bodek 43249b8d05b8SZbigniew Bodek /* Allocate memory for ena_dev structure */ 4325cd5d5804SMarcin Wojtas ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, 4326cd5d5804SMarcin Wojtas M_WAITOK | M_ZERO); 43279b8d05b8SZbigniew Bodek 43289b8d05b8SZbigniew Bodek adapter->ena_dev = ena_dev; 43299b8d05b8SZbigniew Bodek ena_dev->dmadev = pdev; 43304fa9e02dSMarcin Wojtas 43314fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_REG_BAR); 43324fa9e02dSMarcin Wojtas adapter->memory = NULL; 43334fa9e02dSMarcin Wojtas adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 43344fa9e02dSMarcin Wojtas &rid, RF_ACTIVE); 43354fa9e02dSMarcin Wojtas if (unlikely(adapter->registers == NULL)) { 43364fa9e02dSMarcin Wojtas device_printf(pdev, 43374fa9e02dSMarcin Wojtas "unable to allocate bus resource: registers!\n"); 43384fa9e02dSMarcin Wojtas rc = ENOMEM; 43394fa9e02dSMarcin Wojtas goto err_dev_free; 43404fa9e02dSMarcin Wojtas } 43414fa9e02dSMarcin Wojtas 43429b8d05b8SZbigniew Bodek ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, 43439b8d05b8SZbigniew Bodek M_WAITOK | M_ZERO); 43449b8d05b8SZbigniew Bodek 43459b8d05b8SZbigniew Bodek /* Store register resources */ 43469b8d05b8SZbigniew Bodek ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = 43479b8d05b8SZbigniew Bodek rman_get_bustag(adapter->registers); 43489b8d05b8SZbigniew Bodek ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = 43499b8d05b8SZbigniew Bodek rman_get_bushandle(adapter->registers); 43509b8d05b8SZbigniew Bodek 43513f9ed7abSMarcin Wojtas if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) { 43529b8d05b8SZbigniew Bodek device_printf(pdev, "failed to pmap registers bar\n"); 43539b8d05b8SZbigniew Bodek rc = ENXIO; 4354cd5d5804SMarcin Wojtas goto err_bus_free; 43559b8d05b8SZbigniew Bodek } 43569b8d05b8SZbigniew Bodek 43579b8d05b8SZbigniew Bodek ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 43589b8d05b8SZbigniew Bodek 4359fd43fd2aSMarcin Wojtas /* Initially clear all the flags */ 4360fd43fd2aSMarcin Wojtas ENA_FLAG_ZERO(adapter); 4361fd43fd2aSMarcin Wojtas 43629b8d05b8SZbigniew Bodek /* Device initialization */ 43639b8d05b8SZbigniew Bodek rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); 43643f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 43659b8d05b8SZbigniew Bodek device_printf(pdev, "ENA device init failed! (err: %d)\n", rc); 43669b8d05b8SZbigniew Bodek rc = ENXIO; 43679b8d05b8SZbigniew Bodek goto err_bus_free; 43689b8d05b8SZbigniew Bodek } 43699b8d05b8SZbigniew Bodek 43704fa9e02dSMarcin Wojtas set_default_llq_configurations(&llq_config); 43714fa9e02dSMarcin Wojtas 43724fa9e02dSMarcin Wojtas rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, 43734fa9e02dSMarcin Wojtas &llq_config); 43744fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 43754fa9e02dSMarcin Wojtas device_printf(pdev, "failed to set placement policy\n"); 43764fa9e02dSMarcin Wojtas goto err_com_free; 43774fa9e02dSMarcin Wojtas } 43784fa9e02dSMarcin Wojtas 43794fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 43804fa9e02dSMarcin Wojtas queue_type_str = "Regular"; 43814fa9e02dSMarcin Wojtas else 43824fa9e02dSMarcin Wojtas queue_type_str = "Low Latency"; 43834fa9e02dSMarcin Wojtas device_printf(pdev, "Placement policy: %s\n", queue_type_str); 43844fa9e02dSMarcin Wojtas 43859b8d05b8SZbigniew Bodek adapter->keep_alive_timestamp = getsbinuptime(); 43869b8d05b8SZbigniew Bodek 43879b8d05b8SZbigniew Bodek adapter->tx_offload_cap = get_feat_ctx.offload.tx; 43889b8d05b8SZbigniew Bodek 43899b8d05b8SZbigniew Bodek memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, 43909b8d05b8SZbigniew Bodek ETHER_ADDR_LEN); 43919b8d05b8SZbigniew Bodek 43926064f289SMarcin Wojtas calc_queue_ctx.ena_dev = ena_dev; 43936064f289SMarcin Wojtas calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 43946064f289SMarcin Wojtas calc_queue_ctx.pdev = pdev; 43956064f289SMarcin Wojtas 43969b8d05b8SZbigniew Bodek /* calculate IO queue number to create */ 43979b8d05b8SZbigniew Bodek io_queue_num = ena_calc_io_queue_num(adapter, &get_feat_ctx); 43989b8d05b8SZbigniew Bodek 43999b8d05b8SZbigniew Bodek ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n", 44009b8d05b8SZbigniew Bodek io_queue_num); 44019b8d05b8SZbigniew Bodek adapter->num_queues = io_queue_num; 44029b8d05b8SZbigniew Bodek 44033cfadb28SMarcin Wojtas adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 44046064f289SMarcin Wojtas // Set the requested Rx ring size 44056064f289SMarcin Wojtas adapter->rx_ring_size = ENA_DEFAULT_RING_SIZE; 44069b8d05b8SZbigniew Bodek /* calculatre ring sizes */ 44076064f289SMarcin Wojtas rc = ena_calc_queue_size(adapter, &calc_queue_ctx); 44086064f289SMarcin Wojtas if (unlikely((rc != 0) || (io_queue_num <= 0))) { 44096064f289SMarcin Wojtas rc = EFAULT; 44109b8d05b8SZbigniew Bodek goto err_com_free; 44119b8d05b8SZbigniew Bodek } 44129b8d05b8SZbigniew Bodek 4413a195fab0SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 4414a195fab0SMarcin Wojtas 44156064f289SMarcin Wojtas adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 44166064f289SMarcin Wojtas adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 44179b8d05b8SZbigniew Bodek 44186064f289SMarcin Wojtas adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 44196064f289SMarcin Wojtas adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 44206064f289SMarcin Wojtas 44216064f289SMarcin Wojtas adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; 44229b8d05b8SZbigniew Bodek 44239b8d05b8SZbigniew Bodek /* set up dma tags for rx and tx buffers */ 44249b8d05b8SZbigniew Bodek rc = ena_setup_tx_dma_tag(adapter); 44254e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 44264e8acd84SMarcin Wojtas device_printf(pdev, "Failed to create TX DMA tag\n"); 4427cd5d5804SMarcin Wojtas goto err_com_free; 44284e8acd84SMarcin Wojtas } 44299b8d05b8SZbigniew Bodek 44309b8d05b8SZbigniew Bodek rc = ena_setup_rx_dma_tag(adapter); 44314e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 44324e8acd84SMarcin Wojtas device_printf(pdev, "Failed to create RX DMA tag\n"); 4433cd5d5804SMarcin Wojtas goto err_tx_tag_free; 44344e8acd84SMarcin Wojtas } 44359b8d05b8SZbigniew Bodek 44369b8d05b8SZbigniew Bodek /* initialize rings basic information */ 44376064f289SMarcin Wojtas device_printf(pdev, 44386064f289SMarcin Wojtas "Creating %d io queues. Rx queue size: %d, Tx queue size: %d\n", 44396064f289SMarcin Wojtas io_queue_num, 44406064f289SMarcin Wojtas calc_queue_ctx.rx_queue_size, 44416064f289SMarcin Wojtas calc_queue_ctx.tx_queue_size); 4442cd5d5804SMarcin Wojtas ena_init_io_rings(adapter); 44439b8d05b8SZbigniew Bodek 44449b8d05b8SZbigniew Bodek rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); 44453f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 44469b8d05b8SZbigniew Bodek device_printf(pdev, 44479b8d05b8SZbigniew Bodek "Failed to enable and set the admin interrupts\n"); 4448c115a1e2SMarcin Wojtas goto err_io_free; 4449c115a1e2SMarcin Wojtas } 4450c115a1e2SMarcin Wojtas 4451c115a1e2SMarcin Wojtas /* setup network interface */ 4452c115a1e2SMarcin Wojtas rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx); 4453c115a1e2SMarcin Wojtas if (unlikely(rc != 0)) { 4454c115a1e2SMarcin Wojtas device_printf(pdev, "Error with network interface setup\n"); 4455c115a1e2SMarcin Wojtas goto err_msix_free; 44569b8d05b8SZbigniew Bodek } 44579b8d05b8SZbigniew Bodek 4458081169f2SZbigniew Bodek /* Initialize reset task queue */ 4459081169f2SZbigniew Bodek TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); 4460081169f2SZbigniew Bodek adapter->reset_tq = taskqueue_create("ena_reset_enqueue", 4461081169f2SZbigniew Bodek M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); 4462081169f2SZbigniew Bodek taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, 4463081169f2SZbigniew Bodek "%s rstq", device_get_nameunit(adapter->pdev)); 4464081169f2SZbigniew Bodek 44659b8d05b8SZbigniew Bodek /* Initialize statistics */ 44669b8d05b8SZbigniew Bodek ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, 44679b8d05b8SZbigniew Bodek sizeof(struct ena_stats_dev)); 446830217e2dSMarcin Wojtas ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, 446930217e2dSMarcin Wojtas sizeof(struct ena_hw_stats)); 44709b8d05b8SZbigniew Bodek ena_sysctl_add_nodes(adapter); 44719b8d05b8SZbigniew Bodek 44729b8d05b8SZbigniew Bodek /* Tell the stack that the interface is not active */ 44739b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 4474fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 44759b8d05b8SZbigniew Bodek 44769b8d05b8SZbigniew Bodek return (0); 44779b8d05b8SZbigniew Bodek 4478c115a1e2SMarcin Wojtas err_msix_free: 4479c115a1e2SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); 4480c115a1e2SMarcin Wojtas ena_free_mgmnt_irq(adapter); 4481c115a1e2SMarcin Wojtas ena_disable_msix(adapter); 4482cd5d5804SMarcin Wojtas err_io_free: 44839b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(adapter); 44849b8d05b8SZbigniew Bodek ena_free_rx_dma_tag(adapter); 4485cd5d5804SMarcin Wojtas err_tx_tag_free: 44869b8d05b8SZbigniew Bodek ena_free_tx_dma_tag(adapter); 4487cd5d5804SMarcin Wojtas err_com_free: 44889b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 44899b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 4490cd5d5804SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 44919b8d05b8SZbigniew Bodek err_bus_free: 44929b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 44939b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 44944fa9e02dSMarcin Wojtas err_dev_free: 44954fa9e02dSMarcin Wojtas free(ena_dev, M_DEVBUF); 4496cd5d5804SMarcin Wojtas 44979b8d05b8SZbigniew Bodek return (rc); 44989b8d05b8SZbigniew Bodek } 44999b8d05b8SZbigniew Bodek 45009b8d05b8SZbigniew Bodek /** 45019b8d05b8SZbigniew Bodek * ena_detach - Device Removal Routine 45029b8d05b8SZbigniew Bodek * @pdev: device information struct 45039b8d05b8SZbigniew Bodek * 45049b8d05b8SZbigniew Bodek * ena_detach is called by the device subsystem to alert the driver 45059b8d05b8SZbigniew Bodek * that it should release a PCI device. 45069b8d05b8SZbigniew Bodek **/ 45079b8d05b8SZbigniew Bodek static int 45089b8d05b8SZbigniew Bodek ena_detach(device_t pdev) 45099b8d05b8SZbigniew Bodek { 45109b8d05b8SZbigniew Bodek struct ena_adapter *adapter = device_get_softc(pdev); 45119b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 45129b8d05b8SZbigniew Bodek int rc; 45139b8d05b8SZbigniew Bodek 45149b8d05b8SZbigniew Bodek /* Make sure VLANS are not using driver */ 45159b8d05b8SZbigniew Bodek if (adapter->ifp->if_vlantrunk != NULL) { 45169b8d05b8SZbigniew Bodek device_printf(adapter->pdev ,"VLAN is in use, detach first\n"); 45179b8d05b8SZbigniew Bodek return (EBUSY); 45189b8d05b8SZbigniew Bodek } 45199b8d05b8SZbigniew Bodek 45209151c55dSMarcin Wojtas ether_ifdetach(adapter->ifp); 45219151c55dSMarcin Wojtas 45229b8d05b8SZbigniew Bodek /* Free reset task and callout */ 45239b8d05b8SZbigniew Bodek callout_drain(&adapter->timer_service); 45249b8d05b8SZbigniew Bodek while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) 45259b8d05b8SZbigniew Bodek taskqueue_drain(adapter->reset_tq, &adapter->reset_task); 45269b8d05b8SZbigniew Bodek taskqueue_free(adapter->reset_tq); 45279b8d05b8SZbigniew Bodek 4528e67c6554SZbigniew Bodek sx_xlock(&adapter->ioctl_sx); 45299b8d05b8SZbigniew Bodek ena_down(adapter); 453032f63fa7SMarcin Wojtas ena_destroy_device(adapter, true); 4531e67c6554SZbigniew Bodek sx_unlock(&adapter->ioctl_sx); 45329b8d05b8SZbigniew Bodek 45339b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(adapter); 45349b8d05b8SZbigniew Bodek 453530217e2dSMarcin Wojtas ena_free_counters((counter_u64_t *)&adapter->hw_stats, 453630217e2dSMarcin Wojtas sizeof(struct ena_hw_stats)); 45379b8d05b8SZbigniew Bodek ena_free_counters((counter_u64_t *)&adapter->dev_stats, 45389b8d05b8SZbigniew Bodek sizeof(struct ena_stats_dev)); 45399b8d05b8SZbigniew Bodek 45409b8d05b8SZbigniew Bodek rc = ena_free_rx_dma_tag(adapter); 45413f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 45429b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 45439b8d05b8SZbigniew Bodek "Unmapped RX DMA tag associations\n"); 45449b8d05b8SZbigniew Bodek 45459b8d05b8SZbigniew Bodek rc = ena_free_tx_dma_tag(adapter); 45463f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 45479b8d05b8SZbigniew Bodek device_printf(adapter->pdev, 45489b8d05b8SZbigniew Bodek "Unmapped TX DMA tag associations\n"); 45499b8d05b8SZbigniew Bodek 45509b8d05b8SZbigniew Bodek ena_free_irqs(adapter); 45519b8d05b8SZbigniew Bodek 45529b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 45539b8d05b8SZbigniew Bodek 455432f63fa7SMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) 455532f63fa7SMarcin Wojtas ena_com_rss_destroy(ena_dev); 455632f63fa7SMarcin Wojtas 455732f63fa7SMarcin Wojtas ena_com_delete_host_info(ena_dev); 455832f63fa7SMarcin Wojtas 45599b8d05b8SZbigniew Bodek mtx_destroy(&adapter->global_mtx); 45609b8d05b8SZbigniew Bodek sx_destroy(&adapter->ioctl_sx); 45619b8d05b8SZbigniew Bodek 45629151c55dSMarcin Wojtas if_free(adapter->ifp); 45639151c55dSMarcin Wojtas 45649b8d05b8SZbigniew Bodek if (ena_dev->bus != NULL) 45659b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 45669b8d05b8SZbigniew Bodek 45679b8d05b8SZbigniew Bodek if (ena_dev != NULL) 45689b8d05b8SZbigniew Bodek free(ena_dev, M_DEVBUF); 45699b8d05b8SZbigniew Bodek 45709b8d05b8SZbigniew Bodek return (bus_generic_detach(pdev)); 45719b8d05b8SZbigniew Bodek } 45729b8d05b8SZbigniew Bodek 45739b8d05b8SZbigniew Bodek /****************************************************************************** 45749b8d05b8SZbigniew Bodek ******************************** AENQ Handlers ******************************* 45759b8d05b8SZbigniew Bodek *****************************************************************************/ 45769b8d05b8SZbigniew Bodek /** 45779b8d05b8SZbigniew Bodek * ena_update_on_link_change: 45789b8d05b8SZbigniew Bodek * Notify the network interface about the change in link status 45799b8d05b8SZbigniew Bodek **/ 45809b8d05b8SZbigniew Bodek static void 45819b8d05b8SZbigniew Bodek ena_update_on_link_change(void *adapter_data, 45829b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 45839b8d05b8SZbigniew Bodek { 45849b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 45859b8d05b8SZbigniew Bodek struct ena_admin_aenq_link_change_desc *aenq_desc; 45869b8d05b8SZbigniew Bodek int status; 45879b8d05b8SZbigniew Bodek if_t ifp; 45889b8d05b8SZbigniew Bodek 45899b8d05b8SZbigniew Bodek aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 45909b8d05b8SZbigniew Bodek ifp = adapter->ifp; 45919b8d05b8SZbigniew Bodek status = aenq_desc->flags & 45929b8d05b8SZbigniew Bodek ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; 45939b8d05b8SZbigniew Bodek 45949b8d05b8SZbigniew Bodek if (status != 0) { 45959b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "link is UP\n"); 4596fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter); 459732f63fa7SMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter)) 459832f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_UP); 459932f63fa7SMarcin Wojtas } else { 46009b8d05b8SZbigniew Bodek device_printf(adapter->pdev, "link is DOWN\n"); 46019b8d05b8SZbigniew Bodek if_link_state_change(ifp, LINK_STATE_DOWN); 4602fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter); 46039b8d05b8SZbigniew Bodek } 46049b8d05b8SZbigniew Bodek } 46059b8d05b8SZbigniew Bodek 460640621d71SMarcin Wojtas static void ena_notification(void *adapter_data, 460740621d71SMarcin Wojtas struct ena_admin_aenq_entry *aenq_e) 460840621d71SMarcin Wojtas { 460940621d71SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 461040621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints; 461140621d71SMarcin Wojtas 461240621d71SMarcin Wojtas ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, 461340621d71SMarcin Wojtas "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, 461440621d71SMarcin Wojtas ENA_ADMIN_NOTIFICATION); 461540621d71SMarcin Wojtas 461640621d71SMarcin Wojtas switch (aenq_e->aenq_common_desc.syndrom) { 461740621d71SMarcin Wojtas case ENA_ADMIN_UPDATE_HINTS: 461840621d71SMarcin Wojtas hints = 461940621d71SMarcin Wojtas (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); 462040621d71SMarcin Wojtas ena_update_hints(adapter, hints); 462140621d71SMarcin Wojtas break; 462240621d71SMarcin Wojtas default: 462340621d71SMarcin Wojtas device_printf(adapter->pdev, 462440621d71SMarcin Wojtas "Invalid aenq notification link state %d\n", 462540621d71SMarcin Wojtas aenq_e->aenq_common_desc.syndrom); 462640621d71SMarcin Wojtas } 462740621d71SMarcin Wojtas } 462840621d71SMarcin Wojtas 46299b8d05b8SZbigniew Bodek /** 46309b8d05b8SZbigniew Bodek * This handler will called for unknown event group or unimplemented handlers 46319b8d05b8SZbigniew Bodek **/ 46329b8d05b8SZbigniew Bodek static void 4633e6de9a83SMarcin Wojtas unimplemented_aenq_handler(void *adapter_data, 46349b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 46359b8d05b8SZbigniew Bodek { 4636e6de9a83SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 4637e6de9a83SMarcin Wojtas 4638e6de9a83SMarcin Wojtas device_printf(adapter->pdev, 4639e6de9a83SMarcin Wojtas "Unknown event was received or event with unimplemented handler\n"); 46409b8d05b8SZbigniew Bodek } 46419b8d05b8SZbigniew Bodek 46429b8d05b8SZbigniew Bodek static struct ena_aenq_handlers aenq_handlers = { 46439b8d05b8SZbigniew Bodek .handlers = { 46449b8d05b8SZbigniew Bodek [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 464540621d71SMarcin Wojtas [ENA_ADMIN_NOTIFICATION] = ena_notification, 46469b8d05b8SZbigniew Bodek [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, 46479b8d05b8SZbigniew Bodek }, 46489b8d05b8SZbigniew Bodek .unimplemented_handler = unimplemented_aenq_handler 46499b8d05b8SZbigniew Bodek }; 46509b8d05b8SZbigniew Bodek 46519b8d05b8SZbigniew Bodek /********************************************************************* 46529b8d05b8SZbigniew Bodek * FreeBSD Device Interface Entry Points 46539b8d05b8SZbigniew Bodek *********************************************************************/ 46549b8d05b8SZbigniew Bodek 46559b8d05b8SZbigniew Bodek static device_method_t ena_methods[] = { 46569b8d05b8SZbigniew Bodek /* Device interface */ 46579b8d05b8SZbigniew Bodek DEVMETHOD(device_probe, ena_probe), 46589b8d05b8SZbigniew Bodek DEVMETHOD(device_attach, ena_attach), 46599b8d05b8SZbigniew Bodek DEVMETHOD(device_detach, ena_detach), 46609b8d05b8SZbigniew Bodek DEVMETHOD_END 46619b8d05b8SZbigniew Bodek }; 46629b8d05b8SZbigniew Bodek 46639b8d05b8SZbigniew Bodek static driver_t ena_driver = { 46649b8d05b8SZbigniew Bodek "ena", ena_methods, sizeof(struct ena_adapter), 46659b8d05b8SZbigniew Bodek }; 46669b8d05b8SZbigniew Bodek 46679b8d05b8SZbigniew Bodek devclass_t ena_devclass; 46689b8d05b8SZbigniew Bodek DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0); 466940abe76bSWarner Losh MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array, 4670329e817fSWarner Losh nitems(ena_vendor_info_array) - 1); 46719b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, pci, 1, 1, 1); 46729b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, ether, 1, 1, 1); 46739b8d05b8SZbigniew Bodek 46749b8d05b8SZbigniew Bodek /*********************************************************************/ 4675