/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include "ena.h"
#include "ena_datapath.h"
#include "ena_rss.h"
#include "ena_sysctl.h"

#ifdef DEV_NETMAP
#include "ena_netmap.h"
#endif /* DEV_NETMAP */

/*********************************************************
 *  Function prototypes
 *********************************************************/
static int	ena_probe(device_t);
static void	ena_intr_msix_mgmnt(void *);
static void	ena_free_pci_resources(struct ena_adapter *);
static int	ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void	ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *,
    uint16_t);
static void	ena_init_io_rings_basic(struct ena_adapter *);
static void	ena_init_io_rings_advanced(struct ena_adapter *);
static void	ena_init_io_rings(struct ena_adapter *);
static void	ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void	ena_free_all_io_rings_resources(struct ena_adapter *);
static int	ena_setup_tx_dma_tag(struct ena_adapter *);
static int	ena_free_tx_dma_tag(struct ena_adapter *);
static int	ena_setup_rx_dma_tag(struct ena_adapter *);
static int	ena_free_rx_dma_tag(struct ena_adapter *);
static void	ena_release_all_tx_dmamap(struct ena_ring *);
static int	ena_setup_tx_resources(struct ena_adapter *, int);
static void	ena_free_tx_resources(struct ena_adapter *, int);
static int	ena_setup_all_tx_resources(struct ena_adapter *);
static void	ena_free_all_tx_resources(struct ena_adapter *);
static int	ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void	ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int	ena_setup_all_rx_resources(struct ena_adapter *);
static void	ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void	ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void	ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void	ena_refill_all_rx_bufs(struct ena_adapter *);
static void	ena_free_all_rx_bufs(struct ena_adapter *);
static void	ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void	ena_free_all_tx_bufs(struct ena_adapter *);
static void	ena_destroy_all_tx_queues(struct ena_adapter *);
static void	ena_destroy_all_rx_queues(struct ena_adapter *);
static void	ena_destroy_all_io_queues(struct ena_adapter *);
static int	ena_create_io_queues(struct ena_adapter *);
static int	ena_handle_msix(void *);
static int	ena_enable_msix(struct ena_adapter *);
static void	ena_setup_mgmnt_intr(struct ena_adapter *);
static int	ena_setup_io_intr(struct ena_adapter *);
static int	ena_request_mgmnt_irq(struct ena_adapter *);
static int	ena_request_io_irq(struct ena_adapter *);
static void	ena_free_mgmnt_irq(struct ena_adapter *);
static void	ena_free_io_irq(struct ena_adapter *);
static void	ena_free_irqs(struct ena_adapter *);
static void	ena_disable_msix(struct ena_adapter *);
static void	ena_unmask_all_io_irqs(struct ena_adapter *);
static int	ena_up_complete(struct ena_adapter *);
static uint64_t	ena_get_counter(if_t, ift_counter);
static int	ena_media_change(if_t);
static void	ena_media_status(if_t, struct ifmediareq *);
static void	ena_init(void *);
static int	ena_ioctl(if_t, u_long, caddr_t);
static int	ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void	ena_update_host_info(struct ena_admin_host_info *, if_t);
static void	ena_update_hwassist(struct ena_adapter *);
static void	ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int	ena_enable_wc(device_t, struct resource *);
static int	ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static int	ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
static uint32_t	ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
    struct ena_com_dev_get_features_ctx *);
static int	ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
static void	ena_config_host_info(struct ena_com_dev *, device_t);
static int	ena_attach(device_t);
static int	ena_detach(device_t);
static int	ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int	ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
static void	ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void	unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *);
static int	ena_copy_eni_metrics(struct ena_adapter *);
static int	ena_copy_srd_metrics(struct ena_adapter *);
static int	ena_copy_customer_metrics(struct ena_adapter *);
static void	ena_timer_service(void *);
static enum ena_regs_reset_reason_types check_cdesc_in_tx_cq(struct ena_adapter *,
    struct ena_ring *);

static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME
    " v" ENA_DRV_MODULE_VERSION;

static ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
	/* Last entry */
	{ 0, 0, 0 }
};

struct sx ena_global_lock;

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;

void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
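
/*
 * Allocate a DMA-coherent buffer of at least 'size' bytes (rounded up to a
 * multiple of PAGE_SIZE): create a single-segment tag limited to the DMA
 * width reported by the device, bind it to the requested memory domain,
 * allocate and load the buffer, and return its kernel virtual and bus
 * addresses through 'dma'.
 */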
int
ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
    int mapflags, bus_size_t alignment, int domain)
{
	struct ena_adapter *adapter = device_get_softc(dmadev);
	device_t pdev = adapter->pdev;
	uint32_t maxsize;
	uint64_t dma_space_addr;
	int error;

	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
	if (unlikely(dma_space_addr == 0))
		dma_space_addr = BUS_SPACE_MAXADDR;

	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
	    alignment, 0,	/* alignment, bounds */
	    dma_space_addr,	/* lowaddr of exclusion window */
	    BUS_SPACE_MAXADDR,	/* highaddr of exclusion window */
	    NULL, NULL,		/* filter, filterarg */
	    maxsize,		/* maxsize */
	    1,			/* nsegments */
	    maxsize,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockarg */
	    &dma->tag);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
		goto fail_tag;
	}

	error = bus_dma_tag_set_domain(dma->tag, domain);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n",
		    error);
		goto fail_map_create;
	}

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
		    (uintmax_t)size, error);
		goto fail_map_create;
	}

	dma->paddr = 0;
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    ena_dmamap_callback, &dma->paddr, mapflags);
	if (unlikely((error != 0) || (dma->paddr == 0))) {
		ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
		goto fail_map_load;
	}

	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail_map_load:
	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
	bus_dma_tag_destroy(dma->tag);
fail_tag:
	dma->tag = NULL;
	dma->vaddr = NULL;
	dma->paddr = 0;

	return (error);
}

static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;

	if (adapter->memory != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
	}

	if (adapter->registers != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
	}

	if (adapter->msix != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid,
		    adapter->msix);
	}
}
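
/*
 * Match the device's PCI vendor/device ID pair against
 * ena_vendor_info_array and claim it with the default probe priority on a
 * hit.
 */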
static int
ena_probe(device_t dev)
{
	ena_vendor_info_t *ent;
	uint16_t pci_vendor_id = 0;
	uint16_t pci_device_id = 0;

	pci_vendor_id = pci_get_vendor(dev);
	pci_device_id = pci_get_device(dev);

	ent = ena_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id)) {
			ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id,
			    pci_device_id);

			device_set_desc(dev, ENA_DEVICE_DESC);
			return (BUS_PROBE_DEFAULT);
		}

		ent++;
	}

	return (ENXIO);
}

static int
ena_change_mtu(if_t ifp, int new_mtu)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	device_t pdev = adapter->pdev;
	int rc;

	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
		ena_log(pdev, ERR,
		    "Invalid MTU setting. new_mtu: %d max mtu: %d min mtu: %d\n",
		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
		return (EINVAL);
	}

	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (likely(rc == 0)) {
		ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
		if_setmtu(ifp, new_mtu);
	} else {
		ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
	}

	return (rc);
}

static inline void
ena_alloc_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		*begin = counter_u64_alloc(M_WAITOK);
}

static inline void
ena_free_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_free(*begin);
}

static inline void
ena_reset_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_zero(*begin);
}
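
/*
 * Ring state shared by the Tx and Rx side of a single I/O queue pair: the
 * queue id, back-pointers to the adapter and the ena_com device, and the
 * fields used to track interrupt activity on the ring.
 */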
static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{
	ring->qid = qid;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	atomic_store_8(&ring->first_interrupt, 0);
	ring->no_interrupt_event_cnt = 0;
}

static void
ena_init_io_rings_basic(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	struct ena_que *que;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;

		que = &adapter->que[i];
		que->adapter = adapter;
		que->id = i;
		que->tx_ring = txr;
		que->rx_ring = rxr;

		txr->que = que;
		rxr->que = que;

		rxr->empty_rx_queue = 0;
		rxr->rx_mbuf_sz = ena_mbuf_sz;
	}
}

static void
ena_init_io_rings_advanced(struct ena_adapter *adapter)
{
	struct ena_ring *txr, *rxr;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* Allocate a buf ring */
		txr->buf_ring_size = adapter->buf_ring_size;
		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK,
		    &txr->ring_mtx);

		/* Allocate Tx statistics. */
		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
		    sizeof(txr->tx_stats));
		txr->tx_last_cleanup_ticks = ticks;

		/* Allocate Rx statistics. */
		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
		    sizeof(rxr->rx_stats));

		/* Initialize locks */
		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(adapter->pdev), i);
		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(adapter->pdev), i);

		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
	}
}

static void
ena_init_io_rings(struct ena_adapter *adapter)
{
	/*
	 * IO rings initialization can be divided into two steps:
	 * 1. Initialize variables and fields with initial values and copy
	 *    them from adapter/ena_dev (basic)
	 * 2. Allocate mutex, counters and buf_ring (advanced)
	 */
	ena_init_io_rings_basic(adapter);
	ena_init_io_rings_advanced(adapter);
}

static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *txr = &adapter->tx_ring[qid];
	struct ena_ring *rxr = &adapter->rx_ring[qid];

	ena_free_counters((counter_u64_t *)&txr->tx_stats,
	    sizeof(txr->tx_stats));
	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
	    sizeof(rxr->rx_stats));

	ENA_RING_MTX_LOCK(txr);
	drbr_free(txr->br, M_DEVBUF);
	ENA_RING_MTX_UNLOCK(txr);

	mtx_destroy(&txr->ring_mtx);
}

static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_io_ring_resources(adapter, i);
}
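
/*
 * DMA tags for Tx and Rx buffers. Both restrict bus addresses to the DMA
 * width advertised by the device (adapter->dma_width) and inherit the
 * parent tag of the PCI device.
 */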
static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Tx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
	    1, 0,				  /* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg */
	    ENA_TSO_MAXSIZE,			  /* maxsize */
	    adapter->max_tx_sgl_size - 1,	  /* nsegments */
	    ENA_TSO_MAXSIZE,			  /* maxsegsize */
	    0,					  /* flags */
	    NULL,				  /* lockfunc */
	    NULL,				  /* lockfuncarg */
	    &adapter->tx_buf_tag);

	return (ret);
}

static int
ena_free_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);

	if (likely(ret == 0))
		adapter->tx_buf_tag = NULL;

	return (ret);
}

static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Rx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
	    1, 0,				  /* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg */
	    ena_mbuf_sz,			  /* maxsize */
	    adapter->max_rx_sgl_size,		  /* nsegments */
	    ena_mbuf_sz,			  /* maxsegsize */
	    0,					  /* flags */
	    NULL,				  /* lockfunc */
	    NULL,				  /* lockarg */
	    &adapter->rx_buf_tag);

	return (ret);
}

static int
ena_free_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);

	if (likely(ret == 0))
		adapter->rx_buf_tag = NULL;

	return (ret);
}
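
/*
 * Validate the req_id lookup result for a Tx completion. A non-zero
 * tx_req_id_rc from ena_com means the completion cannot be trusted:
 * ENA_COM_FAULT is reported as a malformed Tx descriptor, ENA_COM_INVAL as
 * an invalid req_id. In either case a device reset is triggered and EFAULT
 * is returned to the caller.
 */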
int
validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;

	if (unlikely(tx_req_id_rc != 0)) {
		if (tx_req_id_rc == ENA_COM_FAULT) {
			reset_reason = ENA_REGS_RESET_TX_DESCRIPTOR_MALFORMED;
			ena_log(adapter->pdev, ERR,
			    "TX descriptor malformed. req_id %hu qid %hu\n",
			    req_id, tx_ring->qid);
		} else if (tx_req_id_rc == ENA_COM_INVAL) {
			ena_log_nm(adapter->pdev, WARN,
			    "Invalid req_id %hu in qid %hu\n",
			    req_id, tx_ring->qid);
			counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
		}

		ena_trigger_reset(adapter, reset_reason);
		return (EFAULT);
	}

	return (0);
}
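
/*
 * Destroy every Tx buffer DMA map of the ring, including the per-segment
 * netmap maps when the interface runs in netmap mode. Used by the error
 * path of ena_setup_tx_resources().
 */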
static void
ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_tx_buffer *tx_info;
	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
	int i;
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	for (i = 0; i < tx_ring->ring_size; ++i) {
		tx_info = &tx_ring->tx_buffer_info[i];
#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_info->nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
				if (nm_info->map_seg[j] != NULL) {
					bus_dmamap_destroy(tx_tag,
					    nm_info->map_seg[j]);
					nm_info->map_seg[j] = NULL;
				}
			}
		}
#endif /* DEV_NETMAP */
		if (tx_info->dmamap != NULL) {
			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
			tx_info->dmamap = NULL;
		}
	}
}

/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	device_t pdev = adapter->pdev;
	char thread_name[MAXCOMLEN + 1];
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *tx_ring = que->tx_ring;
	cpuset_t *cpu_mask = NULL;
	int size, i, err;
#ifdef DEV_NETMAP
	bus_dmamap_t *map;
	int j;

	ena_netmap_reset_tx_ring(adapter, qid);
#endif /* DEV_NETMAP */

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->tx_buffer_info == NULL))
		return (ENOMEM);

	size = sizeof(uint16_t) * tx_ring->ring_size;
	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->free_tx_ids == NULL))
		goto err_buf_info_free;

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
		goto err_tx_ids_free;

	/* Req id stack for TX OOO completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset TX statistics. */
	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
	    sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->acum_pkts = 0;

	/* Make sure that drbr is empty */
	ENA_RING_MTX_LOCK(tx_ring);
	drbr_flush(adapter->ifp, tx_ring->br);
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* ... and create the buffer DMA maps */
	for (i = 0; i < tx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
		    &tx_ring->tx_buffer_info[i].dmamap);
		if (unlikely(err != 0)) {
			ena_log(pdev, ERR,
			    "Unable to create Tx DMA map for buffer %d\n", i);
			goto err_map_release;
		}

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
				    &map[j]);
				if (unlikely(err != 0)) {
					ena_log(pdev, ERR,
					    "Unable to create Tx DMA for buffer %d %d\n",
					    i, j);
					goto err_map_release;
				}
			}
		}
#endif /* DEV_NETMAP */
	}

	/* Allocate taskqueues */
	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
	if (unlikely(tx_ring->enqueue_tq == NULL)) {
		ena_log(pdev, ERR,
		    "Unable to create taskqueue for enqueue task\n");
		i = tx_ring->ring_size;
		goto err_map_release;
	}

	tx_ring->running = true;

#ifdef RSS
	cpu_mask = &que->cpu_mask;
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->cpu);
#else
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->id);
#endif
	taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
	    cpu_mask, "%s", thread_name);

	return (0);

err_map_release:
	ena_release_all_tx_dmamap(tx_ring);
err_tx_ids_free:
	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
err_buf_info_free:
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	return (ENOMEM);
}

/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	taskqueue_free(tx_ring->enqueue_tq);

	ENA_RING_MTX_LOCK(tx_ring);
	/* Flush buffer ring, */
	drbr_flush(adapter->ifp, tx_ring->br);

	/* Free buffer DMA maps, */
	for (int i = 0; i < tx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				if (nm_info->socket_buf_idx[j] != 0) {
					bus_dmamap_sync(adapter->tx_buf_tag,
					    nm_info->map_seg[j],
					    BUS_DMASYNC_POSTWRITE);
					ena_netmap_unload(adapter,
					    nm_info->map_seg[j]);
				}
				bus_dmamap_destroy(adapter->tx_buf_tag,
				    nm_info->map_seg[j]);
				nm_info->socket_buf_idx[j] = 0;
			}
		}
#endif /* DEV_NETMAP */

		m_freem(tx_ring->tx_buffer_info[i].mbuf);
		tx_ring->tx_buffer_info[i].mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* And free allocated memory. */
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;

	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
	tx_ring->push_buf_intermediate_buf = NULL;
}

/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Tx Queue %u failed\n", i);
			goto err_setup_tx;
		}
	}

	return (0);

err_setup_tx:
	/* Rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_tx_resources(adapter, i);
}

/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	device_t pdev = adapter->pdev;
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *rx_ring = que->rx_ring;
	int size, err, i;

	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

#ifdef DEV_NETMAP
	ena_netmap_reset_rx_ring(adapter, qid);
	rx_ring->initialized = false;
#endif /* DEV_NETMAP */

	/*
	 * Alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size += sizeof(struct ena_rx_buffer);

	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	size = sizeof(uint16_t) * rx_ring->ring_size;
	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset RX statistics. */
	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
	    sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* ... and create the buffer DMA maps */
	for (i = 0; i < rx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
		    &(rx_ring->rx_buffer_info[i].map));
		if (err != 0) {
			ena_log(pdev, ERR,
			    "Unable to create Rx DMA map for buffer %d\n", i);
			goto err_buf_info_unmap;
		}
	}

	/* Create LRO for the ring */
	if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) {
		int err = tcp_lro_init(&rx_ring->lro);
		if (err != 0) {
			ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
			    qid);
		} else {
			ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
			    qid);
			rx_ring->lro.ifp = adapter->ifp;
		}
	}

	return (0);

err_buf_info_unmap:
	while (i--) {
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;
	return (ENOMEM);
}

/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	/* Free buffer DMA maps, */
	for (int i = 0; i < rx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
		m_freem(rx_ring->rx_buffer_info[i].mbuf);
		rx_ring->rx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	/* free LRO resources, */
	tcp_lro_free(&rx_ring->lro);

	/* free allocated memory */
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
}

/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Rx Queue %u failed\n", i);
			goto err_setup_rx;
		}
	}
	return (0);

err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}
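
/*
 * Attach a receive buffer to the given rx_info slot: try to allocate a
 * jumbo cluster of rx_ring->rx_mbuf_sz bytes first and fall back to a
 * regular MCLBYTES cluster on failure, then DMA-map the mbuf and record its
 * bus address and length in the ena_com buffer descriptor.
 */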
static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	device_t pdev = adapter->pdev;
	struct ena_com_buf *ena_buf;
	bus_dma_segment_t segs[1];
	int nsegs, error;
	int mlen;

	/* if previous allocated frag is not used */
	if (unlikely(rx_info->mbuf != NULL))
		return (0);

	/* Get mbuf using UMA allocator */
	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    rx_ring->rx_mbuf_sz);

	if (unlikely(rx_info->mbuf == NULL)) {
		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(rx_info->mbuf == NULL)) {
			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
			return (ENOMEM);
		}
		mlen = MCLBYTES;
	} else {
		mlen = rx_ring->rx_mbuf_sz;
	}
	/* Set mbuf length */
	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

	/* Map packets for DMA */
	ena_log(pdev, DBG,
	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely((error != 0) || (nsegs != 1))) {
		ena_log(pdev, WARN,
		    "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
		goto exit;
	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = segs[0].ds_addr;
	ena_buf->len = mlen;

	ena_log(pdev, DBG,
	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

	return (0);

exit:
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
	return (EFAULT);
}
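
/* Unmap and free a previously allocated receive mbuf, if there is one. */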
uint32_t num) 11079b8d05b8SZbigniew Bodek { 11089b8d05b8SZbigniew Bodek struct ena_adapter *adapter = rx_ring->adapter; 11093fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 111043fefd16SMarcin Wojtas uint16_t next_to_use, req_id; 11119b8d05b8SZbigniew Bodek uint32_t i; 11129b8d05b8SZbigniew Bodek int rc; 11139b8d05b8SZbigniew Bodek 11143fc5d816SMarcin Wojtas ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid); 11159b8d05b8SZbigniew Bodek 11169b8d05b8SZbigniew Bodek next_to_use = rx_ring->next_to_use; 11179b8d05b8SZbigniew Bodek 11189b8d05b8SZbigniew Bodek for (i = 0; i < num; i++) { 111943fefd16SMarcin Wojtas struct ena_rx_buffer *rx_info; 112043fefd16SMarcin Wojtas 11213fc5d816SMarcin Wojtas ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n", 11223fc5d816SMarcin Wojtas next_to_use); 11239b8d05b8SZbigniew Bodek 112443fefd16SMarcin Wojtas req_id = rx_ring->free_rx_ids[next_to_use]; 112543fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 11269a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 1127358bcc4cSMarcin Wojtas if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) 112882e558eaSDawid Gorecki rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, 112982e558eaSDawid Gorecki rx_info); 11309a0f2079SMarcin Wojtas else 11319a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 11329b8d05b8SZbigniew Bodek rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); 11333f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 11343fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11354e8acd84SMarcin Wojtas "failed to alloc buffer for rx queue %d\n", 11364e8acd84SMarcin Wojtas rx_ring->qid); 11379b8d05b8SZbigniew Bodek break; 11389b8d05b8SZbigniew Bodek } 11399b8d05b8SZbigniew Bodek rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, 114043fefd16SMarcin Wojtas &rx_info->ena_buf, req_id); 11410bdffe59SMarcin Wojtas if (unlikely(rc != 0)) { 11423fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11439b8d05b8SZbigniew Bodek "failed to add buffer for rx queue %d\n", 11449b8d05b8SZbigniew Bodek rx_ring->qid); 11459b8d05b8SZbigniew Bodek break; 11469b8d05b8SZbigniew Bodek } 11479b8d05b8SZbigniew Bodek next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 11489b8d05b8SZbigniew Bodek rx_ring->ring_size); 11499b8d05b8SZbigniew Bodek } 11509b8d05b8SZbigniew Bodek 11513f9ed7abSMarcin Wojtas if (unlikely(i < num)) { 11529b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.refil_partial, 1); 11533fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11544e8acd84SMarcin Wojtas "refilled rx qid %d with only %d mbufs (from %d)\n", 11554e8acd84SMarcin Wojtas rx_ring->qid, i, num); 11569b8d05b8SZbigniew Bodek } 11579b8d05b8SZbigniew Bodek 11588483b844SMarcin Wojtas if (likely(i != 0)) 11599b8d05b8SZbigniew Bodek ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 11608483b844SMarcin Wojtas 11619b8d05b8SZbigniew Bodek rx_ring->next_to_use = next_to_use; 11629b8d05b8SZbigniew Bodek return (i); 11639b8d05b8SZbigniew Bodek } 11649b8d05b8SZbigniew Bodek 11657d8c4feeSMarcin Wojtas int 116621823546SMarcin Wojtas ena_update_buf_ring_size(struct ena_adapter *adapter, 116721823546SMarcin Wojtas uint32_t new_buf_ring_size) 116821823546SMarcin Wojtas { 116921823546SMarcin Wojtas uint32_t old_buf_ring_size; 117021823546SMarcin Wojtas int rc = 0; 117121823546SMarcin Wojtas bool dev_was_up; 117221823546SMarcin Wojtas 117321823546SMarcin Wojtas old_buf_ring_size = adapter->buf_ring_size; 117421823546SMarcin Wojtas adapter->buf_ring_size = new_buf_ring_size; 117521823546SMarcin Wojtas 117621823546SMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, 
adapter); 117721823546SMarcin Wojtas ena_down(adapter); 117821823546SMarcin Wojtas 117921823546SMarcin Wojtas /* Reconfigure buf ring for all Tx rings. */ 118021823546SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 118121823546SMarcin Wojtas ena_init_io_rings_advanced(adapter); 118221823546SMarcin Wojtas if (dev_was_up) { 118321823546SMarcin Wojtas /* 118421823546SMarcin Wojtas * If ena_up() fails, it's not because of recent buf_ring size 118521823546SMarcin Wojtas * changes. Because of that, we just want to revert old drbr 118621823546SMarcin Wojtas * value and trigger the reset because something else had to 118721823546SMarcin Wojtas * go wrong. 118821823546SMarcin Wojtas */ 118921823546SMarcin Wojtas rc = ena_up(adapter); 119021823546SMarcin Wojtas if (unlikely(rc != 0)) { 11913fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 119221823546SMarcin Wojtas "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n", 119321823546SMarcin Wojtas new_buf_ring_size, old_buf_ring_size); 119421823546SMarcin Wojtas 119521823546SMarcin Wojtas /* Revert old size and trigger the reset */ 119621823546SMarcin Wojtas adapter->buf_ring_size = old_buf_ring_size; 119721823546SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 119821823546SMarcin Wojtas ena_init_io_rings_advanced(adapter); 119921823546SMarcin Wojtas 120021823546SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, 120121823546SMarcin Wojtas adapter); 120221823546SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); 120321823546SMarcin Wojtas } 120421823546SMarcin Wojtas } 120521823546SMarcin Wojtas 120621823546SMarcin Wojtas return (rc); 120721823546SMarcin Wojtas } 120821823546SMarcin Wojtas 120921823546SMarcin Wojtas int 12107d8c4feeSMarcin Wojtas ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size, 12117d8c4feeSMarcin Wojtas uint32_t new_rx_size) 12127d8c4feeSMarcin Wojtas { 12137d8c4feeSMarcin Wojtas uint32_t old_tx_size, old_rx_size; 12147d8c4feeSMarcin Wojtas int rc = 0; 12157d8c4feeSMarcin Wojtas bool dev_was_up; 12167d8c4feeSMarcin Wojtas 12179762a033SMarcin Wojtas old_tx_size = adapter->requested_tx_ring_size; 12189762a033SMarcin Wojtas old_rx_size = adapter->requested_rx_ring_size; 12199762a033SMarcin Wojtas adapter->requested_tx_ring_size = new_tx_size; 12209762a033SMarcin Wojtas adapter->requested_rx_ring_size = new_rx_size; 12217d8c4feeSMarcin Wojtas 12227d8c4feeSMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 12237d8c4feeSMarcin Wojtas ena_down(adapter); 12247d8c4feeSMarcin Wojtas 12257d8c4feeSMarcin Wojtas /* Configure queues with new size. */ 12267d8c4feeSMarcin Wojtas ena_init_io_rings_basic(adapter); 12277d8c4feeSMarcin Wojtas if (dev_was_up) { 12287d8c4feeSMarcin Wojtas rc = ena_up(adapter); 12297d8c4feeSMarcin Wojtas if (unlikely(rc != 0)) { 12303fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 12317d8c4feeSMarcin Wojtas "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n", 12327d8c4feeSMarcin Wojtas new_tx_size, new_rx_size, old_tx_size, old_rx_size); 12337d8c4feeSMarcin Wojtas 12347d8c4feeSMarcin Wojtas /* Revert old size. */ 12359762a033SMarcin Wojtas adapter->requested_tx_ring_size = old_tx_size; 12369762a033SMarcin Wojtas adapter->requested_rx_ring_size = old_rx_size; 12377d8c4feeSMarcin Wojtas ena_init_io_rings_basic(adapter); 12387d8c4feeSMarcin Wojtas 12397d8c4feeSMarcin Wojtas /* And try again. 
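* ena_up() is retried with the previous ring sizes restored; if even the revert fails, the code below records that the device was up and requests an OS-triggered reset.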
*/ 12407d8c4feeSMarcin Wojtas rc = ena_up(adapter); 12417d8c4feeSMarcin Wojtas if (unlikely(rc != 0)) { 12423fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 12437d8c4feeSMarcin Wojtas "Failed to revert old queue sizes. Triggering device reset.\n"); 12447d8c4feeSMarcin Wojtas /* 12457d8c4feeSMarcin Wojtas * If we've failed again, something had to go 12467d8c4feeSMarcin Wojtas * wrong. After reset, the device should try to 12477d8c4feeSMarcin Wojtas * go up 12487d8c4feeSMarcin Wojtas */ 12497d8c4feeSMarcin Wojtas ENA_FLAG_SET_ATOMIC( 12507d8c4feeSMarcin Wojtas ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 12517d8c4feeSMarcin Wojtas ena_trigger_reset(adapter, 12527d8c4feeSMarcin Wojtas ENA_REGS_RESET_OS_TRIGGER); 12537d8c4feeSMarcin Wojtas } 12547d8c4feeSMarcin Wojtas } 12557d8c4feeSMarcin Wojtas } 12567d8c4feeSMarcin Wojtas 12577d8c4feeSMarcin Wojtas return (rc); 12587d8c4feeSMarcin Wojtas } 12597d8c4feeSMarcin Wojtas 12609b8d05b8SZbigniew Bodek static void 126156d41ad5SMarcin Wojtas ena_update_io_rings(struct ena_adapter *adapter, uint32_t num) 126256d41ad5SMarcin Wojtas { 126356d41ad5SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 126456d41ad5SMarcin Wojtas /* Force indirection table to be reinitialized */ 126556d41ad5SMarcin Wojtas ena_com_rss_destroy(adapter->ena_dev); 126656d41ad5SMarcin Wojtas 126756d41ad5SMarcin Wojtas adapter->num_io_queues = num; 126856d41ad5SMarcin Wojtas ena_init_io_rings(adapter); 126956d41ad5SMarcin Wojtas } 127056d41ad5SMarcin Wojtas 1271f9e1d947SOsama Abboud int 1272f9e1d947SOsama Abboud ena_update_base_cpu(struct ena_adapter *adapter, int new_num) 1273f9e1d947SOsama Abboud { 1274f9e1d947SOsama Abboud int old_num; 1275f9e1d947SOsama Abboud int rc = 0; 1276f9e1d947SOsama Abboud bool dev_was_up; 1277f9e1d947SOsama Abboud 1278f9e1d947SOsama Abboud dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 1279f9e1d947SOsama Abboud old_num = adapter->irq_cpu_base; 1280f9e1d947SOsama Abboud 1281f9e1d947SOsama Abboud ena_down(adapter); 1282f9e1d947SOsama Abboud 1283f9e1d947SOsama Abboud adapter->irq_cpu_base = new_num; 1284f9e1d947SOsama Abboud 1285f9e1d947SOsama Abboud if (dev_was_up) { 1286f9e1d947SOsama Abboud rc = ena_up(adapter); 1287f9e1d947SOsama Abboud if (unlikely(rc != 0)) { 1288f9e1d947SOsama Abboud ena_log(adapter->pdev, ERR, 1289f9e1d947SOsama Abboud "Failed to configure device %d IRQ base CPU. " 1290f9e1d947SOsama Abboud "Reverting to previous value: %d\n", 1291f9e1d947SOsama Abboud new_num, old_num); 1292f9e1d947SOsama Abboud 1293f9e1d947SOsama Abboud adapter->irq_cpu_base = old_num; 1294f9e1d947SOsama Abboud 1295f9e1d947SOsama Abboud rc = ena_up(adapter); 1296f9e1d947SOsama Abboud if (unlikely(rc != 0)) { 1297f9e1d947SOsama Abboud ena_log(adapter->pdev, ERR, 1298f9e1d947SOsama Abboud "Failed to revert to previous setup." 
1299f9e1d947SOsama Abboud "Triggering device reset.\n"); 1300f9e1d947SOsama Abboud ENA_FLAG_SET_ATOMIC( 1301f9e1d947SOsama Abboud ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 1302f9e1d947SOsama Abboud ena_trigger_reset(adapter, 1303f9e1d947SOsama Abboud ENA_REGS_RESET_OS_TRIGGER); 1304f9e1d947SOsama Abboud } 1305f9e1d947SOsama Abboud } 1306f9e1d947SOsama Abboud } 1307f9e1d947SOsama Abboud return (rc); 1308f9e1d947SOsama Abboud } 1309f9e1d947SOsama Abboud 1310f9e1d947SOsama Abboud int 1311f9e1d947SOsama Abboud ena_update_cpu_stride(struct ena_adapter *adapter, uint32_t new_num) 1312f9e1d947SOsama Abboud { 1313f9e1d947SOsama Abboud uint32_t old_num; 1314f9e1d947SOsama Abboud int rc = 0; 1315f9e1d947SOsama Abboud bool dev_was_up; 1316f9e1d947SOsama Abboud 1317f9e1d947SOsama Abboud dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 1318f9e1d947SOsama Abboud old_num = adapter->irq_cpu_stride; 1319f9e1d947SOsama Abboud 1320f9e1d947SOsama Abboud ena_down(adapter); 1321f9e1d947SOsama Abboud 1322f9e1d947SOsama Abboud adapter->irq_cpu_stride = new_num; 1323f9e1d947SOsama Abboud 1324f9e1d947SOsama Abboud if (dev_was_up) { 1325f9e1d947SOsama Abboud rc = ena_up(adapter); 1326f9e1d947SOsama Abboud if (unlikely(rc != 0)) { 1327f9e1d947SOsama Abboud ena_log(adapter->pdev, ERR, 1328f9e1d947SOsama Abboud "Failed to configure device %d IRQ CPU stride. " 1329f9e1d947SOsama Abboud "Reverting to previous value: %d\n", 1330f9e1d947SOsama Abboud new_num, old_num); 1331f9e1d947SOsama Abboud 1332f9e1d947SOsama Abboud adapter->irq_cpu_stride = old_num; 1333f9e1d947SOsama Abboud 1334f9e1d947SOsama Abboud rc = ena_up(adapter); 1335f9e1d947SOsama Abboud if (unlikely(rc != 0)) { 1336f9e1d947SOsama Abboud ena_log(adapter->pdev, ERR, 1337f9e1d947SOsama Abboud "Failed to revert to previous setup." 1338f9e1d947SOsama Abboud "Triggering device reset.\n"); 1339f9e1d947SOsama Abboud ENA_FLAG_SET_ATOMIC( 1340f9e1d947SOsama Abboud ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 1341f9e1d947SOsama Abboud ena_trigger_reset(adapter, 1342f9e1d947SOsama Abboud ENA_REGS_RESET_OS_TRIGGER); 1343f9e1d947SOsama Abboud } 1344f9e1d947SOsama Abboud } 1345f9e1d947SOsama Abboud } 1346f9e1d947SOsama Abboud return (rc); 1347f9e1d947SOsama Abboud } 1348f9e1d947SOsama Abboud 134956d41ad5SMarcin Wojtas /* Caller should sanitize new_num */ 135056d41ad5SMarcin Wojtas int 135156d41ad5SMarcin Wojtas ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num) 135256d41ad5SMarcin Wojtas { 135356d41ad5SMarcin Wojtas uint32_t old_num; 135456d41ad5SMarcin Wojtas int rc = 0; 135556d41ad5SMarcin Wojtas bool dev_was_up; 135656d41ad5SMarcin Wojtas 135756d41ad5SMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 135856d41ad5SMarcin Wojtas old_num = adapter->num_io_queues; 135956d41ad5SMarcin Wojtas ena_down(adapter); 136056d41ad5SMarcin Wojtas 136156d41ad5SMarcin Wojtas ena_update_io_rings(adapter, new_num); 136256d41ad5SMarcin Wojtas 136356d41ad5SMarcin Wojtas if (dev_was_up) { 136456d41ad5SMarcin Wojtas rc = ena_up(adapter); 136556d41ad5SMarcin Wojtas if (unlikely(rc != 0)) { 13663fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 136756d41ad5SMarcin Wojtas "Failed to configure device with %u IO queues. 
" 136856d41ad5SMarcin Wojtas "Reverting to previous value: %u\n", 136956d41ad5SMarcin Wojtas new_num, old_num); 137056d41ad5SMarcin Wojtas 137156d41ad5SMarcin Wojtas ena_update_io_rings(adapter, old_num); 137256d41ad5SMarcin Wojtas 137356d41ad5SMarcin Wojtas rc = ena_up(adapter); 137456d41ad5SMarcin Wojtas if (unlikely(rc != 0)) { 13753fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 137656d41ad5SMarcin Wojtas "Failed to revert to previous setup IO " 137756d41ad5SMarcin Wojtas "queues. Triggering device reset.\n"); 137856d41ad5SMarcin Wojtas ENA_FLAG_SET_ATOMIC( 137956d41ad5SMarcin Wojtas ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 138056d41ad5SMarcin Wojtas ena_trigger_reset(adapter, 138156d41ad5SMarcin Wojtas ENA_REGS_RESET_OS_TRIGGER); 138256d41ad5SMarcin Wojtas } 138356d41ad5SMarcin Wojtas } 138456d41ad5SMarcin Wojtas } 138556d41ad5SMarcin Wojtas 138656d41ad5SMarcin Wojtas return (rc); 138756d41ad5SMarcin Wojtas } 138856d41ad5SMarcin Wojtas 138956d41ad5SMarcin Wojtas static void 13909b8d05b8SZbigniew Bodek ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid) 13919b8d05b8SZbigniew Bodek { 13929b8d05b8SZbigniew Bodek struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 13939b8d05b8SZbigniew Bodek unsigned int i; 13949b8d05b8SZbigniew Bodek 13959b8d05b8SZbigniew Bodek for (i = 0; i < rx_ring->ring_size; i++) { 13969b8d05b8SZbigniew Bodek struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; 13979b8d05b8SZbigniew Bodek 13980bdffe59SMarcin Wojtas if (rx_info->mbuf != NULL) 13999b8d05b8SZbigniew Bodek ena_free_rx_mbuf(adapter, rx_ring, rx_info); 14009a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 14019a0f2079SMarcin Wojtas if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) && 14027583c633SJustin Hibbits (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) { 14039a0f2079SMarcin Wojtas if (rx_info->netmap_buf_idx != 0) 14049a0f2079SMarcin Wojtas ena_netmap_free_rx_slot(adapter, rx_ring, 14059a0f2079SMarcin Wojtas rx_info); 14069a0f2079SMarcin Wojtas } 14079a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 14089b8d05b8SZbigniew Bodek } 14099b8d05b8SZbigniew Bodek } 14109b8d05b8SZbigniew Bodek 14119b8d05b8SZbigniew Bodek /** 14129b8d05b8SZbigniew Bodek * ena_refill_all_rx_bufs - allocate all queues Rx buffers 14139b8d05b8SZbigniew Bodek * @adapter: network interface device structure 14149b8d05b8SZbigniew Bodek * 14159b8d05b8SZbigniew Bodek */ 14169b8d05b8SZbigniew Bodek static void 14179b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(struct ena_adapter *adapter) 14189b8d05b8SZbigniew Bodek { 14199b8d05b8SZbigniew Bodek struct ena_ring *rx_ring; 14209b8d05b8SZbigniew Bodek int i, rc, bufs_num; 14219b8d05b8SZbigniew Bodek 14227d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14239b8d05b8SZbigniew Bodek rx_ring = &adapter->rx_ring[i]; 14249b8d05b8SZbigniew Bodek bufs_num = rx_ring->ring_size - 1; 14259b8d05b8SZbigniew Bodek rc = ena_refill_rx_bufs(rx_ring, bufs_num); 14269b8d05b8SZbigniew Bodek if (unlikely(rc != bufs_num)) 14273fc5d816SMarcin Wojtas ena_log_io(adapter->pdev, WARN, 14283fc5d816SMarcin Wojtas "refilling Queue %d failed. 
" 142982e558eaSDawid Gorecki "Allocated %d buffers from: %d\n", 143082e558eaSDawid Gorecki i, rc, bufs_num); 14319a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 14329a0f2079SMarcin Wojtas rx_ring->initialized = true; 14339a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 14349b8d05b8SZbigniew Bodek } 14359b8d05b8SZbigniew Bodek } 14369b8d05b8SZbigniew Bodek 14379b8d05b8SZbigniew Bodek static void 14389b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(struct ena_adapter *adapter) 14399b8d05b8SZbigniew Bodek { 14409b8d05b8SZbigniew Bodek int i; 14419b8d05b8SZbigniew Bodek 14427d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) 14439b8d05b8SZbigniew Bodek ena_free_rx_bufs(adapter, i); 14449b8d05b8SZbigniew Bodek } 14459b8d05b8SZbigniew Bodek 14469b8d05b8SZbigniew Bodek /** 14479b8d05b8SZbigniew Bodek * ena_free_tx_bufs - Free Tx Buffers per Queue 14489b8d05b8SZbigniew Bodek * @adapter: network interface device structure 14499b8d05b8SZbigniew Bodek * @qid: queue index 14509b8d05b8SZbigniew Bodek **/ 14519b8d05b8SZbigniew Bodek static void 14529b8d05b8SZbigniew Bodek ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) 14539b8d05b8SZbigniew Bodek { 14544e8acd84SMarcin Wojtas bool print_once = true; 14559b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 14569b8d05b8SZbigniew Bodek 1457416e8864SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 14589b8d05b8SZbigniew Bodek for (int i = 0; i < tx_ring->ring_size; i++) { 14599b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 14609b8d05b8SZbigniew Bodek 14619b8d05b8SZbigniew Bodek if (tx_info->mbuf == NULL) 14629b8d05b8SZbigniew Bodek continue; 14639b8d05b8SZbigniew Bodek 14644e8acd84SMarcin Wojtas if (print_once) { 14653fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 146682e558eaSDawid Gorecki "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, 146782e558eaSDawid Gorecki i); 14684e8acd84SMarcin Wojtas print_once = false; 14694e8acd84SMarcin Wojtas } else { 14703fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, 147182e558eaSDawid Gorecki "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, 147282e558eaSDawid Gorecki i); 14734e8acd84SMarcin Wojtas } 14749b8d05b8SZbigniew Bodek 1475888810f0SMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, 1476e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 1477888810f0SMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); 14784fa9e02dSMarcin Wojtas 14799b8d05b8SZbigniew Bodek m_free(tx_info->mbuf); 14809b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 14819b8d05b8SZbigniew Bodek } 1482416e8864SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 14839b8d05b8SZbigniew Bodek } 14849b8d05b8SZbigniew Bodek 14859b8d05b8SZbigniew Bodek static void 14869b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(struct ena_adapter *adapter) 14879b8d05b8SZbigniew Bodek { 14887d8c4feeSMarcin Wojtas for (int i = 0; i < adapter->num_io_queues; i++) 14899b8d05b8SZbigniew Bodek ena_free_tx_bufs(adapter, i); 14909b8d05b8SZbigniew Bodek } 14919b8d05b8SZbigniew Bodek 14929b8d05b8SZbigniew Bodek static void 14939b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(struct ena_adapter *adapter) 14949b8d05b8SZbigniew Bodek { 14959b8d05b8SZbigniew Bodek uint16_t ena_qid; 14969b8d05b8SZbigniew Bodek int i; 14979b8d05b8SZbigniew Bodek 14987d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14999b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 15009b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 15019b8d05b8SZbigniew Bodek } 
15029b8d05b8SZbigniew Bodek } 15039b8d05b8SZbigniew Bodek 15049b8d05b8SZbigniew Bodek static void 15059b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(struct ena_adapter *adapter) 15069b8d05b8SZbigniew Bodek { 15079b8d05b8SZbigniew Bodek uint16_t ena_qid; 15089b8d05b8SZbigniew Bodek int i; 15099b8d05b8SZbigniew Bodek 15107d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15119b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 15129b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 15139b8d05b8SZbigniew Bodek } 15149b8d05b8SZbigniew Bodek } 15159b8d05b8SZbigniew Bodek 15169b8d05b8SZbigniew Bodek static void 15179b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(struct ena_adapter *adapter) 15189b8d05b8SZbigniew Bodek { 15195cb9db07SMarcin Wojtas struct ena_que *queue; 15205cb9db07SMarcin Wojtas int i; 15215cb9db07SMarcin Wojtas 15227d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15235cb9db07SMarcin Wojtas queue = &adapter->que[i]; 152482e558eaSDawid Gorecki while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL)) 152582e558eaSDawid Gorecki taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task); 15265cb9db07SMarcin Wojtas taskqueue_free(queue->cleanup_tq); 15275cb9db07SMarcin Wojtas } 15285cb9db07SMarcin Wojtas 15299b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(adapter); 15309b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(adapter); 15319b8d05b8SZbigniew Bodek } 15329b8d05b8SZbigniew Bodek 15339b8d05b8SZbigniew Bodek static int 15349b8d05b8SZbigniew Bodek ena_create_io_queues(struct ena_adapter *adapter) 15359b8d05b8SZbigniew Bodek { 15369b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 15379b8d05b8SZbigniew Bodek struct ena_com_create_io_ctx ctx; 15389b8d05b8SZbigniew Bodek struct ena_ring *ring; 15395cb9db07SMarcin Wojtas struct ena_que *queue; 15409b8d05b8SZbigniew Bodek uint16_t ena_qid; 15419b8d05b8SZbigniew Bodek uint32_t msix_vector; 15426d1ef2abSArtur Rojek cpuset_t *cpu_mask = NULL; 15439b8d05b8SZbigniew Bodek int rc, i; 15449b8d05b8SZbigniew Bodek 15459b8d05b8SZbigniew Bodek /* Create TX queues */ 15467d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15479b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 15489b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 15499b8d05b8SZbigniew Bodek ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 15509b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 15519762a033SMarcin Wojtas ctx.queue_size = adapter->requested_tx_ring_size; 15529b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 15539b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 1554eb4c4f4aSMarcin Wojtas ctx.numa_node = adapter->que[i].domain; 1555eb4c4f4aSMarcin Wojtas 15569b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 15570bdffe59SMarcin Wojtas if (rc != 0) { 15583fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 15599b8d05b8SZbigniew Bodek "Failed to create io TX queue #%d rc: %d\n", i, rc); 15609b8d05b8SZbigniew Bodek goto err_tx; 15619b8d05b8SZbigniew Bodek } 15629b8d05b8SZbigniew Bodek ring = &adapter->tx_ring[i]; 15639b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 156482e558eaSDawid Gorecki &ring->ena_com_io_sq, &ring->ena_com_io_cq); 15650bdffe59SMarcin Wojtas if (rc != 0) { 15663fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 15679b8d05b8SZbigniew Bodek "Failed to get TX queue handlers. 
TX queue num" 156882e558eaSDawid Gorecki " %d rc: %d\n", 156982e558eaSDawid Gorecki i, rc); 15709b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 15719b8d05b8SZbigniew Bodek goto err_tx; 15729b8d05b8SZbigniew Bodek } 1573eb4c4f4aSMarcin Wojtas 1574eb4c4f4aSMarcin Wojtas if (ctx.numa_node >= 0) { 1575eb4c4f4aSMarcin Wojtas ena_com_update_numa_node(ring->ena_com_io_cq, 1576eb4c4f4aSMarcin Wojtas ctx.numa_node); 1577eb4c4f4aSMarcin Wojtas } 15789b8d05b8SZbigniew Bodek } 15799b8d05b8SZbigniew Bodek 15809b8d05b8SZbigniew Bodek /* Create RX queues */ 15817d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15829b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 15839b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 15849b8d05b8SZbigniew Bodek ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 15859b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 15869762a033SMarcin Wojtas ctx.queue_size = adapter->requested_rx_ring_size; 15879b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 15889b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 1589eb4c4f4aSMarcin Wojtas ctx.numa_node = adapter->que[i].domain; 1590eb4c4f4aSMarcin Wojtas 15919b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 15923f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 15933fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 15949b8d05b8SZbigniew Bodek "Failed to create io RX queue[%d] rc: %d\n", i, rc); 15959b8d05b8SZbigniew Bodek goto err_rx; 15969b8d05b8SZbigniew Bodek } 15979b8d05b8SZbigniew Bodek 15989b8d05b8SZbigniew Bodek ring = &adapter->rx_ring[i]; 15999b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 160082e558eaSDawid Gorecki &ring->ena_com_io_sq, &ring->ena_com_io_cq); 16013f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 16023fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 16039b8d05b8SZbigniew Bodek "Failed to get RX queue handlers. 
RX queue num" 160482e558eaSDawid Gorecki " %d rc: %d\n", 160582e558eaSDawid Gorecki i, rc); 16069b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 16079b8d05b8SZbigniew Bodek goto err_rx; 16089b8d05b8SZbigniew Bodek } 1609eb4c4f4aSMarcin Wojtas 1610eb4c4f4aSMarcin Wojtas if (ctx.numa_node >= 0) { 1611eb4c4f4aSMarcin Wojtas ena_com_update_numa_node(ring->ena_com_io_cq, 1612eb4c4f4aSMarcin Wojtas ctx.numa_node); 1613eb4c4f4aSMarcin Wojtas } 16149b8d05b8SZbigniew Bodek } 16159b8d05b8SZbigniew Bodek 16167d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 16175cb9db07SMarcin Wojtas queue = &adapter->que[i]; 16185cb9db07SMarcin Wojtas 16196c3e93cbSGleb Smirnoff NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 16205cb9db07SMarcin Wojtas queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 16215cb9db07SMarcin Wojtas M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 16225cb9db07SMarcin Wojtas 16236d1ef2abSArtur Rojek #ifdef RSS 16246d1ef2abSArtur Rojek cpu_mask = &queue->cpu_mask; 16256d1ef2abSArtur Rojek #endif 16266d1ef2abSArtur Rojek taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET, 162782e558eaSDawid Gorecki cpu_mask, "%s queue %d cleanup", 16285cb9db07SMarcin Wojtas device_get_nameunit(adapter->pdev), i); 16295cb9db07SMarcin Wojtas } 16305cb9db07SMarcin Wojtas 16319b8d05b8SZbigniew Bodek return (0); 16329b8d05b8SZbigniew Bodek 16339b8d05b8SZbigniew Bodek err_rx: 16349b8d05b8SZbigniew Bodek while (i--) 16359b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 16367d8c4feeSMarcin Wojtas i = adapter->num_io_queues; 16379b8d05b8SZbigniew Bodek err_tx: 16389b8d05b8SZbigniew Bodek while (i--) 16399b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 16409b8d05b8SZbigniew Bodek 16419b8d05b8SZbigniew Bodek return (ENXIO); 16429b8d05b8SZbigniew Bodek } 16439b8d05b8SZbigniew Bodek 16449b8d05b8SZbigniew Bodek /********************************************************************* 16459b8d05b8SZbigniew Bodek * 16469b8d05b8SZbigniew Bodek * MSIX & Interrupt Service routine 16479b8d05b8SZbigniew Bodek * 16489b8d05b8SZbigniew Bodek **********************************************************************/ 16499b8d05b8SZbigniew Bodek 16509b8d05b8SZbigniew Bodek /** 16519b8d05b8SZbigniew Bodek * ena_handle_msix - MSIX Interrupt Handler for admin/async queue 16529b8d05b8SZbigniew Bodek * @arg: interrupt number 16539b8d05b8SZbigniew Bodek **/ 16549b8d05b8SZbigniew Bodek static void 16559b8d05b8SZbigniew Bodek ena_intr_msix_mgmnt(void *arg) 16569b8d05b8SZbigniew Bodek { 16579b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 16589b8d05b8SZbigniew Bodek 16599b8d05b8SZbigniew Bodek ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 1660fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))) 16619b8d05b8SZbigniew Bodek ena_com_aenq_intr_handler(adapter->ena_dev, arg); 16629b8d05b8SZbigniew Bodek } 16639b8d05b8SZbigniew Bodek 16645cb9db07SMarcin Wojtas /** 16655cb9db07SMarcin Wojtas * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx 16665cb9db07SMarcin Wojtas * @arg: queue 16675cb9db07SMarcin Wojtas **/ 16685cb9db07SMarcin Wojtas static int 16695cb9db07SMarcin Wojtas ena_handle_msix(void *arg) 16705cb9db07SMarcin Wojtas { 16715cb9db07SMarcin Wojtas struct ena_que *queue = arg; 16725cb9db07SMarcin Wojtas struct ena_adapter *adapter = queue->adapter; 16735cb9db07SMarcin Wojtas if_t ifp = adapter->ifp; 16745cb9db07SMarcin Wojtas 16755cb9db07SMarcin 
Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 16765cb9db07SMarcin Wojtas return (FILTER_STRAY); 16775cb9db07SMarcin Wojtas 16785cb9db07SMarcin Wojtas taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); 16795cb9db07SMarcin Wojtas 16805cb9db07SMarcin Wojtas return (FILTER_HANDLED); 16815cb9db07SMarcin Wojtas } 16825cb9db07SMarcin Wojtas 16839b8d05b8SZbigniew Bodek static int 16849b8d05b8SZbigniew Bodek ena_enable_msix(struct ena_adapter *adapter) 16859b8d05b8SZbigniew Bodek { 16869b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 16878805021aSMarcin Wojtas int msix_vecs, msix_req; 16888805021aSMarcin Wojtas int i, rc = 0; 16899b8d05b8SZbigniew Bodek 1690fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 16913fc5d816SMarcin Wojtas ena_log(dev, ERR, "Error, MSI-X is already enabled\n"); 1692fd43fd2aSMarcin Wojtas return (EINVAL); 1693fd43fd2aSMarcin Wojtas } 1694fd43fd2aSMarcin Wojtas 16959b8d05b8SZbigniew Bodek /* Reserved the max msix vectors we might need */ 16967d8c4feeSMarcin Wojtas msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); 16979b8d05b8SZbigniew Bodek 1698cd5d5804SMarcin Wojtas adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1699cd5d5804SMarcin Wojtas M_DEVBUF, M_WAITOK | M_ZERO); 1700cd5d5804SMarcin Wojtas 170182e558eaSDawid Gorecki ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); 17029b8d05b8SZbigniew Bodek 17039b8d05b8SZbigniew Bodek for (i = 0; i < msix_vecs; i++) { 17049b8d05b8SZbigniew Bodek adapter->msix_entries[i].entry = i; 17059b8d05b8SZbigniew Bodek /* Vectors must start from 1 */ 17069b8d05b8SZbigniew Bodek adapter->msix_entries[i].vector = i + 1; 17079b8d05b8SZbigniew Bodek } 17089b8d05b8SZbigniew Bodek 17098805021aSMarcin Wojtas msix_req = msix_vecs; 17109b8d05b8SZbigniew Bodek rc = pci_alloc_msix(dev, &msix_vecs); 17113f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 171282e558eaSDawid Gorecki ena_log(dev, ERR, "Failed to enable MSIX, vectors %d rc %d\n", 171382e558eaSDawid Gorecki msix_vecs, rc); 17147d2544e6SMarcin Wojtas 17159b8d05b8SZbigniew Bodek rc = ENOSPC; 17167d2544e6SMarcin Wojtas goto err_msix_free; 17179b8d05b8SZbigniew Bodek } 17189b8d05b8SZbigniew Bodek 17198805021aSMarcin Wojtas if (msix_vecs != msix_req) { 17202b5b60feSMarcin Wojtas if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 17213fc5d816SMarcin Wojtas ena_log(dev, ERR, 17222b5b60feSMarcin Wojtas "Not enough number of MSI-x allocated: %d\n", 17232b5b60feSMarcin Wojtas msix_vecs); 17242b5b60feSMarcin Wojtas pci_release_msi(dev); 17252b5b60feSMarcin Wojtas rc = ENOSPC; 17262b5b60feSMarcin Wojtas goto err_msix_free; 17272b5b60feSMarcin Wojtas } 172882e558eaSDawid Gorecki ena_log(dev, ERR, 172982e558eaSDawid Gorecki "Enable only %d MSI-x (out of %d), reduce " 173082e558eaSDawid Gorecki "the number of queues\n", 173182e558eaSDawid Gorecki msix_vecs, msix_req); 17328805021aSMarcin Wojtas } 17338805021aSMarcin Wojtas 17349b8d05b8SZbigniew Bodek adapter->msix_vecs = msix_vecs; 1735fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 17369b8d05b8SZbigniew Bodek 17377d2544e6SMarcin Wojtas return (0); 17387d2544e6SMarcin Wojtas 17397d2544e6SMarcin Wojtas err_msix_free: 17407d2544e6SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 17417d2544e6SMarcin Wojtas adapter->msix_entries = NULL; 17427d2544e6SMarcin Wojtas 17439b8d05b8SZbigniew Bodek return (rc); 17449b8d05b8SZbigniew Bodek } 17459b8d05b8SZbigniew Bodek 17469b8d05b8SZbigniew Bodek static void 17479b8d05b8SZbigniew Bodek 
ena_setup_mgmnt_intr(struct ena_adapter *adapter) 17489b8d05b8SZbigniew Bodek { 174982e558eaSDawid Gorecki snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE, 175082e558eaSDawid Gorecki "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev)); 17519b8d05b8SZbigniew Bodek /* 17529b8d05b8SZbigniew Bodek * Handler is NULL on purpose, it will be set 17539b8d05b8SZbigniew Bodek * when mgmnt interrupt is acquired 17549b8d05b8SZbigniew Bodek */ 17559b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 17569b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 17579b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 17589b8d05b8SZbigniew Bodek adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; 17599b8d05b8SZbigniew Bodek } 17609b8d05b8SZbigniew Bodek 176177958fcdSMarcin Wojtas static int 17629b8d05b8SZbigniew Bodek ena_setup_io_intr(struct ena_adapter *adapter) 17639b8d05b8SZbigniew Bodek { 17646d1ef2abSArtur Rojek #ifdef RSS 17656d1ef2abSArtur Rojek int num_buckets = rss_getnumbuckets(); 17666d1ef2abSArtur Rojek static int last_bind = 0; 1767eb4c4f4aSMarcin Wojtas int cur_bind; 1768eb4c4f4aSMarcin Wojtas int idx; 17696d1ef2abSArtur Rojek #endif 17709b8d05b8SZbigniew Bodek int irq_idx; 17719b8d05b8SZbigniew Bodek 177277958fcdSMarcin Wojtas if (adapter->msix_entries == NULL) 177377958fcdSMarcin Wojtas return (EINVAL); 177477958fcdSMarcin Wojtas 1775eb4c4f4aSMarcin Wojtas #ifdef RSS 1776eb4c4f4aSMarcin Wojtas if (adapter->first_bind < 0) { 1777eb4c4f4aSMarcin Wojtas adapter->first_bind = last_bind; 1778eb4c4f4aSMarcin Wojtas last_bind = (last_bind + adapter->num_io_queues) % num_buckets; 1779eb4c4f4aSMarcin Wojtas } 1780eb4c4f4aSMarcin Wojtas cur_bind = adapter->first_bind; 1781eb4c4f4aSMarcin Wojtas #endif 1782eb4c4f4aSMarcin Wojtas 17837d8c4feeSMarcin Wojtas for (int i = 0; i < adapter->num_io_queues; i++) { 17849b8d05b8SZbigniew Bodek irq_idx = ENA_IO_IRQ_IDX(i); 17859b8d05b8SZbigniew Bodek 17869b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 17879b8d05b8SZbigniew Bodek "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); 17889b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].handler = ena_handle_msix; 17899b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].data = &adapter->que[i]; 17909b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].vector = 17919b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector; 17923fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n", 17939b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector); 1794277f11c4SMarcin Wojtas 1795f9e1d947SOsama Abboud if (adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) { 1796f9e1d947SOsama Abboud adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 1797f9e1d947SOsama Abboud (unsigned)(adapter->irq_cpu_base + 1798f9e1d947SOsama Abboud i * adapter->irq_cpu_stride) % (unsigned)mp_ncpus; 1799f9e1d947SOsama Abboud CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask); 1800f9e1d947SOsama Abboud } 1801f9e1d947SOsama Abboud 18026d1ef2abSArtur Rojek #ifdef RSS 18039b8d05b8SZbigniew Bodek adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 1804eb4c4f4aSMarcin Wojtas rss_getcpu(cur_bind); 1805eb4c4f4aSMarcin Wojtas cur_bind = (cur_bind + 1) % num_buckets; 18066d1ef2abSArtur Rojek CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask); 1807eb4c4f4aSMarcin Wojtas 1808eb4c4f4aSMarcin Wojtas for (idx = 0; idx < MAXMEMDOM; ++idx) { 1809eb4c4f4aSMarcin Wojtas if (CPU_ISSET(adapter->que[i].cpu, 
&cpuset_domain[idx])) 1810eb4c4f4aSMarcin Wojtas break; 1811eb4c4f4aSMarcin Wojtas } 1812eb4c4f4aSMarcin Wojtas adapter->que[i].domain = idx; 1813eb4c4f4aSMarcin Wojtas #else 1814eb4c4f4aSMarcin Wojtas adapter->que[i].domain = -1; 18156d1ef2abSArtur Rojek #endif 18169b8d05b8SZbigniew Bodek } 181777958fcdSMarcin Wojtas 181877958fcdSMarcin Wojtas return (0); 18199b8d05b8SZbigniew Bodek } 18209b8d05b8SZbigniew Bodek 18219b8d05b8SZbigniew Bodek static int 18229b8d05b8SZbigniew Bodek ena_request_mgmnt_irq(struct ena_adapter *adapter) 18239b8d05b8SZbigniew Bodek { 18243fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 18259b8d05b8SZbigniew Bodek struct ena_irq *irq; 18269b8d05b8SZbigniew Bodek unsigned long flags; 18279b8d05b8SZbigniew Bodek int rc, rcc; 18289b8d05b8SZbigniew Bodek 18299b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 18309b8d05b8SZbigniew Bodek 18319b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 18329b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 18339b8d05b8SZbigniew Bodek &irq->vector, flags); 18349b8d05b8SZbigniew Bodek 18353f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 18363fc5d816SMarcin Wojtas ena_log(pdev, ERR, "could not allocate irq vector: %d\n", 18373fc5d816SMarcin Wojtas irq->vector); 18387d2544e6SMarcin Wojtas return (ENXIO); 18399b8d05b8SZbigniew Bodek } 18409b8d05b8SZbigniew Bodek 18410bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 184282e558eaSDawid Gorecki INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data, 184382e558eaSDawid Gorecki &irq->cookie); 18443f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 184582e558eaSDawid Gorecki ena_log(pdev, ERR, 184682e558eaSDawid Gorecki "failed to register interrupt handler for irq %ju: %d\n", 18479b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 18487d2544e6SMarcin Wojtas goto err_res_free; 18499b8d05b8SZbigniew Bodek } 18509b8d05b8SZbigniew Bodek irq->requested = true; 18519b8d05b8SZbigniew Bodek 18529b8d05b8SZbigniew Bodek return (rc); 18539b8d05b8SZbigniew Bodek 18547d2544e6SMarcin Wojtas err_res_free: 18553fc5d816SMarcin Wojtas ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector); 185682e558eaSDawid Gorecki rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, 185782e558eaSDawid Gorecki irq->res); 18583f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 185982e558eaSDawid Gorecki ena_log(pdev, ERR, 186082e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 186182e558eaSDawid Gorecki irq->vector); 18629b8d05b8SZbigniew Bodek irq->res = NULL; 18639b8d05b8SZbigniew Bodek 18649b8d05b8SZbigniew Bodek return (rc); 18659b8d05b8SZbigniew Bodek } 18669b8d05b8SZbigniew Bodek 18679b8d05b8SZbigniew Bodek static int 18689b8d05b8SZbigniew Bodek ena_request_io_irq(struct ena_adapter *adapter) 18699b8d05b8SZbigniew Bodek { 18703fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 18719b8d05b8SZbigniew Bodek struct ena_irq *irq; 18729b8d05b8SZbigniew Bodek unsigned long flags = 0; 18739b8d05b8SZbigniew Bodek int rc = 0, i, rcc; 18749b8d05b8SZbigniew Bodek 1875fd43fd2aSMarcin Wojtas if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) { 18763fc5d816SMarcin Wojtas ena_log(pdev, ERR, 18774e8acd84SMarcin Wojtas "failed to request I/O IRQ: MSI-X is not enabled\n"); 18789b8d05b8SZbigniew Bodek return (EINVAL); 18799b8d05b8SZbigniew Bodek } else { 18809b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 18819b8d05b8SZbigniew Bodek } 18829b8d05b8SZbigniew Bodek 
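/* Each I/O vector from ENA_IO_IRQ_FIRST_IDX up to msix_vecs - 1 gets its bus resource allocated and its handler installed; when RSS is enabled or an explicit IRQ CPU base was configured, the vector is also bound to the CPU chosen in ena_setup_io_intr(). Vectors acquired before a failure are unwound under the err: label below. */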
18839b8d05b8SZbigniew Bodek for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 18849b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 18859b8d05b8SZbigniew Bodek 18863f9ed7abSMarcin Wojtas if (unlikely(irq->requested)) 18879b8d05b8SZbigniew Bodek continue; 18889b8d05b8SZbigniew Bodek 18899b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 18909b8d05b8SZbigniew Bodek &irq->vector, flags); 18913f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 1892469a8407SMarcin Wojtas rc = ENOMEM; 189382e558eaSDawid Gorecki ena_log(pdev, ERR, 189482e558eaSDawid Gorecki "could not allocate irq vector: %d\n", irq->vector); 18959b8d05b8SZbigniew Bodek goto err; 18969b8d05b8SZbigniew Bodek } 18979b8d05b8SZbigniew Bodek 18980bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 189982e558eaSDawid Gorecki INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data, 190082e558eaSDawid Gorecki &irq->cookie); 19013f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 190282e558eaSDawid Gorecki ena_log(pdev, ERR, 190382e558eaSDawid Gorecki "failed to register interrupt handler for irq %ju: %d\n", 19049b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 19059b8d05b8SZbigniew Bodek goto err; 19069b8d05b8SZbigniew Bodek } 19079b8d05b8SZbigniew Bodek irq->requested = true; 19086d1ef2abSArtur Rojek 1909f9e1d947SOsama Abboud if (adapter->rss_enabled || adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) { 19106d1ef2abSArtur Rojek rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu); 19116d1ef2abSArtur Rojek if (unlikely(rc != 0)) { 191282e558eaSDawid Gorecki ena_log(pdev, ERR, 191382e558eaSDawid Gorecki "failed to bind interrupt handler for irq %ju to cpu %d: %d\n", 19146d1ef2abSArtur Rojek rman_get_start(irq->res), irq->cpu, rc); 19156d1ef2abSArtur Rojek goto err; 19166d1ef2abSArtur Rojek } 19176d1ef2abSArtur Rojek 19186d1ef2abSArtur Rojek ena_log(pdev, INFO, "queue %d - cpu %d\n", 19196d1ef2abSArtur Rojek i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); 19209b8d05b8SZbigniew Bodek } 1921f9e1d947SOsama Abboud } 19229b8d05b8SZbigniew Bodek return (rc); 19239b8d05b8SZbigniew Bodek 19249b8d05b8SZbigniew Bodek err: 19259b8d05b8SZbigniew Bodek 19269b8d05b8SZbigniew Bodek for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { 19279b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 19289b8d05b8SZbigniew Bodek rcc = 0; 19299b8d05b8SZbigniew Bodek 19309b8d05b8SZbigniew Bodek /* Once we entered err: section and irq->requested is true we 19319b8d05b8SZbigniew Bodek free both intr and resources */ 1932f9e1d947SOsama Abboud if (irq->requested) { 193382e558eaSDawid Gorecki rcc = bus_teardown_intr(adapter->pdev, irq->res, 193482e558eaSDawid Gorecki irq->cookie); 19353f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 193682e558eaSDawid Gorecki ena_log(pdev, ERR, 193782e558eaSDawid Gorecki "could not release irq: %d, error: %d\n", 19383fc5d816SMarcin Wojtas irq->vector, rcc); 1939f9e1d947SOsama Abboud } 19409b8d05b8SZbigniew Bodek 1941eb3f25b4SGordon Bergling /* If we entered err: section without irq->requested set we know 19429b8d05b8SZbigniew Bodek it was bus_alloc_resource_any() that needs cleanup, provided 19439b8d05b8SZbigniew Bodek res is not NULL. 
In case res is NULL no work in needed in 19449b8d05b8SZbigniew Bodek this iteration */ 19459b8d05b8SZbigniew Bodek rcc = 0; 19469b8d05b8SZbigniew Bodek if (irq->res != NULL) { 19479b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 19489b8d05b8SZbigniew Bodek irq->vector, irq->res); 19499b8d05b8SZbigniew Bodek } 19503f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 195182e558eaSDawid Gorecki ena_log(pdev, ERR, 195282e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 195382e558eaSDawid Gorecki irq->vector); 19549b8d05b8SZbigniew Bodek irq->requested = false; 19559b8d05b8SZbigniew Bodek irq->res = NULL; 19569b8d05b8SZbigniew Bodek } 19579b8d05b8SZbigniew Bodek 19589b8d05b8SZbigniew Bodek return (rc); 19599b8d05b8SZbigniew Bodek } 19609b8d05b8SZbigniew Bodek 19619b8d05b8SZbigniew Bodek static void 19629b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(struct ena_adapter *adapter) 19639b8d05b8SZbigniew Bodek { 19643fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 19659b8d05b8SZbigniew Bodek struct ena_irq *irq; 19669b8d05b8SZbigniew Bodek int rc; 19679b8d05b8SZbigniew Bodek 19689b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 19699b8d05b8SZbigniew Bodek if (irq->requested) { 19703fc5d816SMarcin Wojtas ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 19719b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 19723f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 19733fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to tear down irq: %d\n", 19743fc5d816SMarcin Wojtas irq->vector); 19759b8d05b8SZbigniew Bodek irq->requested = 0; 19769b8d05b8SZbigniew Bodek } 19779b8d05b8SZbigniew Bodek 19789b8d05b8SZbigniew Bodek if (irq->res != NULL) { 19793fc5d816SMarcin Wojtas ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector); 19809b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 19819b8d05b8SZbigniew Bodek irq->vector, irq->res); 19829b8d05b8SZbigniew Bodek irq->res = NULL; 19833f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 198482e558eaSDawid Gorecki ena_log(pdev, ERR, 198582e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 198682e558eaSDawid Gorecki irq->vector); 19879b8d05b8SZbigniew Bodek } 19889b8d05b8SZbigniew Bodek } 19899b8d05b8SZbigniew Bodek 19909b8d05b8SZbigniew Bodek static void 19919b8d05b8SZbigniew Bodek ena_free_io_irq(struct ena_adapter *adapter) 19929b8d05b8SZbigniew Bodek { 19933fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 19949b8d05b8SZbigniew Bodek struct ena_irq *irq; 19959b8d05b8SZbigniew Bodek int rc; 19969b8d05b8SZbigniew Bodek 19979b8d05b8SZbigniew Bodek for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 19989b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 19999b8d05b8SZbigniew Bodek if (irq->requested) { 20003fc5d816SMarcin Wojtas ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 20019b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, 20029b8d05b8SZbigniew Bodek irq->cookie); 20033f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 200482e558eaSDawid Gorecki ena_log(pdev, ERR, 200582e558eaSDawid Gorecki "failed to tear down irq: %d\n", 20063fc5d816SMarcin Wojtas irq->vector); 20079b8d05b8SZbigniew Bodek } 20089b8d05b8SZbigniew Bodek irq->requested = 0; 20099b8d05b8SZbigniew Bodek } 20109b8d05b8SZbigniew Bodek 20119b8d05b8SZbigniew Bodek if (irq->res != NULL) { 20123fc5d816SMarcin Wojtas ena_log(pdev, DBG, "release resource irq: %d\n", 20139b8d05b8SZbigniew Bodek irq->vector); 
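/* The handler, if one was installed, has already been torn down above; only the IRQ resource itself is left to hand back to the bus. */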
20149b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 20159b8d05b8SZbigniew Bodek irq->vector, irq->res); 20169b8d05b8SZbigniew Bodek irq->res = NULL; 20173f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 201882e558eaSDawid Gorecki ena_log(pdev, ERR, 201982e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 20209b8d05b8SZbigniew Bodek irq->vector); 20219b8d05b8SZbigniew Bodek } 20229b8d05b8SZbigniew Bodek } 20239b8d05b8SZbigniew Bodek } 20249b8d05b8SZbigniew Bodek } 20259b8d05b8SZbigniew Bodek 20269b8d05b8SZbigniew Bodek static void 20279b8d05b8SZbigniew Bodek ena_free_irqs(struct ena_adapter *adapter) 20289b8d05b8SZbigniew Bodek { 20299b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 20309b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(adapter); 20319b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 20329b8d05b8SZbigniew Bodek } 20339b8d05b8SZbigniew Bodek 20349b8d05b8SZbigniew Bodek static void 20359b8d05b8SZbigniew Bodek ena_disable_msix(struct ena_adapter *adapter) 20369b8d05b8SZbigniew Bodek { 2037fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 2038fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 20399b8d05b8SZbigniew Bodek pci_release_msi(adapter->pdev); 2040fd43fd2aSMarcin Wojtas } 20419b8d05b8SZbigniew Bodek 20429b8d05b8SZbigniew Bodek adapter->msix_vecs = 0; 2043cd5d5804SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 20449b8d05b8SZbigniew Bodek adapter->msix_entries = NULL; 20459b8d05b8SZbigniew Bodek } 20469b8d05b8SZbigniew Bodek 20479b8d05b8SZbigniew Bodek static void 20489b8d05b8SZbigniew Bodek ena_unmask_all_io_irqs(struct ena_adapter *adapter) 20499b8d05b8SZbigniew Bodek { 20509b8d05b8SZbigniew Bodek struct ena_com_io_cq *io_cq; 20519b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 2052223c8cb1SArtur Rojek struct ena_ring *tx_ring; 20539b8d05b8SZbigniew Bodek uint16_t ena_qid; 20549b8d05b8SZbigniew Bodek int i; 20559b8d05b8SZbigniew Bodek 20569b8d05b8SZbigniew Bodek /* Unmask interrupts for all queues */ 20577d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 20589b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 20599b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 206072e34ebdSOsama Abboud ena_com_update_intr_reg(&intr_reg, 0, 0, true, false); 2061223c8cb1SArtur Rojek tx_ring = &adapter->tx_ring[i]; 2062223c8cb1SArtur Rojek counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1); 20639b8d05b8SZbigniew Bodek ena_com_unmask_intr(io_cq, &intr_reg); 20649b8d05b8SZbigniew Bodek } 20659b8d05b8SZbigniew Bodek } 20669b8d05b8SZbigniew Bodek 20679b8d05b8SZbigniew Bodek static int 20689b8d05b8SZbigniew Bodek ena_up_complete(struct ena_adapter *adapter) 20699b8d05b8SZbigniew Bodek { 20709b8d05b8SZbigniew Bodek int rc; 20719b8d05b8SZbigniew Bodek 2072fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { 20739b8d05b8SZbigniew Bodek rc = ena_rss_configure(adapter); 207456d41ad5SMarcin Wojtas if (rc != 0) { 20753fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 207656d41ad5SMarcin Wojtas "Failed to configure RSS\n"); 20779b8d05b8SZbigniew Bodek return (rc); 20789b8d05b8SZbigniew Bodek } 207956d41ad5SMarcin Wojtas } 20809b8d05b8SZbigniew Bodek 20817583c633SJustin Hibbits rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp)); 20823f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 20837d2544e6SMarcin Wojtas return (rc); 20847d2544e6SMarcin Wojtas 20859b8d05b8SZbigniew Bodek 
ena_refill_all_rx_bufs(adapter); 208630217e2dSMarcin Wojtas ena_reset_counters((counter_u64_t *)&adapter->hw_stats, 208730217e2dSMarcin Wojtas sizeof(adapter->hw_stats)); 20889b8d05b8SZbigniew Bodek 20899b8d05b8SZbigniew Bodek return (0); 20909b8d05b8SZbigniew Bodek } 20919b8d05b8SZbigniew Bodek 20929762a033SMarcin Wojtas static void 209382e558eaSDawid Gorecki set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size) 20949762a033SMarcin Wojtas { 20959762a033SMarcin Wojtas int i; 20969762a033SMarcin Wojtas 20979762a033SMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 20989762a033SMarcin Wojtas adapter->tx_ring[i].ring_size = new_tx_size; 20999762a033SMarcin Wojtas adapter->rx_ring[i].ring_size = new_rx_size; 21009762a033SMarcin Wojtas } 21019762a033SMarcin Wojtas } 21029762a033SMarcin Wojtas 21039762a033SMarcin Wojtas static int 21049762a033SMarcin Wojtas create_queues_with_size_backoff(struct ena_adapter *adapter) 21059762a033SMarcin Wojtas { 21063fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 21079762a033SMarcin Wojtas int rc; 21089762a033SMarcin Wojtas uint32_t cur_rx_ring_size, cur_tx_ring_size; 21099762a033SMarcin Wojtas uint32_t new_rx_ring_size, new_tx_ring_size; 21109762a033SMarcin Wojtas 21119762a033SMarcin Wojtas /* 21129762a033SMarcin Wojtas * Current queue sizes might be set to smaller than the requested 21139762a033SMarcin Wojtas * ones due to past queue allocation failures. 21149762a033SMarcin Wojtas */ 21159762a033SMarcin Wojtas set_io_rings_size(adapter, adapter->requested_tx_ring_size, 21169762a033SMarcin Wojtas adapter->requested_rx_ring_size); 21179762a033SMarcin Wojtas 21189762a033SMarcin Wojtas while (1) { 21199762a033SMarcin Wojtas /* Allocate transmit descriptors */ 21209762a033SMarcin Wojtas rc = ena_setup_all_tx_resources(adapter); 21219762a033SMarcin Wojtas if (unlikely(rc != 0)) { 21223fc5d816SMarcin Wojtas ena_log(pdev, ERR, "err_setup_tx\n"); 21239762a033SMarcin Wojtas goto err_setup_tx; 21249762a033SMarcin Wojtas } 21259762a033SMarcin Wojtas 21269762a033SMarcin Wojtas /* Allocate receive descriptors */ 21279762a033SMarcin Wojtas rc = ena_setup_all_rx_resources(adapter); 21289762a033SMarcin Wojtas if (unlikely(rc != 0)) { 21293fc5d816SMarcin Wojtas ena_log(pdev, ERR, "err_setup_rx\n"); 21309762a033SMarcin Wojtas goto err_setup_rx; 21319762a033SMarcin Wojtas } 21329762a033SMarcin Wojtas 21339762a033SMarcin Wojtas /* Create IO queues for Rx & Tx */ 21349762a033SMarcin Wojtas rc = ena_create_io_queues(adapter); 21359762a033SMarcin Wojtas if (unlikely(rc != 0)) { 213682e558eaSDawid Gorecki ena_log(pdev, ERR, "create IO queues failed\n"); 21379762a033SMarcin Wojtas goto err_io_que; 21389762a033SMarcin Wojtas } 21399762a033SMarcin Wojtas 21409762a033SMarcin Wojtas return (0); 21419762a033SMarcin Wojtas 21429762a033SMarcin Wojtas err_io_que: 21439762a033SMarcin Wojtas ena_free_all_rx_resources(adapter); 21449762a033SMarcin Wojtas err_setup_rx: 21459762a033SMarcin Wojtas ena_free_all_tx_resources(adapter); 21469762a033SMarcin Wojtas err_setup_tx: 21479762a033SMarcin Wojtas /* 21489762a033SMarcin Wojtas * Lower the ring size if ENOMEM. Otherwise, return the 21499762a033SMarcin Wojtas * error straightaway. 
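* ENOMEM here means the descriptor rings could not be backed with memory at their current sizes, so the larger of the two rings (or both, when equal) is halved below and queue creation is retried, giving up once either ring would drop under ENA_MIN_RING_SIZE.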
21509762a033SMarcin Wojtas */ 21519762a033SMarcin Wojtas if (unlikely(rc != ENOMEM)) { 21523fc5d816SMarcin Wojtas ena_log(pdev, ERR, 21539762a033SMarcin Wojtas "Queue creation failed with error code: %d\n", rc); 21549762a033SMarcin Wojtas return (rc); 21559762a033SMarcin Wojtas } 21569762a033SMarcin Wojtas 21579762a033SMarcin Wojtas cur_tx_ring_size = adapter->tx_ring[0].ring_size; 21589762a033SMarcin Wojtas cur_rx_ring_size = adapter->rx_ring[0].ring_size; 21599762a033SMarcin Wojtas 21603fc5d816SMarcin Wojtas ena_log(pdev, ERR, 21619762a033SMarcin Wojtas "Not enough memory to create queues with sizes TX=%d, RX=%d\n", 21629762a033SMarcin Wojtas cur_tx_ring_size, cur_rx_ring_size); 21639762a033SMarcin Wojtas 21649762a033SMarcin Wojtas new_tx_ring_size = cur_tx_ring_size; 21659762a033SMarcin Wojtas new_rx_ring_size = cur_rx_ring_size; 21669762a033SMarcin Wojtas 21679762a033SMarcin Wojtas /* 216882e558eaSDawid Gorecki * Decrease the size of a larger queue, or decrease both if they 216982e558eaSDawid Gorecki * are the same size. 21709762a033SMarcin Wojtas */ 21719762a033SMarcin Wojtas if (cur_rx_ring_size <= cur_tx_ring_size) 21729762a033SMarcin Wojtas new_tx_ring_size = cur_tx_ring_size / 2; 21739762a033SMarcin Wojtas if (cur_rx_ring_size >= cur_tx_ring_size) 21749762a033SMarcin Wojtas new_rx_ring_size = cur_rx_ring_size / 2; 21759762a033SMarcin Wojtas 21769762a033SMarcin Wojtas if (new_tx_ring_size < ENA_MIN_RING_SIZE || 21779762a033SMarcin Wojtas new_rx_ring_size < ENA_MIN_RING_SIZE) { 21783fc5d816SMarcin Wojtas ena_log(pdev, ERR, 21799762a033SMarcin Wojtas "Queue creation failed with the smallest possible queue size" 21809762a033SMarcin Wojtas "of %d for both queues. Not retrying with smaller queues\n", 21819762a033SMarcin Wojtas ENA_MIN_RING_SIZE); 21829762a033SMarcin Wojtas return (rc); 21839762a033SMarcin Wojtas } 21849762a033SMarcin Wojtas 218577160654SArtur Rojek ena_log(pdev, INFO, 218677160654SArtur Rojek "Retrying queue creation with sizes TX=%d, RX=%d\n", 218777160654SArtur Rojek new_tx_ring_size, new_rx_ring_size); 218877160654SArtur Rojek 21899762a033SMarcin Wojtas set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size); 21909762a033SMarcin Wojtas } 21919762a033SMarcin Wojtas } 21929762a033SMarcin Wojtas 219338c7b965SMarcin Wojtas int 21949b8d05b8SZbigniew Bodek ena_up(struct ena_adapter *adapter) 21959b8d05b8SZbigniew Bodek { 21969b8d05b8SZbigniew Bodek int rc = 0; 21979b8d05b8SZbigniew Bodek 219807aff471SArtur Rojek ENA_LOCK_ASSERT(); 2199cb98c439SArtur Rojek 22003f9ed7abSMarcin Wojtas if (unlikely(device_is_attached(adapter->pdev) == 0)) { 22013fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "device is not attached!\n"); 22029b8d05b8SZbigniew Bodek return (ENXIO); 22039b8d05b8SZbigniew Bodek } 22049b8d05b8SZbigniew Bodek 2205579d23aaSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2206579d23aaSMarcin Wojtas return (0); 2207579d23aaSMarcin Wojtas 22083fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "device is going UP\n"); 22099b8d05b8SZbigniew Bodek 22109b8d05b8SZbigniew Bodek /* setup interrupts for IO queues */ 221177958fcdSMarcin Wojtas rc = ena_setup_io_intr(adapter); 221277958fcdSMarcin Wojtas if (unlikely(rc != 0)) { 22133fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n"); 221477958fcdSMarcin Wojtas goto error; 221577958fcdSMarcin Wojtas } 22169b8d05b8SZbigniew Bodek rc = ena_request_io_irq(adapter); 22173f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 22183fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 
"err_req_irq\n"); 221977958fcdSMarcin Wojtas goto error; 22209b8d05b8SZbigniew Bodek } 22219b8d05b8SZbigniew Bodek 22223fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, 222382e558eaSDawid Gorecki "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, LLQ is %s\n", 22247d8c4feeSMarcin Wojtas adapter->num_io_queues, 22259762a033SMarcin Wojtas adapter->requested_rx_ring_size, 22269762a033SMarcin Wojtas adapter->requested_tx_ring_size, 22279762a033SMarcin Wojtas (adapter->ena_dev->tx_mem_queue_type == 22289762a033SMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED"); 22297d8c4feeSMarcin Wojtas 22309762a033SMarcin Wojtas rc = create_queues_with_size_backoff(adapter); 22313f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 22323fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 22339762a033SMarcin Wojtas "error creating queues with size backoff\n"); 22349762a033SMarcin Wojtas goto err_create_queues_with_backoff; 22359b8d05b8SZbigniew Bodek } 22369b8d05b8SZbigniew Bodek 2237fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 22389b8d05b8SZbigniew Bodek if_link_state_change(adapter->ifp, LINK_STATE_UP); 22399b8d05b8SZbigniew Bodek 22409b8d05b8SZbigniew Bodek rc = ena_up_complete(adapter); 22413f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 22429b8d05b8SZbigniew Bodek goto err_up_complete; 22439b8d05b8SZbigniew Bodek 22449b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_up, 1); 22459b8d05b8SZbigniew Bodek 22469b8d05b8SZbigniew Bodek ena_update_hwassist(adapter); 22479b8d05b8SZbigniew Bodek 224882e558eaSDawid Gorecki if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 22499b8d05b8SZbigniew Bodek 2250fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); 225193471047SZbigniew Bodek 225293471047SZbigniew Bodek ena_unmask_all_io_irqs(adapter); 22539b8d05b8SZbigniew Bodek 22549b8d05b8SZbigniew Bodek return (0); 22559b8d05b8SZbigniew Bodek 22569b8d05b8SZbigniew Bodek err_up_complete: 22579b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 22589b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 22599b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 22609762a033SMarcin Wojtas err_create_queues_with_backoff: 22619b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 226277958fcdSMarcin Wojtas error: 22639b8d05b8SZbigniew Bodek return (rc); 22649b8d05b8SZbigniew Bodek } 22659b8d05b8SZbigniew Bodek 22669b8d05b8SZbigniew Bodek static uint64_t 22679b8d05b8SZbigniew Bodek ena_get_counter(if_t ifp, ift_counter cnt) 22689b8d05b8SZbigniew Bodek { 22699b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 22709b8d05b8SZbigniew Bodek struct ena_hw_stats *stats; 22719b8d05b8SZbigniew Bodek 22729b8d05b8SZbigniew Bodek adapter = if_getsoftc(ifp); 22739b8d05b8SZbigniew Bodek stats = &adapter->hw_stats; 22749b8d05b8SZbigniew Bodek 22759b8d05b8SZbigniew Bodek switch (cnt) { 22769b8d05b8SZbigniew Bodek case IFCOUNTER_IPACKETS: 227730217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_packets)); 22789b8d05b8SZbigniew Bodek case IFCOUNTER_OPACKETS: 227930217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_packets)); 22809b8d05b8SZbigniew Bodek case IFCOUNTER_IBYTES: 228130217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_bytes)); 22829b8d05b8SZbigniew Bodek case IFCOUNTER_OBYTES: 228330217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_bytes)); 22849b8d05b8SZbigniew Bodek case IFCOUNTER_IQDROPS: 228530217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_drops)); 22866c84cec3SMarcin Wojtas case 
IFCOUNTER_OQDROPS: 22876c84cec3SMarcin Wojtas return (counter_u64_fetch(stats->tx_drops)); 22889b8d05b8SZbigniew Bodek default: 22899b8d05b8SZbigniew Bodek return (if_get_counter_default(ifp, cnt)); 22909b8d05b8SZbigniew Bodek } 22919b8d05b8SZbigniew Bodek } 22929b8d05b8SZbigniew Bodek 22939b8d05b8SZbigniew Bodek static int 22949b8d05b8SZbigniew Bodek ena_media_change(if_t ifp) 22959b8d05b8SZbigniew Bodek { 22969b8d05b8SZbigniew Bodek /* Media Change is not supported by firmware */ 22979b8d05b8SZbigniew Bodek return (0); 22989b8d05b8SZbigniew Bodek } 22999b8d05b8SZbigniew Bodek 23009b8d05b8SZbigniew Bodek static void 23019b8d05b8SZbigniew Bodek ena_media_status(if_t ifp, struct ifmediareq *ifmr) 23029b8d05b8SZbigniew Bodek { 23039b8d05b8SZbigniew Bodek struct ena_adapter *adapter = if_getsoftc(ifp); 23043fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, "Media status update\n"); 23059b8d05b8SZbigniew Bodek 230607aff471SArtur Rojek ENA_LOCK_LOCK(); 23079b8d05b8SZbigniew Bodek 23089b8d05b8SZbigniew Bodek ifmr->ifm_status = IFM_AVALID; 23099b8d05b8SZbigniew Bodek ifmr->ifm_active = IFM_ETHER; 23109b8d05b8SZbigniew Bodek 2311fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) { 231207aff471SArtur Rojek ENA_LOCK_UNLOCK(); 23133fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "Link is down\n"); 23149b8d05b8SZbigniew Bodek return; 23159b8d05b8SZbigniew Bodek } 23169b8d05b8SZbigniew Bodek 23179b8d05b8SZbigniew Bodek ifmr->ifm_status |= IFM_ACTIVE; 2318b8ca5dbeSMarcin Wojtas ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX; 23199b8d05b8SZbigniew Bodek 232007aff471SArtur Rojek ENA_LOCK_UNLOCK(); 23219b8d05b8SZbigniew Bodek } 23229b8d05b8SZbigniew Bodek 23239b8d05b8SZbigniew Bodek static void 23249b8d05b8SZbigniew Bodek ena_init(void *arg) 23259b8d05b8SZbigniew Bodek { 23269b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 23279b8d05b8SZbigniew Bodek 2328fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { 232907aff471SArtur Rojek ENA_LOCK_LOCK(); 23309b8d05b8SZbigniew Bodek ena_up(adapter); 233107aff471SArtur Rojek ENA_LOCK_UNLOCK(); 23323d3a90f9SZbigniew Bodek } 23339b8d05b8SZbigniew Bodek } 23349b8d05b8SZbigniew Bodek 23359b8d05b8SZbigniew Bodek static int 23369b8d05b8SZbigniew Bodek ena_ioctl(if_t ifp, u_long command, caddr_t data) 23379b8d05b8SZbigniew Bodek { 23389b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 23399b8d05b8SZbigniew Bodek struct ifreq *ifr; 23409b8d05b8SZbigniew Bodek int rc; 23419b8d05b8SZbigniew Bodek 23427583c633SJustin Hibbits adapter = if_getsoftc(ifp); 23439b8d05b8SZbigniew Bodek ifr = (struct ifreq *)data; 23449b8d05b8SZbigniew Bodek 23459b8d05b8SZbigniew Bodek /* 23469b8d05b8SZbigniew Bodek * Acquiring lock to prevent from running up and down routines parallel. 
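 * The same global ENA_LOCK is also taken by ena_init() and ena_media_status(),
 * so MTU, interface-flag and capability changes cannot race with them.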
23479b8d05b8SZbigniew Bodek */ 23489b8d05b8SZbigniew Bodek rc = 0; 23499b8d05b8SZbigniew Bodek switch (command) { 23509b8d05b8SZbigniew Bodek case SIOCSIFMTU: 23517583c633SJustin Hibbits if (if_getmtu(ifp) == ifr->ifr_mtu) 2352dbf2eb54SMarcin Wojtas break; 235307aff471SArtur Rojek ENA_LOCK_LOCK(); 23549b8d05b8SZbigniew Bodek ena_down(adapter); 23559b8d05b8SZbigniew Bodek 23569b8d05b8SZbigniew Bodek ena_change_mtu(ifp, ifr->ifr_mtu); 23579b8d05b8SZbigniew Bodek 23589b8d05b8SZbigniew Bodek rc = ena_up(adapter); 235907aff471SArtur Rojek ENA_LOCK_UNLOCK(); 23609b8d05b8SZbigniew Bodek break; 23619b8d05b8SZbigniew Bodek 23629b8d05b8SZbigniew Bodek case SIOCSIFFLAGS: 23637583c633SJustin Hibbits if ((if_getflags(ifp) & IFF_UP) != 0) { 23640bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 23657583c633SJustin Hibbits if ((if_getflags(ifp) & (IFF_PROMISC | 23667583c633SJustin Hibbits IFF_ALLMULTI)) != 0) { 23673fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, 23689b8d05b8SZbigniew Bodek "ioctl promisc/allmulti\n"); 23699b8d05b8SZbigniew Bodek } 23709b8d05b8SZbigniew Bodek } else { 237107aff471SArtur Rojek ENA_LOCK_LOCK(); 23729b8d05b8SZbigniew Bodek rc = ena_up(adapter); 237307aff471SArtur Rojek ENA_LOCK_UNLOCK(); 23749b8d05b8SZbigniew Bodek } 23759b8d05b8SZbigniew Bodek } else { 23760bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 237707aff471SArtur Rojek ENA_LOCK_LOCK(); 23789b8d05b8SZbigniew Bodek ena_down(adapter); 237907aff471SArtur Rojek ENA_LOCK_UNLOCK(); 2380e67c6554SZbigniew Bodek } 23819b8d05b8SZbigniew Bodek } 23829b8d05b8SZbigniew Bodek break; 23839b8d05b8SZbigniew Bodek 23849b8d05b8SZbigniew Bodek case SIOCADDMULTI: 23859b8d05b8SZbigniew Bodek case SIOCDELMULTI: 23869b8d05b8SZbigniew Bodek break; 23879b8d05b8SZbigniew Bodek 23889b8d05b8SZbigniew Bodek case SIOCSIFMEDIA: 23899b8d05b8SZbigniew Bodek case SIOCGIFMEDIA: 23909b8d05b8SZbigniew Bodek rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 23919b8d05b8SZbigniew Bodek break; 23929b8d05b8SZbigniew Bodek 23939b8d05b8SZbigniew Bodek case SIOCSIFCAP: 23949b8d05b8SZbigniew Bodek { 23959b8d05b8SZbigniew Bodek int reinit = 0; 23969b8d05b8SZbigniew Bodek 23977583c633SJustin Hibbits if (ifr->ifr_reqcap != if_getcapenable(ifp)) { 23987583c633SJustin Hibbits if_setcapenable(ifp, ifr->ifr_reqcap); 23999b8d05b8SZbigniew Bodek reinit = 1; 24009b8d05b8SZbigniew Bodek } 24019b8d05b8SZbigniew Bodek 24020bdffe59SMarcin Wojtas if ((reinit != 0) && 24030bdffe59SMarcin Wojtas ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { 240407aff471SArtur Rojek ENA_LOCK_LOCK(); 24059b8d05b8SZbigniew Bodek ena_down(adapter); 24069b8d05b8SZbigniew Bodek rc = ena_up(adapter); 240707aff471SArtur Rojek ENA_LOCK_UNLOCK(); 24089b8d05b8SZbigniew Bodek } 24099b8d05b8SZbigniew Bodek } 24109b8d05b8SZbigniew Bodek 24119b8d05b8SZbigniew Bodek break; 24129b8d05b8SZbigniew Bodek default: 24139b8d05b8SZbigniew Bodek rc = ether_ioctl(ifp, command, data); 24149b8d05b8SZbigniew Bodek break; 24159b8d05b8SZbigniew Bodek } 24169b8d05b8SZbigniew Bodek 24179b8d05b8SZbigniew Bodek return (rc); 24189b8d05b8SZbigniew Bodek } 24199b8d05b8SZbigniew Bodek 24209b8d05b8SZbigniew Bodek static int 24219b8d05b8SZbigniew Bodek ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) 24229b8d05b8SZbigniew Bodek { 24239b8d05b8SZbigniew Bodek int caps = 0; 24249b8d05b8SZbigniew Bodek 24250bdffe59SMarcin Wojtas if ((feat->offload.tx & 24269b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 24279b8d05b8SZbigniew 
Bodek ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | 24280bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) 24299b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM; 24309b8d05b8SZbigniew Bodek 24310bdffe59SMarcin Wojtas if ((feat->offload.tx & 24329b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | 24330bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) 24349b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM_IPV6; 24359b8d05b8SZbigniew Bodek 243682e558eaSDawid Gorecki if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) 24379b8d05b8SZbigniew Bodek caps |= IFCAP_TSO4; 24389b8d05b8SZbigniew Bodek 243982e558eaSDawid Gorecki if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) 24409b8d05b8SZbigniew Bodek caps |= IFCAP_TSO6; 24419b8d05b8SZbigniew Bodek 24420bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 24439b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | 24440bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) 24459b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM; 24469b8d05b8SZbigniew Bodek 24470bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 24480bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) 24499b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM_IPV6; 24509b8d05b8SZbigniew Bodek 24519b8d05b8SZbigniew Bodek caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; 24529b8d05b8SZbigniew Bodek 24539b8d05b8SZbigniew Bodek return (caps); 24549b8d05b8SZbigniew Bodek } 24559b8d05b8SZbigniew Bodek 24569b8d05b8SZbigniew Bodek static void 24579b8d05b8SZbigniew Bodek ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) 24589b8d05b8SZbigniew Bodek { 245982e558eaSDawid Gorecki host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp); 24609b8d05b8SZbigniew Bodek } 24619b8d05b8SZbigniew Bodek 24629b8d05b8SZbigniew Bodek static void 24639b8d05b8SZbigniew Bodek ena_update_hwassist(struct ena_adapter *adapter) 24649b8d05b8SZbigniew Bodek { 24659b8d05b8SZbigniew Bodek if_t ifp = adapter->ifp; 24669b8d05b8SZbigniew Bodek uint32_t feat = adapter->tx_offload_cap; 24679b8d05b8SZbigniew Bodek int cap = if_getcapenable(ifp); 24689b8d05b8SZbigniew Bodek int flags = 0; 24699b8d05b8SZbigniew Bodek 24709b8d05b8SZbigniew Bodek if_clearhwassist(ifp); 24719b8d05b8SZbigniew Bodek 24720bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM) != 0) { 24730bdffe59SMarcin Wojtas if ((feat & 24740bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) 24759b8d05b8SZbigniew Bodek flags |= CSUM_IP; 24760bdffe59SMarcin Wojtas if ((feat & 24779b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 24780bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) 24799b8d05b8SZbigniew Bodek flags |= CSUM_IP_UDP | CSUM_IP_TCP; 24809b8d05b8SZbigniew Bodek } 24819b8d05b8SZbigniew Bodek 24820bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM_IPV6) != 0) 24839b8d05b8SZbigniew Bodek flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; 24849b8d05b8SZbigniew Bodek 24850bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO4) != 0) 24869b8d05b8SZbigniew Bodek flags |= CSUM_IP_TSO; 24879b8d05b8SZbigniew Bodek 24880bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO6) != 0) 24899b8d05b8SZbigniew Bodek flags |= CSUM_IP6_TSO; 24909b8d05b8SZbigniew Bodek 24919b8d05b8SZbigniew Bodek if_sethwassistbits(ifp, flags, 0); 24929b8d05b8SZbigniew Bodek } 
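/*
 * Illustrative example: with IFCAP_TXCSUM and IFCAP_TSO4 enabled and the
 * device reporting full IPv4 L3/L4 checksum offload, the mask built above is
 * CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_TSO.
 * A capability change from userland reaches this path roughly as follows
 * (hypothetical snippet using <sys/sockio.h> and <net/if.h>, error handling
 * omitted):
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr = { 0 };
 *	strlcpy(ifr.ifr_name, "ena0", sizeof(ifr.ifr_name));
 *	ioctl(s, SIOCGIFCAP, &ifr);			// read current/possible caps
 *	ifr.ifr_reqcap = ifr.ifr_curcap & ~IFCAP_TXCSUM;	// request a change
 *	ioctl(s, SIOCSIFCAP, &ifr);			// lands in ena_ioctl()
 *
 * When the requested capabilities differ and the interface is running,
 * ena_ioctl() brings it down and back up, and ena_up() re-runs
 * ena_update_hwassist() with the new capability set.
 */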
24939b8d05b8SZbigniew Bodek 2494aa386085SZhenlei Huang static void 24959b8d05b8SZbigniew Bodek ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, 24969b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *feat) 24979b8d05b8SZbigniew Bodek { 24989b8d05b8SZbigniew Bodek if_t ifp; 24999b8d05b8SZbigniew Bodek int caps = 0; 25009b8d05b8SZbigniew Bodek 25019b8d05b8SZbigniew Bodek ifp = adapter->ifp = if_gethandle(IFT_ETHER); 25029b8d05b8SZbigniew Bodek if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); 25039b8d05b8SZbigniew Bodek if_setdev(ifp, pdev); 25049b8d05b8SZbigniew Bodek if_setsoftc(ifp, adapter); 25059b8d05b8SZbigniew Bodek 2506a6b55ee6SGleb Smirnoff if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 25079b8d05b8SZbigniew Bodek if_setinitfn(ifp, ena_init); 25089b8d05b8SZbigniew Bodek if_settransmitfn(ifp, ena_mq_start); 25099b8d05b8SZbigniew Bodek if_setqflushfn(ifp, ena_qflush); 25109b8d05b8SZbigniew Bodek if_setioctlfn(ifp, ena_ioctl); 25119b8d05b8SZbigniew Bodek if_setgetcounterfn(ifp, ena_get_counter); 25129b8d05b8SZbigniew Bodek 25139762a033SMarcin Wojtas if_setsendqlen(ifp, adapter->requested_tx_ring_size); 25149b8d05b8SZbigniew Bodek if_setsendqready(ifp); 25159b8d05b8SZbigniew Bodek if_setmtu(ifp, ETHERMTU); 25169b8d05b8SZbigniew Bodek if_setbaudrate(ifp, 0); 25179b8d05b8SZbigniew Bodek /* Zeroize capabilities... */ 25189b8d05b8SZbigniew Bodek if_setcapabilities(ifp, 0); 25199b8d05b8SZbigniew Bodek if_setcapenable(ifp, 0); 25209b8d05b8SZbigniew Bodek /* check hardware support */ 25219b8d05b8SZbigniew Bodek caps = ena_get_dev_offloads(feat); 25229b8d05b8SZbigniew Bodek /* ... and set them */ 25239b8d05b8SZbigniew Bodek if_setcapabilitiesbit(ifp, caps, 0); 25249b8d05b8SZbigniew Bodek 25259b8d05b8SZbigniew Bodek /* TSO parameters */ 25267583c633SJustin Hibbits if_sethwtsomax(ifp, ENA_TSO_MAXSIZE - 25277583c633SJustin Hibbits (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); 25287583c633SJustin Hibbits if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1); 25297583c633SJustin Hibbits if_sethwtsomaxsegsize(ifp, ENA_TSO_MAXSIZE); 25309b8d05b8SZbigniew Bodek 25319b8d05b8SZbigniew Bodek if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 25329b8d05b8SZbigniew Bodek if_setcapenable(ifp, if_getcapabilities(ifp)); 25339b8d05b8SZbigniew Bodek 25349b8d05b8SZbigniew Bodek /* 25359b8d05b8SZbigniew Bodek * Specify the media types supported by this adapter and register 25369b8d05b8SZbigniew Bodek * callbacks to update media and link information 25379b8d05b8SZbigniew Bodek */ 253882e558eaSDawid Gorecki ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change, 253982e558eaSDawid Gorecki ena_media_status); 25409b8d05b8SZbigniew Bodek ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 25419b8d05b8SZbigniew Bodek ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 25429b8d05b8SZbigniew Bodek 25439b8d05b8SZbigniew Bodek ether_ifattach(ifp, adapter->mac_addr); 25449b8d05b8SZbigniew Bodek } 25459b8d05b8SZbigniew Bodek 254638c7b965SMarcin Wojtas void 25479b8d05b8SZbigniew Bodek ena_down(struct ena_adapter *adapter) 25489b8d05b8SZbigniew Bodek { 2549a195fab0SMarcin Wojtas int rc; 25509b8d05b8SZbigniew Bodek 255107aff471SArtur Rojek ENA_LOCK_ASSERT(); 2552cb98c439SArtur Rojek 2553579d23aaSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2554579d23aaSMarcin Wojtas return; 2555579d23aaSMarcin Wojtas 255678554d0cSDawid Gorecki ena_log(adapter->pdev, INFO, "device is going DOWN\n"); 25579b8d05b8SZbigniew Bodek 2558fd43fd2aSMarcin Wojtas 
ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); 255982e558eaSDawid Gorecki if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 25609b8d05b8SZbigniew Bodek 25619b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 25629b8d05b8SZbigniew Bodek 2563fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { 256482e558eaSDawid Gorecki rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 25653f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 256682e558eaSDawid Gorecki ena_log(adapter->pdev, ERR, "Device reset failed\n"); 2567a195fab0SMarcin Wojtas } 2568a195fab0SMarcin Wojtas 25699b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 25709b8d05b8SZbigniew Bodek 25719b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(adapter); 25729b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(adapter); 25739b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 25749b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 25759b8d05b8SZbigniew Bodek 25769b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_down, 1); 25779b8d05b8SZbigniew Bodek } 25789b8d05b8SZbigniew Bodek 25797d8c4feeSMarcin Wojtas static uint32_t 25807d8c4feeSMarcin Wojtas ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev, 25819b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx) 25829b8d05b8SZbigniew Bodek { 25837d8c4feeSMarcin Wojtas uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 25849b8d05b8SZbigniew Bodek 25856064f289SMarcin Wojtas /* Regular queues capabilities */ 25866064f289SMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 25876064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 25886064f289SMarcin Wojtas &get_feat_ctx->max_queue_ext.max_queue_ext; 25894fa9e02dSMarcin Wojtas io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, 25904fa9e02dSMarcin Wojtas max_queue_ext->max_rx_cq_num); 25916064f289SMarcin Wojtas 25924fa9e02dSMarcin Wojtas io_tx_sq_num = max_queue_ext->max_tx_sq_num; 25934fa9e02dSMarcin Wojtas io_tx_cq_num = max_queue_ext->max_tx_cq_num; 25946064f289SMarcin Wojtas } else { 25956064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 25966064f289SMarcin Wojtas &get_feat_ctx->max_queues; 25974fa9e02dSMarcin Wojtas io_tx_sq_num = max_queues->max_sq_num; 25984fa9e02dSMarcin Wojtas io_tx_cq_num = max_queues->max_cq_num; 25994fa9e02dSMarcin Wojtas io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); 26006064f289SMarcin Wojtas } 26019b8d05b8SZbigniew Bodek 26024fa9e02dSMarcin Wojtas /* In case of LLQ use the llq fields for the tx SQ/CQ */ 26034fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 26044fa9e02dSMarcin Wojtas io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 26054fa9e02dSMarcin Wojtas 26067d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); 26077d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num); 26087d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num); 26097d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num); 2610609e6f6dSGordon Bergling /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */ 26117d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, 26127d8c4feeSMarcin Wojtas pci_msix_count(pdev) - 1); 26136d1ef2abSArtur Rojek #ifdef RSS 26146d1ef2abSArtur Rojek max_num_io_queues = min_t(uint32_t, max_num_io_queues, 
26156d1ef2abSArtur Rojek rss_getnumbuckets()); 26166d1ef2abSArtur Rojek #endif 26179b8d05b8SZbigniew Bodek 26187d8c4feeSMarcin Wojtas return (max_num_io_queues); 26199b8d05b8SZbigniew Bodek } 26209b8d05b8SZbigniew Bodek 26210bdffe59SMarcin Wojtas static int 26223fc5d816SMarcin Wojtas ena_enable_wc(device_t pdev, struct resource *res) 26234fa9e02dSMarcin Wojtas { 2624472d4784SMarcin Wojtas #if defined(__i386) || defined(__amd64) || defined(__aarch64__) 26254fa9e02dSMarcin Wojtas vm_offset_t va; 26264fa9e02dSMarcin Wojtas vm_size_t len; 26274fa9e02dSMarcin Wojtas int rc; 26284fa9e02dSMarcin Wojtas 26294fa9e02dSMarcin Wojtas va = (vm_offset_t)rman_get_virtual(res); 26304fa9e02dSMarcin Wojtas len = rman_get_size(res); 26314fa9e02dSMarcin Wojtas /* Enable write combining */ 2632472d4784SMarcin Wojtas rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING); 26334fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 26343fc5d816SMarcin Wojtas ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc); 26354fa9e02dSMarcin Wojtas return (rc); 26364fa9e02dSMarcin Wojtas } 26374fa9e02dSMarcin Wojtas 26384fa9e02dSMarcin Wojtas return (0); 26394fa9e02dSMarcin Wojtas #endif 26404fa9e02dSMarcin Wojtas return (EOPNOTSUPP); 26414fa9e02dSMarcin Wojtas } 26424fa9e02dSMarcin Wojtas 26434fa9e02dSMarcin Wojtas static int 26444fa9e02dSMarcin Wojtas ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, 26454fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq, 26464fa9e02dSMarcin Wojtas struct ena_llq_configurations *llq_default_configurations) 26474fa9e02dSMarcin Wojtas { 264890232d18SDawid Gorecki int rc; 26494fa9e02dSMarcin Wojtas uint32_t llq_feature_mask; 26504fa9e02dSMarcin Wojtas 26514fa9e02dSMarcin Wojtas llq_feature_mask = 1 << ENA_ADMIN_LLQ; 26524fa9e02dSMarcin Wojtas if (!(ena_dev->supported_features & llq_feature_mask)) { 26533fc5d816SMarcin Wojtas ena_log(pdev, WARN, 26544fa9e02dSMarcin Wojtas "LLQ is not supported. Fallback to host mode policy.\n"); 26554fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 26564fa9e02dSMarcin Wojtas return (0); 26574fa9e02dSMarcin Wojtas } 26584fa9e02dSMarcin Wojtas 265990232d18SDawid Gorecki if (ena_dev->mem_bar == NULL) { 266090232d18SDawid Gorecki ena_log(pdev, WARN, 266190232d18SDawid Gorecki "LLQ is advertised as supported but device doesn't expose mem bar.\n"); 266290232d18SDawid Gorecki ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 266390232d18SDawid Gorecki return (0); 266490232d18SDawid Gorecki } 266590232d18SDawid Gorecki 26664fa9e02dSMarcin Wojtas rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 26674fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 266882e558eaSDawid Gorecki ena_log(pdev, WARN, 266982e558eaSDawid Gorecki "Failed to configure the device mode. 
" 26704fa9e02dSMarcin Wojtas "Fallback to host mode policy.\n"); 26714fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 267290232d18SDawid Gorecki } 267390232d18SDawid Gorecki 26744fa9e02dSMarcin Wojtas return (0); 26754fa9e02dSMarcin Wojtas } 26764fa9e02dSMarcin Wojtas 267790232d18SDawid Gorecki static int 267890232d18SDawid Gorecki ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev) 267990232d18SDawid Gorecki { 268090232d18SDawid Gorecki struct ena_adapter *adapter = device_get_softc(pdev); 268190232d18SDawid Gorecki int rc, rid; 26824fa9e02dSMarcin Wojtas 26834fa9e02dSMarcin Wojtas /* Try to allocate resources for LLQ bar */ 26844fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_MEM_BAR); 268582e558eaSDawid Gorecki adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, 268682e558eaSDawid Gorecki RF_ACTIVE); 26874fa9e02dSMarcin Wojtas if (unlikely(adapter->memory == NULL)) { 268882e558eaSDawid Gorecki ena_log(pdev, WARN, 26893324e304SMichal Krawczyk "Unable to allocate LLQ bar resource. LLQ mode won't be used.\n"); 26904fa9e02dSMarcin Wojtas return (0); 26914fa9e02dSMarcin Wojtas } 26924fa9e02dSMarcin Wojtas 26934fa9e02dSMarcin Wojtas /* Enable write combining for better LLQ performance */ 26943fc5d816SMarcin Wojtas rc = ena_enable_wc(adapter->pdev, adapter->memory); 26954fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 26963fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to enable write combining.\n"); 26974fa9e02dSMarcin Wojtas return (rc); 26984fa9e02dSMarcin Wojtas } 26994fa9e02dSMarcin Wojtas 27004fa9e02dSMarcin Wojtas /* 27014fa9e02dSMarcin Wojtas * Save virtual address of the device's memory region 27024fa9e02dSMarcin Wojtas * for the ena_com layer. 27034fa9e02dSMarcin Wojtas */ 27044fa9e02dSMarcin Wojtas ena_dev->mem_bar = rman_get_virtual(adapter->memory); 27054fa9e02dSMarcin Wojtas 27064fa9e02dSMarcin Wojtas return (0); 27074fa9e02dSMarcin Wojtas } 27084fa9e02dSMarcin Wojtas 270982e558eaSDawid Gorecki static inline void 271082e558eaSDawid Gorecki set_default_llq_configurations(struct ena_llq_configurations *llq_config, 2711beaadec9SMarcin Wojtas struct ena_admin_feature_llq_desc *llq) 27124fa9e02dSMarcin Wojtas { 27134fa9e02dSMarcin Wojtas llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 27144fa9e02dSMarcin Wojtas llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 27154fa9e02dSMarcin Wojtas llq_config->llq_num_decs_before_header = 27164fa9e02dSMarcin Wojtas ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 271782e558eaSDawid Gorecki if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 271882e558eaSDawid Gorecki 0 && ena_force_large_llq_header) { 2719beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size = 2720beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_256B; 2721beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size_value = 256; 2722beaadec9SMarcin Wojtas } else { 2723beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size = 2724beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_128B; 27254fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size_value = 128; 27264fa9e02dSMarcin Wojtas } 2727beaadec9SMarcin Wojtas } 27284fa9e02dSMarcin Wojtas 27294fa9e02dSMarcin Wojtas static int 27307d8c4feeSMarcin Wojtas ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) 27319b8d05b8SZbigniew Bodek { 27324fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 27334fa9e02dSMarcin Wojtas struct ena_com_dev *ena_dev = ctx->ena_dev; 27346064f289SMarcin Wojtas uint32_t 
tx_queue_size = ENA_DEFAULT_RING_SIZE; 27357d8c4feeSMarcin Wojtas uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE; 27367d8c4feeSMarcin Wojtas uint32_t max_tx_queue_size; 27377d8c4feeSMarcin Wojtas uint32_t max_rx_queue_size; 27389b8d05b8SZbigniew Bodek 27394fa9e02dSMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 27406064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 27416064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 27427d8c4feeSMarcin Wojtas max_rx_queue_size = min_t(uint32_t, 27437d8c4feeSMarcin Wojtas max_queue_ext->max_rx_cq_depth, 27446064f289SMarcin Wojtas max_queue_ext->max_rx_sq_depth); 27457d8c4feeSMarcin Wojtas max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 27464fa9e02dSMarcin Wojtas 27474fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 27484fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 27497d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 27504fa9e02dSMarcin Wojtas llq->max_llq_depth); 27514fa9e02dSMarcin Wojtas else 27527d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 27536064f289SMarcin Wojtas max_queue_ext->max_tx_sq_depth); 27544fa9e02dSMarcin Wojtas 27556064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 27566064f289SMarcin Wojtas max_queue_ext->max_per_packet_tx_descs); 27577d8c4feeSMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 27587d8c4feeSMarcin Wojtas max_queue_ext->max_per_packet_rx_descs); 27596064f289SMarcin Wojtas } else { 27606064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 27616064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queues; 276282e558eaSDawid Gorecki max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth, 27636064f289SMarcin Wojtas max_queues->max_sq_depth); 27647d8c4feeSMarcin Wojtas max_tx_queue_size = max_queues->max_cq_depth; 27654fa9e02dSMarcin Wojtas 27664fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 27674fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 27687d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 27694fa9e02dSMarcin Wojtas llq->max_llq_depth); 27704fa9e02dSMarcin Wojtas else 27717d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 27724fa9e02dSMarcin Wojtas max_queues->max_sq_depth); 27734fa9e02dSMarcin Wojtas 27746064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 27757d8c4feeSMarcin Wojtas max_queues->max_packet_tx_descs); 27767d8c4feeSMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 27776064f289SMarcin Wojtas max_queues->max_packet_rx_descs); 27786064f289SMarcin Wojtas } 27799b8d05b8SZbigniew Bodek 27809b8d05b8SZbigniew Bodek /* round down to the nearest power of 2 */ 27817d8c4feeSMarcin Wojtas max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1); 27827d8c4feeSMarcin Wojtas max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1); 27836064f289SMarcin Wojtas 2784beaadec9SMarcin Wojtas /* 2785beaadec9SMarcin Wojtas * When forcing large headers, we multiply the entry size by 2, 2786beaadec9SMarcin Wojtas * and therefore divide the queue size by 2, leaving the amount 2787beaadec9SMarcin Wojtas * of memory used by the queues unchanged. 
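 * For example, a 1024-entry Tx ring with 128 B LLQ entries and a 512-entry
 * ring with 256 B entries both consume 128 KiB of LLQ memory. Whether large
 * headers are forced is controlled by ena_force_large_llq_header (typically
 * exposed as the hw.ena.force_large_llq_header tunable).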
2788beaadec9SMarcin Wojtas */ 2789beaadec9SMarcin Wojtas if (ena_force_large_llq_header) { 2790beaadec9SMarcin Wojtas if ((llq->entry_size_ctrl_supported & 2791beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 && 2792beaadec9SMarcin Wojtas ena_dev->tx_mem_queue_type == 2793beaadec9SMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2794beaadec9SMarcin Wojtas max_tx_queue_size /= 2; 27953fc5d816SMarcin Wojtas ena_log(ctx->pdev, INFO, 2796beaadec9SMarcin Wojtas "Forcing large headers and decreasing maximum Tx queue size to %d\n", 2797beaadec9SMarcin Wojtas max_tx_queue_size); 2798beaadec9SMarcin Wojtas } else { 27993fc5d816SMarcin Wojtas ena_log(ctx->pdev, WARN, 2800beaadec9SMarcin Wojtas "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 2801beaadec9SMarcin Wojtas } 2802beaadec9SMarcin Wojtas } 2803beaadec9SMarcin Wojtas 28047d8c4feeSMarcin Wojtas tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, 28057d8c4feeSMarcin Wojtas max_tx_queue_size); 28067d8c4feeSMarcin Wojtas rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, 28077d8c4feeSMarcin Wojtas max_rx_queue_size); 28089b8d05b8SZbigniew Bodek 28097d8c4feeSMarcin Wojtas tx_queue_size = 1 << (flsl(tx_queue_size) - 1); 28107d8c4feeSMarcin Wojtas rx_queue_size = 1 << (flsl(rx_queue_size) - 1); 28117d8c4feeSMarcin Wojtas 28127d8c4feeSMarcin Wojtas ctx->max_tx_queue_size = max_tx_queue_size; 28137d8c4feeSMarcin Wojtas ctx->max_rx_queue_size = max_rx_queue_size; 28146064f289SMarcin Wojtas ctx->tx_queue_size = tx_queue_size; 28157d8c4feeSMarcin Wojtas ctx->rx_queue_size = rx_queue_size; 28166064f289SMarcin Wojtas 28176064f289SMarcin Wojtas return (0); 28189b8d05b8SZbigniew Bodek } 28199b8d05b8SZbigniew Bodek 28200bdffe59SMarcin Wojtas static void 282146021271SMarcin Wojtas ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) 28229b8d05b8SZbigniew Bodek { 28239b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info; 282446021271SMarcin Wojtas uintptr_t rid; 28259b8d05b8SZbigniew Bodek int rc; 28269b8d05b8SZbigniew Bodek 28279b8d05b8SZbigniew Bodek /* Allocate only the host info */ 28289b8d05b8SZbigniew Bodek rc = ena_com_allocate_host_info(ena_dev); 28293f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28303fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot allocate host info\n"); 28319b8d05b8SZbigniew Bodek return; 28329b8d05b8SZbigniew Bodek } 28339b8d05b8SZbigniew Bodek 28349b8d05b8SZbigniew Bodek host_info = ena_dev->host_attr.host_info; 28359b8d05b8SZbigniew Bodek 283646021271SMarcin Wojtas if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) 283746021271SMarcin Wojtas host_info->bdf = rid; 28389b8d05b8SZbigniew Bodek host_info->os_type = ENA_ADMIN_OS_FREEBSD; 28399b8d05b8SZbigniew Bodek host_info->kernel_ver = osreldate; 28409b8d05b8SZbigniew Bodek 28419b8d05b8SZbigniew Bodek sprintf(host_info->kernel_ver_str, "%d", osreldate); 28429b8d05b8SZbigniew Bodek host_info->os_dist = 0; 28439b8d05b8SZbigniew Bodek strncpy(host_info->os_dist_str, osrelease, 28449b8d05b8SZbigniew Bodek sizeof(host_info->os_dist_str) - 1); 28459b8d05b8SZbigniew Bodek 28468f15f8a7SDawid Gorecki host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) | 28478f15f8a7SDawid Gorecki (ENA_DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 28488f15f8a7SDawid Gorecki (ENA_DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 28498ece6b25SMarcin Wojtas host_info->num_cpus = mp_ncpus; 2850c7444389SMarcin Wojtas host_info->driver_supported_features = 28516d1ef2abSArtur Rojek 
ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 28526d1ef2abSArtur Rojek ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 28539b8d05b8SZbigniew Bodek 28549b8d05b8SZbigniew Bodek rc = ena_com_set_host_attributes(ena_dev); 28553f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 2856a195fab0SMarcin Wojtas if (rc == EOPNOTSUPP) 28573fc5d816SMarcin Wojtas ena_log(dev, WARN, "Cannot set host attributes\n"); 28589b8d05b8SZbigniew Bodek else 28593fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot set host attributes\n"); 28609b8d05b8SZbigniew Bodek 28619b8d05b8SZbigniew Bodek goto err; 28629b8d05b8SZbigniew Bodek } 28639b8d05b8SZbigniew Bodek 28649b8d05b8SZbigniew Bodek return; 28659b8d05b8SZbigniew Bodek 28669b8d05b8SZbigniew Bodek err: 28679b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 28689b8d05b8SZbigniew Bodek } 28699b8d05b8SZbigniew Bodek 28709b8d05b8SZbigniew Bodek static int 28719b8d05b8SZbigniew Bodek ena_device_init(struct ena_adapter *adapter, device_t pdev, 28729b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 28739b8d05b8SZbigniew Bodek { 28743324e304SMichal Krawczyk struct ena_llq_configurations llq_config; 28759b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 28769b8d05b8SZbigniew Bodek bool readless_supported; 28779b8d05b8SZbigniew Bodek uint32_t aenq_groups; 28789b8d05b8SZbigniew Bodek int dma_width; 28799b8d05b8SZbigniew Bodek int rc; 28809b8d05b8SZbigniew Bodek 28819b8d05b8SZbigniew Bodek rc = ena_com_mmio_reg_read_request_init(ena_dev); 28823f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28833fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to init mmio read less\n"); 28840bdffe59SMarcin Wojtas return (rc); 28859b8d05b8SZbigniew Bodek } 28869b8d05b8SZbigniew Bodek 28879b8d05b8SZbigniew Bodek /* 28889b8d05b8SZbigniew Bodek * The PCIe configuration space revision id indicate if mmio reg 28899b8d05b8SZbigniew Bodek * read is disabled 28909b8d05b8SZbigniew Bodek */ 28919b8d05b8SZbigniew Bodek readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); 28929b8d05b8SZbigniew Bodek ena_com_set_mmio_read_mode(ena_dev, readless_supported); 28939b8d05b8SZbigniew Bodek 2894a195fab0SMarcin Wojtas rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 28953f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28963fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Can not reset device\n"); 28979b8d05b8SZbigniew Bodek goto err_mmio_read_less; 28989b8d05b8SZbigniew Bodek } 28999b8d05b8SZbigniew Bodek 29009b8d05b8SZbigniew Bodek rc = ena_com_validate_version(ena_dev); 29013f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29023fc5d816SMarcin Wojtas ena_log(pdev, ERR, "device version is too low\n"); 29039b8d05b8SZbigniew Bodek goto err_mmio_read_less; 29049b8d05b8SZbigniew Bodek } 29059b8d05b8SZbigniew Bodek 29069b8d05b8SZbigniew Bodek dma_width = ena_com_get_dma_width(ena_dev); 29073f9ed7abSMarcin Wojtas if (unlikely(dma_width < 0)) { 29083fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Invalid dma width value %d", dma_width); 29099b8d05b8SZbigniew Bodek rc = dma_width; 29109b8d05b8SZbigniew Bodek goto err_mmio_read_less; 29119b8d05b8SZbigniew Bodek } 29129b8d05b8SZbigniew Bodek adapter->dma_width = dma_width; 29139b8d05b8SZbigniew Bodek 29149b8d05b8SZbigniew Bodek /* ENA admin level init */ 291567ec48bbSMarcin Wojtas rc = ena_com_admin_init(ena_dev, &aenq_handlers); 29163f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29173fc5d816SMarcin Wojtas ena_log(pdev, ERR, 29189b8d05b8SZbigniew Bodek "Can not initialize ena admin queue with 
device\n"); 29199b8d05b8SZbigniew Bodek goto err_mmio_read_less; 29209b8d05b8SZbigniew Bodek } 29219b8d05b8SZbigniew Bodek 29229b8d05b8SZbigniew Bodek /* 29239b8d05b8SZbigniew Bodek * To enable the msix interrupts the driver needs to know the number 29249b8d05b8SZbigniew Bodek * of queues. So the driver uses polling mode to retrieve this 29259b8d05b8SZbigniew Bodek * information 29269b8d05b8SZbigniew Bodek */ 29279b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, true); 29289b8d05b8SZbigniew Bodek 292946021271SMarcin Wojtas ena_config_host_info(ena_dev, pdev); 29309b8d05b8SZbigniew Bodek 29319b8d05b8SZbigniew Bodek /* Get Device Attributes */ 29329b8d05b8SZbigniew Bodek rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 29333f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29343fc5d816SMarcin Wojtas ena_log(pdev, ERR, 29359b8d05b8SZbigniew Bodek "Cannot get attribute for ena device rc: %d\n", rc); 29369b8d05b8SZbigniew Bodek goto err_admin_init; 29379b8d05b8SZbigniew Bodek } 29389b8d05b8SZbigniew Bodek 2939e6de9a83SMarcin Wojtas aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 2940e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_FATAL_ERROR) | 2941e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_WARNING) | 294240621d71SMarcin Wojtas BIT(ENA_ADMIN_NOTIFICATION) | 29438cd86b51SOsama Abboud BIT(ENA_ADMIN_KEEP_ALIVE) | 29448cd86b51SOsama Abboud BIT(ENA_ADMIN_CONF_NOTIFICATIONS); 29459b8d05b8SZbigniew Bodek 29469b8d05b8SZbigniew Bodek aenq_groups &= get_feat_ctx->aenq.supported_groups; 29479b8d05b8SZbigniew Bodek rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 29483f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29493fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc); 29509b8d05b8SZbigniew Bodek goto err_admin_init; 29519b8d05b8SZbigniew Bodek } 29529b8d05b8SZbigniew Bodek 29539b8d05b8SZbigniew Bodek *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 29549b8d05b8SZbigniew Bodek 29553324e304SMichal Krawczyk set_default_llq_configurations(&llq_config, &get_feat_ctx->llq); 29563324e304SMichal Krawczyk 29573324e304SMichal Krawczyk rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, 29583324e304SMichal Krawczyk &llq_config); 29593324e304SMichal Krawczyk if (unlikely(rc != 0)) { 29603324e304SMichal Krawczyk ena_log(pdev, ERR, "Failed to set placement policy\n"); 29613324e304SMichal Krawczyk goto err_admin_init; 29623324e304SMichal Krawczyk } 29633324e304SMichal Krawczyk 29640bdffe59SMarcin Wojtas return (0); 29659b8d05b8SZbigniew Bodek 29669b8d05b8SZbigniew Bodek err_admin_init: 29679b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 29689b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 29699b8d05b8SZbigniew Bodek err_mmio_read_less: 29709b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 29719b8d05b8SZbigniew Bodek 29720bdffe59SMarcin Wojtas return (rc); 29739b8d05b8SZbigniew Bodek } 29749b8d05b8SZbigniew Bodek 297582e558eaSDawid Gorecki static int 297682e558eaSDawid Gorecki ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) 29779b8d05b8SZbigniew Bodek { 29789b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 29799b8d05b8SZbigniew Bodek int rc; 29809b8d05b8SZbigniew Bodek 29819b8d05b8SZbigniew Bodek rc = ena_enable_msix(adapter); 29823f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29833fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n"); 29840bdffe59SMarcin Wojtas return (rc); 29859b8d05b8SZbigniew Bodek } 29869b8d05b8SZbigniew Bodek 
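/*
 * With the MSI-X vectors allocated, wire up the management (admin completion
 * and AENQ) interrupt before the admin queue is switched out of polling mode
 * below.
 */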
29879b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(adapter); 29889b8d05b8SZbigniew Bodek 29899b8d05b8SZbigniew Bodek rc = ena_request_mgmnt_irq(adapter); 29903f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29913fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n"); 29929b8d05b8SZbigniew Bodek goto err_disable_msix; 29939b8d05b8SZbigniew Bodek } 29949b8d05b8SZbigniew Bodek 29959b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, false); 29969b8d05b8SZbigniew Bodek 29979b8d05b8SZbigniew Bodek ena_com_admin_aenq_enable(ena_dev); 29989b8d05b8SZbigniew Bodek 29990bdffe59SMarcin Wojtas return (0); 30009b8d05b8SZbigniew Bodek 30019b8d05b8SZbigniew Bodek err_disable_msix: 30029b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 30039b8d05b8SZbigniew Bodek 30040bdffe59SMarcin Wojtas return (rc); 30059b8d05b8SZbigniew Bodek } 30069b8d05b8SZbigniew Bodek 30079b8d05b8SZbigniew Bodek /* Function called on ENA_ADMIN_KEEP_ALIVE event */ 300882e558eaSDawid Gorecki static void 300982e558eaSDawid Gorecki ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) 30109b8d05b8SZbigniew Bodek { 30119b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 301230217e2dSMarcin Wojtas struct ena_admin_aenq_keep_alive_desc *desc; 30139b8d05b8SZbigniew Bodek sbintime_t stime; 301430217e2dSMarcin Wojtas uint64_t rx_drops; 30156c84cec3SMarcin Wojtas uint64_t tx_drops; 301630217e2dSMarcin Wojtas 301730217e2dSMarcin Wojtas desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 301830217e2dSMarcin Wojtas 301930217e2dSMarcin Wojtas rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 30206c84cec3SMarcin Wojtas tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low; 302130217e2dSMarcin Wojtas counter_u64_zero(adapter->hw_stats.rx_drops); 302230217e2dSMarcin Wojtas counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); 30236c84cec3SMarcin Wojtas counter_u64_zero(adapter->hw_stats.tx_drops); 30246c84cec3SMarcin Wojtas counter_u64_add(adapter->hw_stats.tx_drops, tx_drops); 30259b8d05b8SZbigniew Bodek 30269b8d05b8SZbigniew Bodek stime = getsbinuptime(); 30279b8d05b8SZbigniew Bodek atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 30289b8d05b8SZbigniew Bodek } 30299b8d05b8SZbigniew Bodek 30309b8d05b8SZbigniew Bodek /* Check for keep alive expiration */ 303182e558eaSDawid Gorecki static void 303282e558eaSDawid Gorecki check_for_missing_keep_alive(struct ena_adapter *adapter) 30339b8d05b8SZbigniew Bodek { 30349b8d05b8SZbigniew Bodek sbintime_t timestamp, time; 3035274319acSOsama Abboud enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 30369b8d05b8SZbigniew Bodek 30379b8d05b8SZbigniew Bodek if (adapter->wd_active == 0) 30389b8d05b8SZbigniew Bodek return; 30399b8d05b8SZbigniew Bodek 304040621d71SMarcin Wojtas if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 30419b8d05b8SZbigniew Bodek return; 30429b8d05b8SZbigniew Bodek 30439b8d05b8SZbigniew Bodek timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); 30449b8d05b8SZbigniew Bodek time = getsbinuptime() - timestamp; 30459b8d05b8SZbigniew Bodek if (unlikely(time > adapter->keep_alive_timeout)) { 30463fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n"); 3047274319acSOsama Abboud if (ena_com_aenq_has_keep_alive(adapter->ena_dev)) 3048274319acSOsama Abboud reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT; 3049274319acSOsama Abboud 3050274319acSOsama Abboud 
ena_trigger_reset(adapter, reset_reason); 30519b8d05b8SZbigniew Bodek } 3052858659f7SMarcin Wojtas } 30539b8d05b8SZbigniew Bodek 30549b8d05b8SZbigniew Bodek /* Check if admin queue is enabled */ 305582e558eaSDawid Gorecki static void 305682e558eaSDawid Gorecki check_for_admin_com_state(struct ena_adapter *adapter) 30579b8d05b8SZbigniew Bodek { 3058274319acSOsama Abboud enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_ADMIN_TO; 305982e558eaSDawid Gorecki if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) { 30603fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 30619b8d05b8SZbigniew Bodek "ENA admin queue is not in running state!\n"); 3062274319acSOsama Abboud counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 3063274319acSOsama Abboud if (ena_com_get_missing_admin_interrupt(adapter->ena_dev)) 3064274319acSOsama Abboud reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT; 3065274319acSOsama Abboud 3066274319acSOsama Abboud ena_trigger_reset(adapter, reset_reason); 30679b8d05b8SZbigniew Bodek } 3068858659f7SMarcin Wojtas } 30699b8d05b8SZbigniew Bodek 307074dba3adSMarcin Wojtas static int 3071d12f7bfcSMarcin Wojtas check_for_rx_interrupt_queue(struct ena_adapter *adapter, 3072d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring) 3073d12f7bfcSMarcin Wojtas { 30740ac122c3SDawid Gorecki if (likely(atomic_load_8(&rx_ring->first_interrupt))) 3075d12f7bfcSMarcin Wojtas return (0); 3076d12f7bfcSMarcin Wojtas 3077d12f7bfcSMarcin Wojtas if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 3078d12f7bfcSMarcin Wojtas return (0); 3079d12f7bfcSMarcin Wojtas 3080d12f7bfcSMarcin Wojtas rx_ring->no_interrupt_event_cnt++; 3081d12f7bfcSMarcin Wojtas 308282e558eaSDawid Gorecki if (rx_ring->no_interrupt_event_cnt == 308382e558eaSDawid Gorecki ENA_MAX_NO_INTERRUPT_ITERATIONS) { 308482e558eaSDawid Gorecki ena_log(adapter->pdev, ERR, 308582e558eaSDawid Gorecki "Potential MSIX issue on Rx side Queue = %d. 
Reset the device\n", 308682e558eaSDawid Gorecki rx_ring->qid); 30877926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT); 3088d12f7bfcSMarcin Wojtas return (EIO); 3089d12f7bfcSMarcin Wojtas } 3090d12f7bfcSMarcin Wojtas 3091d12f7bfcSMarcin Wojtas return (0); 3092d12f7bfcSMarcin Wojtas } 3093d12f7bfcSMarcin Wojtas 3094*a33ec635SOsama Abboud static enum ena_regs_reset_reason_types 3095*a33ec635SOsama Abboud check_cdesc_in_tx_cq(struct ena_adapter *adapter, 3096*a33ec635SOsama Abboud struct ena_ring *tx_ring) 3097*a33ec635SOsama Abboud { 3098*a33ec635SOsama Abboud device_t pdev = adapter->pdev; 3099*a33ec635SOsama Abboud int rc; 3100*a33ec635SOsama Abboud u16 req_id; 3101*a33ec635SOsama Abboud 3102*a33ec635SOsama Abboud rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); 3103*a33ec635SOsama Abboud /* TX CQ is empty */ 3104*a33ec635SOsama Abboud if (rc == ENA_COM_TRY_AGAIN) { 3105*a33ec635SOsama Abboud ena_log(pdev, ERR, 3106*a33ec635SOsama Abboud "No completion descriptors found in CQ %d\n", 3107*a33ec635SOsama Abboud tx_ring->qid); 3108*a33ec635SOsama Abboud return ENA_REGS_RESET_MISS_TX_CMPL; 3109*a33ec635SOsama Abboud } 3110*a33ec635SOsama Abboud 3111*a33ec635SOsama Abboud /* TX CQ has cdescs */ 3112*a33ec635SOsama Abboud ena_log(pdev, ERR, 3113*a33ec635SOsama Abboud "Completion descriptors found in CQ %d", 3114*a33ec635SOsama Abboud tx_ring->qid); 3115*a33ec635SOsama Abboud 3116*a33ec635SOsama Abboud return ENA_REGS_RESET_MISS_INTERRUPT; 3117*a33ec635SOsama Abboud } 3118*a33ec635SOsama Abboud 3119d12f7bfcSMarcin Wojtas static int 3120d12f7bfcSMarcin Wojtas check_missing_comp_in_tx_queue(struct ena_adapter *adapter, 312174dba3adSMarcin Wojtas struct ena_ring *tx_ring) 312274dba3adSMarcin Wojtas { 31231f67704eSOsama Abboud uint32_t missed_tx = 0, new_missed_tx = 0; 31243fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 312574dba3adSMarcin Wojtas struct bintime curtime, time; 312674dba3adSMarcin Wojtas struct ena_tx_buffer *tx_buf; 3127d8aba82bSDawid Gorecki int time_since_last_cleanup; 3128d8aba82bSDawid Gorecki int missing_tx_comp_to; 3129d12f7bfcSMarcin Wojtas sbintime_t time_offset; 3130d12f7bfcSMarcin Wojtas int i, rc = 0; 3131*a33ec635SOsama Abboud enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 3132*a33ec635SOsama Abboud bool cleanup_scheduled, cleanup_running; 313374dba3adSMarcin Wojtas 313474dba3adSMarcin Wojtas getbinuptime(&curtime); 313574dba3adSMarcin Wojtas 313674dba3adSMarcin Wojtas for (i = 0; i < tx_ring->ring_size; i++) { 313774dba3adSMarcin Wojtas tx_buf = &tx_ring->tx_buffer_info[i]; 313874dba3adSMarcin Wojtas 31390bdffe59SMarcin Wojtas if (bintime_isset(&tx_buf->timestamp) == 0) 314074dba3adSMarcin Wojtas continue; 314174dba3adSMarcin Wojtas 314274dba3adSMarcin Wojtas time = curtime; 314374dba3adSMarcin Wojtas bintime_sub(&time, &tx_buf->timestamp); 3144d12f7bfcSMarcin Wojtas time_offset = bttosbt(time); 3145d12f7bfcSMarcin Wojtas 31460ac122c3SDawid Gorecki if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) && 3147d12f7bfcSMarcin Wojtas time_offset > 2 * adapter->missing_tx_timeout)) { 3148d12f7bfcSMarcin Wojtas /* 3149d12f7bfcSMarcin Wojtas * If after graceful period interrupt is still not 3150d12f7bfcSMarcin Wojtas * received, we schedule a reset. 3151d12f7bfcSMarcin Wojtas */ 31523fc5d816SMarcin Wojtas ena_log(pdev, ERR, 3153d12f7bfcSMarcin Wojtas "Potential MSIX issue on Tx side Queue = %d. 
" 315482e558eaSDawid Gorecki "Reset the device\n", 315582e558eaSDawid Gorecki tx_ring->qid); 31567926bc44SMarcin Wojtas ena_trigger_reset(adapter, 31577926bc44SMarcin Wojtas ENA_REGS_RESET_MISS_INTERRUPT); 3158d12f7bfcSMarcin Wojtas return (EIO); 3159d12f7bfcSMarcin Wojtas } 316074dba3adSMarcin Wojtas 316174dba3adSMarcin Wojtas /* Check again if packet is still waiting */ 3162d12f7bfcSMarcin Wojtas if (unlikely(time_offset > adapter->missing_tx_timeout)) { 316374dba3adSMarcin Wojtas 3164f01b2cd9SArthur Kiyanovski if (tx_buf->print_once) { 31659272e45cSArthur Kiyanovski time_since_last_cleanup = TICKS_2_MSEC(ticks - 3166d8aba82bSDawid Gorecki tx_ring->tx_last_cleanup_ticks); 316782e558eaSDawid Gorecki missing_tx_comp_to = sbttoms( 316882e558eaSDawid Gorecki adapter->missing_tx_timeout); 316982e558eaSDawid Gorecki ena_log(pdev, WARN, 317082e558eaSDawid Gorecki "Found a Tx that wasn't completed on time, qid %d, index %d. " 31719272e45cSArthur Kiyanovski "%d msecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n", 3172d8aba82bSDawid Gorecki tx_ring->qid, i, time_since_last_cleanup, 3173d8aba82bSDawid Gorecki missing_tx_comp_to); 31741f67704eSOsama Abboud /* Add new TX completions which are missed */ 31751f67704eSOsama Abboud new_missed_tx++; 3176d8aba82bSDawid Gorecki } 317774dba3adSMarcin Wojtas 3178f01b2cd9SArthur Kiyanovski tx_buf->print_once = false; 317974dba3adSMarcin Wojtas missed_tx++; 3180d12f7bfcSMarcin Wojtas } 3181d12f7bfcSMarcin Wojtas } 31821f67704eSOsama Abboud /* Checking if this TX ring missing TX completions have passed the threshold */ 3183d12f7bfcSMarcin Wojtas if (unlikely(missed_tx > adapter->missing_tx_threshold)) { 31843fc5d816SMarcin Wojtas ena_log(pdev, ERR, 3185d12f7bfcSMarcin Wojtas "The number of lost tx completion is above the threshold " 3186d12f7bfcSMarcin Wojtas "(%d > %d). Reset the device\n", 31874e8acd84SMarcin Wojtas missed_tx, adapter->missing_tx_threshold); 3188*a33ec635SOsama Abboud /* Set the reset flag to prevent ena_cleanup() from running */ 3189*a33ec635SOsama Abboud ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 3190*a33ec635SOsama Abboud /* Need to make sure that ENA_FLAG_TRIGGER_RESET is visible to ena_cleanup() and 3191*a33ec635SOsama Abboud * that cleanup_running is visible to check_missing_comp_in_tx_queue() to 3192*a33ec635SOsama Abboud * prevent the case of accessing CQ concurrently with check_cdesc_in_tx_cq() 3193*a33ec635SOsama Abboud */ 3194*a33ec635SOsama Abboud mb(); 3195*a33ec635SOsama Abboud cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending)); 3196*a33ec635SOsama Abboud cleanup_running = !!(atomic_load_8((&tx_ring->cleanup_running))); 3197*a33ec635SOsama Abboud if (!(cleanup_scheduled || cleanup_running)) 3198*a33ec635SOsama Abboud reset_reason = check_cdesc_in_tx_cq(adapter, tx_ring); 3199*a33ec635SOsama Abboud 3200*a33ec635SOsama Abboud adapter->reset_reason = reset_reason; 3201d12f7bfcSMarcin Wojtas rc = EIO; 320274dba3adSMarcin Wojtas } 32031f67704eSOsama Abboud /* Add the newly discovered missing TX completions */ 32041f67704eSOsama Abboud counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx); 3205d12f7bfcSMarcin Wojtas 3206d12f7bfcSMarcin Wojtas return (rc); 320774dba3adSMarcin Wojtas } 320874dba3adSMarcin Wojtas 32099b8d05b8SZbigniew Bodek /* 32109b8d05b8SZbigniew Bodek * Check for TX which were not completed on time. 32119b8d05b8SZbigniew Bodek * Timeout is defined by "missing_tx_timeout". 
32129b8d05b8SZbigniew Bodek * Reset will be performed if number of incompleted 32139b8d05b8SZbigniew Bodek * transactions exceeds "missing_tx_threshold". 32149b8d05b8SZbigniew Bodek */ 32150bdffe59SMarcin Wojtas static void 3216d12f7bfcSMarcin Wojtas check_for_missing_completions(struct ena_adapter *adapter) 32179b8d05b8SZbigniew Bodek { 32189b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 3219d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring; 322074dba3adSMarcin Wojtas int i, budget, rc; 32219b8d05b8SZbigniew Bodek 32229b8d05b8SZbigniew Bodek /* Make sure the driver doesn't turn the device in other process */ 32239b8d05b8SZbigniew Bodek rmb(); 32249b8d05b8SZbigniew Bodek 3225fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 32269b8d05b8SZbigniew Bodek return; 32279b8d05b8SZbigniew Bodek 3228fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 32299b8d05b8SZbigniew Bodek return; 32309b8d05b8SZbigniew Bodek 323140621d71SMarcin Wojtas if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT) 32329b8d05b8SZbigniew Bodek return; 32339b8d05b8SZbigniew Bodek 32349b8d05b8SZbigniew Bodek budget = adapter->missing_tx_max_queues; 32359b8d05b8SZbigniew Bodek 32367d8c4feeSMarcin Wojtas for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) { 32379b8d05b8SZbigniew Bodek tx_ring = &adapter->tx_ring[i]; 3238d12f7bfcSMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 32399b8d05b8SZbigniew Bodek 3240d12f7bfcSMarcin Wojtas rc = check_missing_comp_in_tx_queue(adapter, tx_ring); 3241d12f7bfcSMarcin Wojtas if (unlikely(rc != 0)) 3242d12f7bfcSMarcin Wojtas return; 3243d12f7bfcSMarcin Wojtas 3244d12f7bfcSMarcin Wojtas rc = check_for_rx_interrupt_queue(adapter, rx_ring); 32450bdffe59SMarcin Wojtas if (unlikely(rc != 0)) 32469b8d05b8SZbigniew Bodek return; 32479b8d05b8SZbigniew Bodek 32489b8d05b8SZbigniew Bodek budget--; 3249cd5d5804SMarcin Wojtas if (budget == 0) { 32509b8d05b8SZbigniew Bodek i++; 32519b8d05b8SZbigniew Bodek break; 32529b8d05b8SZbigniew Bodek } 32539b8d05b8SZbigniew Bodek } 32549b8d05b8SZbigniew Bodek 32557d8c4feeSMarcin Wojtas adapter->next_monitored_tx_qid = i % adapter->num_io_queues; 32569b8d05b8SZbigniew Bodek } 32579b8d05b8SZbigniew Bodek 32585cb9db07SMarcin Wojtas /* trigger rx cleanup after 2 consecutive detections */ 3259efe6ab18SMarcin Wojtas #define EMPTY_RX_REFILL 2 3260efe6ab18SMarcin Wojtas /* For the rare case where the device runs out of Rx descriptors and the 3261efe6ab18SMarcin Wojtas * msix handler failed to refill new Rx descriptors (due to a lack of memory 3262efe6ab18SMarcin Wojtas * for example). 3263efe6ab18SMarcin Wojtas * This case will lead to a deadlock: 3264efe6ab18SMarcin Wojtas * The device won't send interrupts since all the new Rx packets will be dropped 3265efe6ab18SMarcin Wojtas * The msix handler won't allocate new Rx descriptors so the device won't be 3266efe6ab18SMarcin Wojtas * able to send new packets. 
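 * (A ring that still holds even one free descriptor can complete a packet and
 * raise an interrupt, so only a completely drained ring needs this rescue.)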
3267efe6ab18SMarcin Wojtas * 3268efe6ab18SMarcin Wojtas * When such a situation is detected - execute rx cleanup task in another thread 3269efe6ab18SMarcin Wojtas */ 3270efe6ab18SMarcin Wojtas static void 3271efe6ab18SMarcin Wojtas check_for_empty_rx_ring(struct ena_adapter *adapter) 3272efe6ab18SMarcin Wojtas { 3273efe6ab18SMarcin Wojtas struct ena_ring *rx_ring; 3274efe6ab18SMarcin Wojtas int i, refill_required; 3275efe6ab18SMarcin Wojtas 3276fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 3277efe6ab18SMarcin Wojtas return; 3278efe6ab18SMarcin Wojtas 3279fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 3280efe6ab18SMarcin Wojtas return; 3281efe6ab18SMarcin Wojtas 32827d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 3283efe6ab18SMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 3284efe6ab18SMarcin Wojtas 328582e558eaSDawid Gorecki refill_required = ena_com_free_q_entries( 328682e558eaSDawid Gorecki rx_ring->ena_com_io_sq); 3287efe6ab18SMarcin Wojtas if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 3288efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue++; 3289efe6ab18SMarcin Wojtas 3290efe6ab18SMarcin Wojtas if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 3291efe6ab18SMarcin Wojtas counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 3292efe6ab18SMarcin Wojtas 1); 3293efe6ab18SMarcin Wojtas 32943fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 32953fc5d816SMarcin Wojtas "Rx ring %d is stalled. Triggering the refill function\n", 32963fc5d816SMarcin Wojtas i); 3297efe6ab18SMarcin Wojtas 32985cb9db07SMarcin Wojtas taskqueue_enqueue(rx_ring->que->cleanup_tq, 32995cb9db07SMarcin Wojtas &rx_ring->que->cleanup_task); 3300efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3301efe6ab18SMarcin Wojtas } 3302efe6ab18SMarcin Wojtas } else { 3303efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3304efe6ab18SMarcin Wojtas } 3305efe6ab18SMarcin Wojtas } 3306efe6ab18SMarcin Wojtas } 33079b8d05b8SZbigniew Bodek 330882e558eaSDawid Gorecki static void 330982e558eaSDawid Gorecki ena_update_hints(struct ena_adapter *adapter, 331040621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints) 331140621d71SMarcin Wojtas { 331240621d71SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 331340621d71SMarcin Wojtas 331440621d71SMarcin Wojtas if (hints->admin_completion_tx_timeout) 331540621d71SMarcin Wojtas ena_dev->admin_queue.completion_timeout = 331640621d71SMarcin Wojtas hints->admin_completion_tx_timeout * 1000; 331740621d71SMarcin Wojtas 331840621d71SMarcin Wojtas if (hints->mmio_read_timeout) 331940621d71SMarcin Wojtas /* convert to usec */ 332082e558eaSDawid Gorecki ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000; 332140621d71SMarcin Wojtas 332240621d71SMarcin Wojtas if (hints->missed_tx_completion_count_threshold_to_reset) 332340621d71SMarcin Wojtas adapter->missing_tx_threshold = 332440621d71SMarcin Wojtas hints->missed_tx_completion_count_threshold_to_reset; 332540621d71SMarcin Wojtas 332640621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout) { 332740621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout == 332840621d71SMarcin Wojtas ENA_HW_HINTS_NO_TIMEOUT) 332940621d71SMarcin Wojtas adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 333040621d71SMarcin Wojtas else 333182e558eaSDawid Gorecki adapter->missing_tx_timeout = SBT_1MS * 333282e558eaSDawid Gorecki hints->missing_tx_completion_timeout; 333340621d71SMarcin Wojtas } 333440621d71SMarcin Wojtas 333540621d71SMarcin Wojtas if 
(hints->driver_watchdog_timeout) { 333640621d71SMarcin Wojtas if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 333740621d71SMarcin Wojtas adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 333840621d71SMarcin Wojtas else 333982e558eaSDawid Gorecki adapter->keep_alive_timeout = SBT_1MS * 334082e558eaSDawid Gorecki hints->driver_watchdog_timeout; 334140621d71SMarcin Wojtas } 334240621d71SMarcin Wojtas } 334340621d71SMarcin Wojtas 3344f180142cSMarcin Wojtas /** 3345f180142cSMarcin Wojtas * ena_copy_eni_metrics - Get and copy ENI metrics from the HW. 3346f180142cSMarcin Wojtas * @adapter: ENA device adapter 3347f180142cSMarcin Wojtas * 3348f180142cSMarcin Wojtas * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics 3349f180142cSMarcin Wojtas * and other error codes on failure. 3350f180142cSMarcin Wojtas * 3351f180142cSMarcin Wojtas * This function can possibly cause a race with other calls to the admin queue. 3352f180142cSMarcin Wojtas * Because of that, the caller should either lock this function or make sure 3353f180142cSMarcin Wojtas * that there is no race in the current context. 3354f180142cSMarcin Wojtas */ 3355f180142cSMarcin Wojtas static int 3356f180142cSMarcin Wojtas ena_copy_eni_metrics(struct ena_adapter *adapter) 3357f180142cSMarcin Wojtas { 3358f180142cSMarcin Wojtas static bool print_once = true; 3359f180142cSMarcin Wojtas int rc; 3360f180142cSMarcin Wojtas 3361f180142cSMarcin Wojtas rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics); 3362f180142cSMarcin Wojtas 3363f180142cSMarcin Wojtas if (rc != 0) { 3364f180142cSMarcin Wojtas if (rc == ENA_COM_UNSUPPORTED) { 3365f180142cSMarcin Wojtas if (print_once) { 33663fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 3367f180142cSMarcin Wojtas "Retrieving ENI metrics is not supported.\n"); 3368f180142cSMarcin Wojtas print_once = false; 3369f180142cSMarcin Wojtas } else { 33703fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, 3371f180142cSMarcin Wojtas "Retrieving ENI metrics is not supported.\n"); 3372f180142cSMarcin Wojtas } 3373f180142cSMarcin Wojtas } else { 33743fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 3375f180142cSMarcin Wojtas "Failed to get ENI metrics: %d\n", rc); 3376f180142cSMarcin Wojtas } 3377f180142cSMarcin Wojtas } 3378f180142cSMarcin Wojtas 3379f180142cSMarcin Wojtas return (rc); 3380f180142cSMarcin Wojtas } 3381f180142cSMarcin Wojtas 3382f97993adSOsama Abboud static int 338336d42c86SOsama Abboud ena_copy_srd_metrics(struct ena_adapter *adapter) 338436d42c86SOsama Abboud { 338536d42c86SOsama Abboud return ena_com_get_ena_srd_info(adapter->ena_dev, &adapter->ena_srd_info); 338636d42c86SOsama Abboud } 338736d42c86SOsama Abboud 338836d42c86SOsama Abboud static int 3389f97993adSOsama Abboud ena_copy_customer_metrics(struct ena_adapter *adapter) 3390f97993adSOsama Abboud { 3391f97993adSOsama Abboud struct ena_com_dev *dev; 3392f97993adSOsama Abboud u32 supported_metrics_count; 3393f97993adSOsama Abboud int rc, len; 3394f97993adSOsama Abboud 3395f97993adSOsama Abboud dev = adapter->ena_dev; 3396f97993adSOsama Abboud 3397f97993adSOsama Abboud supported_metrics_count = ena_com_get_customer_metric_count(dev); 3398f97993adSOsama Abboud len = supported_metrics_count * sizeof(u64); 3399f97993adSOsama Abboud 3400f97993adSOsama Abboud /* Fill the data buffer */ 3401f97993adSOsama Abboud rc = ena_com_get_customer_metrics(adapter->ena_dev, 3402f97993adSOsama Abboud (char *)(adapter->customer_metrics_array), len); 3403f97993adSOsama Abboud 3404f97993adSOsama Abboud 
	return (rc);
}

static void
ena_timer_service(void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	struct ena_admin_host_info *host_info =
	    adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_completions(adapter);

	check_for_empty_rx_ring(adapter);

	/*
	 * User controlled update of the ENA metrics.
	 * If the delay was set to 0, the stats shouldn't be updated at all.
	 * Otherwise, wait 'metrics_sample_interval' seconds before updating
	 * the stats.
	 * As the timer service is executed every second, it's enough to
	 * increment the appropriate counter each time the timer service is
	 * executed.
	 */
	if ((adapter->metrics_sample_interval != 0) &&
	    (++adapter->metrics_sample_interval_cnt >=
	    adapter->metrics_sample_interval)) {
		taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task);
		adapter->metrics_sample_interval_cnt = 0;
	}

	if (host_info != NULL)
		ena_update_host_info(host_info, adapter->ifp);

	if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
		/*
		 * A timeout when validating the version indicates that the
		 * device became unresponsive. If that happens, skip the reset
		 * and reschedule the timer service, so the reset can be
		 * retried later.
		 */
		if (ena_com_validate_version(adapter->ena_dev) ==
		    ENA_COM_TIMER_EXPIRED) {
			ena_log(adapter->pdev, WARN,
			    "FW unresponsive, skipping reset\n");
			ENA_TIMER_RESET(adapter);
			return;
		}
		ena_log(adapter->pdev, WARN, "Trigger reset is on\n");
		taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
		return;
	}

	/*
	 * Schedule another timeout one second from now.
34629b8d05b8SZbigniew Bodek */ 346378554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 34649b8d05b8SZbigniew Bodek } 34659b8d05b8SZbigniew Bodek 346638c7b965SMarcin Wojtas void 346732f63fa7SMarcin Wojtas ena_destroy_device(struct ena_adapter *adapter, bool graceful) 34689b8d05b8SZbigniew Bodek { 346932f63fa7SMarcin Wojtas if_t ifp = adapter->ifp; 34709b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 34719b8d05b8SZbigniew Bodek bool dev_up; 347232f63fa7SMarcin Wojtas 347332f63fa7SMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) 347432f63fa7SMarcin Wojtas return; 347532f63fa7SMarcin Wojtas 3476c59a5fbdSArthur Kiyanovski if (!graceful) 347732f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_DOWN); 347832f63fa7SMarcin Wojtas 347978554d0cSDawid Gorecki ENA_TIMER_DRAIN(adapter); 348032f63fa7SMarcin Wojtas 348132f63fa7SMarcin Wojtas dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 348232f63fa7SMarcin Wojtas if (dev_up) 348332f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 348432f63fa7SMarcin Wojtas 348532f63fa7SMarcin Wojtas if (!graceful) 348632f63fa7SMarcin Wojtas ena_com_set_admin_running_state(ena_dev, false); 348732f63fa7SMarcin Wojtas 348832f63fa7SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 348932f63fa7SMarcin Wojtas ena_down(adapter); 349032f63fa7SMarcin Wojtas 349132f63fa7SMarcin Wojtas /* 349232f63fa7SMarcin Wojtas * Stop the device from sending AENQ events (if the device was up, and 349332f63fa7SMarcin Wojtas * the trigger reset was on, ena_down already performs device reset) 349432f63fa7SMarcin Wojtas */ 349532f63fa7SMarcin Wojtas if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up)) 349632f63fa7SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 349732f63fa7SMarcin Wojtas 349832f63fa7SMarcin Wojtas ena_free_mgmnt_irq(adapter); 349932f63fa7SMarcin Wojtas 350032f63fa7SMarcin Wojtas ena_disable_msix(adapter); 350132f63fa7SMarcin Wojtas 3502e2735b09SMarcin Wojtas /* 3503e2735b09SMarcin Wojtas * IO rings resources should be freed because `ena_restore_device()` 3504e2735b09SMarcin Wojtas * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX 3505e2735b09SMarcin Wojtas * vectors. The amount of MSIX vectors after destroy-restore may be 3506e2735b09SMarcin Wojtas * different than before. Therefore, IO rings resources should be 3507e2735b09SMarcin Wojtas * established from scratch each time. 
3508e2735b09SMarcin Wojtas */ 3509e2735b09SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 3510e2735b09SMarcin Wojtas 351132f63fa7SMarcin Wojtas ena_com_abort_admin_commands(ena_dev); 351232f63fa7SMarcin Wojtas 351332f63fa7SMarcin Wojtas ena_com_wait_for_abort_completion(ena_dev); 351432f63fa7SMarcin Wojtas 351532f63fa7SMarcin Wojtas ena_com_admin_destroy(ena_dev); 351632f63fa7SMarcin Wojtas 351732f63fa7SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 351832f63fa7SMarcin Wojtas 351932f63fa7SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 352032f63fa7SMarcin Wojtas 352132f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 352232f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 352332f63fa7SMarcin Wojtas } 352432f63fa7SMarcin Wojtas 352532f63fa7SMarcin Wojtas static int 352632f63fa7SMarcin Wojtas ena_device_validate_params(struct ena_adapter *adapter, 352732f63fa7SMarcin Wojtas struct ena_com_dev_get_features_ctx *get_feat_ctx) 352832f63fa7SMarcin Wojtas { 352932f63fa7SMarcin Wojtas if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, 353032f63fa7SMarcin Wojtas ETHER_ADDR_LEN) != 0) { 35313fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n"); 353232f63fa7SMarcin Wojtas return (EINVAL); 353332f63fa7SMarcin Wojtas } 353432f63fa7SMarcin Wojtas 353532f63fa7SMarcin Wojtas if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { 35363fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 353732f63fa7SMarcin Wojtas "Error, device max mtu is smaller than ifp MTU\n"); 353832f63fa7SMarcin Wojtas return (EINVAL); 353932f63fa7SMarcin Wojtas } 354032f63fa7SMarcin Wojtas 354132f63fa7SMarcin Wojtas return 0; 354232f63fa7SMarcin Wojtas } 354332f63fa7SMarcin Wojtas 354438c7b965SMarcin Wojtas int 354532f63fa7SMarcin Wojtas ena_restore_device(struct ena_adapter *adapter) 354632f63fa7SMarcin Wojtas { 354732f63fa7SMarcin Wojtas struct ena_com_dev_get_features_ctx get_feat_ctx; 354832f63fa7SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 354932f63fa7SMarcin Wojtas if_t ifp = adapter->ifp; 355032f63fa7SMarcin Wojtas device_t dev = adapter->pdev; 355132f63fa7SMarcin Wojtas int wd_active; 35529b8d05b8SZbigniew Bodek int rc; 35539b8d05b8SZbigniew Bodek 355432f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 355532f63fa7SMarcin Wojtas 355632f63fa7SMarcin Wojtas rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active); 355732f63fa7SMarcin Wojtas if (rc != 0) { 35583fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot initialize device\n"); 355932f63fa7SMarcin Wojtas goto err; 356032f63fa7SMarcin Wojtas } 356132f63fa7SMarcin Wojtas /* 356232f63fa7SMarcin Wojtas * Only enable WD if it was enabled before reset, so it won't override 356332f63fa7SMarcin Wojtas * value set by the user by the sysctl. 
 */
	if (adapter->wd_active != 0)
		adapter->wd_active = wd_active;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc != 0) {
		ena_log(dev, ERR, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	/* Make sure we don't have a race with the AENQ link state handler */
	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
		if_link_state_change(ifp, LINK_STATE_UP);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc != 0) {
		ena_log(dev, ERR, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}

	/*
	 * The effective number of used MSI-X vectors should be the same as
	 * before `ena_destroy_device()`, if possible, or the closest to it
	 * if fewer vectors are available.
	 */
	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
		adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;

	/* Re-initialize the rings' basic information */
	ena_init_io_rings(adapter);

	/* If the interface was up before the reset, bring it up */
	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
		rc = ena_up(adapter);
		if (rc != 0) {
			ena_log(dev, ERR, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	/* Indicate that the device is running again and ready to work */
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);

	/*
	 * As the AENQ handlers weren't executed during the reset (the flag
	 * ENA_FLAG_DEVICE_RUNNING was turned off), the keep alive timestamp
	 * must be updated again. That will prevent the next reset from being
	 * caused by a missing keep alive.
361324392281SMarcin Wojtas */ 361424392281SMarcin Wojtas adapter->keep_alive_timestamp = getsbinuptime(); 361578554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 361678554d0cSDawid Gorecki 36177d8c4feeSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 361832f63fa7SMarcin Wojtas 361932f63fa7SMarcin Wojtas return (rc); 362032f63fa7SMarcin Wojtas 362132f63fa7SMarcin Wojtas err_disable_msix: 362232f63fa7SMarcin Wojtas ena_free_mgmnt_irq(adapter); 362332f63fa7SMarcin Wojtas ena_disable_msix(adapter); 362432f63fa7SMarcin Wojtas err_device_destroy: 362532f63fa7SMarcin Wojtas ena_com_abort_admin_commands(ena_dev); 362632f63fa7SMarcin Wojtas ena_com_wait_for_abort_completion(ena_dev); 362732f63fa7SMarcin Wojtas ena_com_admin_destroy(ena_dev); 362832f63fa7SMarcin Wojtas ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); 362932f63fa7SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 363032f63fa7SMarcin Wojtas err: 363132f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 363232f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 36333fc5d816SMarcin Wojtas ena_log(dev, ERR, "Reset attempt failed. Can not reset the device\n"); 363432f63fa7SMarcin Wojtas 363532f63fa7SMarcin Wojtas return (rc); 363632f63fa7SMarcin Wojtas } 363732f63fa7SMarcin Wojtas 363832f63fa7SMarcin Wojtas static void 3639b899a02aSDawid Gorecki ena_metrics_task(void *arg, int pending) 3640b899a02aSDawid Gorecki { 3641b899a02aSDawid Gorecki struct ena_adapter *adapter = (struct ena_adapter *)arg; 3642b899a02aSDawid Gorecki 3643b899a02aSDawid Gorecki ENA_LOCK_LOCK(); 3644f97993adSOsama Abboud 3645f97993adSOsama Abboud if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) 3646f97993adSOsama Abboud (void)ena_copy_customer_metrics(adapter); 3647f97993adSOsama Abboud else if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS)) 3648b899a02aSDawid Gorecki (void)ena_copy_eni_metrics(adapter); 3649f97993adSOsama Abboud 365036d42c86SOsama Abboud if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO)) 365136d42c86SOsama Abboud (void)ena_copy_srd_metrics(adapter); 365236d42c86SOsama Abboud 3653b899a02aSDawid Gorecki ENA_LOCK_UNLOCK(); 3654b899a02aSDawid Gorecki } 3655b899a02aSDawid Gorecki 3656b899a02aSDawid Gorecki static void 365732f63fa7SMarcin Wojtas ena_reset_task(void *arg, int pending) 365832f63fa7SMarcin Wojtas { 365932f63fa7SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)arg; 366032f63fa7SMarcin Wojtas 366107aff471SArtur Rojek ENA_LOCK_LOCK(); 3662433ab9b6SArtur Rojek if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 3663*a33ec635SOsama Abboud ena_increment_reset_counter(adapter); 366432f63fa7SMarcin Wojtas ena_destroy_device(adapter, false); 366532f63fa7SMarcin Wojtas ena_restore_device(adapter); 3666d209ffeeSDawid Gorecki 3667d209ffeeSDawid Gorecki ena_log(adapter->pdev, INFO, 3668d209ffeeSDawid Gorecki "Device reset completed successfully, Driver info: %s\n", 3669d209ffeeSDawid Gorecki ena_version); 3670433ab9b6SArtur Rojek } 367107aff471SArtur Rojek ENA_LOCK_UNLOCK(); 36729b8d05b8SZbigniew Bodek } 36739b8d05b8SZbigniew Bodek 3674b9e80b52SOsama Abboud static void 3675b9e80b52SOsama Abboud ena_free_stats(struct ena_adapter *adapter) 3676b9e80b52SOsama Abboud { 3677b9e80b52SOsama Abboud ena_free_counters((counter_u64_t *)&adapter->hw_stats, 3678b9e80b52SOsama Abboud sizeof(struct ena_hw_stats)); 3679b9e80b52SOsama Abboud ena_free_counters((counter_u64_t *)&adapter->dev_stats, 
3680b9e80b52SOsama Abboud sizeof(struct ena_stats_dev)); 3681b9e80b52SOsama Abboud 3682b9e80b52SOsama Abboud } 36839b8d05b8SZbigniew Bodek /** 36849b8d05b8SZbigniew Bodek * ena_attach - Device Initialization Routine 36859b8d05b8SZbigniew Bodek * @pdev: device information struct 36869b8d05b8SZbigniew Bodek * 36879b8d05b8SZbigniew Bodek * Returns 0 on success, otherwise on failure. 36889b8d05b8SZbigniew Bodek * 36899b8d05b8SZbigniew Bodek * ena_attach initializes an adapter identified by a device structure. 36909b8d05b8SZbigniew Bodek * The OS initialization, configuring of the adapter private structure, 36919b8d05b8SZbigniew Bodek * and a hardware reset occur. 36929b8d05b8SZbigniew Bodek **/ 36939b8d05b8SZbigniew Bodek static int 36949b8d05b8SZbigniew Bodek ena_attach(device_t pdev) 36959b8d05b8SZbigniew Bodek { 36969b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx get_feat_ctx; 36976064f289SMarcin Wojtas struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 36989b8d05b8SZbigniew Bodek static int version_printed; 36999b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 37009b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = NULL; 37017d8c4feeSMarcin Wojtas uint32_t max_num_io_queues; 37021c808fcdSMichal Krawczyk int msix_rid; 37034fa9e02dSMarcin Wojtas int rid, rc; 37044fa9e02dSMarcin Wojtas 37059b8d05b8SZbigniew Bodek adapter = device_get_softc(pdev); 37069b8d05b8SZbigniew Bodek adapter->pdev = pdev; 3707eb4c4f4aSMarcin Wojtas adapter->first_bind = -1; 37089b8d05b8SZbigniew Bodek 37096959869eSMarcin Wojtas /* 37106959869eSMarcin Wojtas * Set up the timer service - driver is responsible for avoiding 37116959869eSMarcin Wojtas * concurrency, as the callout won't be using any locking inside. 37126959869eSMarcin Wojtas */ 371378554d0cSDawid Gorecki ENA_TIMER_INIT(adapter); 37148f15f8a7SDawid Gorecki adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO; 37158f15f8a7SDawid Gorecki adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO; 37168f15f8a7SDawid Gorecki adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES; 37178f15f8a7SDawid Gorecki adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD; 37189b8d05b8SZbigniew Bodek 3719f9e1d947SOsama Abboud adapter->irq_cpu_base = ENA_BASE_CPU_UNSPECIFIED; 3720f9e1d947SOsama Abboud adapter->irq_cpu_stride = 0; 3721f9e1d947SOsama Abboud 3722f9e1d947SOsama Abboud #ifdef RSS 3723f9e1d947SOsama Abboud adapter->rss_enabled = 1; 3724f9e1d947SOsama Abboud #endif 3725f9e1d947SOsama Abboud 37269b8d05b8SZbigniew Bodek if (version_printed++ == 0) 37273fc5d816SMarcin Wojtas ena_log(pdev, INFO, "%s\n", ena_version); 37289b8d05b8SZbigniew Bodek 37299b8d05b8SZbigniew Bodek /* Allocate memory for ena_dev structure */ 3730cd5d5804SMarcin Wojtas ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, 3731cd5d5804SMarcin Wojtas M_WAITOK | M_ZERO); 37329b8d05b8SZbigniew Bodek 37339b8d05b8SZbigniew Bodek adapter->ena_dev = ena_dev; 37349b8d05b8SZbigniew Bodek ena_dev->dmadev = pdev; 37354fa9e02dSMarcin Wojtas 37364fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_REG_BAR); 37374fa9e02dSMarcin Wojtas adapter->memory = NULL; 373882e558eaSDawid Gorecki adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, 373982e558eaSDawid Gorecki RF_ACTIVE); 37404fa9e02dSMarcin Wojtas if (unlikely(adapter->registers == NULL)) { 37413fc5d816SMarcin Wojtas ena_log(pdev, ERR, 37424fa9e02dSMarcin Wojtas "unable to allocate bus resource: registers!\n"); 37434fa9e02dSMarcin Wojtas rc = ENOMEM; 37444fa9e02dSMarcin Wojtas goto err_dev_free; 37454fa9e02dSMarcin 
Wojtas } 37464fa9e02dSMarcin Wojtas 37471c808fcdSMichal Krawczyk /* MSIx vector table may reside on BAR0 with registers or on BAR1. */ 37481c808fcdSMichal Krawczyk msix_rid = pci_msix_table_bar(pdev); 37491c808fcdSMichal Krawczyk if (msix_rid != rid) { 37501c808fcdSMichal Krawczyk adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 37511c808fcdSMichal Krawczyk &msix_rid, RF_ACTIVE); 37521c808fcdSMichal Krawczyk if (unlikely(adapter->msix == NULL)) { 37533fc5d816SMarcin Wojtas ena_log(pdev, ERR, 37541c808fcdSMichal Krawczyk "unable to allocate bus resource: msix!\n"); 37551c808fcdSMichal Krawczyk rc = ENOMEM; 37561c808fcdSMichal Krawczyk goto err_pci_free; 37571c808fcdSMichal Krawczyk } 37581c808fcdSMichal Krawczyk adapter->msix_rid = msix_rid; 37591c808fcdSMichal Krawczyk } 37601c808fcdSMichal Krawczyk 37619b8d05b8SZbigniew Bodek ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, 37629b8d05b8SZbigniew Bodek M_WAITOK | M_ZERO); 37639b8d05b8SZbigniew Bodek 37649b8d05b8SZbigniew Bodek /* Store register resources */ 376582e558eaSDawid Gorecki ((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag( 376682e558eaSDawid Gorecki adapter->registers); 376782e558eaSDawid Gorecki ((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle( 376882e558eaSDawid Gorecki adapter->registers); 37699b8d05b8SZbigniew Bodek 37703f9ed7abSMarcin Wojtas if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) { 37713fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to pmap registers bar\n"); 37729b8d05b8SZbigniew Bodek rc = ENXIO; 3773cd5d5804SMarcin Wojtas goto err_bus_free; 37749b8d05b8SZbigniew Bodek } 37759b8d05b8SZbigniew Bodek 37763324e304SMichal Krawczyk rc = ena_map_llq_mem_bar(pdev, ena_dev); 37773324e304SMichal Krawczyk if (unlikely(rc != 0)) { 37783324e304SMichal Krawczyk ena_log(pdev, ERR, "Failed to map ENA mem bar"); 37793324e304SMichal Krawczyk goto err_bus_free; 37803324e304SMichal Krawczyk } 37819b8d05b8SZbigniew Bodek 3782637ff00fSosamaabb ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; 3783637ff00fSosamaabb 3784fd43fd2aSMarcin Wojtas /* Initially clear all the flags */ 3785fd43fd2aSMarcin Wojtas ENA_FLAG_ZERO(adapter); 3786fd43fd2aSMarcin Wojtas 37879b8d05b8SZbigniew Bodek /* Device initialization */ 37889b8d05b8SZbigniew Bodek rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); 37893f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 37903fc5d816SMarcin Wojtas ena_log(pdev, ERR, "ENA device init failed! 
(err: %d)\n", rc); 37919b8d05b8SZbigniew Bodek rc = ENXIO; 37929b8d05b8SZbigniew Bodek goto err_bus_free; 37939b8d05b8SZbigniew Bodek } 37949b8d05b8SZbigniew Bodek 37950b432b70SMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 379682e558eaSDawid Gorecki adapter->disable_meta_caching = !!( 379782e558eaSDawid Gorecki get_feat_ctx.llq.accel_mode.u.get.supported_flags & 37980b432b70SMarcin Wojtas BIT(ENA_ADMIN_DISABLE_META_CACHING)); 37990b432b70SMarcin Wojtas 38009b8d05b8SZbigniew Bodek adapter->keep_alive_timestamp = getsbinuptime(); 38019b8d05b8SZbigniew Bodek 38029b8d05b8SZbigniew Bodek adapter->tx_offload_cap = get_feat_ctx.offload.tx; 38039b8d05b8SZbigniew Bodek 38049b8d05b8SZbigniew Bodek memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, 38059b8d05b8SZbigniew Bodek ETHER_ADDR_LEN); 38069b8d05b8SZbigniew Bodek 38077d8c4feeSMarcin Wojtas calc_queue_ctx.pdev = pdev; 38086064f289SMarcin Wojtas calc_queue_ctx.ena_dev = ena_dev; 38096064f289SMarcin Wojtas calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 38106064f289SMarcin Wojtas 38117d8c4feeSMarcin Wojtas /* Calculate initial and maximum IO queue number and size */ 38127d8c4feeSMarcin Wojtas max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, 38137d8c4feeSMarcin Wojtas &get_feat_ctx); 38147d8c4feeSMarcin Wojtas rc = ena_calc_io_queue_size(&calc_queue_ctx); 38157d8c4feeSMarcin Wojtas if (unlikely((rc != 0) || (max_num_io_queues <= 0))) { 38166064f289SMarcin Wojtas rc = EFAULT; 38179b8d05b8SZbigniew Bodek goto err_com_free; 38189b8d05b8SZbigniew Bodek } 38199b8d05b8SZbigniew Bodek 38209762a033SMarcin Wojtas adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size; 38219762a033SMarcin Wojtas adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size; 38227d8c4feeSMarcin Wojtas adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 38237d8c4feeSMarcin Wojtas adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 38246064f289SMarcin Wojtas adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 38256064f289SMarcin Wojtas adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 38266064f289SMarcin Wojtas 38277d8c4feeSMarcin Wojtas adapter->max_num_io_queues = max_num_io_queues; 38287d8c4feeSMarcin Wojtas 38296064f289SMarcin Wojtas adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; 38309b8d05b8SZbigniew Bodek 38317d8c4feeSMarcin Wojtas adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 38327d8c4feeSMarcin Wojtas 38337d8c4feeSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 38347d8c4feeSMarcin Wojtas 38359b8d05b8SZbigniew Bodek /* set up dma tags for rx and tx buffers */ 38369b8d05b8SZbigniew Bodek rc = ena_setup_tx_dma_tag(adapter); 38374e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 38383fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Failed to create TX DMA tag\n"); 3839cd5d5804SMarcin Wojtas goto err_com_free; 38404e8acd84SMarcin Wojtas } 38419b8d05b8SZbigniew Bodek 38429b8d05b8SZbigniew Bodek rc = ena_setup_rx_dma_tag(adapter); 38434e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 38443fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Failed to create RX DMA tag\n"); 3845cd5d5804SMarcin Wojtas goto err_tx_tag_free; 38464e8acd84SMarcin Wojtas } 38479b8d05b8SZbigniew Bodek 3848e2735b09SMarcin Wojtas /* 3849e2735b09SMarcin Wojtas * The amount of requested MSIX vectors is equal to 3850e2735b09SMarcin Wojtas * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant 3851e2735b09SMarcin Wojtas * number of admin queue interrupts. 
The former is initially determined 3852e2735b09SMarcin Wojtas * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be 3853e2735b09SMarcin Wojtas * achieved if there are not enough system resources. By default, the 3854e2735b09SMarcin Wojtas * number of effectively used IO queues is the same but later on it can 3855e2735b09SMarcin Wojtas * be limited by the user using sysctl interface. 3856e2735b09SMarcin Wojtas */ 3857aa9c3226SMarcin Wojtas rc = ena_enable_msix_and_set_admin_interrupts(adapter); 38583f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 38593fc5d816SMarcin Wojtas ena_log(pdev, ERR, 38609b8d05b8SZbigniew Bodek "Failed to enable and set the admin interrupts\n"); 3861c115a1e2SMarcin Wojtas goto err_io_free; 3862c115a1e2SMarcin Wojtas } 3863e2735b09SMarcin Wojtas /* By default all of allocated MSIX vectors are actively used */ 3864e2735b09SMarcin Wojtas adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC; 3865e2735b09SMarcin Wojtas 3866e2735b09SMarcin Wojtas /* initialize rings basic information */ 3867e2735b09SMarcin Wojtas ena_init_io_rings(adapter); 3868c115a1e2SMarcin Wojtas 3869f97993adSOsama Abboud rc = ena_com_allocate_customer_metrics_buffer(ena_dev); 3870f97993adSOsama Abboud if (rc) { 3871f97993adSOsama Abboud ena_log(pdev, ERR, "Failed to allocate customer metrics buffer.\n"); 3872f97993adSOsama Abboud goto err_msix_free; 3873f97993adSOsama Abboud } 3874f97993adSOsama Abboud 3875f97993adSOsama Abboud rc = ena_sysctl_allocate_customer_metrics_buffer(adapter); 3876f97993adSOsama Abboud if (unlikely(rc)){ 3877f97993adSOsama Abboud ena_log(pdev, ERR, "Failed to allocate sysctl customer metrics buffer.\n"); 3878f97993adSOsama Abboud goto err_metrics_buffer_destroy; 3879f97993adSOsama Abboud } 3880f97993adSOsama Abboud 3881b9e80b52SOsama Abboud /* Initialize statistics */ 3882b9e80b52SOsama Abboud ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, 3883b9e80b52SOsama Abboud sizeof(struct ena_stats_dev)); 3884b9e80b52SOsama Abboud ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, 3885b9e80b52SOsama Abboud sizeof(struct ena_hw_stats)); 3886b9e80b52SOsama Abboud ena_sysctl_add_nodes(adapter); 3887b9e80b52SOsama Abboud 3888c115a1e2SMarcin Wojtas /* setup network interface */ 3889aa386085SZhenlei Huang ena_setup_ifnet(pdev, adapter, &get_feat_ctx); 38909b8d05b8SZbigniew Bodek 3891081169f2SZbigniew Bodek /* Initialize reset task queue */ 3892081169f2SZbigniew Bodek TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); 3893081169f2SZbigniew Bodek adapter->reset_tq = taskqueue_create("ena_reset_enqueue", 3894081169f2SZbigniew Bodek M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); 389582e558eaSDawid Gorecki taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq", 389682e558eaSDawid Gorecki device_get_nameunit(adapter->pdev)); 3897081169f2SZbigniew Bodek 3898b899a02aSDawid Gorecki /* Initialize metrics task queue */ 3899b899a02aSDawid Gorecki TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter); 3900b899a02aSDawid Gorecki adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue", 3901b899a02aSDawid Gorecki M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq); 390282e558eaSDawid Gorecki taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq", 390382e558eaSDawid Gorecki device_get_nameunit(adapter->pdev)); 3904b899a02aSDawid Gorecki 3905d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 3906d17b7d87SMarcin Wojtas rc = ena_netmap_attach(adapter); 3907d17b7d87SMarcin Wojtas if (rc != 0) { 
39083fc5d816SMarcin Wojtas ena_log(pdev, ERR, "netmap attach failed: %d\n", rc); 3909d17b7d87SMarcin Wojtas goto err_detach; 3910d17b7d87SMarcin Wojtas } 3911d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */ 3912d17b7d87SMarcin Wojtas 39139b8d05b8SZbigniew Bodek /* Tell the stack that the interface is not active */ 39149b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 3915fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 39169b8d05b8SZbigniew Bodek 391778554d0cSDawid Gorecki /* Run the timer service */ 391878554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 391978554d0cSDawid Gorecki 39209b8d05b8SZbigniew Bodek return (0); 39219b8d05b8SZbigniew Bodek 3922d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 3923d17b7d87SMarcin Wojtas err_detach: 3924d17b7d87SMarcin Wojtas ether_ifdetach(adapter->ifp); 3925f97993adSOsama Abboud free(adapter->customer_metrics_array, M_DEVBUF); 39265517ca84SOsama Abboud #endif /* DEV_NETMAP */ 3927f97993adSOsama Abboud err_metrics_buffer_destroy: 3928f97993adSOsama Abboud ena_com_delete_customer_metrics_buffer(ena_dev); 3929c115a1e2SMarcin Wojtas err_msix_free: 3930b9e80b52SOsama Abboud ena_free_stats(adapter); 3931c115a1e2SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); 3932c115a1e2SMarcin Wojtas ena_free_mgmnt_irq(adapter); 3933c115a1e2SMarcin Wojtas ena_disable_msix(adapter); 3934cd5d5804SMarcin Wojtas err_io_free: 39359b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(adapter); 39369b8d05b8SZbigniew Bodek ena_free_rx_dma_tag(adapter); 3937cd5d5804SMarcin Wojtas err_tx_tag_free: 39389b8d05b8SZbigniew Bodek ena_free_tx_dma_tag(adapter); 3939cd5d5804SMarcin Wojtas err_com_free: 39409b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 39419b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 3942cd5d5804SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 39439b8d05b8SZbigniew Bodek err_bus_free: 39449b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 39451c808fcdSMichal Krawczyk err_pci_free: 39469b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 39474fa9e02dSMarcin Wojtas err_dev_free: 39484fa9e02dSMarcin Wojtas free(ena_dev, M_DEVBUF); 3949cd5d5804SMarcin Wojtas 39509b8d05b8SZbigniew Bodek return (rc); 39519b8d05b8SZbigniew Bodek } 39529b8d05b8SZbigniew Bodek 39539b8d05b8SZbigniew Bodek /** 39549b8d05b8SZbigniew Bodek * ena_detach - Device Removal Routine 39559b8d05b8SZbigniew Bodek * @pdev: device information struct 39569b8d05b8SZbigniew Bodek * 39579b8d05b8SZbigniew Bodek * ena_detach is called by the device subsystem to alert the driver 39589b8d05b8SZbigniew Bodek * that it should release a PCI device. 
39599b8d05b8SZbigniew Bodek **/ 39609b8d05b8SZbigniew Bodek static int 39619b8d05b8SZbigniew Bodek ena_detach(device_t pdev) 39629b8d05b8SZbigniew Bodek { 39639b8d05b8SZbigniew Bodek struct ena_adapter *adapter = device_get_softc(pdev); 39649b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 39659b8d05b8SZbigniew Bodek int rc; 39669b8d05b8SZbigniew Bodek 39679b8d05b8SZbigniew Bodek /* Make sure VLANS are not using driver */ 39687583c633SJustin Hibbits if (if_vlantrunkinuse(adapter->ifp)) { 39693fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n"); 39709b8d05b8SZbigniew Bodek return (EBUSY); 39719b8d05b8SZbigniew Bodek } 39729b8d05b8SZbigniew Bodek 39739151c55dSMarcin Wojtas ether_ifdetach(adapter->ifp); 39749151c55dSMarcin Wojtas 39756959869eSMarcin Wojtas /* Stop timer service */ 397607aff471SArtur Rojek ENA_LOCK_LOCK(); 397778554d0cSDawid Gorecki ENA_TIMER_DRAIN(adapter); 397807aff471SArtur Rojek ENA_LOCK_UNLOCK(); 39796959869eSMarcin Wojtas 3980b899a02aSDawid Gorecki /* Release metrics task */ 3981b899a02aSDawid Gorecki while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL)) 3982b899a02aSDawid Gorecki taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task); 3983b899a02aSDawid Gorecki taskqueue_free(adapter->metrics_tq); 3984b899a02aSDawid Gorecki 39856959869eSMarcin Wojtas /* Release reset task */ 39869b8d05b8SZbigniew Bodek while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) 39879b8d05b8SZbigniew Bodek taskqueue_drain(adapter->reset_tq, &adapter->reset_task); 39889b8d05b8SZbigniew Bodek taskqueue_free(adapter->reset_tq); 39899b8d05b8SZbigniew Bodek 399007aff471SArtur Rojek ENA_LOCK_LOCK(); 39919b8d05b8SZbigniew Bodek ena_down(adapter); 399232f63fa7SMarcin Wojtas ena_destroy_device(adapter, true); 399307aff471SArtur Rojek ENA_LOCK_UNLOCK(); 39949b8d05b8SZbigniew Bodek 39950e7d31f6SMarcin Wojtas /* Restore unregistered sysctl queue nodes. 
*/ 39960e7d31f6SMarcin Wojtas ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues, 39970e7d31f6SMarcin Wojtas adapter->max_num_io_queues); 39980e7d31f6SMarcin Wojtas 3999d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 4000d17b7d87SMarcin Wojtas netmap_detach(adapter->ifp); 4001d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */ 4002d17b7d87SMarcin Wojtas 4003b9e80b52SOsama Abboud ena_free_stats(adapter); 40049b8d05b8SZbigniew Bodek 40059b8d05b8SZbigniew Bodek rc = ena_free_rx_dma_tag(adapter); 40063f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 40073fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 40089b8d05b8SZbigniew Bodek "Unmapped RX DMA tag associations\n"); 40099b8d05b8SZbigniew Bodek 40109b8d05b8SZbigniew Bodek rc = ena_free_tx_dma_tag(adapter); 40113f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 40123fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 40139b8d05b8SZbigniew Bodek "Unmapped TX DMA tag associations\n"); 40149b8d05b8SZbigniew Bodek 40159b8d05b8SZbigniew Bodek ena_free_irqs(adapter); 40169b8d05b8SZbigniew Bodek 40179b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 40189b8d05b8SZbigniew Bodek 40196d1ef2abSArtur Rojek if (adapter->rss_indir != NULL) 40206d1ef2abSArtur Rojek free(adapter->rss_indir, M_DEVBUF); 40216d1ef2abSArtur Rojek 402232f63fa7SMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) 402332f63fa7SMarcin Wojtas ena_com_rss_destroy(ena_dev); 402432f63fa7SMarcin Wojtas 402532f63fa7SMarcin Wojtas ena_com_delete_host_info(ena_dev); 402632f63fa7SMarcin Wojtas 4027f97993adSOsama Abboud free(adapter->customer_metrics_array, M_DEVBUF); 4028f97993adSOsama Abboud 4029f97993adSOsama Abboud ena_com_delete_customer_metrics_buffer(ena_dev); 4030f97993adSOsama Abboud 40319151c55dSMarcin Wojtas if_free(adapter->ifp); 40329151c55dSMarcin Wojtas 40339b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 40349b8d05b8SZbigniew Bodek 40359b8d05b8SZbigniew Bodek free(ena_dev, M_DEVBUF); 40369b8d05b8SZbigniew Bodek 40379b8d05b8SZbigniew Bodek return (bus_generic_detach(pdev)); 40389b8d05b8SZbigniew Bodek } 40399b8d05b8SZbigniew Bodek 40409b8d05b8SZbigniew Bodek /****************************************************************************** 40419b8d05b8SZbigniew Bodek ******************************** AENQ Handlers ******************************* 40429b8d05b8SZbigniew Bodek *****************************************************************************/ 40439b8d05b8SZbigniew Bodek /** 40449b8d05b8SZbigniew Bodek * ena_update_on_link_change: 40459b8d05b8SZbigniew Bodek * Notify the network interface about the change in link status 40469b8d05b8SZbigniew Bodek **/ 40479b8d05b8SZbigniew Bodek static void 40489b8d05b8SZbigniew Bodek ena_update_on_link_change(void *adapter_data, 40499b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 40509b8d05b8SZbigniew Bodek { 40519b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 40529b8d05b8SZbigniew Bodek struct ena_admin_aenq_link_change_desc *aenq_desc; 40539b8d05b8SZbigniew Bodek int status; 40549b8d05b8SZbigniew Bodek if_t ifp; 40559b8d05b8SZbigniew Bodek 40569b8d05b8SZbigniew Bodek aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 40579b8d05b8SZbigniew Bodek ifp = adapter->ifp; 40589b8d05b8SZbigniew Bodek status = aenq_desc->flags & 40599b8d05b8SZbigniew Bodek ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; 40609b8d05b8SZbigniew Bodek 40619b8d05b8SZbigniew Bodek if (status != 0) { 40623fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "link is UP\n"); 
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
		if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
			if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		ena_log(adapter->pdev, INFO, "link is DOWN\n");
		if_link_state_change(ifp, LINK_STATE_DOWN);
		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
	}
}

static void
ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	    adapter->ena_dev, "Invalid group(%x) expected %x\n",
	    aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints =
		    (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		ena_log(adapter->pdev, ERR,
		    "Invalid aenq notification link state %d\n",
		    aenq_e->aenq_common_desc.syndrome);
	}
}

static void
ena_lock_init(void *arg)
{
	ENA_LOCK_INIT();
}
SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);

static void
ena_lock_uninit(void *arg)
{
	ENA_LOCK_DESTROY();
}
SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);

/**
 * This handler will be called for an unknown event group or for events with
 * unimplemented handlers.
 **/
static void
unimplemented_aenq_handler(void *adapter_data,
    struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	ena_log(adapter->pdev, ERR,
	    "Unknown event was received or event with unimplemented handler\n");
}

static void ena_conf_notification(void *adapter_data,
    struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_conf_notifications_desc *desc;
41288cd86b51SOsama Abboud u64 bitmap, bit; 41298cd86b51SOsama Abboud 41308cd86b51SOsama Abboud desc = (struct ena_admin_aenq_conf_notifications_desc *)aenq_e; 41318cd86b51SOsama Abboud bitmap = desc->notifications_bitmap; 41328cd86b51SOsama Abboud 41338cd86b51SOsama Abboud if (bitmap == 0) { 41348cd86b51SOsama Abboud ena_log(adapter->pdev, INFO, 41358cd86b51SOsama Abboud "Empty configuration notification bitmap\n"); 41368cd86b51SOsama Abboud return; 41378cd86b51SOsama Abboud } 41388cd86b51SOsama Abboud 41398cd86b51SOsama Abboud for (bit = ffsll(bitmap); bit != 0; bit = ffsll(bitmap)) { 41408cd86b51SOsama Abboud bit--; 41418cd86b51SOsama Abboud ena_log(adapter->pdev, INFO, 41428cd86b51SOsama Abboud "Sub-optimal configuration notification code: %" PRIu64 " Refer to AWS ENA documentation for additional details and mitigation options.\n", 41438cd86b51SOsama Abboud bit + 1); 41448cd86b51SOsama Abboud // Clear the processed bit 41458cd86b51SOsama Abboud bitmap &= ~(1UL << bit); 41468cd86b51SOsama Abboud } 41478cd86b51SOsama Abboud } 41488cd86b51SOsama Abboud 41499b8d05b8SZbigniew Bodek static struct ena_aenq_handlers aenq_handlers = { 41509b8d05b8SZbigniew Bodek .handlers = { 41519b8d05b8SZbigniew Bodek [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 415240621d71SMarcin Wojtas [ENA_ADMIN_NOTIFICATION] = ena_notification, 41539b8d05b8SZbigniew Bodek [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, 41548cd86b51SOsama Abboud [ENA_ADMIN_CONF_NOTIFICATIONS] = ena_conf_notification, 41559b8d05b8SZbigniew Bodek }, 41569b8d05b8SZbigniew Bodek .unimplemented_handler = unimplemented_aenq_handler 41579b8d05b8SZbigniew Bodek }; 41589b8d05b8SZbigniew Bodek 41599b8d05b8SZbigniew Bodek /********************************************************************* 41609b8d05b8SZbigniew Bodek * FreeBSD Device Interface Entry Points 41619b8d05b8SZbigniew Bodek *********************************************************************/ 41629b8d05b8SZbigniew Bodek 416382e558eaSDawid Gorecki static device_method_t ena_methods[] = { /* Device interface */ 41649b8d05b8SZbigniew Bodek DEVMETHOD(device_probe, ena_probe), 41659b8d05b8SZbigniew Bodek DEVMETHOD(device_attach, ena_attach), 416682e558eaSDawid Gorecki DEVMETHOD(device_detach, ena_detach), DEVMETHOD_END 41679b8d05b8SZbigniew Bodek }; 41689b8d05b8SZbigniew Bodek 41699b8d05b8SZbigniew Bodek static driver_t ena_driver = { 417082e558eaSDawid Gorecki "ena", 417182e558eaSDawid Gorecki ena_methods, 417282e558eaSDawid Gorecki sizeof(struct ena_adapter), 41739b8d05b8SZbigniew Bodek }; 41749b8d05b8SZbigniew Bodek 41751dc1476cSJohn Baldwin DRIVER_MODULE(ena, pci, ena_driver, 0, 0); 417640abe76bSWarner Losh MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array, 4177329e817fSWarner Losh nitems(ena_vendor_info_array) - 1); 41789b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, pci, 1, 1, 1); 41799b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, ether, 1, 1, 1); 4180d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 4181d17b7d87SMarcin Wojtas MODULE_DEPEND(ena, netmap, 1, 1, 1); 4182d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */ 41839b8d05b8SZbigniew Bodek 41849b8d05b8SZbigniew Bodek /*********************************************************************/ 4185
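/*
 * Illustrative sketch, not part of the driver: check_for_empty_rx_ring()
 * above treats an RX ring as stalled when ena_com_free_q_entries() on its
 * submission queue keeps reporting ring_size - 1 free entries (i.e. no
 * buffer is posted) for EMPTY_RX_REFILL consecutive passes of the timer
 * service, and only then kicks the cleanup task. The disabled block below
 * is a minimal, self-contained userspace model of that counting scheme;
 * the example_* names and EXAMPLE_* constants are hypothetical stand-ins,
 * not driver API.
 */
#if 0	/* Example only; never compiled with the driver. */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_RING_SIZE	1024
#define EXAMPLE_EMPTY_RX_REFILL	4	/* stand-in for EMPTY_RX_REFILL */

struct example_rx_ring {
	int	ring_size;
	int	empty_rx_queue;		/* consecutive ticks seen empty */
};

/*
 * Model of one timer tick of the stall check: 'free_entries' plays the role
 * of ena_com_free_q_entries() on the ring's submission queue. Returns true
 * when the cleanup task would be enqueued.
 */
static bool
example_check_ring(struct example_rx_ring *ring, int free_entries)
{
	if (free_entries == (ring->ring_size - 1)) {
		if (++ring->empty_rx_queue >= EXAMPLE_EMPTY_RX_REFILL) {
			ring->empty_rx_queue = 0;
			return (true);	/* would trigger the refill */
		}
	} else {
		ring->empty_rx_queue = 0;
	}
	return (false);
}

int
main(void)
{
	struct example_rx_ring ring = { EXAMPLE_RING_SIZE, 0 };
	int tick;

	/* The ring reports no posted buffers for several consecutive ticks. */
	for (tick = 1; tick <= 5; tick++)
		printf("tick %d: trigger=%d\n", tick,
		    example_check_ring(&ring, EXAMPLE_RING_SIZE - 1));
	return (0);
}
#endif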
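/*
 * Illustrative sketch, not part of the driver: ena_update_hints() above takes
 * HW hints expressed in milliseconds, passes the ENA_HW_HINTS_NO_TIMEOUT
 * sentinel through unchanged, and otherwise converts the value into the unit
 * each consumer expects (microseconds for the admin and MMIO read timeouts,
 * sbintime via SBT_1MS for the keep alive and missing TX completion
 * timeouts). The disabled block below models just that conversion rule in
 * userspace; the example_* names, the sentinel value and the scale constant
 * are hypothetical stand-ins.
 */
#if 0	/* Example only; never compiled with the driver. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_NO_TIMEOUT	UINT32_MAX		/* stand-in sentinel */
#define EXAMPLE_UNITS_PER_MS	((int64_t)1 << 32)	/* placeholder scale */

/*
 * Convert a millisecond hint to the consumer's unit while preserving the
 * "no timeout" sentinel unchanged.
 */
static int64_t
example_hint_to_unit(uint32_t hint_ms, int64_t units_per_ms)
{
	if (hint_ms == EXAMPLE_NO_TIMEOUT)
		return ((int64_t)hint_ms);
	return ((int64_t)hint_ms * units_per_ms);
}

int
main(void)
{
	/* A 4000 ms watchdog hint converted to the placeholder scale. */
	printf("%lld\n",
	    (long long)example_hint_to_unit(4000, EXAMPLE_UNITS_PER_MS));
	/* The sentinel is preserved as-is. */
	printf("%lld\n",
	    (long long)example_hint_to_unit(EXAMPLE_NO_TIMEOUT, 1000));
	return (0);
}
#endif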
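/*
 * Illustrative sketch, not part of the driver: ena_timer_service() above runs
 * once per second and turns the seconds-based metrics_sample_interval into a
 * tick counter. When the interval is non-zero and the counter reaches it, the
 * metrics task would be enqueued and the counter reset; an interval of 0
 * disables sampling. The disabled block below is a minimal userspace model of
 * that logic; the example_* names are hypothetical stand-ins.
 */
#if 0	/* Example only; never compiled with the driver. */
#include <stdbool.h>
#include <stdio.h>

struct example_sampler {
	unsigned int interval;		/* seconds; 0 disables sampling */
	unsigned int interval_cnt;	/* ticks since the last sample   */
};

/* One timer tick (the real timer service runs once per second). */
static bool
example_tick(struct example_sampler *s)
{
	if (s->interval != 0 && ++s->interval_cnt >= s->interval) {
		s->interval_cnt = 0;
		return (true);		/* would enqueue the metrics task */
	}
	return (false);
}

int
main(void)
{
	struct example_sampler s = { 3, 0 };
	int sec;

	for (sec = 1; sec <= 7; sec++)
		printf("second %d: sample=%d\n", sec, example_tick(&s));
	return (0);
}
#endif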
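/*
 * Illustrative sketch, not part of the driver: ena_conf_notification() above
 * walks every set bit of the 64-bit notifications bitmap, lowest bit first,
 * logging bit + 1 as the notification code and clearing each processed bit.
 * The disabled block below shows the same iteration pattern in userspace;
 * __builtin_ffsll() stands in for the kernel's ffsll() (both return the
 * 1-based index of the least significant set bit, or 0 for an empty mask),
 * and the example_* names are hypothetical stand-ins.
 */
#if 0	/* Example only; never compiled with the driver. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void
example_walk_bitmap(uint64_t bitmap)
{
	int bit;

	for (bit = __builtin_ffsll(bitmap); bit != 0;
	    bit = __builtin_ffsll(bitmap)) {
		bit--;
		printf("notification code: %" PRIu64 "\n", (uint64_t)bit + 1);
		/* Clear the processed bit so the loop terminates. */
		bitmap &= ~(UINT64_C(1) << bit);
	}
}

int
main(void)
{
	/* Bits 0, 5 and 40 set -> codes 1, 6 and 41. */
	example_walk_bitmap(UINT64_C(1) | UINT64_C(1) << 5 | UINT64_C(1) << 40);
	return (0);
}
#endif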