/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2021 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include "ena.h"
#include "ena_datapath.h"
#include "ena_rss.h"
#include "ena_sysctl.h"

#ifdef DEV_NETMAP
#include "ena_netmap.h"
#endif /* DEV_NETMAP */

/*********************************************************
 *  Function prototypes
 *********************************************************/
static int ena_probe(device_t);
static void ena_intr_msix_mgmnt(void *);
static void ena_free_pci_resources(struct ena_adapter *);
static int ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *,
    uint16_t);
static void ena_init_io_rings_basic(struct ena_adapter *);
static void ena_init_io_rings_advanced(struct ena_adapter *);
static void ena_init_io_rings(struct ena_adapter *);
static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void ena_free_all_io_rings_resources(struct ena_adapter *);
static int ena_setup_tx_dma_tag(struct ena_adapter *);
static int ena_free_tx_dma_tag(struct ena_adapter *);
static int ena_setup_rx_dma_tag(struct ena_adapter *);
static int ena_free_rx_dma_tag(struct ena_adapter *);
static void ena_release_all_tx_dmamap(struct ena_ring *);
static int ena_setup_tx_resources(struct ena_adapter *, int);
static void ena_free_tx_resources(struct ena_adapter *, int);
static int ena_setup_all_tx_resources(struct ena_adapter *);
static void ena_free_all_tx_resources(struct ena_adapter *);
static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int ena_setup_all_rx_resources(struct ena_adapter *);
static void ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void ena_refill_all_rx_bufs(struct ena_adapter *);
static void ena_free_all_rx_bufs(struct ena_adapter *);
static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void ena_free_all_tx_bufs(struct ena_adapter *);
static void ena_destroy_all_tx_queues(struct ena_adapter *);
static void ena_destroy_all_rx_queues(struct ena_adapter *);
static void ena_destroy_all_io_queues(struct ena_adapter *);
static int ena_create_io_queues(struct ena_adapter *);
static int ena_handle_msix(void *);
static int ena_enable_msix(struct ena_adapter *);
static void ena_setup_mgmnt_intr(struct ena_adapter *);
static int ena_setup_io_intr(struct ena_adapter *);
static int ena_request_mgmnt_irq(struct ena_adapter *);
static int ena_request_io_irq(struct ena_adapter *);
static void ena_free_mgmnt_irq(struct ena_adapter *);
static void ena_free_io_irq(struct ena_adapter *);
static void ena_free_irqs(struct ena_adapter *);
static void ena_disable_msix(struct ena_adapter *);
static void ena_unmask_all_io_irqs(struct ena_adapter *);
static int ena_up_complete(struct ena_adapter *);
static uint64_t ena_get_counter(if_t, ift_counter);
static int ena_media_change(if_t);
static void ena_media_status(if_t, struct ifmediareq *);
static void ena_init(void *);
static int ena_ioctl(if_t, u_long, caddr_t);
static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void ena_update_host_info(struct ena_admin_host_info *, if_t);
static void ena_update_hwassist(struct ena_adapter *);
static int ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int ena_enable_wc(device_t, struct resource *);
static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
    struct ena_com_dev_get_features_ctx *);
static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
static void ena_config_host_info(struct ena_com_dev *, device_t);
static int ena_attach(device_t);
static int ena_detach(device_t);
static int ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *);
static int ena_copy_eni_metrics(struct ena_adapter *);
static void ena_timer_service(void *);

static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME
    " v" ENA_DRV_MODULE_VERSION;

static ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
	/* Last entry */
	{ 0, 0, 0 }
};

struct sx ena_global_lock;

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;

void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

int
ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
    int mapflags, bus_size_t alignment, int domain)
{
	struct ena_adapter *adapter = device_get_softc(dmadev);
	device_t pdev = adapter->pdev;
	uint32_t maxsize;
	uint64_t dma_space_addr;
	int error;

	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
	if (unlikely(dma_space_addr == 0))
		dma_space_addr = BUS_SPACE_MAXADDR;

	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
	    alignment, 0,	/* alignment, bounds */
	    dma_space_addr,	/* lowaddr of exclusion window */
	    BUS_SPACE_MAXADDR,	/* highaddr of exclusion window */
	    NULL, NULL,		/* filter, filterarg */
	    maxsize,		/* maxsize */
	    1,			/* nsegments */
	    maxsize,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockarg */
	    &dma->tag);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
		goto fail_tag;
	}

	error = bus_dma_tag_set_domain(dma->tag, domain);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n",
		    error);
		goto fail_map_create;
	}

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
		    (uintmax_t)size, error);
		goto fail_map_create;
	}

	dma->paddr = 0;
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    ena_dmamap_callback, &dma->paddr, mapflags);
	if (unlikely((error != 0) || (dma->paddr == 0))) {
		ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
		goto fail_map_load;
	}

	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail_map_load:
	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
	bus_dma_tag_destroy(dma->tag);
fail_tag:
	dma->tag = NULL;
	dma->vaddr = NULL;
	dma->paddr = 0;

	return (error);
}

static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;

	if (adapter->memory != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
	}

	if (adapter->registers != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
	}

	if (adapter->msix != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid,
		    adapter->msix);
	}
}

static int
ena_probe(device_t dev)
{
	ena_vendor_info_t *ent;
	uint16_t pci_vendor_id = 0;
	uint16_t pci_device_id = 0;

	pci_vendor_id = pci_get_vendor(dev);
	pci_device_id = pci_get_device(dev);

	ent = ena_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id)) {
			ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id,
			    pci_device_id);

			device_set_desc(dev, ENA_DEVICE_DESC);
			return (BUS_PROBE_DEFAULT);
		}

		ent++;
	}

	return (ENXIO);
}

static int
ena_change_mtu(if_t ifp, int new_mtu)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	device_t pdev = adapter->pdev;
	int rc;

	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
		ena_log(pdev, ERR,
		    "Invalid MTU setting. new_mtu: %d max mtu: %d min mtu: %d\n",
		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
		return (EINVAL);
	}

	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (likely(rc == 0)) {
		ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
		if_setmtu(ifp, new_mtu);
	} else {
		ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
	}

	return (rc);
}

static inline void
ena_alloc_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		*begin = counter_u64_alloc(M_WAITOK);
}

static inline void
ena_free_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_free(*begin);
}

static inline void
ena_reset_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_zero(*begin);
}

static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{
	ring->qid = qid;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	atomic_store_8(&ring->first_interrupt, 0);
	ring->no_interrupt_event_cnt = 0;
}

static void
ena_init_io_rings_basic(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	struct ena_que *que;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
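		/*
		 * (Descriptive note: both values below come from the ena_com
		 * device state; tx_max_header_size bounds how much of the
		 * packet header may be pushed together with the descriptor,
		 * and tx_mem_queue_type presumably selects between
		 * host-memory and device-memory (LLQ) Tx placement.)
		 */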
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;

		que = &adapter->que[i];
		que->adapter = adapter;
		que->id = i;
		que->tx_ring = txr;
		que->rx_ring = rxr;

		txr->que = que;
		rxr->que = que;

		rxr->empty_rx_queue = 0;
		rxr->rx_mbuf_sz = ena_mbuf_sz;
	}
}

static void
ena_init_io_rings_advanced(struct ena_adapter *adapter)
{
	struct ena_ring *txr, *rxr;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* Allocate a buf ring */
		txr->buf_ring_size = adapter->buf_ring_size;
		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK,
		    &txr->ring_mtx);

		/* Allocate Tx statistics. */
		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
		    sizeof(txr->tx_stats));
		txr->tx_last_cleanup_ticks = ticks;

		/* Allocate Rx statistics. */
		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
		    sizeof(rxr->rx_stats));

		/* Initialize locks */
		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(adapter->pdev), i);
		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(adapter->pdev), i);

		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
	}
}

static void
ena_init_io_rings(struct ena_adapter *adapter)
{
	/*
	 * IO rings initialization can be divided into the 2 steps:
	 * 1. Initialize variables and fields with initial values and copy
	 *    them from adapter/ena_dev (basic)
	 * 2. Allocate mutex, counters and buf_ring (advanced)
	 */
	ena_init_io_rings_basic(adapter);
	ena_init_io_rings_advanced(adapter);
}

static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *txr = &adapter->tx_ring[qid];
	struct ena_ring *rxr = &adapter->rx_ring[qid];

	ena_free_counters((counter_u64_t *)&txr->tx_stats,
	    sizeof(txr->tx_stats));
	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
	    sizeof(rxr->rx_stats));

	ENA_RING_MTX_LOCK(txr);
	drbr_free(txr->br, M_DEVBUF);
	ENA_RING_MTX_UNLOCK(txr);

	mtx_destroy(&txr->ring_mtx);
}

static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_io_ring_resources(adapter, i);
}

static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Tx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
	    1, 0,				/* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			/* highaddr of excl window */
	    NULL, NULL,				/* filter, filterarg */
	    ENA_TSO_MAXSIZE,			/* maxsize */
	    adapter->max_tx_sgl_size - 1,	/* nsegments */
	    ENA_TSO_MAXSIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &adapter->tx_buf_tag);

	return (ret);
}

static int
ena_free_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);

	if (likely(ret == 0))
		adapter->tx_buf_tag = NULL;

	return (ret);
}

static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Rx buffers */
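	/*
	 * (Descriptive note on the parameters below: lowaddr is derived from
	 * the DMA width reported by the device, maxsize/maxsegsize match the
	 * Rx mbuf size, and nsegments follows the maximum Rx SGL size the
	 * device advertised.)
	 */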
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
	    1, 0,				/* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			/* highaddr of excl window */
	    NULL, NULL,				/* filter, filterarg */
	    ena_mbuf_sz,			/* maxsize */
	    adapter->max_rx_sgl_size,		/* nsegments */
	    ena_mbuf_sz,			/* maxsegsize */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockarg */
	    &adapter->rx_buf_tag);

	return (ret);
}

static int
ena_free_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);

	if (likely(ret == 0))
		adapter->rx_buf_tag = NULL;

	return (ret);
}

static void
ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_tx_buffer *tx_info;
	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
	int i;
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	for (i = 0; i < tx_ring->ring_size; ++i) {
		tx_info = &tx_ring->tx_buffer_info[i];
#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_info->nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
				if (nm_info->map_seg[j] != NULL) {
					bus_dmamap_destroy(tx_tag,
					    nm_info->map_seg[j]);
					nm_info->map_seg[j] = NULL;
				}
			}
		}
#endif /* DEV_NETMAP */
		if (tx_info->dmamap != NULL) {
			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
			tx_info->dmamap = NULL;
		}
	}
}

/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	device_t pdev = adapter->pdev;
	char thread_name[MAXCOMLEN + 1];
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *tx_ring = que->tx_ring;
	cpuset_t *cpu_mask = NULL;
	int size, i, err;
#ifdef DEV_NETMAP
	bus_dmamap_t *map;
	int j;

	ena_netmap_reset_tx_ring(adapter, qid);
#endif /* DEV_NETMAP */

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->tx_buffer_info == NULL))
		return (ENOMEM);

	size = sizeof(uint16_t) * tx_ring->ring_size;
	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->free_tx_ids == NULL))
		goto err_buf_info_free;

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
		goto err_tx_ids_free;

	/* Req id stack for TX OOO completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset TX statistics. */
	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
	    sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->acum_pkts = 0;

	/* Make sure that drbr is empty */
	ENA_RING_MTX_LOCK(tx_ring);
	drbr_flush(adapter->ifp, tx_ring->br);
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* ... and create the buffer DMA maps */
	for (i = 0; i < tx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
		    &tx_ring->tx_buffer_info[i].dmamap);
		if (unlikely(err != 0)) {
			ena_log(pdev, ERR,
			    "Unable to create Tx DMA map for buffer %d\n", i);
			goto err_map_release;
		}

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
				    &map[j]);
				if (unlikely(err != 0)) {
					ena_log(pdev, ERR,
					    "Unable to create Tx DMA for buffer %d %d\n",
					    i, j);
					goto err_map_release;
				}
			}
		}
#endif /* DEV_NETMAP */
	}

	/* Allocate taskqueues */
	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
	if (unlikely(tx_ring->enqueue_tq == NULL)) {
		ena_log(pdev, ERR,
		    "Unable to create taskqueue for enqueue task\n");
		i = tx_ring->ring_size;
		goto err_map_release;
	}

	tx_ring->running = true;

#ifdef RSS
	cpu_mask = &que->cpu_mask;
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->cpu);
#else
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->id);
#endif
	taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
	    cpu_mask, "%s", thread_name);

	return (0);

err_map_release:
	ena_release_all_tx_dmamap(tx_ring);
err_tx_ids_free:
	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
err_buf_info_free:
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	return (ENOMEM);
}

/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	taskqueue_free(tx_ring->enqueue_tq);

	ENA_RING_MTX_LOCK(tx_ring);
	/* Flush buffer ring, */
	drbr_flush(adapter->ifp, tx_ring->br);

	/* Free buffer DMA maps, */
	for (int i = 0; i < tx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				if (nm_info->socket_buf_idx[j] != 0) {
					bus_dmamap_sync(adapter->tx_buf_tag,
					    nm_info->map_seg[j],
					    BUS_DMASYNC_POSTWRITE);
					ena_netmap_unload(adapter,
					    nm_info->map_seg[j]);
				}
				bus_dmamap_destroy(adapter->tx_buf_tag,
				    nm_info->map_seg[j]);
				nm_info->socket_buf_idx[j] = 0;
			}
		}
#endif /* DEV_NETMAP */

		m_freem(tx_ring->tx_buffer_info[i].mbuf);
		tx_ring->tx_buffer_info[i].mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* And free allocated memory. */
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;

	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
	tx_ring->push_buf_intermediate_buf = NULL;
}

/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Tx Queue %u failed\n", i);
			goto err_setup_tx;
		}
	}

	return (0);

err_setup_tx:
	/* Rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_tx_resources(adapter, i);
}

/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	device_t pdev = adapter->pdev;
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *rx_ring = que->rx_ring;
	int size, err, i;

	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

#ifdef DEV_NETMAP
	ena_netmap_reset_rx_ring(adapter, qid);
	rx_ring->initialized = false;
#endif /* DEV_NETMAP */

	/*
	 * Alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size += sizeof(struct ena_rx_buffer);

	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	size = sizeof(uint16_t) * rx_ring->ring_size;
	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset RX statistics. */
	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
	    sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* ... and create the buffer DMA maps */
	for (i = 0; i < rx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
		    &(rx_ring->rx_buffer_info[i].map));
		if (err != 0) {
			ena_log(pdev, ERR,
			    "Unable to create Rx DMA map for buffer %d\n", i);
			goto err_buf_info_unmap;
		}
	}

	/* Create LRO for the ring */
	if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) {
		int err = tcp_lro_init(&rx_ring->lro);
		if (err != 0) {
			ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
			    qid);
		} else {
			ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
			    qid);
			rx_ring->lro.ifp = adapter->ifp;
		}
	}

	return (0);

err_buf_info_unmap:
	while (i--) {
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;
	return (ENOMEM);
}

/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	/* Free buffer DMA maps, */
	for (int i = 0; i < rx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
		m_freem(rx_ring->rx_buffer_info[i].mbuf);
		rx_ring->rx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	/* free LRO resources, */
	tcp_lro_free(&rx_ring->lro);

	/* free allocated memory */
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
}

/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Rx Queue %u failed\n", i);
			goto err_setup_rx;
		}
	}
	return (0);

err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	device_t pdev = adapter->pdev;
	struct ena_com_buf *ena_buf;
	bus_dma_segment_t segs[1];
	int nsegs, error;
	int mlen;

	/* if previous allocated frag is not used */
	if (unlikely(rx_info->mbuf != NULL))
		return (0);

	/* Get mbuf using UMA allocator */
	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    rx_ring->rx_mbuf_sz);

	if (unlikely(rx_info->mbuf == NULL)) {
		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(rx_info->mbuf == NULL)) {
			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
			return (ENOMEM);
		}
		mlen = MCLBYTES;
	} else {
		mlen = rx_ring->rx_mbuf_sz;
	}
	/* Set mbuf length */
	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

	/* Map packets for DMA */
	ena_log(pdev, DBG,
	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely((error != 0) || (nsegs != 1))) {
		ena_log(pdev, WARN,
		    "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
		goto exit;
	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = segs[0].ds_addr;
	ena_buf->len = mlen;

	ena_log(pdev, DBG,
	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

	return (0);

exit:
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
	return (EFAULT);
}

static void
ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	if (rx_info->mbuf == NULL) {
		ena_log(adapter->pdev, WARN,
		    "Trying to free unallocated buffer\n");
		return;
	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
}

/**
 * ena_refill_rx_bufs - Refills ring with descriptors
 * @rx_ring: the ring which we want to feed with free descriptors
 * @num: number of descriptors to refill
 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
 **/
int
ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
{
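	/*
	 * (Descriptive note on the refill loop below: for each of the 'num'
	 * requested descriptors a free req_id is taken from free_rx_ids, a
	 * buffer is allocated and DMA-mapped for it (or a netmap slot is used
	 * when the ring is in netmap mode), and the descriptor is posted to
	 * the submission queue. The doorbell is written once, after the
	 * loop.)
	 */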
10799b8d05b8SZbigniew Bodek struct ena_adapter *adapter = rx_ring->adapter; 10803fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 108143fefd16SMarcin Wojtas uint16_t next_to_use, req_id; 10829b8d05b8SZbigniew Bodek uint32_t i; 10839b8d05b8SZbigniew Bodek int rc; 10849b8d05b8SZbigniew Bodek 10853fc5d816SMarcin Wojtas ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid); 10869b8d05b8SZbigniew Bodek 10879b8d05b8SZbigniew Bodek next_to_use = rx_ring->next_to_use; 10889b8d05b8SZbigniew Bodek 10899b8d05b8SZbigniew Bodek for (i = 0; i < num; i++) { 109043fefd16SMarcin Wojtas struct ena_rx_buffer *rx_info; 109143fefd16SMarcin Wojtas 10923fc5d816SMarcin Wojtas ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n", 10933fc5d816SMarcin Wojtas next_to_use); 10949b8d05b8SZbigniew Bodek 109543fefd16SMarcin Wojtas req_id = rx_ring->free_rx_ids[next_to_use]; 109643fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 10979a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 1098358bcc4cSMarcin Wojtas if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) 109982e558eaSDawid Gorecki rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, 110082e558eaSDawid Gorecki rx_info); 11019a0f2079SMarcin Wojtas else 11029a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 11039b8d05b8SZbigniew Bodek rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); 11043f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 11053fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11064e8acd84SMarcin Wojtas "failed to alloc buffer for rx queue %d\n", 11074e8acd84SMarcin Wojtas rx_ring->qid); 11089b8d05b8SZbigniew Bodek break; 11099b8d05b8SZbigniew Bodek } 11109b8d05b8SZbigniew Bodek rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, 111143fefd16SMarcin Wojtas &rx_info->ena_buf, req_id); 11120bdffe59SMarcin Wojtas if (unlikely(rc != 0)) { 11133fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11149b8d05b8SZbigniew Bodek "failed to add buffer for rx queue %d\n", 11159b8d05b8SZbigniew Bodek rx_ring->qid); 11169b8d05b8SZbigniew Bodek break; 11179b8d05b8SZbigniew Bodek } 11189b8d05b8SZbigniew Bodek next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 11199b8d05b8SZbigniew Bodek rx_ring->ring_size); 11209b8d05b8SZbigniew Bodek } 11219b8d05b8SZbigniew Bodek 11223f9ed7abSMarcin Wojtas if (unlikely(i < num)) { 11239b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.refil_partial, 1); 11243fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11254e8acd84SMarcin Wojtas "refilled rx qid %d with only %d mbufs (from %d)\n", 11264e8acd84SMarcin Wojtas rx_ring->qid, i, num); 11279b8d05b8SZbigniew Bodek } 11289b8d05b8SZbigniew Bodek 11298483b844SMarcin Wojtas if (likely(i != 0)) 11309b8d05b8SZbigniew Bodek ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 11318483b844SMarcin Wojtas 11329b8d05b8SZbigniew Bodek rx_ring->next_to_use = next_to_use; 11339b8d05b8SZbigniew Bodek return (i); 11349b8d05b8SZbigniew Bodek } 11359b8d05b8SZbigniew Bodek 11367d8c4feeSMarcin Wojtas int 113721823546SMarcin Wojtas ena_update_buf_ring_size(struct ena_adapter *adapter, 113821823546SMarcin Wojtas uint32_t new_buf_ring_size) 113921823546SMarcin Wojtas { 114021823546SMarcin Wojtas uint32_t old_buf_ring_size; 114121823546SMarcin Wojtas int rc = 0; 114221823546SMarcin Wojtas bool dev_was_up; 114321823546SMarcin Wojtas 114421823546SMarcin Wojtas old_buf_ring_size = adapter->buf_ring_size; 114521823546SMarcin Wojtas adapter->buf_ring_size = new_buf_ring_size; 114621823546SMarcin Wojtas 114721823546SMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 114821823546SMarcin Wojtas 
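	/*
	 * The interface has to be brought down before the buf rings can be
	 * reconfigured; it is brought back up below only if it was running.
	 */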
ena_down(adapter); 114921823546SMarcin Wojtas 115021823546SMarcin Wojtas /* Reconfigure buf ring for all Tx rings. */ 115121823546SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 115221823546SMarcin Wojtas ena_init_io_rings_advanced(adapter); 115321823546SMarcin Wojtas if (dev_was_up) { 115421823546SMarcin Wojtas /* 115521823546SMarcin Wojtas * If ena_up() fails, it's not because of recent buf_ring size 115621823546SMarcin Wojtas * changes. Because of that, we just want to revert old drbr 115721823546SMarcin Wojtas * value and trigger the reset because something else had to 115821823546SMarcin Wojtas * go wrong. 115921823546SMarcin Wojtas */ 116021823546SMarcin Wojtas rc = ena_up(adapter); 116121823546SMarcin Wojtas if (unlikely(rc != 0)) { 11623fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 116321823546SMarcin Wojtas "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n", 116421823546SMarcin Wojtas new_buf_ring_size, old_buf_ring_size); 116521823546SMarcin Wojtas 116621823546SMarcin Wojtas /* Revert old size and trigger the reset */ 116721823546SMarcin Wojtas adapter->buf_ring_size = old_buf_ring_size; 116821823546SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 116921823546SMarcin Wojtas ena_init_io_rings_advanced(adapter); 117021823546SMarcin Wojtas 117121823546SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, 117221823546SMarcin Wojtas adapter); 117321823546SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); 117421823546SMarcin Wojtas } 117521823546SMarcin Wojtas } 117621823546SMarcin Wojtas 117721823546SMarcin Wojtas return (rc); 117821823546SMarcin Wojtas } 117921823546SMarcin Wojtas 118021823546SMarcin Wojtas int 11817d8c4feeSMarcin Wojtas ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size, 11827d8c4feeSMarcin Wojtas uint32_t new_rx_size) 11837d8c4feeSMarcin Wojtas { 11847d8c4feeSMarcin Wojtas uint32_t old_tx_size, old_rx_size; 11857d8c4feeSMarcin Wojtas int rc = 0; 11867d8c4feeSMarcin Wojtas bool dev_was_up; 11877d8c4feeSMarcin Wojtas 11889762a033SMarcin Wojtas old_tx_size = adapter->requested_tx_ring_size; 11899762a033SMarcin Wojtas old_rx_size = adapter->requested_rx_ring_size; 11909762a033SMarcin Wojtas adapter->requested_tx_ring_size = new_tx_size; 11919762a033SMarcin Wojtas adapter->requested_rx_ring_size = new_rx_size; 11927d8c4feeSMarcin Wojtas 11937d8c4feeSMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 11947d8c4feeSMarcin Wojtas ena_down(adapter); 11957d8c4feeSMarcin Wojtas 11967d8c4feeSMarcin Wojtas /* Configure queues with new size. */ 11977d8c4feeSMarcin Wojtas ena_init_io_rings_basic(adapter); 11987d8c4feeSMarcin Wojtas if (dev_was_up) { 11997d8c4feeSMarcin Wojtas rc = ena_up(adapter); 12007d8c4feeSMarcin Wojtas if (unlikely(rc != 0)) { 12013fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 12027d8c4feeSMarcin Wojtas "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n", 12037d8c4feeSMarcin Wojtas new_tx_size, new_rx_size, old_tx_size, old_rx_size); 12047d8c4feeSMarcin Wojtas 12057d8c4feeSMarcin Wojtas /* Revert old size. */ 12069762a033SMarcin Wojtas adapter->requested_tx_ring_size = old_tx_size; 12079762a033SMarcin Wojtas adapter->requested_rx_ring_size = old_rx_size; 12087d8c4feeSMarcin Wojtas ena_init_io_rings_basic(adapter); 12097d8c4feeSMarcin Wojtas 12107d8c4feeSMarcin Wojtas /* And try again. 
*/ 12117d8c4feeSMarcin Wojtas rc = ena_up(adapter); 12127d8c4feeSMarcin Wojtas if (unlikely(rc != 0)) { 12133fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 12147d8c4feeSMarcin Wojtas "Failed to revert old queue sizes. Triggering device reset.\n"); 12157d8c4feeSMarcin Wojtas /* 12167d8c4feeSMarcin Wojtas * If we've failed again, something had to go 12177d8c4feeSMarcin Wojtas * wrong. After reset, the device should try to 12187d8c4feeSMarcin Wojtas * go up 12197d8c4feeSMarcin Wojtas */ 12207d8c4feeSMarcin Wojtas ENA_FLAG_SET_ATOMIC( 12217d8c4feeSMarcin Wojtas ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 12227d8c4feeSMarcin Wojtas ena_trigger_reset(adapter, 12237d8c4feeSMarcin Wojtas ENA_REGS_RESET_OS_TRIGGER); 12247d8c4feeSMarcin Wojtas } 12257d8c4feeSMarcin Wojtas } 12267d8c4feeSMarcin Wojtas } 12277d8c4feeSMarcin Wojtas 12287d8c4feeSMarcin Wojtas return (rc); 12297d8c4feeSMarcin Wojtas } 12307d8c4feeSMarcin Wojtas 12319b8d05b8SZbigniew Bodek static void 123256d41ad5SMarcin Wojtas ena_update_io_rings(struct ena_adapter *adapter, uint32_t num) 123356d41ad5SMarcin Wojtas { 123456d41ad5SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 123556d41ad5SMarcin Wojtas /* Force indirection table to be reinitialized */ 123656d41ad5SMarcin Wojtas ena_com_rss_destroy(adapter->ena_dev); 123756d41ad5SMarcin Wojtas 123856d41ad5SMarcin Wojtas adapter->num_io_queues = num; 123956d41ad5SMarcin Wojtas ena_init_io_rings(adapter); 124056d41ad5SMarcin Wojtas } 124156d41ad5SMarcin Wojtas 124256d41ad5SMarcin Wojtas /* Caller should sanitize new_num */ 124356d41ad5SMarcin Wojtas int 124456d41ad5SMarcin Wojtas ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num) 124556d41ad5SMarcin Wojtas { 124656d41ad5SMarcin Wojtas uint32_t old_num; 124756d41ad5SMarcin Wojtas int rc = 0; 124856d41ad5SMarcin Wojtas bool dev_was_up; 124956d41ad5SMarcin Wojtas 125056d41ad5SMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 125156d41ad5SMarcin Wojtas old_num = adapter->num_io_queues; 125256d41ad5SMarcin Wojtas ena_down(adapter); 125356d41ad5SMarcin Wojtas 125456d41ad5SMarcin Wojtas ena_update_io_rings(adapter, new_num); 125556d41ad5SMarcin Wojtas 125656d41ad5SMarcin Wojtas if (dev_was_up) { 125756d41ad5SMarcin Wojtas rc = ena_up(adapter); 125856d41ad5SMarcin Wojtas if (unlikely(rc != 0)) { 12593fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 126056d41ad5SMarcin Wojtas "Failed to configure device with %u IO queues. " 126156d41ad5SMarcin Wojtas "Reverting to previous value: %u\n", 126256d41ad5SMarcin Wojtas new_num, old_num); 126356d41ad5SMarcin Wojtas 126456d41ad5SMarcin Wojtas ena_update_io_rings(adapter, old_num); 126556d41ad5SMarcin Wojtas 126656d41ad5SMarcin Wojtas rc = ena_up(adapter); 126756d41ad5SMarcin Wojtas if (unlikely(rc != 0)) { 12683fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 126956d41ad5SMarcin Wojtas "Failed to revert to previous setup IO " 127056d41ad5SMarcin Wojtas "queues. 
Triggering device reset.\n"); 127156d41ad5SMarcin Wojtas ENA_FLAG_SET_ATOMIC( 127256d41ad5SMarcin Wojtas ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 127356d41ad5SMarcin Wojtas ena_trigger_reset(adapter, 127456d41ad5SMarcin Wojtas ENA_REGS_RESET_OS_TRIGGER); 127556d41ad5SMarcin Wojtas } 127656d41ad5SMarcin Wojtas } 127756d41ad5SMarcin Wojtas } 127856d41ad5SMarcin Wojtas 127956d41ad5SMarcin Wojtas return (rc); 128056d41ad5SMarcin Wojtas } 128156d41ad5SMarcin Wojtas 128256d41ad5SMarcin Wojtas static void 12839b8d05b8SZbigniew Bodek ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid) 12849b8d05b8SZbigniew Bodek { 12859b8d05b8SZbigniew Bodek struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 12869b8d05b8SZbigniew Bodek unsigned int i; 12879b8d05b8SZbigniew Bodek 12889b8d05b8SZbigniew Bodek for (i = 0; i < rx_ring->ring_size; i++) { 12899b8d05b8SZbigniew Bodek struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; 12909b8d05b8SZbigniew Bodek 12910bdffe59SMarcin Wojtas if (rx_info->mbuf != NULL) 12929b8d05b8SZbigniew Bodek ena_free_rx_mbuf(adapter, rx_ring, rx_info); 12939a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 12949a0f2079SMarcin Wojtas if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) && 12957583c633SJustin Hibbits (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) { 12969a0f2079SMarcin Wojtas if (rx_info->netmap_buf_idx != 0) 12979a0f2079SMarcin Wojtas ena_netmap_free_rx_slot(adapter, rx_ring, 12989a0f2079SMarcin Wojtas rx_info); 12999a0f2079SMarcin Wojtas } 13009a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 13019b8d05b8SZbigniew Bodek } 13029b8d05b8SZbigniew Bodek } 13039b8d05b8SZbigniew Bodek 13049b8d05b8SZbigniew Bodek /** 13059b8d05b8SZbigniew Bodek * ena_refill_all_rx_bufs - allocate all queues Rx buffers 13069b8d05b8SZbigniew Bodek * @adapter: network interface device structure 13079b8d05b8SZbigniew Bodek * 13089b8d05b8SZbigniew Bodek */ 13099b8d05b8SZbigniew Bodek static void 13109b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(struct ena_adapter *adapter) 13119b8d05b8SZbigniew Bodek { 13129b8d05b8SZbigniew Bodek struct ena_ring *rx_ring; 13139b8d05b8SZbigniew Bodek int i, rc, bufs_num; 13149b8d05b8SZbigniew Bodek 13157d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 13169b8d05b8SZbigniew Bodek rx_ring = &adapter->rx_ring[i]; 13179b8d05b8SZbigniew Bodek bufs_num = rx_ring->ring_size - 1; 13189b8d05b8SZbigniew Bodek rc = ena_refill_rx_bufs(rx_ring, bufs_num); 13199b8d05b8SZbigniew Bodek if (unlikely(rc != bufs_num)) 13203fc5d816SMarcin Wojtas ena_log_io(adapter->pdev, WARN, 13213fc5d816SMarcin Wojtas "refilling Queue %d failed. 
" 132282e558eaSDawid Gorecki "Allocated %d buffers from: %d\n", 132382e558eaSDawid Gorecki i, rc, bufs_num); 13249a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 13259a0f2079SMarcin Wojtas rx_ring->initialized = true; 13269a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 13279b8d05b8SZbigniew Bodek } 13289b8d05b8SZbigniew Bodek } 13299b8d05b8SZbigniew Bodek 13309b8d05b8SZbigniew Bodek static void 13319b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(struct ena_adapter *adapter) 13329b8d05b8SZbigniew Bodek { 13339b8d05b8SZbigniew Bodek int i; 13349b8d05b8SZbigniew Bodek 13357d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) 13369b8d05b8SZbigniew Bodek ena_free_rx_bufs(adapter, i); 13379b8d05b8SZbigniew Bodek } 13389b8d05b8SZbigniew Bodek 13399b8d05b8SZbigniew Bodek /** 13409b8d05b8SZbigniew Bodek * ena_free_tx_bufs - Free Tx Buffers per Queue 13419b8d05b8SZbigniew Bodek * @adapter: network interface device structure 13429b8d05b8SZbigniew Bodek * @qid: queue index 13439b8d05b8SZbigniew Bodek **/ 13449b8d05b8SZbigniew Bodek static void 13459b8d05b8SZbigniew Bodek ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) 13469b8d05b8SZbigniew Bodek { 13474e8acd84SMarcin Wojtas bool print_once = true; 13489b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 13499b8d05b8SZbigniew Bodek 1350416e8864SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 13519b8d05b8SZbigniew Bodek for (int i = 0; i < tx_ring->ring_size; i++) { 13529b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 13539b8d05b8SZbigniew Bodek 13549b8d05b8SZbigniew Bodek if (tx_info->mbuf == NULL) 13559b8d05b8SZbigniew Bodek continue; 13569b8d05b8SZbigniew Bodek 13574e8acd84SMarcin Wojtas if (print_once) { 13583fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 135982e558eaSDawid Gorecki "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, 136082e558eaSDawid Gorecki i); 13614e8acd84SMarcin Wojtas print_once = false; 13624e8acd84SMarcin Wojtas } else { 13633fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, 136482e558eaSDawid Gorecki "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, 136582e558eaSDawid Gorecki i); 13664e8acd84SMarcin Wojtas } 13679b8d05b8SZbigniew Bodek 1368888810f0SMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, 1369e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 1370888810f0SMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); 13714fa9e02dSMarcin Wojtas 13729b8d05b8SZbigniew Bodek m_free(tx_info->mbuf); 13739b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 13749b8d05b8SZbigniew Bodek } 1375416e8864SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 13769b8d05b8SZbigniew Bodek } 13779b8d05b8SZbigniew Bodek 13789b8d05b8SZbigniew Bodek static void 13799b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(struct ena_adapter *adapter) 13809b8d05b8SZbigniew Bodek { 13817d8c4feeSMarcin Wojtas for (int i = 0; i < adapter->num_io_queues; i++) 13829b8d05b8SZbigniew Bodek ena_free_tx_bufs(adapter, i); 13839b8d05b8SZbigniew Bodek } 13849b8d05b8SZbigniew Bodek 13859b8d05b8SZbigniew Bodek static void 13869b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(struct ena_adapter *adapter) 13879b8d05b8SZbigniew Bodek { 13889b8d05b8SZbigniew Bodek uint16_t ena_qid; 13899b8d05b8SZbigniew Bodek int i; 13909b8d05b8SZbigniew Bodek 13917d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 13929b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 13939b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 13949b8d05b8SZbigniew Bodek } 
13959b8d05b8SZbigniew Bodek } 13969b8d05b8SZbigniew Bodek 13979b8d05b8SZbigniew Bodek static void 13989b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(struct ena_adapter *adapter) 13999b8d05b8SZbigniew Bodek { 14009b8d05b8SZbigniew Bodek uint16_t ena_qid; 14019b8d05b8SZbigniew Bodek int i; 14029b8d05b8SZbigniew Bodek 14037d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14049b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 14059b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 14069b8d05b8SZbigniew Bodek } 14079b8d05b8SZbigniew Bodek } 14089b8d05b8SZbigniew Bodek 14099b8d05b8SZbigniew Bodek static void 14109b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(struct ena_adapter *adapter) 14119b8d05b8SZbigniew Bodek { 14125cb9db07SMarcin Wojtas struct ena_que *queue; 14135cb9db07SMarcin Wojtas int i; 14145cb9db07SMarcin Wojtas 14157d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14165cb9db07SMarcin Wojtas queue = &adapter->que[i]; 141782e558eaSDawid Gorecki while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL)) 141882e558eaSDawid Gorecki taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task); 14195cb9db07SMarcin Wojtas taskqueue_free(queue->cleanup_tq); 14205cb9db07SMarcin Wojtas } 14215cb9db07SMarcin Wojtas 14229b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(adapter); 14239b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(adapter); 14249b8d05b8SZbigniew Bodek } 14259b8d05b8SZbigniew Bodek 14269b8d05b8SZbigniew Bodek static int 14279b8d05b8SZbigniew Bodek ena_create_io_queues(struct ena_adapter *adapter) 14289b8d05b8SZbigniew Bodek { 14299b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 14309b8d05b8SZbigniew Bodek struct ena_com_create_io_ctx ctx; 14319b8d05b8SZbigniew Bodek struct ena_ring *ring; 14325cb9db07SMarcin Wojtas struct ena_que *queue; 14339b8d05b8SZbigniew Bodek uint16_t ena_qid; 14349b8d05b8SZbigniew Bodek uint32_t msix_vector; 14356d1ef2abSArtur Rojek cpuset_t *cpu_mask = NULL; 14369b8d05b8SZbigniew Bodek int rc, i; 14379b8d05b8SZbigniew Bodek 14389b8d05b8SZbigniew Bodek /* Create TX queues */ 14397d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14409b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 14419b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 14429b8d05b8SZbigniew Bodek ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 14439b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 14449762a033SMarcin Wojtas ctx.queue_size = adapter->requested_tx_ring_size; 14459b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 14469b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 1447eb4c4f4aSMarcin Wojtas ctx.numa_node = adapter->que[i].domain; 1448eb4c4f4aSMarcin Wojtas 14499b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 14500bdffe59SMarcin Wojtas if (rc != 0) { 14513fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 14529b8d05b8SZbigniew Bodek "Failed to create io TX queue #%d rc: %d\n", i, rc); 14539b8d05b8SZbigniew Bodek goto err_tx; 14549b8d05b8SZbigniew Bodek } 14559b8d05b8SZbigniew Bodek ring = &adapter->tx_ring[i]; 14569b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 145782e558eaSDawid Gorecki &ring->ena_com_io_sq, &ring->ena_com_io_cq); 14580bdffe59SMarcin Wojtas if (rc != 0) { 14593fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 14609b8d05b8SZbigniew Bodek "Failed to get TX queue handlers. 
TX queue num" 146182e558eaSDawid Gorecki " %d rc: %d\n", 146282e558eaSDawid Gorecki i, rc); 14639b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 14649b8d05b8SZbigniew Bodek goto err_tx; 14659b8d05b8SZbigniew Bodek } 1466eb4c4f4aSMarcin Wojtas 1467eb4c4f4aSMarcin Wojtas if (ctx.numa_node >= 0) { 1468eb4c4f4aSMarcin Wojtas ena_com_update_numa_node(ring->ena_com_io_cq, 1469eb4c4f4aSMarcin Wojtas ctx.numa_node); 1470eb4c4f4aSMarcin Wojtas } 14719b8d05b8SZbigniew Bodek } 14729b8d05b8SZbigniew Bodek 14739b8d05b8SZbigniew Bodek /* Create RX queues */ 14747d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14759b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 14769b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 14779b8d05b8SZbigniew Bodek ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 14789b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 14799762a033SMarcin Wojtas ctx.queue_size = adapter->requested_rx_ring_size; 14809b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 14819b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 1482eb4c4f4aSMarcin Wojtas ctx.numa_node = adapter->que[i].domain; 1483eb4c4f4aSMarcin Wojtas 14849b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 14853f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 14863fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 14879b8d05b8SZbigniew Bodek "Failed to create io RX queue[%d] rc: %d\n", i, rc); 14889b8d05b8SZbigniew Bodek goto err_rx; 14899b8d05b8SZbigniew Bodek } 14909b8d05b8SZbigniew Bodek 14919b8d05b8SZbigniew Bodek ring = &adapter->rx_ring[i]; 14929b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 149382e558eaSDawid Gorecki &ring->ena_com_io_sq, &ring->ena_com_io_cq); 14943f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 14953fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 14969b8d05b8SZbigniew Bodek "Failed to get RX queue handlers. 
RX queue num" 149782e558eaSDawid Gorecki " %d rc: %d\n", 149882e558eaSDawid Gorecki i, rc); 14999b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 15009b8d05b8SZbigniew Bodek goto err_rx; 15019b8d05b8SZbigniew Bodek } 1502eb4c4f4aSMarcin Wojtas 1503eb4c4f4aSMarcin Wojtas if (ctx.numa_node >= 0) { 1504eb4c4f4aSMarcin Wojtas ena_com_update_numa_node(ring->ena_com_io_cq, 1505eb4c4f4aSMarcin Wojtas ctx.numa_node); 1506eb4c4f4aSMarcin Wojtas } 15079b8d05b8SZbigniew Bodek } 15089b8d05b8SZbigniew Bodek 15097d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15105cb9db07SMarcin Wojtas queue = &adapter->que[i]; 15115cb9db07SMarcin Wojtas 15126c3e93cbSGleb Smirnoff NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 15135cb9db07SMarcin Wojtas queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 15145cb9db07SMarcin Wojtas M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 15155cb9db07SMarcin Wojtas 15166d1ef2abSArtur Rojek #ifdef RSS 15176d1ef2abSArtur Rojek cpu_mask = &queue->cpu_mask; 15186d1ef2abSArtur Rojek #endif 15196d1ef2abSArtur Rojek taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET, 152082e558eaSDawid Gorecki cpu_mask, "%s queue %d cleanup", 15215cb9db07SMarcin Wojtas device_get_nameunit(adapter->pdev), i); 15225cb9db07SMarcin Wojtas } 15235cb9db07SMarcin Wojtas 15249b8d05b8SZbigniew Bodek return (0); 15259b8d05b8SZbigniew Bodek 15269b8d05b8SZbigniew Bodek err_rx: 15279b8d05b8SZbigniew Bodek while (i--) 15289b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 15297d8c4feeSMarcin Wojtas i = adapter->num_io_queues; 15309b8d05b8SZbigniew Bodek err_tx: 15319b8d05b8SZbigniew Bodek while (i--) 15329b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 15339b8d05b8SZbigniew Bodek 15349b8d05b8SZbigniew Bodek return (ENXIO); 15359b8d05b8SZbigniew Bodek } 15369b8d05b8SZbigniew Bodek 15379b8d05b8SZbigniew Bodek /********************************************************************* 15389b8d05b8SZbigniew Bodek * 15399b8d05b8SZbigniew Bodek * MSIX & Interrupt Service routine 15409b8d05b8SZbigniew Bodek * 15419b8d05b8SZbigniew Bodek **********************************************************************/ 15429b8d05b8SZbigniew Bodek 15439b8d05b8SZbigniew Bodek /** 15449b8d05b8SZbigniew Bodek * ena_handle_msix - MSIX Interrupt Handler for admin/async queue 15459b8d05b8SZbigniew Bodek * @arg: interrupt number 15469b8d05b8SZbigniew Bodek **/ 15479b8d05b8SZbigniew Bodek static void 15489b8d05b8SZbigniew Bodek ena_intr_msix_mgmnt(void *arg) 15499b8d05b8SZbigniew Bodek { 15509b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 15519b8d05b8SZbigniew Bodek 15529b8d05b8SZbigniew Bodek ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 1553fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))) 15549b8d05b8SZbigniew Bodek ena_com_aenq_intr_handler(adapter->ena_dev, arg); 15559b8d05b8SZbigniew Bodek } 15569b8d05b8SZbigniew Bodek 15575cb9db07SMarcin Wojtas /** 15585cb9db07SMarcin Wojtas * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx 15595cb9db07SMarcin Wojtas * @arg: queue 15605cb9db07SMarcin Wojtas **/ 15615cb9db07SMarcin Wojtas static int 15625cb9db07SMarcin Wojtas ena_handle_msix(void *arg) 15635cb9db07SMarcin Wojtas { 15645cb9db07SMarcin Wojtas struct ena_que *queue = arg; 15655cb9db07SMarcin Wojtas struct ena_adapter *adapter = queue->adapter; 15665cb9db07SMarcin Wojtas if_t ifp = adapter->ifp; 15675cb9db07SMarcin Wojtas 15685cb9db07SMarcin 
Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 15695cb9db07SMarcin Wojtas return (FILTER_STRAY); 15705cb9db07SMarcin Wojtas 15715cb9db07SMarcin Wojtas taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); 15725cb9db07SMarcin Wojtas 15735cb9db07SMarcin Wojtas return (FILTER_HANDLED); 15745cb9db07SMarcin Wojtas } 15755cb9db07SMarcin Wojtas 15769b8d05b8SZbigniew Bodek static int 15779b8d05b8SZbigniew Bodek ena_enable_msix(struct ena_adapter *adapter) 15789b8d05b8SZbigniew Bodek { 15799b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 15808805021aSMarcin Wojtas int msix_vecs, msix_req; 15818805021aSMarcin Wojtas int i, rc = 0; 15829b8d05b8SZbigniew Bodek 1583fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 15843fc5d816SMarcin Wojtas ena_log(dev, ERR, "Error, MSI-X is already enabled\n"); 1585fd43fd2aSMarcin Wojtas return (EINVAL); 1586fd43fd2aSMarcin Wojtas } 1587fd43fd2aSMarcin Wojtas 15889b8d05b8SZbigniew Bodek /* Reserved the max msix vectors we might need */ 15897d8c4feeSMarcin Wojtas msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); 15909b8d05b8SZbigniew Bodek 1591cd5d5804SMarcin Wojtas adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1592cd5d5804SMarcin Wojtas M_DEVBUF, M_WAITOK | M_ZERO); 1593cd5d5804SMarcin Wojtas 159482e558eaSDawid Gorecki ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); 15959b8d05b8SZbigniew Bodek 15969b8d05b8SZbigniew Bodek for (i = 0; i < msix_vecs; i++) { 15979b8d05b8SZbigniew Bodek adapter->msix_entries[i].entry = i; 15989b8d05b8SZbigniew Bodek /* Vectors must start from 1 */ 15999b8d05b8SZbigniew Bodek adapter->msix_entries[i].vector = i + 1; 16009b8d05b8SZbigniew Bodek } 16019b8d05b8SZbigniew Bodek 16028805021aSMarcin Wojtas msix_req = msix_vecs; 16039b8d05b8SZbigniew Bodek rc = pci_alloc_msix(dev, &msix_vecs); 16043f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 160582e558eaSDawid Gorecki ena_log(dev, ERR, "Failed to enable MSIX, vectors %d rc %d\n", 160682e558eaSDawid Gorecki msix_vecs, rc); 16077d2544e6SMarcin Wojtas 16089b8d05b8SZbigniew Bodek rc = ENOSPC; 16097d2544e6SMarcin Wojtas goto err_msix_free; 16109b8d05b8SZbigniew Bodek } 16119b8d05b8SZbigniew Bodek 16128805021aSMarcin Wojtas if (msix_vecs != msix_req) { 16132b5b60feSMarcin Wojtas if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 16143fc5d816SMarcin Wojtas ena_log(dev, ERR, 16152b5b60feSMarcin Wojtas "Not enough number of MSI-x allocated: %d\n", 16162b5b60feSMarcin Wojtas msix_vecs); 16172b5b60feSMarcin Wojtas pci_release_msi(dev); 16182b5b60feSMarcin Wojtas rc = ENOSPC; 16192b5b60feSMarcin Wojtas goto err_msix_free; 16202b5b60feSMarcin Wojtas } 162182e558eaSDawid Gorecki ena_log(dev, ERR, 162282e558eaSDawid Gorecki "Enable only %d MSI-x (out of %d), reduce " 162382e558eaSDawid Gorecki "the number of queues\n", 162482e558eaSDawid Gorecki msix_vecs, msix_req); 16258805021aSMarcin Wojtas } 16268805021aSMarcin Wojtas 16279b8d05b8SZbigniew Bodek adapter->msix_vecs = msix_vecs; 1628fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 16299b8d05b8SZbigniew Bodek 16307d2544e6SMarcin Wojtas return (0); 16317d2544e6SMarcin Wojtas 16327d2544e6SMarcin Wojtas err_msix_free: 16337d2544e6SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 16347d2544e6SMarcin Wojtas adapter->msix_entries = NULL; 16357d2544e6SMarcin Wojtas 16369b8d05b8SZbigniew Bodek return (rc); 16379b8d05b8SZbigniew Bodek } 16389b8d05b8SZbigniew Bodek 16399b8d05b8SZbigniew Bodek static void 16409b8d05b8SZbigniew Bodek 
ena_setup_mgmnt_intr(struct ena_adapter *adapter) 16419b8d05b8SZbigniew Bodek { 164282e558eaSDawid Gorecki snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE, 164382e558eaSDawid Gorecki "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev)); 16449b8d05b8SZbigniew Bodek /* 16459b8d05b8SZbigniew Bodek * Handler is NULL on purpose, it will be set 16469b8d05b8SZbigniew Bodek * when mgmnt interrupt is acquired 16479b8d05b8SZbigniew Bodek */ 16489b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 16499b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 16509b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 16519b8d05b8SZbigniew Bodek adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; 16529b8d05b8SZbigniew Bodek } 16539b8d05b8SZbigniew Bodek 165477958fcdSMarcin Wojtas static int 16559b8d05b8SZbigniew Bodek ena_setup_io_intr(struct ena_adapter *adapter) 16569b8d05b8SZbigniew Bodek { 16576d1ef2abSArtur Rojek #ifdef RSS 16586d1ef2abSArtur Rojek int num_buckets = rss_getnumbuckets(); 16596d1ef2abSArtur Rojek static int last_bind = 0; 1660eb4c4f4aSMarcin Wojtas int cur_bind; 1661eb4c4f4aSMarcin Wojtas int idx; 16626d1ef2abSArtur Rojek #endif 16639b8d05b8SZbigniew Bodek int irq_idx; 16649b8d05b8SZbigniew Bodek 166577958fcdSMarcin Wojtas if (adapter->msix_entries == NULL) 166677958fcdSMarcin Wojtas return (EINVAL); 166777958fcdSMarcin Wojtas 1668eb4c4f4aSMarcin Wojtas #ifdef RSS 1669eb4c4f4aSMarcin Wojtas if (adapter->first_bind < 0) { 1670eb4c4f4aSMarcin Wojtas adapter->first_bind = last_bind; 1671eb4c4f4aSMarcin Wojtas last_bind = (last_bind + adapter->num_io_queues) % num_buckets; 1672eb4c4f4aSMarcin Wojtas } 1673eb4c4f4aSMarcin Wojtas cur_bind = adapter->first_bind; 1674eb4c4f4aSMarcin Wojtas #endif 1675eb4c4f4aSMarcin Wojtas 16767d8c4feeSMarcin Wojtas for (int i = 0; i < adapter->num_io_queues; i++) { 16779b8d05b8SZbigniew Bodek irq_idx = ENA_IO_IRQ_IDX(i); 16789b8d05b8SZbigniew Bodek 16799b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 16809b8d05b8SZbigniew Bodek "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); 16819b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].handler = ena_handle_msix; 16829b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].data = &adapter->que[i]; 16839b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].vector = 16849b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector; 16853fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n", 16869b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector); 1687277f11c4SMarcin Wojtas 16886d1ef2abSArtur Rojek #ifdef RSS 16899b8d05b8SZbigniew Bodek adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 1690eb4c4f4aSMarcin Wojtas rss_getcpu(cur_bind); 1691eb4c4f4aSMarcin Wojtas cur_bind = (cur_bind + 1) % num_buckets; 16926d1ef2abSArtur Rojek CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask); 1693eb4c4f4aSMarcin Wojtas 1694eb4c4f4aSMarcin Wojtas for (idx = 0; idx < MAXMEMDOM; ++idx) { 1695eb4c4f4aSMarcin Wojtas if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx])) 1696eb4c4f4aSMarcin Wojtas break; 1697eb4c4f4aSMarcin Wojtas } 1698eb4c4f4aSMarcin Wojtas adapter->que[i].domain = idx; 1699eb4c4f4aSMarcin Wojtas #else 1700eb4c4f4aSMarcin Wojtas adapter->que[i].domain = -1; 17016d1ef2abSArtur Rojek #endif 17029b8d05b8SZbigniew Bodek } 170377958fcdSMarcin Wojtas 170477958fcdSMarcin Wojtas return (0); 17059b8d05b8SZbigniew Bodek } 17069b8d05b8SZbigniew Bodek 17079b8d05b8SZbigniew Bodek 
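/**
 * ena_request_mgmnt_irq - allocate and attach the admin/AENQ interrupt
 * @adapter: network interface device structure
 *
 * Allocates the IRQ resource for the management vector and installs
 * ena_intr_msix_mgmnt() as its handler. On failure the resource is
 * released again before returning.
 **/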
static int 17089b8d05b8SZbigniew Bodek ena_request_mgmnt_irq(struct ena_adapter *adapter) 17099b8d05b8SZbigniew Bodek { 17103fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 17119b8d05b8SZbigniew Bodek struct ena_irq *irq; 17129b8d05b8SZbigniew Bodek unsigned long flags; 17139b8d05b8SZbigniew Bodek int rc, rcc; 17149b8d05b8SZbigniew Bodek 17159b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 17169b8d05b8SZbigniew Bodek 17179b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 17189b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 17199b8d05b8SZbigniew Bodek &irq->vector, flags); 17209b8d05b8SZbigniew Bodek 17213f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 17223fc5d816SMarcin Wojtas ena_log(pdev, ERR, "could not allocate irq vector: %d\n", 17233fc5d816SMarcin Wojtas irq->vector); 17247d2544e6SMarcin Wojtas return (ENXIO); 17259b8d05b8SZbigniew Bodek } 17269b8d05b8SZbigniew Bodek 17270bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 172882e558eaSDawid Gorecki INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data, 172982e558eaSDawid Gorecki &irq->cookie); 17303f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 173182e558eaSDawid Gorecki ena_log(pdev, ERR, 173282e558eaSDawid Gorecki "failed to register interrupt handler for irq %ju: %d\n", 17339b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 17347d2544e6SMarcin Wojtas goto err_res_free; 17359b8d05b8SZbigniew Bodek } 17369b8d05b8SZbigniew Bodek irq->requested = true; 17379b8d05b8SZbigniew Bodek 17389b8d05b8SZbigniew Bodek return (rc); 17399b8d05b8SZbigniew Bodek 17407d2544e6SMarcin Wojtas err_res_free: 17413fc5d816SMarcin Wojtas ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector); 174282e558eaSDawid Gorecki rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, 174382e558eaSDawid Gorecki irq->res); 17443f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 174582e558eaSDawid Gorecki ena_log(pdev, ERR, 174682e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 174782e558eaSDawid Gorecki irq->vector); 17489b8d05b8SZbigniew Bodek irq->res = NULL; 17499b8d05b8SZbigniew Bodek 17509b8d05b8SZbigniew Bodek return (rc); 17519b8d05b8SZbigniew Bodek } 17529b8d05b8SZbigniew Bodek 17539b8d05b8SZbigniew Bodek static int 17549b8d05b8SZbigniew Bodek ena_request_io_irq(struct ena_adapter *adapter) 17559b8d05b8SZbigniew Bodek { 17563fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 17579b8d05b8SZbigniew Bodek struct ena_irq *irq; 17589b8d05b8SZbigniew Bodek unsigned long flags = 0; 17599b8d05b8SZbigniew Bodek int rc = 0, i, rcc; 17609b8d05b8SZbigniew Bodek 1761fd43fd2aSMarcin Wojtas if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) { 17623fc5d816SMarcin Wojtas ena_log(pdev, ERR, 17634e8acd84SMarcin Wojtas "failed to request I/O IRQ: MSI-X is not enabled\n"); 17649b8d05b8SZbigniew Bodek return (EINVAL); 17659b8d05b8SZbigniew Bodek } else { 17669b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 17679b8d05b8SZbigniew Bodek } 17689b8d05b8SZbigniew Bodek 17699b8d05b8SZbigniew Bodek for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 17709b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 17719b8d05b8SZbigniew Bodek 17723f9ed7abSMarcin Wojtas if (unlikely(irq->requested)) 17739b8d05b8SZbigniew Bodek continue; 17749b8d05b8SZbigniew Bodek 17759b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 17769b8d05b8SZbigniew Bodek &irq->vector, flags); 
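		/*
		 * Any failure from this point on jumps to the err: label,
		 * which tears down the IRQs requested so far.
		 */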
17773f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 1778469a8407SMarcin Wojtas rc = ENOMEM; 177982e558eaSDawid Gorecki ena_log(pdev, ERR, 178082e558eaSDawid Gorecki "could not allocate irq vector: %d\n", irq->vector); 17819b8d05b8SZbigniew Bodek goto err; 17829b8d05b8SZbigniew Bodek } 17839b8d05b8SZbigniew Bodek 17840bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 178582e558eaSDawid Gorecki INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data, 178682e558eaSDawid Gorecki &irq->cookie); 17873f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 178882e558eaSDawid Gorecki ena_log(pdev, ERR, 178982e558eaSDawid Gorecki "failed to register interrupt handler for irq %ju: %d\n", 17909b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 17919b8d05b8SZbigniew Bodek goto err; 17929b8d05b8SZbigniew Bodek } 17939b8d05b8SZbigniew Bodek irq->requested = true; 17946d1ef2abSArtur Rojek 17956d1ef2abSArtur Rojek #ifdef RSS 17966d1ef2abSArtur Rojek rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu); 17976d1ef2abSArtur Rojek if (unlikely(rc != 0)) { 179882e558eaSDawid Gorecki ena_log(pdev, ERR, 179982e558eaSDawid Gorecki "failed to bind interrupt handler for irq %ju to cpu %d: %d\n", 18006d1ef2abSArtur Rojek rman_get_start(irq->res), irq->cpu, rc); 18016d1ef2abSArtur Rojek goto err; 18026d1ef2abSArtur Rojek } 18036d1ef2abSArtur Rojek 18046d1ef2abSArtur Rojek ena_log(pdev, INFO, "queue %d - cpu %d\n", 18056d1ef2abSArtur Rojek i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); 18066d1ef2abSArtur Rojek #endif 18079b8d05b8SZbigniew Bodek } 18089b8d05b8SZbigniew Bodek 18099b8d05b8SZbigniew Bodek return (rc); 18109b8d05b8SZbigniew Bodek 18119b8d05b8SZbigniew Bodek err: 18129b8d05b8SZbigniew Bodek 18139b8d05b8SZbigniew Bodek for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { 18149b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 18159b8d05b8SZbigniew Bodek rcc = 0; 18169b8d05b8SZbigniew Bodek 18179b8d05b8SZbigniew Bodek /* Once we entered err: section and irq->requested is true we 18189b8d05b8SZbigniew Bodek free both intr and resources */ 18190bdffe59SMarcin Wojtas if (irq->requested) 182082e558eaSDawid Gorecki rcc = bus_teardown_intr(adapter->pdev, irq->res, 182182e558eaSDawid Gorecki irq->cookie); 18223f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 182382e558eaSDawid Gorecki ena_log(pdev, ERR, 182482e558eaSDawid Gorecki "could not release irq: %d, error: %d\n", 18253fc5d816SMarcin Wojtas irq->vector, rcc); 18269b8d05b8SZbigniew Bodek 1827eb3f25b4SGordon Bergling /* If we entered err: section without irq->requested set we know 18289b8d05b8SZbigniew Bodek it was bus_alloc_resource_any() that needs cleanup, provided 18299b8d05b8SZbigniew Bodek res is not NULL. 
In case res is NULL no work in needed in 18309b8d05b8SZbigniew Bodek this iteration */ 18319b8d05b8SZbigniew Bodek rcc = 0; 18329b8d05b8SZbigniew Bodek if (irq->res != NULL) { 18339b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 18349b8d05b8SZbigniew Bodek irq->vector, irq->res); 18359b8d05b8SZbigniew Bodek } 18363f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 183782e558eaSDawid Gorecki ena_log(pdev, ERR, 183882e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 183982e558eaSDawid Gorecki irq->vector); 18409b8d05b8SZbigniew Bodek irq->requested = false; 18419b8d05b8SZbigniew Bodek irq->res = NULL; 18429b8d05b8SZbigniew Bodek } 18439b8d05b8SZbigniew Bodek 18449b8d05b8SZbigniew Bodek return (rc); 18459b8d05b8SZbigniew Bodek } 18469b8d05b8SZbigniew Bodek 18479b8d05b8SZbigniew Bodek static void 18489b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(struct ena_adapter *adapter) 18499b8d05b8SZbigniew Bodek { 18503fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 18519b8d05b8SZbigniew Bodek struct ena_irq *irq; 18529b8d05b8SZbigniew Bodek int rc; 18539b8d05b8SZbigniew Bodek 18549b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 18559b8d05b8SZbigniew Bodek if (irq->requested) { 18563fc5d816SMarcin Wojtas ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 18579b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 18583f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 18593fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to tear down irq: %d\n", 18603fc5d816SMarcin Wojtas irq->vector); 18619b8d05b8SZbigniew Bodek irq->requested = 0; 18629b8d05b8SZbigniew Bodek } 18639b8d05b8SZbigniew Bodek 18649b8d05b8SZbigniew Bodek if (irq->res != NULL) { 18653fc5d816SMarcin Wojtas ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector); 18669b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 18679b8d05b8SZbigniew Bodek irq->vector, irq->res); 18689b8d05b8SZbigniew Bodek irq->res = NULL; 18693f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 187082e558eaSDawid Gorecki ena_log(pdev, ERR, 187182e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 187282e558eaSDawid Gorecki irq->vector); 18739b8d05b8SZbigniew Bodek } 18749b8d05b8SZbigniew Bodek } 18759b8d05b8SZbigniew Bodek 18769b8d05b8SZbigniew Bodek static void 18779b8d05b8SZbigniew Bodek ena_free_io_irq(struct ena_adapter *adapter) 18789b8d05b8SZbigniew Bodek { 18793fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 18809b8d05b8SZbigniew Bodek struct ena_irq *irq; 18819b8d05b8SZbigniew Bodek int rc; 18829b8d05b8SZbigniew Bodek 18839b8d05b8SZbigniew Bodek for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 18849b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 18859b8d05b8SZbigniew Bodek if (irq->requested) { 18863fc5d816SMarcin Wojtas ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 18879b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, 18889b8d05b8SZbigniew Bodek irq->cookie); 18893f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 189082e558eaSDawid Gorecki ena_log(pdev, ERR, 189182e558eaSDawid Gorecki "failed to tear down irq: %d\n", 18923fc5d816SMarcin Wojtas irq->vector); 18939b8d05b8SZbigniew Bodek } 18949b8d05b8SZbigniew Bodek irq->requested = 0; 18959b8d05b8SZbigniew Bodek } 18969b8d05b8SZbigniew Bodek 18979b8d05b8SZbigniew Bodek if (irq->res != NULL) { 18983fc5d816SMarcin Wojtas ena_log(pdev, DBG, "release resource irq: %d\n", 18999b8d05b8SZbigniew Bodek irq->vector); 
19009b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 19019b8d05b8SZbigniew Bodek irq->vector, irq->res); 19029b8d05b8SZbigniew Bodek irq->res = NULL; 19033f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 190482e558eaSDawid Gorecki ena_log(pdev, ERR, 190582e558eaSDawid Gorecki "dev has no parent while releasing res for irq: %d\n", 19069b8d05b8SZbigniew Bodek irq->vector); 19079b8d05b8SZbigniew Bodek } 19089b8d05b8SZbigniew Bodek } 19099b8d05b8SZbigniew Bodek } 19109b8d05b8SZbigniew Bodek } 19119b8d05b8SZbigniew Bodek 19129b8d05b8SZbigniew Bodek static void 19139b8d05b8SZbigniew Bodek ena_free_irqs(struct ena_adapter *adapter) 19149b8d05b8SZbigniew Bodek { 19159b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 19169b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(adapter); 19179b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 19189b8d05b8SZbigniew Bodek } 19199b8d05b8SZbigniew Bodek 19209b8d05b8SZbigniew Bodek static void 19219b8d05b8SZbigniew Bodek ena_disable_msix(struct ena_adapter *adapter) 19229b8d05b8SZbigniew Bodek { 1923fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 1924fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 19259b8d05b8SZbigniew Bodek pci_release_msi(adapter->pdev); 1926fd43fd2aSMarcin Wojtas } 19279b8d05b8SZbigniew Bodek 19289b8d05b8SZbigniew Bodek adapter->msix_vecs = 0; 1929cd5d5804SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 19309b8d05b8SZbigniew Bodek adapter->msix_entries = NULL; 19319b8d05b8SZbigniew Bodek } 19329b8d05b8SZbigniew Bodek 19339b8d05b8SZbigniew Bodek static void 19349b8d05b8SZbigniew Bodek ena_unmask_all_io_irqs(struct ena_adapter *adapter) 19359b8d05b8SZbigniew Bodek { 19369b8d05b8SZbigniew Bodek struct ena_com_io_cq *io_cq; 19379b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 1938223c8cb1SArtur Rojek struct ena_ring *tx_ring; 19399b8d05b8SZbigniew Bodek uint16_t ena_qid; 19409b8d05b8SZbigniew Bodek int i; 19419b8d05b8SZbigniew Bodek 19429b8d05b8SZbigniew Bodek /* Unmask interrupts for all queues */ 19437d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 19449b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 19459b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 19469b8d05b8SZbigniew Bodek ena_com_update_intr_reg(&intr_reg, 0, 0, true); 1947223c8cb1SArtur Rojek tx_ring = &adapter->tx_ring[i]; 1948223c8cb1SArtur Rojek counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1); 19499b8d05b8SZbigniew Bodek ena_com_unmask_intr(io_cq, &intr_reg); 19509b8d05b8SZbigniew Bodek } 19519b8d05b8SZbigniew Bodek } 19529b8d05b8SZbigniew Bodek 19539b8d05b8SZbigniew Bodek static int 19549b8d05b8SZbigniew Bodek ena_up_complete(struct ena_adapter *adapter) 19559b8d05b8SZbigniew Bodek { 19569b8d05b8SZbigniew Bodek int rc; 19579b8d05b8SZbigniew Bodek 1958fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { 19599b8d05b8SZbigniew Bodek rc = ena_rss_configure(adapter); 196056d41ad5SMarcin Wojtas if (rc != 0) { 19613fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 196256d41ad5SMarcin Wojtas "Failed to configure RSS\n"); 19639b8d05b8SZbigniew Bodek return (rc); 19649b8d05b8SZbigniew Bodek } 196556d41ad5SMarcin Wojtas } 19669b8d05b8SZbigniew Bodek 19677583c633SJustin Hibbits rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp)); 19683f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 19697d2544e6SMarcin Wojtas return (rc); 19707d2544e6SMarcin Wojtas 19719b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(adapter); 
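	/*
	 * Reset the HW statistics counters so the freshly brought-up
	 * interface starts counting from zero.
	 */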
197230217e2dSMarcin Wojtas ena_reset_counters((counter_u64_t *)&adapter->hw_stats, 197330217e2dSMarcin Wojtas sizeof(adapter->hw_stats)); 19749b8d05b8SZbigniew Bodek 19759b8d05b8SZbigniew Bodek return (0); 19769b8d05b8SZbigniew Bodek } 19779b8d05b8SZbigniew Bodek 19789762a033SMarcin Wojtas static void 197982e558eaSDawid Gorecki set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size) 19809762a033SMarcin Wojtas { 19819762a033SMarcin Wojtas int i; 19829762a033SMarcin Wojtas 19839762a033SMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 19849762a033SMarcin Wojtas adapter->tx_ring[i].ring_size = new_tx_size; 19859762a033SMarcin Wojtas adapter->rx_ring[i].ring_size = new_rx_size; 19869762a033SMarcin Wojtas } 19879762a033SMarcin Wojtas } 19889762a033SMarcin Wojtas 19899762a033SMarcin Wojtas static int 19909762a033SMarcin Wojtas create_queues_with_size_backoff(struct ena_adapter *adapter) 19919762a033SMarcin Wojtas { 19923fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 19939762a033SMarcin Wojtas int rc; 19949762a033SMarcin Wojtas uint32_t cur_rx_ring_size, cur_tx_ring_size; 19959762a033SMarcin Wojtas uint32_t new_rx_ring_size, new_tx_ring_size; 19969762a033SMarcin Wojtas 19979762a033SMarcin Wojtas /* 19989762a033SMarcin Wojtas * Current queue sizes might be set to smaller than the requested 19999762a033SMarcin Wojtas * ones due to past queue allocation failures. 20009762a033SMarcin Wojtas */ 20019762a033SMarcin Wojtas set_io_rings_size(adapter, adapter->requested_tx_ring_size, 20029762a033SMarcin Wojtas adapter->requested_rx_ring_size); 20039762a033SMarcin Wojtas 20049762a033SMarcin Wojtas while (1) { 20059762a033SMarcin Wojtas /* Allocate transmit descriptors */ 20069762a033SMarcin Wojtas rc = ena_setup_all_tx_resources(adapter); 20079762a033SMarcin Wojtas if (unlikely(rc != 0)) { 20083fc5d816SMarcin Wojtas ena_log(pdev, ERR, "err_setup_tx\n"); 20099762a033SMarcin Wojtas goto err_setup_tx; 20109762a033SMarcin Wojtas } 20119762a033SMarcin Wojtas 20129762a033SMarcin Wojtas /* Allocate receive descriptors */ 20139762a033SMarcin Wojtas rc = ena_setup_all_rx_resources(adapter); 20149762a033SMarcin Wojtas if (unlikely(rc != 0)) { 20153fc5d816SMarcin Wojtas ena_log(pdev, ERR, "err_setup_rx\n"); 20169762a033SMarcin Wojtas goto err_setup_rx; 20179762a033SMarcin Wojtas } 20189762a033SMarcin Wojtas 20199762a033SMarcin Wojtas /* Create IO queues for Rx & Tx */ 20209762a033SMarcin Wojtas rc = ena_create_io_queues(adapter); 20219762a033SMarcin Wojtas if (unlikely(rc != 0)) { 202282e558eaSDawid Gorecki ena_log(pdev, ERR, "create IO queues failed\n"); 20239762a033SMarcin Wojtas goto err_io_que; 20249762a033SMarcin Wojtas } 20259762a033SMarcin Wojtas 20269762a033SMarcin Wojtas return (0); 20279762a033SMarcin Wojtas 20289762a033SMarcin Wojtas err_io_que: 20299762a033SMarcin Wojtas ena_free_all_rx_resources(adapter); 20309762a033SMarcin Wojtas err_setup_rx: 20319762a033SMarcin Wojtas ena_free_all_tx_resources(adapter); 20329762a033SMarcin Wojtas err_setup_tx: 20339762a033SMarcin Wojtas /* 20349762a033SMarcin Wojtas * Lower the ring size if ENOMEM. Otherwise, return the 20359762a033SMarcin Wojtas * error straightaway. 
20369762a033SMarcin Wojtas */
20379762a033SMarcin Wojtas if (unlikely(rc != ENOMEM)) {
20383fc5d816SMarcin Wojtas ena_log(pdev, ERR,
20399762a033SMarcin Wojtas "Queue creation failed with error code: %d\n", rc);
20409762a033SMarcin Wojtas return (rc);
20419762a033SMarcin Wojtas }
20429762a033SMarcin Wojtas
20439762a033SMarcin Wojtas cur_tx_ring_size = adapter->tx_ring[0].ring_size;
20449762a033SMarcin Wojtas cur_rx_ring_size = adapter->rx_ring[0].ring_size;
20459762a033SMarcin Wojtas
20463fc5d816SMarcin Wojtas ena_log(pdev, ERR,
20479762a033SMarcin Wojtas "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
20489762a033SMarcin Wojtas cur_tx_ring_size, cur_rx_ring_size);
20499762a033SMarcin Wojtas
20509762a033SMarcin Wojtas new_tx_ring_size = cur_tx_ring_size;
20519762a033SMarcin Wojtas new_rx_ring_size = cur_rx_ring_size;
20529762a033SMarcin Wojtas
20539762a033SMarcin Wojtas /*
205482e558eaSDawid Gorecki * Decrease the size of a larger queue, or decrease both if they
205582e558eaSDawid Gorecki * are the same size.
20569762a033SMarcin Wojtas */
20579762a033SMarcin Wojtas if (cur_rx_ring_size <= cur_tx_ring_size)
20589762a033SMarcin Wojtas new_tx_ring_size = cur_tx_ring_size / 2;
20599762a033SMarcin Wojtas if (cur_rx_ring_size >= cur_tx_ring_size)
20609762a033SMarcin Wojtas new_rx_ring_size = cur_rx_ring_size / 2;
20619762a033SMarcin Wojtas
20629762a033SMarcin Wojtas if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
20639762a033SMarcin Wojtas new_rx_ring_size < ENA_MIN_RING_SIZE) {
20643fc5d816SMarcin Wojtas ena_log(pdev, ERR,
20659762a033SMarcin Wojtas "Queue creation failed with the smallest possible queue size "
20669762a033SMarcin Wojtas "of %d for both queues. Not retrying with smaller queues\n",
20679762a033SMarcin Wojtas ENA_MIN_RING_SIZE);
20689762a033SMarcin Wojtas return (rc);
20699762a033SMarcin Wojtas }
20709762a033SMarcin Wojtas
207177160654SArtur Rojek ena_log(pdev, INFO,
207277160654SArtur Rojek "Retrying queue creation with sizes TX=%d, RX=%d\n",
207377160654SArtur Rojek new_tx_ring_size, new_rx_ring_size);
207477160654SArtur Rojek
20759762a033SMarcin Wojtas set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
20769762a033SMarcin Wojtas }
20779762a033SMarcin Wojtas }
20789762a033SMarcin Wojtas
207938c7b965SMarcin Wojtas int
20809b8d05b8SZbigniew Bodek ena_up(struct ena_adapter *adapter)
20819b8d05b8SZbigniew Bodek {
20829b8d05b8SZbigniew Bodek int rc = 0;
20839b8d05b8SZbigniew Bodek
208407aff471SArtur Rojek ENA_LOCK_ASSERT();
2085cb98c439SArtur Rojek
20863f9ed7abSMarcin Wojtas if (unlikely(device_is_attached(adapter->pdev) == 0)) {
20873fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "device is not attached!\n");
20889b8d05b8SZbigniew Bodek return (ENXIO);
20899b8d05b8SZbigniew Bodek }
20909b8d05b8SZbigniew Bodek
2091579d23aaSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2092579d23aaSMarcin Wojtas return (0);
2093579d23aaSMarcin Wojtas
20943fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "device is going UP\n");
20959b8d05b8SZbigniew Bodek
20969b8d05b8SZbigniew Bodek /* setup interrupts for IO queues */
209777958fcdSMarcin Wojtas rc = ena_setup_io_intr(adapter);
209877958fcdSMarcin Wojtas if (unlikely(rc != 0)) {
20993fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n");
210077958fcdSMarcin Wojtas goto error;
210177958fcdSMarcin Wojtas }
21029b8d05b8SZbigniew Bodek rc = ena_request_io_irq(adapter);
21033f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) {
21043fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR,
"err_req_irq\n"); 210577958fcdSMarcin Wojtas goto error; 21069b8d05b8SZbigniew Bodek } 21079b8d05b8SZbigniew Bodek 21083fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, 210982e558eaSDawid Gorecki "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, LLQ is %s\n", 21107d8c4feeSMarcin Wojtas adapter->num_io_queues, 21119762a033SMarcin Wojtas adapter->requested_rx_ring_size, 21129762a033SMarcin Wojtas adapter->requested_tx_ring_size, 21139762a033SMarcin Wojtas (adapter->ena_dev->tx_mem_queue_type == 21149762a033SMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED"); 21157d8c4feeSMarcin Wojtas 21169762a033SMarcin Wojtas rc = create_queues_with_size_backoff(adapter); 21173f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 21183fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 21199762a033SMarcin Wojtas "error creating queues with size backoff\n"); 21209762a033SMarcin Wojtas goto err_create_queues_with_backoff; 21219b8d05b8SZbigniew Bodek } 21229b8d05b8SZbigniew Bodek 2123fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 21249b8d05b8SZbigniew Bodek if_link_state_change(adapter->ifp, LINK_STATE_UP); 21259b8d05b8SZbigniew Bodek 21269b8d05b8SZbigniew Bodek rc = ena_up_complete(adapter); 21273f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 21289b8d05b8SZbigniew Bodek goto err_up_complete; 21299b8d05b8SZbigniew Bodek 21309b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_up, 1); 21319b8d05b8SZbigniew Bodek 21329b8d05b8SZbigniew Bodek ena_update_hwassist(adapter); 21339b8d05b8SZbigniew Bodek 213482e558eaSDawid Gorecki if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 21359b8d05b8SZbigniew Bodek 2136fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); 213793471047SZbigniew Bodek 213893471047SZbigniew Bodek ena_unmask_all_io_irqs(adapter); 21399b8d05b8SZbigniew Bodek 21409b8d05b8SZbigniew Bodek return (0); 21419b8d05b8SZbigniew Bodek 21429b8d05b8SZbigniew Bodek err_up_complete: 21439b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 21449b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 21459b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 21469762a033SMarcin Wojtas err_create_queues_with_backoff: 21479b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 214877958fcdSMarcin Wojtas error: 21499b8d05b8SZbigniew Bodek return (rc); 21509b8d05b8SZbigniew Bodek } 21519b8d05b8SZbigniew Bodek 21529b8d05b8SZbigniew Bodek static uint64_t 21539b8d05b8SZbigniew Bodek ena_get_counter(if_t ifp, ift_counter cnt) 21549b8d05b8SZbigniew Bodek { 21559b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 21569b8d05b8SZbigniew Bodek struct ena_hw_stats *stats; 21579b8d05b8SZbigniew Bodek 21589b8d05b8SZbigniew Bodek adapter = if_getsoftc(ifp); 21599b8d05b8SZbigniew Bodek stats = &adapter->hw_stats; 21609b8d05b8SZbigniew Bodek 21619b8d05b8SZbigniew Bodek switch (cnt) { 21629b8d05b8SZbigniew Bodek case IFCOUNTER_IPACKETS: 216330217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_packets)); 21649b8d05b8SZbigniew Bodek case IFCOUNTER_OPACKETS: 216530217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_packets)); 21669b8d05b8SZbigniew Bodek case IFCOUNTER_IBYTES: 216730217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_bytes)); 21689b8d05b8SZbigniew Bodek case IFCOUNTER_OBYTES: 216930217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_bytes)); 21709b8d05b8SZbigniew Bodek case IFCOUNTER_IQDROPS: 217130217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_drops)); 21726c84cec3SMarcin Wojtas case 
IFCOUNTER_OQDROPS:
21736c84cec3SMarcin Wojtas return (counter_u64_fetch(stats->tx_drops));
21749b8d05b8SZbigniew Bodek default:
21759b8d05b8SZbigniew Bodek return (if_get_counter_default(ifp, cnt));
21769b8d05b8SZbigniew Bodek }
21779b8d05b8SZbigniew Bodek }
21789b8d05b8SZbigniew Bodek
21799b8d05b8SZbigniew Bodek static int
21809b8d05b8SZbigniew Bodek ena_media_change(if_t ifp)
21819b8d05b8SZbigniew Bodek {
21829b8d05b8SZbigniew Bodek /* Media change is not supported by the firmware */
21839b8d05b8SZbigniew Bodek return (0);
21849b8d05b8SZbigniew Bodek }
21859b8d05b8SZbigniew Bodek
21869b8d05b8SZbigniew Bodek static void
21879b8d05b8SZbigniew Bodek ena_media_status(if_t ifp, struct ifmediareq *ifmr)
21889b8d05b8SZbigniew Bodek {
21899b8d05b8SZbigniew Bodek struct ena_adapter *adapter = if_getsoftc(ifp);
21903fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, "Media status update\n");
21919b8d05b8SZbigniew Bodek
219207aff471SArtur Rojek ENA_LOCK_LOCK();
21939b8d05b8SZbigniew Bodek
21949b8d05b8SZbigniew Bodek ifmr->ifm_status = IFM_AVALID;
21959b8d05b8SZbigniew Bodek ifmr->ifm_active = IFM_ETHER;
21969b8d05b8SZbigniew Bodek
2197fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
219807aff471SArtur Rojek ENA_LOCK_UNLOCK();
21993fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "Link is down\n");
22009b8d05b8SZbigniew Bodek return;
22019b8d05b8SZbigniew Bodek }
22029b8d05b8SZbigniew Bodek
22039b8d05b8SZbigniew Bodek ifmr->ifm_status |= IFM_ACTIVE;
2204b8ca5dbeSMarcin Wojtas ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
22059b8d05b8SZbigniew Bodek
220607aff471SArtur Rojek ENA_LOCK_UNLOCK();
22079b8d05b8SZbigniew Bodek }
22089b8d05b8SZbigniew Bodek
22099b8d05b8SZbigniew Bodek static void
22109b8d05b8SZbigniew Bodek ena_init(void *arg)
22119b8d05b8SZbigniew Bodek {
22129b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg;
22139b8d05b8SZbigniew Bodek
2214fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
221507aff471SArtur Rojek ENA_LOCK_LOCK();
22169b8d05b8SZbigniew Bodek ena_up(adapter);
221707aff471SArtur Rojek ENA_LOCK_UNLOCK();
22183d3a90f9SZbigniew Bodek }
22199b8d05b8SZbigniew Bodek }
22209b8d05b8SZbigniew Bodek
22219b8d05b8SZbigniew Bodek static int
22229b8d05b8SZbigniew Bodek ena_ioctl(if_t ifp, u_long command, caddr_t data)
22239b8d05b8SZbigniew Bodek {
22249b8d05b8SZbigniew Bodek struct ena_adapter *adapter;
22259b8d05b8SZbigniew Bodek struct ifreq *ifr;
22269b8d05b8SZbigniew Bodek int rc;
22279b8d05b8SZbigniew Bodek
22287583c633SJustin Hibbits adapter = if_getsoftc(ifp);
22299b8d05b8SZbigniew Bodek ifr = (struct ifreq *)data;
22309b8d05b8SZbigniew Bodek
22319b8d05b8SZbigniew Bodek /*
22329b8d05b8SZbigniew Bodek * Acquire the lock to prevent the up and down routines from running in parallel.
22339b8d05b8SZbigniew Bodek */ 22349b8d05b8SZbigniew Bodek rc = 0; 22359b8d05b8SZbigniew Bodek switch (command) { 22369b8d05b8SZbigniew Bodek case SIOCSIFMTU: 22377583c633SJustin Hibbits if (if_getmtu(ifp) == ifr->ifr_mtu) 2238dbf2eb54SMarcin Wojtas break; 223907aff471SArtur Rojek ENA_LOCK_LOCK(); 22409b8d05b8SZbigniew Bodek ena_down(adapter); 22419b8d05b8SZbigniew Bodek 22429b8d05b8SZbigniew Bodek ena_change_mtu(ifp, ifr->ifr_mtu); 22439b8d05b8SZbigniew Bodek 22449b8d05b8SZbigniew Bodek rc = ena_up(adapter); 224507aff471SArtur Rojek ENA_LOCK_UNLOCK(); 22469b8d05b8SZbigniew Bodek break; 22479b8d05b8SZbigniew Bodek 22489b8d05b8SZbigniew Bodek case SIOCSIFFLAGS: 22497583c633SJustin Hibbits if ((if_getflags(ifp) & IFF_UP) != 0) { 22500bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 22517583c633SJustin Hibbits if ((if_getflags(ifp) & (IFF_PROMISC | 22527583c633SJustin Hibbits IFF_ALLMULTI)) != 0) { 22533fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, 22549b8d05b8SZbigniew Bodek "ioctl promisc/allmulti\n"); 22559b8d05b8SZbigniew Bodek } 22569b8d05b8SZbigniew Bodek } else { 225707aff471SArtur Rojek ENA_LOCK_LOCK(); 22589b8d05b8SZbigniew Bodek rc = ena_up(adapter); 225907aff471SArtur Rojek ENA_LOCK_UNLOCK(); 22609b8d05b8SZbigniew Bodek } 22619b8d05b8SZbigniew Bodek } else { 22620bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 226307aff471SArtur Rojek ENA_LOCK_LOCK(); 22649b8d05b8SZbigniew Bodek ena_down(adapter); 226507aff471SArtur Rojek ENA_LOCK_UNLOCK(); 2266e67c6554SZbigniew Bodek } 22679b8d05b8SZbigniew Bodek } 22689b8d05b8SZbigniew Bodek break; 22699b8d05b8SZbigniew Bodek 22709b8d05b8SZbigniew Bodek case SIOCADDMULTI: 22719b8d05b8SZbigniew Bodek case SIOCDELMULTI: 22729b8d05b8SZbigniew Bodek break; 22739b8d05b8SZbigniew Bodek 22749b8d05b8SZbigniew Bodek case SIOCSIFMEDIA: 22759b8d05b8SZbigniew Bodek case SIOCGIFMEDIA: 22769b8d05b8SZbigniew Bodek rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 22779b8d05b8SZbigniew Bodek break; 22789b8d05b8SZbigniew Bodek 22799b8d05b8SZbigniew Bodek case SIOCSIFCAP: 22809b8d05b8SZbigniew Bodek { 22819b8d05b8SZbigniew Bodek int reinit = 0; 22829b8d05b8SZbigniew Bodek 22837583c633SJustin Hibbits if (ifr->ifr_reqcap != if_getcapenable(ifp)) { 22847583c633SJustin Hibbits if_setcapenable(ifp, ifr->ifr_reqcap); 22859b8d05b8SZbigniew Bodek reinit = 1; 22869b8d05b8SZbigniew Bodek } 22879b8d05b8SZbigniew Bodek 22880bdffe59SMarcin Wojtas if ((reinit != 0) && 22890bdffe59SMarcin Wojtas ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { 229007aff471SArtur Rojek ENA_LOCK_LOCK(); 22919b8d05b8SZbigniew Bodek ena_down(adapter); 22929b8d05b8SZbigniew Bodek rc = ena_up(adapter); 229307aff471SArtur Rojek ENA_LOCK_UNLOCK(); 22949b8d05b8SZbigniew Bodek } 22959b8d05b8SZbigniew Bodek } 22969b8d05b8SZbigniew Bodek 22979b8d05b8SZbigniew Bodek break; 22989b8d05b8SZbigniew Bodek default: 22999b8d05b8SZbigniew Bodek rc = ether_ioctl(ifp, command, data); 23009b8d05b8SZbigniew Bodek break; 23019b8d05b8SZbigniew Bodek } 23029b8d05b8SZbigniew Bodek 23039b8d05b8SZbigniew Bodek return (rc); 23049b8d05b8SZbigniew Bodek } 23059b8d05b8SZbigniew Bodek 23069b8d05b8SZbigniew Bodek static int 23079b8d05b8SZbigniew Bodek ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) 23089b8d05b8SZbigniew Bodek { 23099b8d05b8SZbigniew Bodek int caps = 0; 23109b8d05b8SZbigniew Bodek 23110bdffe59SMarcin Wojtas if ((feat->offload.tx & 23129b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 23139b8d05b8SZbigniew 
Bodek ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | 23140bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) 23159b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM; 23169b8d05b8SZbigniew Bodek 23170bdffe59SMarcin Wojtas if ((feat->offload.tx & 23189b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | 23190bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) 23209b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM_IPV6; 23219b8d05b8SZbigniew Bodek 232282e558eaSDawid Gorecki if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) 23239b8d05b8SZbigniew Bodek caps |= IFCAP_TSO4; 23249b8d05b8SZbigniew Bodek 232582e558eaSDawid Gorecki if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) 23269b8d05b8SZbigniew Bodek caps |= IFCAP_TSO6; 23279b8d05b8SZbigniew Bodek 23280bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 23299b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | 23300bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) 23319b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM; 23329b8d05b8SZbigniew Bodek 23330bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 23340bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) 23359b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM_IPV6; 23369b8d05b8SZbigniew Bodek 23379b8d05b8SZbigniew Bodek caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; 23389b8d05b8SZbigniew Bodek 23399b8d05b8SZbigniew Bodek return (caps); 23409b8d05b8SZbigniew Bodek } 23419b8d05b8SZbigniew Bodek 23429b8d05b8SZbigniew Bodek static void 23439b8d05b8SZbigniew Bodek ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) 23449b8d05b8SZbigniew Bodek { 234582e558eaSDawid Gorecki host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp); 23469b8d05b8SZbigniew Bodek } 23479b8d05b8SZbigniew Bodek 23489b8d05b8SZbigniew Bodek static void 23499b8d05b8SZbigniew Bodek ena_update_hwassist(struct ena_adapter *adapter) 23509b8d05b8SZbigniew Bodek { 23519b8d05b8SZbigniew Bodek if_t ifp = adapter->ifp; 23529b8d05b8SZbigniew Bodek uint32_t feat = adapter->tx_offload_cap; 23539b8d05b8SZbigniew Bodek int cap = if_getcapenable(ifp); 23549b8d05b8SZbigniew Bodek int flags = 0; 23559b8d05b8SZbigniew Bodek 23569b8d05b8SZbigniew Bodek if_clearhwassist(ifp); 23579b8d05b8SZbigniew Bodek 23580bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM) != 0) { 23590bdffe59SMarcin Wojtas if ((feat & 23600bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) 23619b8d05b8SZbigniew Bodek flags |= CSUM_IP; 23620bdffe59SMarcin Wojtas if ((feat & 23639b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 23640bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) 23659b8d05b8SZbigniew Bodek flags |= CSUM_IP_UDP | CSUM_IP_TCP; 23669b8d05b8SZbigniew Bodek } 23679b8d05b8SZbigniew Bodek 23680bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM_IPV6) != 0) 23699b8d05b8SZbigniew Bodek flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; 23709b8d05b8SZbigniew Bodek 23710bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO4) != 0) 23729b8d05b8SZbigniew Bodek flags |= CSUM_IP_TSO; 23739b8d05b8SZbigniew Bodek 23740bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO6) != 0) 23759b8d05b8SZbigniew Bodek flags |= CSUM_IP6_TSO; 23769b8d05b8SZbigniew Bodek 23779b8d05b8SZbigniew Bodek if_sethwassistbits(ifp, flags, 0); 23789b8d05b8SZbigniew Bodek } 
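/*
 * ena_setup_ifnet() below allocates the ifnet structure, registers the
 * driver callbacks (init, transmit, qflush, ioctl and get_counter),
 * advertises the offload capabilities reported by the device and attaches
 * the Ethernet interface.
 */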
23799b8d05b8SZbigniew Bodek 23809b8d05b8SZbigniew Bodek static int 23819b8d05b8SZbigniew Bodek ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, 23829b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *feat) 23839b8d05b8SZbigniew Bodek { 23849b8d05b8SZbigniew Bodek if_t ifp; 23859b8d05b8SZbigniew Bodek int caps = 0; 23869b8d05b8SZbigniew Bodek 23879b8d05b8SZbigniew Bodek ifp = adapter->ifp = if_gethandle(IFT_ETHER); 23883f9ed7abSMarcin Wojtas if (unlikely(ifp == NULL)) { 23893fc5d816SMarcin Wojtas ena_log(pdev, ERR, "can not allocate ifnet structure\n"); 23909b8d05b8SZbigniew Bodek return (ENXIO); 23919b8d05b8SZbigniew Bodek } 23929b8d05b8SZbigniew Bodek if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); 23939b8d05b8SZbigniew Bodek if_setdev(ifp, pdev); 23949b8d05b8SZbigniew Bodek if_setsoftc(ifp, adapter); 23959b8d05b8SZbigniew Bodek 2396a6b55ee6SGleb Smirnoff if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 23979b8d05b8SZbigniew Bodek if_setinitfn(ifp, ena_init); 23989b8d05b8SZbigniew Bodek if_settransmitfn(ifp, ena_mq_start); 23999b8d05b8SZbigniew Bodek if_setqflushfn(ifp, ena_qflush); 24009b8d05b8SZbigniew Bodek if_setioctlfn(ifp, ena_ioctl); 24019b8d05b8SZbigniew Bodek if_setgetcounterfn(ifp, ena_get_counter); 24029b8d05b8SZbigniew Bodek 24039762a033SMarcin Wojtas if_setsendqlen(ifp, adapter->requested_tx_ring_size); 24049b8d05b8SZbigniew Bodek if_setsendqready(ifp); 24059b8d05b8SZbigniew Bodek if_setmtu(ifp, ETHERMTU); 24069b8d05b8SZbigniew Bodek if_setbaudrate(ifp, 0); 24079b8d05b8SZbigniew Bodek /* Zeroize capabilities... */ 24089b8d05b8SZbigniew Bodek if_setcapabilities(ifp, 0); 24099b8d05b8SZbigniew Bodek if_setcapenable(ifp, 0); 24109b8d05b8SZbigniew Bodek /* check hardware support */ 24119b8d05b8SZbigniew Bodek caps = ena_get_dev_offloads(feat); 24129b8d05b8SZbigniew Bodek /* ... 
and set them */ 24139b8d05b8SZbigniew Bodek if_setcapabilitiesbit(ifp, caps, 0); 24149b8d05b8SZbigniew Bodek 24159b8d05b8SZbigniew Bodek /* TSO parameters */ 24167583c633SJustin Hibbits if_sethwtsomax(ifp, ENA_TSO_MAXSIZE - 24177583c633SJustin Hibbits (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); 24187583c633SJustin Hibbits if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1); 24197583c633SJustin Hibbits if_sethwtsomaxsegsize(ifp, ENA_TSO_MAXSIZE); 24209b8d05b8SZbigniew Bodek 24219b8d05b8SZbigniew Bodek if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 24229b8d05b8SZbigniew Bodek if_setcapenable(ifp, if_getcapabilities(ifp)); 24239b8d05b8SZbigniew Bodek 24249b8d05b8SZbigniew Bodek /* 24259b8d05b8SZbigniew Bodek * Specify the media types supported by this adapter and register 24269b8d05b8SZbigniew Bodek * callbacks to update media and link information 24279b8d05b8SZbigniew Bodek */ 242882e558eaSDawid Gorecki ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change, 242982e558eaSDawid Gorecki ena_media_status); 24309b8d05b8SZbigniew Bodek ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 24319b8d05b8SZbigniew Bodek ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 24329b8d05b8SZbigniew Bodek 24339b8d05b8SZbigniew Bodek ether_ifattach(ifp, adapter->mac_addr); 24349b8d05b8SZbigniew Bodek 24359b8d05b8SZbigniew Bodek return (0); 24369b8d05b8SZbigniew Bodek } 24379b8d05b8SZbigniew Bodek 243838c7b965SMarcin Wojtas void 24399b8d05b8SZbigniew Bodek ena_down(struct ena_adapter *adapter) 24409b8d05b8SZbigniew Bodek { 2441a195fab0SMarcin Wojtas int rc; 24429b8d05b8SZbigniew Bodek 244307aff471SArtur Rojek ENA_LOCK_ASSERT(); 2444cb98c439SArtur Rojek 2445579d23aaSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2446579d23aaSMarcin Wojtas return; 2447579d23aaSMarcin Wojtas 244878554d0cSDawid Gorecki ena_log(adapter->pdev, INFO, "device is going DOWN\n"); 24499b8d05b8SZbigniew Bodek 2450fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); 245182e558eaSDawid Gorecki if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 24529b8d05b8SZbigniew Bodek 24539b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 24549b8d05b8SZbigniew Bodek 2455fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { 245682e558eaSDawid Gorecki rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 24573f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 245882e558eaSDawid Gorecki ena_log(adapter->pdev, ERR, "Device reset failed\n"); 2459a195fab0SMarcin Wojtas } 2460a195fab0SMarcin Wojtas 24619b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 24629b8d05b8SZbigniew Bodek 24639b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(adapter); 24649b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(adapter); 24659b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 24669b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 24679b8d05b8SZbigniew Bodek 24689b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_down, 1); 24699b8d05b8SZbigniew Bodek } 24709b8d05b8SZbigniew Bodek 24717d8c4feeSMarcin Wojtas static uint32_t 24727d8c4feeSMarcin Wojtas ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev, 24739b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx) 24749b8d05b8SZbigniew Bodek { 24757d8c4feeSMarcin Wojtas uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 24769b8d05b8SZbigniew Bodek 24776064f289SMarcin Wojtas /* Regular queues capabilities */ 24786064f289SMarcin Wojtas if 
(ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 24796064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 24806064f289SMarcin Wojtas &get_feat_ctx->max_queue_ext.max_queue_ext; 24814fa9e02dSMarcin Wojtas io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, 24824fa9e02dSMarcin Wojtas max_queue_ext->max_rx_cq_num); 24836064f289SMarcin Wojtas 24844fa9e02dSMarcin Wojtas io_tx_sq_num = max_queue_ext->max_tx_sq_num; 24854fa9e02dSMarcin Wojtas io_tx_cq_num = max_queue_ext->max_tx_cq_num; 24866064f289SMarcin Wojtas } else { 24876064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 24886064f289SMarcin Wojtas &get_feat_ctx->max_queues; 24894fa9e02dSMarcin Wojtas io_tx_sq_num = max_queues->max_sq_num; 24904fa9e02dSMarcin Wojtas io_tx_cq_num = max_queues->max_cq_num; 24914fa9e02dSMarcin Wojtas io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); 24926064f289SMarcin Wojtas } 24939b8d05b8SZbigniew Bodek 24944fa9e02dSMarcin Wojtas /* In case of LLQ use the llq fields for the tx SQ/CQ */ 24954fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 24964fa9e02dSMarcin Wojtas io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 24974fa9e02dSMarcin Wojtas 24987d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); 24997d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num); 25007d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num); 25017d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num); 2502609e6f6dSGordon Bergling /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */ 25037d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, 25047d8c4feeSMarcin Wojtas pci_msix_count(pdev) - 1); 25056d1ef2abSArtur Rojek #ifdef RSS 25066d1ef2abSArtur Rojek max_num_io_queues = min_t(uint32_t, max_num_io_queues, 25076d1ef2abSArtur Rojek rss_getnumbuckets()); 25086d1ef2abSArtur Rojek #endif 25099b8d05b8SZbigniew Bodek 25107d8c4feeSMarcin Wojtas return (max_num_io_queues); 25119b8d05b8SZbigniew Bodek } 25129b8d05b8SZbigniew Bodek 25130bdffe59SMarcin Wojtas static int 25143fc5d816SMarcin Wojtas ena_enable_wc(device_t pdev, struct resource *res) 25154fa9e02dSMarcin Wojtas { 2516472d4784SMarcin Wojtas #if defined(__i386) || defined(__amd64) || defined(__aarch64__) 25174fa9e02dSMarcin Wojtas vm_offset_t va; 25184fa9e02dSMarcin Wojtas vm_size_t len; 25194fa9e02dSMarcin Wojtas int rc; 25204fa9e02dSMarcin Wojtas 25214fa9e02dSMarcin Wojtas va = (vm_offset_t)rman_get_virtual(res); 25224fa9e02dSMarcin Wojtas len = rman_get_size(res); 25234fa9e02dSMarcin Wojtas /* Enable write combining */ 2524472d4784SMarcin Wojtas rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING); 25254fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 25263fc5d816SMarcin Wojtas ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc); 25274fa9e02dSMarcin Wojtas return (rc); 25284fa9e02dSMarcin Wojtas } 25294fa9e02dSMarcin Wojtas 25304fa9e02dSMarcin Wojtas return (0); 25314fa9e02dSMarcin Wojtas #endif 25324fa9e02dSMarcin Wojtas return (EOPNOTSUPP); 25334fa9e02dSMarcin Wojtas } 25344fa9e02dSMarcin Wojtas 25354fa9e02dSMarcin Wojtas static int 25364fa9e02dSMarcin Wojtas ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, 25374fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq, 25384fa9e02dSMarcin Wojtas struct ena_llq_configurations *llq_default_configurations) 25394fa9e02dSMarcin 
Wojtas { 254090232d18SDawid Gorecki int rc; 25414fa9e02dSMarcin Wojtas uint32_t llq_feature_mask; 25424fa9e02dSMarcin Wojtas 25434fa9e02dSMarcin Wojtas llq_feature_mask = 1 << ENA_ADMIN_LLQ; 25444fa9e02dSMarcin Wojtas if (!(ena_dev->supported_features & llq_feature_mask)) { 25453fc5d816SMarcin Wojtas ena_log(pdev, WARN, 25464fa9e02dSMarcin Wojtas "LLQ is not supported. Fallback to host mode policy.\n"); 25474fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 25484fa9e02dSMarcin Wojtas return (0); 25494fa9e02dSMarcin Wojtas } 25504fa9e02dSMarcin Wojtas 255190232d18SDawid Gorecki if (ena_dev->mem_bar == NULL) { 255290232d18SDawid Gorecki ena_log(pdev, WARN, 255390232d18SDawid Gorecki "LLQ is advertised as supported but device doesn't expose mem bar.\n"); 255490232d18SDawid Gorecki ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 255590232d18SDawid Gorecki return (0); 255690232d18SDawid Gorecki } 255790232d18SDawid Gorecki 25584fa9e02dSMarcin Wojtas rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 25594fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 256082e558eaSDawid Gorecki ena_log(pdev, WARN, 256182e558eaSDawid Gorecki "Failed to configure the device mode. " 25624fa9e02dSMarcin Wojtas "Fallback to host mode policy.\n"); 25634fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 256490232d18SDawid Gorecki } 256590232d18SDawid Gorecki 25664fa9e02dSMarcin Wojtas return (0); 25674fa9e02dSMarcin Wojtas } 25684fa9e02dSMarcin Wojtas 256990232d18SDawid Gorecki static int 257090232d18SDawid Gorecki ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev) 257190232d18SDawid Gorecki { 257290232d18SDawid Gorecki struct ena_adapter *adapter = device_get_softc(pdev); 257390232d18SDawid Gorecki int rc, rid; 25744fa9e02dSMarcin Wojtas 25754fa9e02dSMarcin Wojtas /* Try to allocate resources for LLQ bar */ 25764fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_MEM_BAR); 257782e558eaSDawid Gorecki adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, 257882e558eaSDawid Gorecki RF_ACTIVE); 25794fa9e02dSMarcin Wojtas if (unlikely(adapter->memory == NULL)) { 258082e558eaSDawid Gorecki ena_log(pdev, WARN, 25813324e304SMichal Krawczyk "Unable to allocate LLQ bar resource. LLQ mode won't be used.\n"); 25824fa9e02dSMarcin Wojtas return (0); 25834fa9e02dSMarcin Wojtas } 25844fa9e02dSMarcin Wojtas 25854fa9e02dSMarcin Wojtas /* Enable write combining for better LLQ performance */ 25863fc5d816SMarcin Wojtas rc = ena_enable_wc(adapter->pdev, adapter->memory); 25874fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 25883fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to enable write combining.\n"); 25894fa9e02dSMarcin Wojtas return (rc); 25904fa9e02dSMarcin Wojtas } 25914fa9e02dSMarcin Wojtas 25924fa9e02dSMarcin Wojtas /* 25934fa9e02dSMarcin Wojtas * Save virtual address of the device's memory region 25944fa9e02dSMarcin Wojtas * for the ena_com layer. 
25954fa9e02dSMarcin Wojtas */ 25964fa9e02dSMarcin Wojtas ena_dev->mem_bar = rman_get_virtual(adapter->memory); 25974fa9e02dSMarcin Wojtas 25984fa9e02dSMarcin Wojtas return (0); 25994fa9e02dSMarcin Wojtas } 26004fa9e02dSMarcin Wojtas 260182e558eaSDawid Gorecki static inline void 260282e558eaSDawid Gorecki set_default_llq_configurations(struct ena_llq_configurations *llq_config, 2603beaadec9SMarcin Wojtas struct ena_admin_feature_llq_desc *llq) 26044fa9e02dSMarcin Wojtas { 26054fa9e02dSMarcin Wojtas llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 26064fa9e02dSMarcin Wojtas llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 26074fa9e02dSMarcin Wojtas llq_config->llq_num_decs_before_header = 26084fa9e02dSMarcin Wojtas ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 260982e558eaSDawid Gorecki if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 261082e558eaSDawid Gorecki 0 && ena_force_large_llq_header) { 2611beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size = 2612beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_256B; 2613beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size_value = 256; 2614beaadec9SMarcin Wojtas } else { 2615beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size = 2616beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_128B; 26174fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size_value = 128; 26184fa9e02dSMarcin Wojtas } 2619beaadec9SMarcin Wojtas } 26204fa9e02dSMarcin Wojtas 26214fa9e02dSMarcin Wojtas static int 26227d8c4feeSMarcin Wojtas ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) 26239b8d05b8SZbigniew Bodek { 26244fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 26254fa9e02dSMarcin Wojtas struct ena_com_dev *ena_dev = ctx->ena_dev; 26266064f289SMarcin Wojtas uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; 26277d8c4feeSMarcin Wojtas uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE; 26287d8c4feeSMarcin Wojtas uint32_t max_tx_queue_size; 26297d8c4feeSMarcin Wojtas uint32_t max_rx_queue_size; 26309b8d05b8SZbigniew Bodek 26314fa9e02dSMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 26326064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 26336064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 26347d8c4feeSMarcin Wojtas max_rx_queue_size = min_t(uint32_t, 26357d8c4feeSMarcin Wojtas max_queue_ext->max_rx_cq_depth, 26366064f289SMarcin Wojtas max_queue_ext->max_rx_sq_depth); 26377d8c4feeSMarcin Wojtas max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 26384fa9e02dSMarcin Wojtas 26394fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 26404fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 26417d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 26424fa9e02dSMarcin Wojtas llq->max_llq_depth); 26434fa9e02dSMarcin Wojtas else 26447d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 26456064f289SMarcin Wojtas max_queue_ext->max_tx_sq_depth); 26464fa9e02dSMarcin Wojtas 26476064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 26486064f289SMarcin Wojtas max_queue_ext->max_per_packet_tx_descs); 26497d8c4feeSMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 26507d8c4feeSMarcin Wojtas max_queue_ext->max_per_packet_rx_descs); 26516064f289SMarcin Wojtas } else { 26526064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 26536064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queues; 265482e558eaSDawid 
Gorecki max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth, 26556064f289SMarcin Wojtas max_queues->max_sq_depth); 26567d8c4feeSMarcin Wojtas max_tx_queue_size = max_queues->max_cq_depth; 26574fa9e02dSMarcin Wojtas 26584fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 26594fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 26607d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 26614fa9e02dSMarcin Wojtas llq->max_llq_depth); 26624fa9e02dSMarcin Wojtas else 26637d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 26644fa9e02dSMarcin Wojtas max_queues->max_sq_depth); 26654fa9e02dSMarcin Wojtas 26666064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 26677d8c4feeSMarcin Wojtas max_queues->max_packet_tx_descs); 26687d8c4feeSMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 26696064f289SMarcin Wojtas max_queues->max_packet_rx_descs); 26706064f289SMarcin Wojtas } 26719b8d05b8SZbigniew Bodek 26729b8d05b8SZbigniew Bodek /* round down to the nearest power of 2 */ 26737d8c4feeSMarcin Wojtas max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1); 26747d8c4feeSMarcin Wojtas max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1); 26756064f289SMarcin Wojtas 2676beaadec9SMarcin Wojtas /* 2677beaadec9SMarcin Wojtas * When forcing large headers, we multiply the entry size by 2, 2678beaadec9SMarcin Wojtas * and therefore divide the queue size by 2, leaving the amount 2679beaadec9SMarcin Wojtas * of memory used by the queues unchanged. 2680beaadec9SMarcin Wojtas */ 2681beaadec9SMarcin Wojtas if (ena_force_large_llq_header) { 2682beaadec9SMarcin Wojtas if ((llq->entry_size_ctrl_supported & 2683beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 && 2684beaadec9SMarcin Wojtas ena_dev->tx_mem_queue_type == 2685beaadec9SMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2686beaadec9SMarcin Wojtas max_tx_queue_size /= 2; 26873fc5d816SMarcin Wojtas ena_log(ctx->pdev, INFO, 2688beaadec9SMarcin Wojtas "Forcing large headers and decreasing maximum Tx queue size to %d\n", 2689beaadec9SMarcin Wojtas max_tx_queue_size); 2690beaadec9SMarcin Wojtas } else { 26913fc5d816SMarcin Wojtas ena_log(ctx->pdev, WARN, 2692beaadec9SMarcin Wojtas "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 2693beaadec9SMarcin Wojtas } 2694beaadec9SMarcin Wojtas } 2695beaadec9SMarcin Wojtas 26967d8c4feeSMarcin Wojtas tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, 26977d8c4feeSMarcin Wojtas max_tx_queue_size); 26987d8c4feeSMarcin Wojtas rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, 26997d8c4feeSMarcin Wojtas max_rx_queue_size); 27009b8d05b8SZbigniew Bodek 27017d8c4feeSMarcin Wojtas tx_queue_size = 1 << (flsl(tx_queue_size) - 1); 27027d8c4feeSMarcin Wojtas rx_queue_size = 1 << (flsl(rx_queue_size) - 1); 27037d8c4feeSMarcin Wojtas 27047d8c4feeSMarcin Wojtas ctx->max_tx_queue_size = max_tx_queue_size; 27057d8c4feeSMarcin Wojtas ctx->max_rx_queue_size = max_rx_queue_size; 27066064f289SMarcin Wojtas ctx->tx_queue_size = tx_queue_size; 27077d8c4feeSMarcin Wojtas ctx->rx_queue_size = rx_queue_size; 27086064f289SMarcin Wojtas 27096064f289SMarcin Wojtas return (0); 27109b8d05b8SZbigniew Bodek } 27119b8d05b8SZbigniew Bodek 27120bdffe59SMarcin Wojtas static void 271346021271SMarcin Wojtas ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) 27149b8d05b8SZbigniew Bodek { 27159b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info; 271646021271SMarcin 
Wojtas uintptr_t rid; 27179b8d05b8SZbigniew Bodek int rc; 27189b8d05b8SZbigniew Bodek 27199b8d05b8SZbigniew Bodek /* Allocate only the host info */ 27209b8d05b8SZbigniew Bodek rc = ena_com_allocate_host_info(ena_dev); 27213f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 27223fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot allocate host info\n"); 27239b8d05b8SZbigniew Bodek return; 27249b8d05b8SZbigniew Bodek } 27259b8d05b8SZbigniew Bodek 27269b8d05b8SZbigniew Bodek host_info = ena_dev->host_attr.host_info; 27279b8d05b8SZbigniew Bodek 272846021271SMarcin Wojtas if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) 272946021271SMarcin Wojtas host_info->bdf = rid; 27309b8d05b8SZbigniew Bodek host_info->os_type = ENA_ADMIN_OS_FREEBSD; 27319b8d05b8SZbigniew Bodek host_info->kernel_ver = osreldate; 27329b8d05b8SZbigniew Bodek 27339b8d05b8SZbigniew Bodek sprintf(host_info->kernel_ver_str, "%d", osreldate); 27349b8d05b8SZbigniew Bodek host_info->os_dist = 0; 27359b8d05b8SZbigniew Bodek strncpy(host_info->os_dist_str, osrelease, 27369b8d05b8SZbigniew Bodek sizeof(host_info->os_dist_str) - 1); 27379b8d05b8SZbigniew Bodek 27388f15f8a7SDawid Gorecki host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) | 27398f15f8a7SDawid Gorecki (ENA_DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 27408f15f8a7SDawid Gorecki (ENA_DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 27418ece6b25SMarcin Wojtas host_info->num_cpus = mp_ncpus; 2742c7444389SMarcin Wojtas host_info->driver_supported_features = 27436d1ef2abSArtur Rojek ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 27446d1ef2abSArtur Rojek ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 27459b8d05b8SZbigniew Bodek 27469b8d05b8SZbigniew Bodek rc = ena_com_set_host_attributes(ena_dev); 27473f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 2748a195fab0SMarcin Wojtas if (rc == EOPNOTSUPP) 27493fc5d816SMarcin Wojtas ena_log(dev, WARN, "Cannot set host attributes\n"); 27509b8d05b8SZbigniew Bodek else 27513fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot set host attributes\n"); 27529b8d05b8SZbigniew Bodek 27539b8d05b8SZbigniew Bodek goto err; 27549b8d05b8SZbigniew Bodek } 27559b8d05b8SZbigniew Bodek 27569b8d05b8SZbigniew Bodek return; 27579b8d05b8SZbigniew Bodek 27589b8d05b8SZbigniew Bodek err: 27599b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 27609b8d05b8SZbigniew Bodek } 27619b8d05b8SZbigniew Bodek 27629b8d05b8SZbigniew Bodek static int 27639b8d05b8SZbigniew Bodek ena_device_init(struct ena_adapter *adapter, device_t pdev, 27649b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 27659b8d05b8SZbigniew Bodek { 27663324e304SMichal Krawczyk struct ena_llq_configurations llq_config; 27679b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 27689b8d05b8SZbigniew Bodek bool readless_supported; 27699b8d05b8SZbigniew Bodek uint32_t aenq_groups; 27709b8d05b8SZbigniew Bodek int dma_width; 27719b8d05b8SZbigniew Bodek int rc; 27729b8d05b8SZbigniew Bodek 27739b8d05b8SZbigniew Bodek rc = ena_com_mmio_reg_read_request_init(ena_dev); 27743f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 27753fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to init mmio read less\n"); 27760bdffe59SMarcin Wojtas return (rc); 27779b8d05b8SZbigniew Bodek } 27789b8d05b8SZbigniew Bodek 27799b8d05b8SZbigniew Bodek /* 27809b8d05b8SZbigniew Bodek * The PCIe configuration space revision id indicate if mmio reg 27819b8d05b8SZbigniew Bodek * read is disabled 27829b8d05b8SZbigniew Bodek */ 27839b8d05b8SZbigniew Bodek 
readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); 27849b8d05b8SZbigniew Bodek ena_com_set_mmio_read_mode(ena_dev, readless_supported); 27859b8d05b8SZbigniew Bodek 2786a195fab0SMarcin Wojtas rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 27873f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 27883fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Can not reset device\n"); 27899b8d05b8SZbigniew Bodek goto err_mmio_read_less; 27909b8d05b8SZbigniew Bodek } 27919b8d05b8SZbigniew Bodek 27929b8d05b8SZbigniew Bodek rc = ena_com_validate_version(ena_dev); 27933f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 27943fc5d816SMarcin Wojtas ena_log(pdev, ERR, "device version is too low\n"); 27959b8d05b8SZbigniew Bodek goto err_mmio_read_less; 27969b8d05b8SZbigniew Bodek } 27979b8d05b8SZbigniew Bodek 27989b8d05b8SZbigniew Bodek dma_width = ena_com_get_dma_width(ena_dev); 27993f9ed7abSMarcin Wojtas if (unlikely(dma_width < 0)) { 28003fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Invalid dma width value %d", dma_width); 28019b8d05b8SZbigniew Bodek rc = dma_width; 28029b8d05b8SZbigniew Bodek goto err_mmio_read_less; 28039b8d05b8SZbigniew Bodek } 28049b8d05b8SZbigniew Bodek adapter->dma_width = dma_width; 28059b8d05b8SZbigniew Bodek 28069b8d05b8SZbigniew Bodek /* ENA admin level init */ 280767ec48bbSMarcin Wojtas rc = ena_com_admin_init(ena_dev, &aenq_handlers); 28083f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28093fc5d816SMarcin Wojtas ena_log(pdev, ERR, 28109b8d05b8SZbigniew Bodek "Can not initialize ena admin queue with device\n"); 28119b8d05b8SZbigniew Bodek goto err_mmio_read_less; 28129b8d05b8SZbigniew Bodek } 28139b8d05b8SZbigniew Bodek 28149b8d05b8SZbigniew Bodek /* 28159b8d05b8SZbigniew Bodek * To enable the msix interrupts the driver needs to know the number 28169b8d05b8SZbigniew Bodek * of queues. 
So the driver uses polling mode to retrieve this 28179b8d05b8SZbigniew Bodek * information 28189b8d05b8SZbigniew Bodek */ 28199b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, true); 28209b8d05b8SZbigniew Bodek 282146021271SMarcin Wojtas ena_config_host_info(ena_dev, pdev); 28229b8d05b8SZbigniew Bodek 28239b8d05b8SZbigniew Bodek /* Get Device Attributes */ 28249b8d05b8SZbigniew Bodek rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 28253f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28263fc5d816SMarcin Wojtas ena_log(pdev, ERR, 28279b8d05b8SZbigniew Bodek "Cannot get attribute for ena device rc: %d\n", rc); 28289b8d05b8SZbigniew Bodek goto err_admin_init; 28299b8d05b8SZbigniew Bodek } 28309b8d05b8SZbigniew Bodek 2831e6de9a83SMarcin Wojtas aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 2832e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_FATAL_ERROR) | 2833e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_WARNING) | 283440621d71SMarcin Wojtas BIT(ENA_ADMIN_NOTIFICATION) | 2835e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_KEEP_ALIVE); 28369b8d05b8SZbigniew Bodek 28379b8d05b8SZbigniew Bodek aenq_groups &= get_feat_ctx->aenq.supported_groups; 28389b8d05b8SZbigniew Bodek rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 28393f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28403fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc); 28419b8d05b8SZbigniew Bodek goto err_admin_init; 28429b8d05b8SZbigniew Bodek } 28439b8d05b8SZbigniew Bodek 28449b8d05b8SZbigniew Bodek *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 28459b8d05b8SZbigniew Bodek 28463324e304SMichal Krawczyk set_default_llq_configurations(&llq_config, &get_feat_ctx->llq); 28473324e304SMichal Krawczyk 28483324e304SMichal Krawczyk rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, 28493324e304SMichal Krawczyk &llq_config); 28503324e304SMichal Krawczyk if (unlikely(rc != 0)) { 28513324e304SMichal Krawczyk ena_log(pdev, ERR, "Failed to set placement policy\n"); 28523324e304SMichal Krawczyk goto err_admin_init; 28533324e304SMichal Krawczyk } 28543324e304SMichal Krawczyk 28550bdffe59SMarcin Wojtas return (0); 28569b8d05b8SZbigniew Bodek 28579b8d05b8SZbigniew Bodek err_admin_init: 28589b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 28599b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 28609b8d05b8SZbigniew Bodek err_mmio_read_less: 28619b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 28629b8d05b8SZbigniew Bodek 28630bdffe59SMarcin Wojtas return (rc); 28649b8d05b8SZbigniew Bodek } 28659b8d05b8SZbigniew Bodek 286682e558eaSDawid Gorecki static int 286782e558eaSDawid Gorecki ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) 28689b8d05b8SZbigniew Bodek { 28699b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 28709b8d05b8SZbigniew Bodek int rc; 28719b8d05b8SZbigniew Bodek 28729b8d05b8SZbigniew Bodek rc = ena_enable_msix(adapter); 28733f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28743fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n"); 28750bdffe59SMarcin Wojtas return (rc); 28769b8d05b8SZbigniew Bodek } 28779b8d05b8SZbigniew Bodek 28789b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(adapter); 28799b8d05b8SZbigniew Bodek 28809b8d05b8SZbigniew Bodek rc = ena_request_mgmnt_irq(adapter); 28813f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28823fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n"); 28839b8d05b8SZbigniew Bodek goto err_disable_msix; 28849b8d05b8SZbigniew 
Bodek } 28859b8d05b8SZbigniew Bodek 28869b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, false); 28879b8d05b8SZbigniew Bodek 28889b8d05b8SZbigniew Bodek ena_com_admin_aenq_enable(ena_dev); 28899b8d05b8SZbigniew Bodek 28900bdffe59SMarcin Wojtas return (0); 28919b8d05b8SZbigniew Bodek 28929b8d05b8SZbigniew Bodek err_disable_msix: 28939b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 28949b8d05b8SZbigniew Bodek 28950bdffe59SMarcin Wojtas return (rc); 28969b8d05b8SZbigniew Bodek } 28979b8d05b8SZbigniew Bodek 28989b8d05b8SZbigniew Bodek /* Function called on ENA_ADMIN_KEEP_ALIVE event */ 289982e558eaSDawid Gorecki static void 290082e558eaSDawid Gorecki ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) 29019b8d05b8SZbigniew Bodek { 29029b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 290330217e2dSMarcin Wojtas struct ena_admin_aenq_keep_alive_desc *desc; 29049b8d05b8SZbigniew Bodek sbintime_t stime; 290530217e2dSMarcin Wojtas uint64_t rx_drops; 29066c84cec3SMarcin Wojtas uint64_t tx_drops; 290730217e2dSMarcin Wojtas 290830217e2dSMarcin Wojtas desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 290930217e2dSMarcin Wojtas 291030217e2dSMarcin Wojtas rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 29116c84cec3SMarcin Wojtas tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low; 291230217e2dSMarcin Wojtas counter_u64_zero(adapter->hw_stats.rx_drops); 291330217e2dSMarcin Wojtas counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); 29146c84cec3SMarcin Wojtas counter_u64_zero(adapter->hw_stats.tx_drops); 29156c84cec3SMarcin Wojtas counter_u64_add(adapter->hw_stats.tx_drops, tx_drops); 29169b8d05b8SZbigniew Bodek 29179b8d05b8SZbigniew Bodek stime = getsbinuptime(); 29189b8d05b8SZbigniew Bodek atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 29199b8d05b8SZbigniew Bodek } 29209b8d05b8SZbigniew Bodek 29219b8d05b8SZbigniew Bodek /* Check for keep alive expiration */ 292282e558eaSDawid Gorecki static void 292382e558eaSDawid Gorecki check_for_missing_keep_alive(struct ena_adapter *adapter) 29249b8d05b8SZbigniew Bodek { 29259b8d05b8SZbigniew Bodek sbintime_t timestamp, time; 29269b8d05b8SZbigniew Bodek 29279b8d05b8SZbigniew Bodek if (adapter->wd_active == 0) 29289b8d05b8SZbigniew Bodek return; 29299b8d05b8SZbigniew Bodek 293040621d71SMarcin Wojtas if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 29319b8d05b8SZbigniew Bodek return; 29329b8d05b8SZbigniew Bodek 29339b8d05b8SZbigniew Bodek timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); 29349b8d05b8SZbigniew Bodek time = getsbinuptime() - timestamp; 29359b8d05b8SZbigniew Bodek if (unlikely(time > adapter->keep_alive_timeout)) { 29363fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n"); 29379b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.wd_expired, 1); 29387926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); 29399b8d05b8SZbigniew Bodek } 2940858659f7SMarcin Wojtas } 29419b8d05b8SZbigniew Bodek 29429b8d05b8SZbigniew Bodek /* Check if admin queue is enabled */ 294382e558eaSDawid Gorecki static void 294482e558eaSDawid Gorecki check_for_admin_com_state(struct ena_adapter *adapter) 29459b8d05b8SZbigniew Bodek { 294682e558eaSDawid Gorecki if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) { 29473fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 29489b8d05b8SZbigniew Bodek "ENA admin queue is not in running state!\n"); 
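/* The admin queue stopped running: count the pause and request a device reset. */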
29499b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 29507926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO); 29519b8d05b8SZbigniew Bodek } 2952858659f7SMarcin Wojtas } 29539b8d05b8SZbigniew Bodek 295474dba3adSMarcin Wojtas static int 2955d12f7bfcSMarcin Wojtas check_for_rx_interrupt_queue(struct ena_adapter *adapter, 2956d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring) 2957d12f7bfcSMarcin Wojtas { 29580ac122c3SDawid Gorecki if (likely(atomic_load_8(&rx_ring->first_interrupt))) 2959d12f7bfcSMarcin Wojtas return (0); 2960d12f7bfcSMarcin Wojtas 2961d12f7bfcSMarcin Wojtas if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 2962d12f7bfcSMarcin Wojtas return (0); 2963d12f7bfcSMarcin Wojtas 2964d12f7bfcSMarcin Wojtas rx_ring->no_interrupt_event_cnt++; 2965d12f7bfcSMarcin Wojtas 296682e558eaSDawid Gorecki if (rx_ring->no_interrupt_event_cnt == 296782e558eaSDawid Gorecki ENA_MAX_NO_INTERRUPT_ITERATIONS) { 296882e558eaSDawid Gorecki ena_log(adapter->pdev, ERR, 296982e558eaSDawid Gorecki "Potential MSIX issue on Rx side Queue = %d. Reset the device\n", 297082e558eaSDawid Gorecki rx_ring->qid); 29717926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT); 2972d12f7bfcSMarcin Wojtas return (EIO); 2973d12f7bfcSMarcin Wojtas } 2974d12f7bfcSMarcin Wojtas 2975d12f7bfcSMarcin Wojtas return (0); 2976d12f7bfcSMarcin Wojtas } 2977d12f7bfcSMarcin Wojtas 2978d12f7bfcSMarcin Wojtas static int 2979d12f7bfcSMarcin Wojtas check_missing_comp_in_tx_queue(struct ena_adapter *adapter, 298074dba3adSMarcin Wojtas struct ena_ring *tx_ring) 298174dba3adSMarcin Wojtas { 29823fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 298374dba3adSMarcin Wojtas struct bintime curtime, time; 298474dba3adSMarcin Wojtas struct ena_tx_buffer *tx_buf; 2985d8aba82bSDawid Gorecki int time_since_last_cleanup; 2986d8aba82bSDawid Gorecki int missing_tx_comp_to; 2987d12f7bfcSMarcin Wojtas sbintime_t time_offset; 298874dba3adSMarcin Wojtas uint32_t missed_tx = 0; 2989d12f7bfcSMarcin Wojtas int i, rc = 0; 299074dba3adSMarcin Wojtas 299174dba3adSMarcin Wojtas getbinuptime(&curtime); 299274dba3adSMarcin Wojtas 299374dba3adSMarcin Wojtas for (i = 0; i < tx_ring->ring_size; i++) { 299474dba3adSMarcin Wojtas tx_buf = &tx_ring->tx_buffer_info[i]; 299574dba3adSMarcin Wojtas 29960bdffe59SMarcin Wojtas if (bintime_isset(&tx_buf->timestamp) == 0) 299774dba3adSMarcin Wojtas continue; 299874dba3adSMarcin Wojtas 299974dba3adSMarcin Wojtas time = curtime; 300074dba3adSMarcin Wojtas bintime_sub(&time, &tx_buf->timestamp); 3001d12f7bfcSMarcin Wojtas time_offset = bttosbt(time); 3002d12f7bfcSMarcin Wojtas 30030ac122c3SDawid Gorecki if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) && 3004d12f7bfcSMarcin Wojtas time_offset > 2 * adapter->missing_tx_timeout)) { 3005d12f7bfcSMarcin Wojtas /* 3006d12f7bfcSMarcin Wojtas * If after graceful period interrupt is still not 3007d12f7bfcSMarcin Wojtas * received, we schedule a reset. 3008d12f7bfcSMarcin Wojtas */ 30093fc5d816SMarcin Wojtas ena_log(pdev, ERR, 3010d12f7bfcSMarcin Wojtas "Potential MSIX issue on Tx side Queue = %d. 
" 301182e558eaSDawid Gorecki "Reset the device\n", 301282e558eaSDawid Gorecki tx_ring->qid); 30137926bc44SMarcin Wojtas ena_trigger_reset(adapter, 30147926bc44SMarcin Wojtas ENA_REGS_RESET_MISS_INTERRUPT); 3015d12f7bfcSMarcin Wojtas return (EIO); 3016d12f7bfcSMarcin Wojtas } 301774dba3adSMarcin Wojtas 301874dba3adSMarcin Wojtas /* Check again if packet is still waiting */ 3019d12f7bfcSMarcin Wojtas if (unlikely(time_offset > adapter->missing_tx_timeout)) { 302074dba3adSMarcin Wojtas 3021f01b2cd9SArthur Kiyanovski if (tx_buf->print_once) { 3022d8aba82bSDawid Gorecki time_since_last_cleanup = TICKS_2_USEC(ticks - 3023d8aba82bSDawid Gorecki tx_ring->tx_last_cleanup_ticks); 302482e558eaSDawid Gorecki missing_tx_comp_to = sbttoms( 302582e558eaSDawid Gorecki adapter->missing_tx_timeout); 302682e558eaSDawid Gorecki ena_log(pdev, WARN, 302782e558eaSDawid Gorecki "Found a Tx that wasn't completed on time, qid %d, index %d. " 302882e558eaSDawid Gorecki "%d usecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n", 3029d8aba82bSDawid Gorecki tx_ring->qid, i, time_since_last_cleanup, 3030d8aba82bSDawid Gorecki missing_tx_comp_to); 3031d8aba82bSDawid Gorecki } 303274dba3adSMarcin Wojtas 3033f01b2cd9SArthur Kiyanovski tx_buf->print_once = false; 303474dba3adSMarcin Wojtas missed_tx++; 3035d12f7bfcSMarcin Wojtas } 3036d12f7bfcSMarcin Wojtas } 303774dba3adSMarcin Wojtas 3038d12f7bfcSMarcin Wojtas if (unlikely(missed_tx > adapter->missing_tx_threshold)) { 30393fc5d816SMarcin Wojtas ena_log(pdev, ERR, 3040d12f7bfcSMarcin Wojtas "The number of lost tx completion is above the threshold " 3041d12f7bfcSMarcin Wojtas "(%d > %d). Reset the device\n", 30424e8acd84SMarcin Wojtas missed_tx, adapter->missing_tx_threshold); 30437926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL); 3044d12f7bfcSMarcin Wojtas rc = EIO; 304574dba3adSMarcin Wojtas } 304674dba3adSMarcin Wojtas 3047d12f7bfcSMarcin Wojtas counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx); 3048d12f7bfcSMarcin Wojtas 3049d12f7bfcSMarcin Wojtas return (rc); 305074dba3adSMarcin Wojtas } 305174dba3adSMarcin Wojtas 30529b8d05b8SZbigniew Bodek /* 30539b8d05b8SZbigniew Bodek * Check for TX which were not completed on time. 30549b8d05b8SZbigniew Bodek * Timeout is defined by "missing_tx_timeout". 30559b8d05b8SZbigniew Bodek * Reset will be performed if number of incompleted 30569b8d05b8SZbigniew Bodek * transactions exceeds "missing_tx_threshold". 
30579b8d05b8SZbigniew Bodek */ 30580bdffe59SMarcin Wojtas static void 3059d12f7bfcSMarcin Wojtas check_for_missing_completions(struct ena_adapter *adapter) 30609b8d05b8SZbigniew Bodek { 30619b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 3062d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring; 306374dba3adSMarcin Wojtas int i, budget, rc; 30649b8d05b8SZbigniew Bodek 30659b8d05b8SZbigniew Bodek /* Make sure the driver doesn't turn the device in other process */ 30669b8d05b8SZbigniew Bodek rmb(); 30679b8d05b8SZbigniew Bodek 3068fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 30699b8d05b8SZbigniew Bodek return; 30709b8d05b8SZbigniew Bodek 3071fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 30729b8d05b8SZbigniew Bodek return; 30739b8d05b8SZbigniew Bodek 307440621d71SMarcin Wojtas if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT) 30759b8d05b8SZbigniew Bodek return; 30769b8d05b8SZbigniew Bodek 30779b8d05b8SZbigniew Bodek budget = adapter->missing_tx_max_queues; 30789b8d05b8SZbigniew Bodek 30797d8c4feeSMarcin Wojtas for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) { 30809b8d05b8SZbigniew Bodek tx_ring = &adapter->tx_ring[i]; 3081d12f7bfcSMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 30829b8d05b8SZbigniew Bodek 3083d12f7bfcSMarcin Wojtas rc = check_missing_comp_in_tx_queue(adapter, tx_ring); 3084d12f7bfcSMarcin Wojtas if (unlikely(rc != 0)) 3085d12f7bfcSMarcin Wojtas return; 3086d12f7bfcSMarcin Wojtas 3087d12f7bfcSMarcin Wojtas rc = check_for_rx_interrupt_queue(adapter, rx_ring); 30880bdffe59SMarcin Wojtas if (unlikely(rc != 0)) 30899b8d05b8SZbigniew Bodek return; 30909b8d05b8SZbigniew Bodek 30919b8d05b8SZbigniew Bodek budget--; 3092cd5d5804SMarcin Wojtas if (budget == 0) { 30939b8d05b8SZbigniew Bodek i++; 30949b8d05b8SZbigniew Bodek break; 30959b8d05b8SZbigniew Bodek } 30969b8d05b8SZbigniew Bodek } 30979b8d05b8SZbigniew Bodek 30987d8c4feeSMarcin Wojtas adapter->next_monitored_tx_qid = i % adapter->num_io_queues; 30999b8d05b8SZbigniew Bodek } 31009b8d05b8SZbigniew Bodek 31015cb9db07SMarcin Wojtas /* trigger rx cleanup after 2 consecutive detections */ 3102efe6ab18SMarcin Wojtas #define EMPTY_RX_REFILL 2 3103efe6ab18SMarcin Wojtas /* For the rare case where the device runs out of Rx descriptors and the 3104efe6ab18SMarcin Wojtas * msix handler failed to refill new Rx descriptors (due to a lack of memory 3105efe6ab18SMarcin Wojtas * for example). 3106efe6ab18SMarcin Wojtas * This case will lead to a deadlock: 3107efe6ab18SMarcin Wojtas * The device won't send interrupts since all the new Rx packets will be dropped 3108efe6ab18SMarcin Wojtas * The msix handler won't allocate new Rx descriptors so the device won't be 3109efe6ab18SMarcin Wojtas * able to send new packets. 
3110efe6ab18SMarcin Wojtas * 3111efe6ab18SMarcin Wojtas * When such a situation is detected - execute rx cleanup task in another thread 3112efe6ab18SMarcin Wojtas */ 3113efe6ab18SMarcin Wojtas static void 3114efe6ab18SMarcin Wojtas check_for_empty_rx_ring(struct ena_adapter *adapter) 3115efe6ab18SMarcin Wojtas { 3116efe6ab18SMarcin Wojtas struct ena_ring *rx_ring; 3117efe6ab18SMarcin Wojtas int i, refill_required; 3118efe6ab18SMarcin Wojtas 3119fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 3120efe6ab18SMarcin Wojtas return; 3121efe6ab18SMarcin Wojtas 3122fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 3123efe6ab18SMarcin Wojtas return; 3124efe6ab18SMarcin Wojtas 31257d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 3126efe6ab18SMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 3127efe6ab18SMarcin Wojtas 312882e558eaSDawid Gorecki refill_required = ena_com_free_q_entries( 312982e558eaSDawid Gorecki rx_ring->ena_com_io_sq); 3130efe6ab18SMarcin Wojtas if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 3131efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue++; 3132efe6ab18SMarcin Wojtas 3133efe6ab18SMarcin Wojtas if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 3134efe6ab18SMarcin Wojtas counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 3135efe6ab18SMarcin Wojtas 1); 3136efe6ab18SMarcin Wojtas 31373fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 31383fc5d816SMarcin Wojtas "Rx ring %d is stalled. Triggering the refill function\n", 31393fc5d816SMarcin Wojtas i); 3140efe6ab18SMarcin Wojtas 31415cb9db07SMarcin Wojtas taskqueue_enqueue(rx_ring->que->cleanup_tq, 31425cb9db07SMarcin Wojtas &rx_ring->que->cleanup_task); 3143efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3144efe6ab18SMarcin Wojtas } 3145efe6ab18SMarcin Wojtas } else { 3146efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3147efe6ab18SMarcin Wojtas } 3148efe6ab18SMarcin Wojtas } 3149efe6ab18SMarcin Wojtas } 31509b8d05b8SZbigniew Bodek 315182e558eaSDawid Gorecki static void 315282e558eaSDawid Gorecki ena_update_hints(struct ena_adapter *adapter, 315340621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints) 315440621d71SMarcin Wojtas { 315540621d71SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 315640621d71SMarcin Wojtas 315740621d71SMarcin Wojtas if (hints->admin_completion_tx_timeout) 315840621d71SMarcin Wojtas ena_dev->admin_queue.completion_timeout = 315940621d71SMarcin Wojtas hints->admin_completion_tx_timeout * 1000; 316040621d71SMarcin Wojtas 316140621d71SMarcin Wojtas if (hints->mmio_read_timeout) 316240621d71SMarcin Wojtas /* convert to usec */ 316382e558eaSDawid Gorecki ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000; 316440621d71SMarcin Wojtas 316540621d71SMarcin Wojtas if (hints->missed_tx_completion_count_threshold_to_reset) 316640621d71SMarcin Wojtas adapter->missing_tx_threshold = 316740621d71SMarcin Wojtas hints->missed_tx_completion_count_threshold_to_reset; 316840621d71SMarcin Wojtas 316940621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout) { 317040621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout == 317140621d71SMarcin Wojtas ENA_HW_HINTS_NO_TIMEOUT) 317240621d71SMarcin Wojtas adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 317340621d71SMarcin Wojtas else 317482e558eaSDawid Gorecki adapter->missing_tx_timeout = SBT_1MS * 317582e558eaSDawid Gorecki hints->missing_tx_completion_timeout; 317640621d71SMarcin Wojtas } 317740621d71SMarcin Wojtas 317840621d71SMarcin Wojtas if 
(hints->driver_watchdog_timeout) { 317940621d71SMarcin Wojtas if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 318040621d71SMarcin Wojtas adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 318140621d71SMarcin Wojtas else 318282e558eaSDawid Gorecki adapter->keep_alive_timeout = SBT_1MS * 318382e558eaSDawid Gorecki hints->driver_watchdog_timeout; 318440621d71SMarcin Wojtas } 318540621d71SMarcin Wojtas } 318640621d71SMarcin Wojtas 3187f180142cSMarcin Wojtas /** 3188f180142cSMarcin Wojtas * ena_copy_eni_metrics - Get and copy ENI metrics from the HW. 3189f180142cSMarcin Wojtas * @adapter: ENA device adapter 3190f180142cSMarcin Wojtas * 3191f180142cSMarcin Wojtas * Returns 0 on success, EOPNOTSUPP if the current HW doesn't support those metrics 3192f180142cSMarcin Wojtas * and other error codes on failure. 3193f180142cSMarcin Wojtas * 3194f180142cSMarcin Wojtas * This function can possibly cause a race with other calls to the admin queue. 3195f180142cSMarcin Wojtas * Because of that, the caller should either lock this function or make sure 3196f180142cSMarcin Wojtas * that there is no race in the current context. 3197f180142cSMarcin Wojtas */ 3198f180142cSMarcin Wojtas static int 3199f180142cSMarcin Wojtas ena_copy_eni_metrics(struct ena_adapter *adapter) 3200f180142cSMarcin Wojtas { 3201f180142cSMarcin Wojtas static bool print_once = true; 3202f180142cSMarcin Wojtas int rc; 3203f180142cSMarcin Wojtas 3204f180142cSMarcin Wojtas rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics); 3205f180142cSMarcin Wojtas 3206f180142cSMarcin Wojtas if (rc != 0) { 3207f180142cSMarcin Wojtas if (rc == ENA_COM_UNSUPPORTED) { 3208f180142cSMarcin Wojtas if (print_once) { 32093fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 3210f180142cSMarcin Wojtas "Retrieving ENI metrics is not supported.\n"); 3211f180142cSMarcin Wojtas print_once = false; 3212f180142cSMarcin Wojtas } else { 32133fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, 3214f180142cSMarcin Wojtas "Retrieving ENI metrics is not supported.\n"); 3215f180142cSMarcin Wojtas } 3216f180142cSMarcin Wojtas } else { 32173fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 3218f180142cSMarcin Wojtas "Failed to get ENI metrics: %d\n", rc); 3219f180142cSMarcin Wojtas } 3220f180142cSMarcin Wojtas } 3221f180142cSMarcin Wojtas 3222f180142cSMarcin Wojtas return (rc); 3223f180142cSMarcin Wojtas } 3224f180142cSMarcin Wojtas 32259b8d05b8SZbigniew Bodek static void 32269b8d05b8SZbigniew Bodek ena_timer_service(void *data) 32279b8d05b8SZbigniew Bodek { 32289b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)data; 32299b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info = 32309b8d05b8SZbigniew Bodek adapter->ena_dev->host_attr.host_info; 32319b8d05b8SZbigniew Bodek 32329b8d05b8SZbigniew Bodek check_for_missing_keep_alive(adapter); 32339b8d05b8SZbigniew Bodek 32349b8d05b8SZbigniew Bodek check_for_admin_com_state(adapter); 32359b8d05b8SZbigniew Bodek 3236d12f7bfcSMarcin Wojtas check_for_missing_completions(adapter); 32379b8d05b8SZbigniew Bodek 3238efe6ab18SMarcin Wojtas check_for_empty_rx_ring(adapter); 3239efe6ab18SMarcin Wojtas 3240f180142cSMarcin Wojtas /* 3241f180142cSMarcin Wojtas * User-controlled update of the ENI metrics. 3242f180142cSMarcin Wojtas * If the delay was set to 0, then the stats shouldn't be updated at 3243f180142cSMarcin Wojtas * all. 3244f180142cSMarcin Wojtas * Otherwise, wait 'eni_metrics_sample_interval' seconds before 3245f180142cSMarcin Wojtas * updating the stats.
3246f180142cSMarcin Wojtas * As timer service is executed every second, it's enough to increment 3247f180142cSMarcin Wojtas * appropriate counter each time the timer service is executed. 3248f180142cSMarcin Wojtas */ 3249f180142cSMarcin Wojtas if ((adapter->eni_metrics_sample_interval != 0) && 3250f180142cSMarcin Wojtas (++adapter->eni_metrics_sample_interval_cnt >= 3251f180142cSMarcin Wojtas adapter->eni_metrics_sample_interval)) { 3252b899a02aSDawid Gorecki taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task); 3253f180142cSMarcin Wojtas adapter->eni_metrics_sample_interval_cnt = 0; 3254f180142cSMarcin Wojtas } 3255f180142cSMarcin Wojtas 3256f180142cSMarcin Wojtas 32570bdffe59SMarcin Wojtas if (host_info != NULL) 32589b8d05b8SZbigniew Bodek ena_update_host_info(host_info, adapter->ifp); 32599b8d05b8SZbigniew Bodek 3260fd43fd2aSMarcin Wojtas if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 3261d10ec3adSDawid Gorecki /* 3262d10ec3adSDawid Gorecki * Timeout when validating version indicates that the device 3263d10ec3adSDawid Gorecki * became unresponsive. If that happens skip the reset and 3264d10ec3adSDawid Gorecki * reschedule timer service, so the reset can be retried later. 3265d10ec3adSDawid Gorecki */ 3266d10ec3adSDawid Gorecki if (ena_com_validate_version(adapter->ena_dev) == 3267d10ec3adSDawid Gorecki ENA_COM_TIMER_EXPIRED) { 3268d10ec3adSDawid Gorecki ena_log(adapter->pdev, WARN, 3269d10ec3adSDawid Gorecki "FW unresponsive, skipping reset\n"); 3270d10ec3adSDawid Gorecki ENA_TIMER_RESET(adapter); 3271d10ec3adSDawid Gorecki return; 3272d10ec3adSDawid Gorecki } 32733fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, "Trigger reset is on\n"); 32749b8d05b8SZbigniew Bodek taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task); 32759b8d05b8SZbigniew Bodek return; 32769b8d05b8SZbigniew Bodek } 32779b8d05b8SZbigniew Bodek 32789b8d05b8SZbigniew Bodek /* 32799b8d05b8SZbigniew Bodek * Schedule another timeout one second from now. 
32809b8d05b8SZbigniew Bodek */ 328178554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 32829b8d05b8SZbigniew Bodek } 32839b8d05b8SZbigniew Bodek 328438c7b965SMarcin Wojtas void 328532f63fa7SMarcin Wojtas ena_destroy_device(struct ena_adapter *adapter, bool graceful) 32869b8d05b8SZbigniew Bodek { 328732f63fa7SMarcin Wojtas if_t ifp = adapter->ifp; 32889b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 32899b8d05b8SZbigniew Bodek bool dev_up; 329032f63fa7SMarcin Wojtas 329132f63fa7SMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) 329232f63fa7SMarcin Wojtas return; 329332f63fa7SMarcin Wojtas 3294*c59a5fbdSArthur Kiyanovski if (!graceful) 329532f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_DOWN); 329632f63fa7SMarcin Wojtas 329778554d0cSDawid Gorecki ENA_TIMER_DRAIN(adapter); 329832f63fa7SMarcin Wojtas 329932f63fa7SMarcin Wojtas dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 330032f63fa7SMarcin Wojtas if (dev_up) 330132f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 330232f63fa7SMarcin Wojtas 330332f63fa7SMarcin Wojtas if (!graceful) 330432f63fa7SMarcin Wojtas ena_com_set_admin_running_state(ena_dev, false); 330532f63fa7SMarcin Wojtas 330632f63fa7SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 330732f63fa7SMarcin Wojtas ena_down(adapter); 330832f63fa7SMarcin Wojtas 330932f63fa7SMarcin Wojtas /* 331032f63fa7SMarcin Wojtas * Stop the device from sending AENQ events (if the device was up, and 331132f63fa7SMarcin Wojtas * the trigger reset was on, ena_down already performs device reset) 331232f63fa7SMarcin Wojtas */ 331332f63fa7SMarcin Wojtas if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up)) 331432f63fa7SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 331532f63fa7SMarcin Wojtas 331632f63fa7SMarcin Wojtas ena_free_mgmnt_irq(adapter); 331732f63fa7SMarcin Wojtas 331832f63fa7SMarcin Wojtas ena_disable_msix(adapter); 331932f63fa7SMarcin Wojtas 3320e2735b09SMarcin Wojtas /* 3321e2735b09SMarcin Wojtas * IO rings resources should be freed because `ena_restore_device()` 3322e2735b09SMarcin Wojtas * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX 3323e2735b09SMarcin Wojtas * vectors. The amount of MSIX vectors after destroy-restore may be 3324e2735b09SMarcin Wojtas * different than before. Therefore, IO rings resources should be 3325e2735b09SMarcin Wojtas * established from scratch each time. 
static int
ena_device_validate_params(struct ena_adapter *adapter,
    struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
	    ETHER_ADDR_LEN) != 0) {
		ena_log(adapter->pdev, ERR, "Error, MAC addresses differ\n");
		return (EINVAL);
	}

	if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
		ena_log(adapter->pdev, ERR,
		    "Error, device max MTU is smaller than ifp MTU\n");
		return (EINVAL);
	}

	return (0);
}

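/*
 * Bring the device back up after ena_destroy_device(): re-initialize the
 * admin queue, verify that the device parameters did not change across the
 * reset, re-enable MSI-X and, if the interface was up before the reset,
 * re-create the IO queues and restart the datapath.
 */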
int
ena_restore_device(struct ena_adapter *adapter)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	if_t ifp = adapter->ifp;
	device_t dev = adapter->pdev;
	int wd_active;
	int rc;

	ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);

	rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
	if (rc != 0) {
		ena_log(dev, ERR, "Cannot initialize device\n");
		goto err;
	}
	/*
	 * Only enable WD if it was enabled before reset, so it won't override
	 * the value set by the user via the sysctl.
	 */
	if (adapter->wd_active != 0)
		adapter->wd_active = wd_active;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc != 0) {
		ena_log(dev, ERR, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	/* Make sure we don't have a race with the AENQ link state handler */
	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
		if_link_state_change(ifp, LINK_STATE_UP);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc != 0) {
		ena_log(dev, ERR, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}

	/*
	 * The effective number of used MSIX vectors should be the same as
	 * before `ena_destroy_device()`, if possible, or the closest to it if
	 * fewer vectors are available.
	 */
	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
		adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;

	/* Re-initialize rings basic information */
	ena_init_io_rings(adapter);

	/* If the interface was up before the reset, bring it up */
	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
		rc = ena_up(adapter);
		if (rc != 0) {
			ena_log(dev, ERR, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	/* Indicate that the device is running again and ready to work */
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);

	/*
	 * As the AENQ handlers weren't executed during the reset because the
	 * ENA_FLAG_DEVICE_RUNNING flag was turned off, the keep alive
	 * timestamp must be updated again. That will prevent the next reset
	 * caused by a missing keep alive.
	 */
	adapter->keep_alive_timestamp = getsbinuptime();
	ENA_TIMER_RESET(adapter);

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);

	return (rc);

err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	ena_log(dev, ERR, "Reset attempt failed. Cannot reset the device\n");

	return (rc);
}

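/*
 * Executed from the metrics taskqueue: take the global ENA lock and refresh
 * the ENI metrics via ena_copy_eni_metrics().
 */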
static void
ena_metrics_task(void *arg, int pending)
{
	struct ena_adapter *adapter = (struct ena_adapter *)arg;

	ENA_LOCK_LOCK();
	(void)ena_copy_eni_metrics(adapter);
	ENA_LOCK_UNLOCK();
}

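/*
 * Executed from the reset taskqueue: as long as the trigger reset flag is
 * still set, tear the device down and restore it under the global ENA lock.
 */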
static void
ena_reset_task(void *arg, int pending)
{
	struct ena_adapter *adapter = (struct ena_adapter *)arg;

	ENA_LOCK_LOCK();
	if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
		ena_destroy_device(adapter, false);
		ena_restore_device(adapter);

		ena_log(adapter->pdev, INFO,
		    "Device reset completed successfully, Driver info: %s\n",
		    ena_version);
	}
	ENA_LOCK_UNLOCK();
}

static void
ena_free_stats(struct ena_adapter *adapter)
{
	ena_free_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(struct ena_hw_stats));
	ena_free_counters((counter_u64_t *)&adapter->dev_stats,
	    sizeof(struct ena_stats_dev));
}

/**
 * ena_attach - Device Initialization Routine
 * @pdev: device information struct
 *
 * Returns 0 on success, or an error code on failure.
 *
 * ena_attach initializes an adapter identified by a device structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int
ena_attach(device_t pdev)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	static int version_printed;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev = NULL;
	uint32_t max_num_io_queues;
	int msix_rid;
	int rid, rc;

	adapter = device_get_softc(pdev);
	adapter->pdev = pdev;
	adapter->first_bind = -1;

	/*
	 * Set up the timer service - the driver is responsible for avoiding
	 * concurrency, as the callout won't be using any locking inside.
	 */
	ENA_TIMER_INIT(adapter);
	adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO;
	adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO;
	adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES;
	adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD;

	if (version_printed++ == 0)
		ena_log(pdev, INFO, "%s\n", ena_version);

	/* Allocate memory for the ena_dev structure */
	ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	adapter->ena_dev = ena_dev;
	ena_dev->dmadev = pdev;

	rid = PCIR_BAR(ENA_REG_BAR);
	adapter->memory = NULL;
	adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (unlikely(adapter->registers == NULL)) {
		ena_log(pdev, ERR,
		    "unable to allocate bus resource: registers!\n");
		rc = ENOMEM;
		goto err_dev_free;
	}

	/*
	 * The MSI-X vector table may reside on BAR0 with the registers or on
	 * BAR1.
	 */
	msix_rid = pci_msix_table_bar(pdev);
	if (msix_rid != rid) {
		adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
		    &msix_rid, RF_ACTIVE);
		if (unlikely(adapter->msix == NULL)) {
			ena_log(pdev, ERR,
			    "unable to allocate bus resource: msix!\n");
			rc = ENOMEM;
			goto err_pci_free;
		}
		adapter->msix_rid = msix_rid;
	}

	ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/* Store register resources */
	((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag(
	    adapter->registers);
	((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle(
	    adapter->registers);

	if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) {
		ena_log(pdev, ERR, "failed to pmap registers bar\n");
		rc = ENXIO;
		goto err_bus_free;
	}

	rc = ena_map_llq_mem_bar(pdev, ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to map ENA mem bar\n");
		goto err_bus_free;
	}

	/* Initially clear all the flags */
	ENA_FLAG_ZERO(adapter);

	/* Device initialization */
	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "ENA device init failed! (err: %d)\n", rc);
		rc = ENXIO;
		goto err_bus_free;
	}

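	/*
	 * When the TX placement policy keeps descriptors in device memory,
	 * honor the device's capability to run with meta descriptor caching
	 * disabled.
	 */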
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		adapter->disable_meta_caching = !!(
		    get_feat_ctx.llq.accel_mode.u.get.supported_flags &
		    BIT(ENA_ADMIN_DISABLE_META_CACHING));

	adapter->keep_alive_timestamp = getsbinuptime();

	adapter->tx_offload_cap = get_feat_ctx.offload.tx;

	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
	    ETHER_ADDR_LEN);

	calc_queue_ctx.pdev = pdev;
	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;

	/* Calculate initial and maximum IO queue number and size */
	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
	    &get_feat_ctx);
	rc = ena_calc_io_queue_size(&calc_queue_ctx);
	if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
		rc = EFAULT;
		goto err_com_free;
	}

	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;

	adapter->max_num_io_queues = max_num_io_queues;

	adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;

	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	/* set up dma tags for rx and tx buffers */
	rc = ena_setup_tx_dma_tag(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to create TX DMA tag\n");
		goto err_com_free;
	}

	rc = ena_setup_rx_dma_tag(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to create RX DMA tag\n");
		goto err_tx_tag_free;
	}

	/*
	 * The number of requested MSIX vectors is equal to
	 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a
	 * constant number of admin queue interrupts. The former is initially
	 * determined by HW capabilities (see `ena_calc_max_io_queue_num()`)
	 * but may not be achieved if there are not enough system resources.
	 * By default, the number of effectively used IO queues is the same,
	 * but later on it can be limited by the user via the sysctl interface.
	 */
	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Failed to enable and set the admin interrupts\n");
		goto err_io_free;
	}
	/* By default all of the allocated MSIX vectors are actively used */
	adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;

	/* Initialize rings basic information */
	ena_init_io_rings(adapter);

	/* Initialize statistics */
	ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
	    sizeof(struct ena_stats_dev));
	ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(struct ena_hw_stats));
	ena_sysctl_add_nodes(adapter);

	/* Set up the network interface */
	rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Error with network interface setup\n");
		goto err_msix_free;
	}

	/* Initialize reset task queue */
	TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
	adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
	taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq",
	    device_get_nameunit(adapter->pdev));

	/* Initialize metrics task queue */
	TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter);
	adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue",
	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq);
	taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq",
	    device_get_nameunit(adapter->pdev));

#ifdef DEV_NETMAP
	rc = ena_netmap_attach(adapter);
	if (rc != 0) {
		ena_log(pdev, ERR, "netmap attach failed: %d\n", rc);
		goto err_detach;
	}
#endif /* DEV_NETMAP */

	/* Tell the stack that the interface is not active */
	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);

	/* Run the timer service */
	ENA_TIMER_RESET(adapter);

	return (0);

#ifdef DEV_NETMAP
err_detach:
	ether_ifdetach(adapter->ifp);
#endif /* DEV_NETMAP */
err_msix_free:
	ena_free_stats(adapter);
	ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_io_free:
	ena_free_all_io_rings_resources(adapter);
	ena_free_rx_dma_tag(adapter);
err_tx_tag_free:
	ena_free_tx_dma_tag(adapter);
err_com_free:
	ena_com_admin_destroy(ena_dev);
	ena_com_delete_host_info(ena_dev);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err_bus_free:
	free(ena_dev->bus, M_DEVBUF);
err_pci_free:
	ena_free_pci_resources(adapter);
err_dev_free:
	free(ena_dev, M_DEVBUF);

	return (rc);
}

/**
 * ena_detach - Device Removal Routine
 * @pdev: device information struct
 *
 * ena_detach is called by the device subsystem to alert the driver
 * that it should release a PCI device.
 **/
static int
ena_detach(device_t pdev)
{
	struct ena_adapter *adapter = device_get_softc(pdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* Make sure VLANs are not using the driver */
	if (if_vlantrunkinuse(adapter->ifp)) {
		ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n");
		return (EBUSY);
	}

	ether_ifdetach(adapter->ifp);

	/* Stop the timer service */
	ENA_LOCK_LOCK();
	ENA_TIMER_DRAIN(adapter);
	ENA_LOCK_UNLOCK();

	/* Release the metrics task */
	while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL))
		taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task);
	taskqueue_free(adapter->metrics_tq);

	/* Release the reset task */
	while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
		taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
	taskqueue_free(adapter->reset_tq);

	ENA_LOCK_LOCK();
	ena_down(adapter);
	ena_destroy_device(adapter, true);
	ENA_LOCK_UNLOCK();

	/* Restore unregistered sysctl queue nodes. */
	ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues,
	    adapter->max_num_io_queues);

#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */

	ena_free_stats(adapter);

	rc = ena_free_rx_dma_tag(adapter);
	if (unlikely(rc != 0))
		ena_log(adapter->pdev, WARN,
		    "Unmapped RX DMA tag associations\n");

	rc = ena_free_tx_dma_tag(adapter);
	if (unlikely(rc != 0))
		ena_log(adapter->pdev, WARN,
		    "Unmapped TX DMA tag associations\n");

	ena_free_irqs(adapter);

	ena_free_pci_resources(adapter);

	if (adapter->rss_indir != NULL)
		free(adapter->rss_indir, M_DEVBUF);

	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
		ena_com_rss_destroy(ena_dev);

	ena_com_delete_host_info(ena_dev);

	if_free(adapter->ifp);

	free(ena_dev->bus, M_DEVBUF);

	free(ena_dev, M_DEVBUF);

	return (bus_generic_detach(pdev));
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/**
 * ena_update_on_link_change:
 * Notify the network interface about the change in link status
 **/
static void
ena_update_on_link_change(void *adapter_data,
    struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc;
	int status;
	if_t ifp;

	aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
	ifp = adapter->ifp;
	status = aenq_desc->flags &
	    ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status != 0) {
		ena_log(adapter->pdev, INFO, "link is UP\n");
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
		if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
			if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		ena_log(adapter->pdev, INFO, "link is DOWN\n");
		if_link_state_change(ifp, LINK_STATE_DOWN);
		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
	}
}

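/*
 * Handler for ENA_ADMIN_NOTIFICATION AENQ events. Only hardware hints
 * updates are handled here; any other syndrome is reported as an error.
 */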
static void
ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	    adapter->ena_dev, "Invalid group(%x) expected %x\n",
	    aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints =
		    (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		ena_log(adapter->pdev, ERR,
		    "Invalid aenq notification link state %d\n",
		    aenq_e->aenq_common_desc.syndrome);
	}
}

static void
ena_lock_init(void *arg)
{
	ENA_LOCK_INIT();
}
SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);

static void
ena_lock_uninit(void *arg)
{
	ENA_LOCK_DESTROY();
}
SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);

/**
 * This handler will be called for an unknown event group or for events with
 * unimplemented handlers.
 **/
static void
unimplemented_aenq_handler(void *adapter_data,
    struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	ena_log(adapter->pdev, ERR,
	    "Unknown event was received or event with unimplemented handler\n");
}

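/*
 * Dispatch table mapping AENQ event groups to the handlers above; events
 * without a dedicated handler fall back to unimplemented_aenq_handler.
 */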
static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ena_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ena_probe),
	DEVMETHOD(device_attach, ena_attach),
	DEVMETHOD(device_detach, ena_detach),

	DEVMETHOD_END
};

static driver_t ena_driver = {
	"ena",
	ena_methods,
	sizeof(struct ena_adapter),
};

DRIVER_MODULE(ena, pci, ena_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
    nitems(ena_vendor_info_array) - 1);
MODULE_DEPEND(ena, pci, 1, 1, 1);
MODULE_DEPEND(ena, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ena, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*********************************************************************/