/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2021 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/in_cksum.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include "ena_datapath.h"
#include "ena.h"
#include "ena_sysctl.h"
#include "ena_rss.h"

#ifdef DEV_NETMAP
#include "ena_netmap.h"
#endif /* DEV_NETMAP */

/*********************************************************
 *  Function prototypes
 *********************************************************/
static int ena_probe(device_t);
static void ena_intr_msix_mgmnt(void *);
static void ena_free_pci_resources(struct ena_adapter *);
static int ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void ena_init_io_rings_common(struct ena_adapter *,
    struct ena_ring *, uint16_t);
static void ena_init_io_rings_basic(struct ena_adapter *);
static void ena_init_io_rings_advanced(struct ena_adapter *);
static void ena_init_io_rings(struct ena_adapter *);
static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void ena_free_all_io_rings_resources(struct ena_adapter *);
static int ena_setup_tx_dma_tag(struct ena_adapter *);
static int ena_free_tx_dma_tag(struct ena_adapter *);
static int ena_setup_rx_dma_tag(struct ena_adapter *);
static int ena_free_rx_dma_tag(struct ena_adapter *);
static void ena_release_all_tx_dmamap(struct ena_ring *);
static int ena_setup_tx_resources(struct ena_adapter *, int);
static void ena_free_tx_resources(struct ena_adapter *, int);
static int ena_setup_all_tx_resources(struct ena_adapter *);
static void ena_free_all_tx_resources(struct ena_adapter *);
static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int ena_setup_all_rx_resources(struct ena_adapter *);
static void ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void ena_refill_all_rx_bufs(struct ena_adapter *);
static void ena_free_all_rx_bufs(struct ena_adapter *);
static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void ena_free_all_tx_bufs(struct ena_adapter *);
static void ena_destroy_all_tx_queues(struct ena_adapter *);
static void ena_destroy_all_rx_queues(struct ena_adapter *);
static void ena_destroy_all_io_queues(struct ena_adapter *);
static int ena_create_io_queues(struct ena_adapter *);
static int ena_handle_msix(void *);
static int ena_enable_msix(struct ena_adapter *);
static void ena_setup_mgmnt_intr(struct ena_adapter *);
static int ena_setup_io_intr(struct ena_adapter *);
static int ena_request_mgmnt_irq(struct ena_adapter *);
static int ena_request_io_irq(struct ena_adapter *);
static void ena_free_mgmnt_irq(struct ena_adapter *);
static void ena_free_io_irq(struct ena_adapter *);
static void ena_free_irqs(struct ena_adapter *);
static void ena_disable_msix(struct ena_adapter *);
static void ena_unmask_all_io_irqs(struct ena_adapter *);
static int ena_up_complete(struct ena_adapter *);
static uint64_t ena_get_counter(if_t, ift_counter);
static int ena_media_change(if_t);
static void ena_media_status(if_t, struct ifmediareq *);
static void ena_init(void *);
static int ena_ioctl(if_t, u_long, caddr_t);
static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void ena_update_host_info(struct ena_admin_host_info *, if_t);
static void ena_update_hwassist(struct ena_adapter *);
static int ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int ena_enable_wc(device_t, struct resource *);
static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
    struct ena_com_dev_get_features_ctx *);
static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
static void ena_config_host_info(struct ena_com_dev *, device_t);
static int ena_attach(device_t);
static int ena_detach(device_t);
static int ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void unimplemented_aenq_handler(void *,
    struct ena_admin_aenq_entry *);
static int ena_copy_eni_metrics(struct ena_adapter *);
static void ena_timer_service(void *);

static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;

static ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
	/* Last entry */
	{ 0, 0, 0 }
};

struct sx ena_global_lock;

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;

void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

int
ena_dma_alloc(device_t dmadev, bus_size_t size,
    ena_mem_handle_t *dma, int mapflags, bus_size_t alignment, int domain)
{
	struct ena_adapter *adapter = device_get_softc(dmadev);
	device_t pdev = adapter->pdev;
	uint32_t maxsize;
	uint64_t dma_space_addr;
	int error;

	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
	if (unlikely(dma_space_addr == 0))
		dma_space_addr = BUS_SPACE_MAXADDR;

	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
	    alignment, 0,	/* alignment, bounds */
	    dma_space_addr,	/* lowaddr of exclusion window */
	    BUS_SPACE_MAXADDR,	/* highaddr of exclusion window */
	    NULL, NULL,		/* filter, filterarg */
	    maxsize,		/* maxsize */
	    1,			/* nsegments */
	    maxsize,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockarg */
	    &dma->tag);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
		goto fail_tag;
	}

	error = bus_dma_tag_set_domain(dma->tag, domain);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n",
		    error);
		goto fail_map_create;
	}

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
		    (uintmax_t)size, error);
		goto fail_map_create;
	}

	dma->paddr = 0;
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
	    size, ena_dmamap_callback, &dma->paddr, mapflags);
	if (unlikely((error != 0) || (dma->paddr == 0))) {
		ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
		goto fail_map_load;
	}

	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail_map_load:
	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
	bus_dma_tag_destroy(dma->tag);
fail_tag:
	dma->tag = NULL;
	dma->vaddr = NULL;
	dma->paddr = 0;

	return (error);
}

static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;

	if (adapter->memory != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
	}

	if (adapter->registers != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
	}

	if (adapter->msix != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    adapter->msix_rid, adapter->msix);
	}
}

static int
ena_probe(device_t dev)
{
	ena_vendor_info_t *ent;
	char adapter_name[60];
	uint16_t pci_vendor_id = 0;
	uint16_t pci_device_id = 0;

	pci_vendor_id = pci_get_vendor(dev);
	pci_device_id = pci_get_device(dev);

	ent = ena_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id)) {
			ena_log_raw(DBG, "vendor=%x device=%x\n",
			    pci_vendor_id, pci_device_id);

			sprintf(adapter_name, DEVICE_DESC);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}

		ent++;
	}

	return (ENXIO);
}

static int
ena_change_mtu(if_t ifp, int new_mtu)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	device_t pdev = adapter->pdev;
	int rc;

	if ((new_mtu > adapter->max_mtu) ||
	    (new_mtu < ENA_MIN_MTU)) {
		ena_log(pdev, ERR, "Invalid MTU setting. "
		    "new_mtu: %d max mtu: %d min mtu: %d\n",
		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
		return (EINVAL);
	}

	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (likely(rc == 0)) {
		ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
		if_setmtu(ifp, new_mtu);
	} else {
		ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
	}

	return (rc);
}

static inline void
ena_alloc_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		*begin = counter_u64_alloc(M_WAITOK);
}

static inline void
ena_free_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_free(*begin);
}

static inline void
ena_reset_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_zero(*begin);
}

static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{

	ring->qid = qid;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
}

static void
ena_init_io_rings_basic(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	struct ena_que *que;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;

		que = &adapter->que[i];
		que->adapter = adapter;
		que->id = i;
		que->tx_ring = txr;
		que->rx_ring = rxr;

		txr->que = que;
		rxr->que = que;

		rxr->empty_rx_queue = 0;
		rxr->rx_mbuf_sz = ena_mbuf_sz;
	}
}

static void
ena_init_io_rings_advanced(struct ena_adapter *adapter)
{
	struct ena_ring *txr, *rxr;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* Allocate a buf ring */
		txr->buf_ring_size = adapter->buf_ring_size;
		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF,
		    M_WAITOK, &txr->ring_mtx);

		/* Allocate Tx statistics. */
		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
		    sizeof(txr->tx_stats));
		txr->tx_last_cleanup_ticks = ticks;

		/* Allocate Rx statistics. */
		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
		    sizeof(rxr->rx_stats));

		/* Initialize locks */
		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(adapter->pdev), i);
		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(adapter->pdev), i);

		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
	}
}

static void
ena_init_io_rings(struct ena_adapter *adapter)
{
	/*
	 * IO rings initialization can be divided into the 2 steps:
	 * 1. Initialize variables and fields with initial values and copy
	 *    them from adapter/ena_dev (basic)
	 * 2. Allocate mutex, counters and buf_ring (advanced)
	 */
	ena_init_io_rings_basic(adapter);
	ena_init_io_rings_advanced(adapter);
}

static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *txr = &adapter->tx_ring[qid];
	struct ena_ring *rxr = &adapter->rx_ring[qid];

	ena_free_counters((counter_u64_t *)&txr->tx_stats,
	    sizeof(txr->tx_stats));
	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
	    sizeof(rxr->rx_stats));

	ENA_RING_MTX_LOCK(txr);
	drbr_free(txr->br, M_DEVBUF);
	ENA_RING_MTX_UNLOCK(txr);

	mtx_destroy(&txr->ring_mtx);
}

static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_io_ring_resources(adapter, i);

}

static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Tx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
	    1, 0,					  /* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg */
	    ENA_TSO_MAXSIZE,			  /* maxsize */
	    adapter->max_tx_sgl_size - 1,	  /* nsegments */
	    ENA_TSO_MAXSIZE,			  /* maxsegsize */
	    0,					  /* flags */
	    NULL,				  /* lockfunc */
	    NULL,				  /* lockfuncarg */
	    &adapter->tx_buf_tag);

	return (ret);
}

static int
ena_free_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);

	if (likely(ret == 0))
		adapter->tx_buf_tag = NULL;

	return (ret);
}

static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Rx buffers */
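	/*
	 * Mappings created with this tag are limited to ena_mbuf_sz bytes in
	 * total, may span up to max_rx_sgl_size segments and must fall below
	 * the DMA address width reported by the device.
	 */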
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
	    1, 0,					  /* alignment, bounds */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg */
	    ena_mbuf_sz,			  /* maxsize */
	    adapter->max_rx_sgl_size,		  /* nsegments */
	    ena_mbuf_sz,			  /* maxsegsize */
	    0,					  /* flags */
	    NULL,				  /* lockfunc */
	    NULL,				  /* lockarg */
	    &adapter->rx_buf_tag);

	return (ret);
}

static int
ena_free_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);

	if (likely(ret == 0))
		adapter->rx_buf_tag = NULL;

	return (ret);
}

static void
ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_tx_buffer *tx_info;
	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
	int i;
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	for (i = 0; i < tx_ring->ring_size; ++i) {
		tx_info = &tx_ring->tx_buffer_info[i];
#ifdef DEV_NETMAP
		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
			nm_info = &tx_info->nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
				if (nm_info->map_seg[j] != NULL) {
					bus_dmamap_destroy(tx_tag,
					    nm_info->map_seg[j]);
					nm_info->map_seg[j] = NULL;
				}
			}
		}
#endif /* DEV_NETMAP */
		if (tx_info->dmamap != NULL) {
			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
			tx_info->dmamap = NULL;
		}
	}
}

/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
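 *
 * Allocates the tx_buffer_info array, the free_tx_ids stack, the push-mode
 * intermediate buffer, per-buffer DMA maps and the deferred-start enqueue
 * taskqueue for the ring.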
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	device_t pdev = adapter->pdev;
	char thread_name[MAXCOMLEN + 1];
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *tx_ring = que->tx_ring;
	cpuset_t *cpu_mask = NULL;
	int size, i, err;
#ifdef DEV_NETMAP
	bus_dmamap_t *map;
	int j;

	ena_netmap_reset_tx_ring(adapter, qid);
#endif /* DEV_NETMAP */

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->tx_buffer_info == NULL))
		return (ENOMEM);

	size = sizeof(uint16_t) * tx_ring->ring_size;
	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->free_tx_ids == NULL))
		goto err_buf_info_free;

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
		goto err_tx_ids_free;

	/* Req id stack for TX OOO completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset TX statistics. */
	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
	    sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->acum_pkts = 0;

	/* Make sure that drbr is empty */
	ENA_RING_MTX_LOCK(tx_ring);
	drbr_flush(adapter->ifp, tx_ring->br);
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* ... and create the buffer DMA maps */
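	/*
	 * One DMA map is created per Tx descriptor slot; with netmap enabled,
	 * each slot additionally gets ENA_PKT_MAX_BUFS per-segment maps.
	 */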
	for (i = 0; i < tx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
		    &tx_ring->tx_buffer_info[i].dmamap);
		if (unlikely(err != 0)) {
			ena_log(pdev, ERR,
			    "Unable to create Tx DMA map for buffer %d\n",
			    i);
			goto err_map_release;
		}

#ifdef DEV_NETMAP
		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
				    &map[j]);
				if (unlikely(err != 0)) {
					ena_log(pdev, ERR,
					    "Unable to create Tx DMA for buffer %d %d\n",
					    i, j);
					goto err_map_release;
				}
			}
		}
#endif /* DEV_NETMAP */
	}

	/* Allocate taskqueues */
	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
	if (unlikely(tx_ring->enqueue_tq == NULL)) {
		ena_log(pdev, ERR,
		    "Unable to create taskqueue for enqueue task\n");
		i = tx_ring->ring_size;
		goto err_map_release;
	}

	tx_ring->running = true;

#ifdef RSS
	cpu_mask = &que->cpu_mask;
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->cpu);
#else
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->id);
#endif
	taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
	    cpu_mask, "%s", thread_name);

	return (0);

err_map_release:
	ena_release_all_tx_dmamap(tx_ring);
err_tx_ids_free:
	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
err_buf_info_free:
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	return (ENOMEM);
}

/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
	    NULL))
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	taskqueue_free(tx_ring->enqueue_tq);

	ENA_RING_MTX_LOCK(tx_ring);
	/* Flush buffer ring, */
	drbr_flush(adapter->ifp, tx_ring->br);

	/* Free buffer DMA maps, */
	for (int i = 0; i < tx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);

#ifdef DEV_NETMAP
		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				if (nm_info->socket_buf_idx[j] != 0) {
					bus_dmamap_sync(adapter->tx_buf_tag,
					    nm_info->map_seg[j],
					    BUS_DMASYNC_POSTWRITE);
					ena_netmap_unload(adapter,
					    nm_info->map_seg[j]);
				}
				bus_dmamap_destroy(adapter->tx_buf_tag,
				    nm_info->map_seg[j]);
				nm_info->socket_buf_idx[j] = 0;
			}
		}
#endif /* DEV_NETMAP */

		m_freem(tx_ring->tx_buffer_info[i].mbuf);
		tx_ring->tx_buffer_info[i].mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* And free allocated memory. */
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;

	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
	tx_ring->push_buf_intermediate_buf = NULL;
}

/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Tx Queue %u failed\n", i);
			goto err_setup_tx;
		}
	}

	return (0);

err_setup_tx:
	/* Rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_tx_resources(adapter, i);
}

/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
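 *
 * Allocates the rx_buffer_info array (with one extra element so the Rx path
 * can always prefetch rx_info + 1), the free_rx_ids stack, one DMA map per
 * buffer and, when LRO is enabled on the interface, the LRO context.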
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	device_t pdev = adapter->pdev;
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *rx_ring = que->rx_ring;
	int size, err, i;

	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

#ifdef DEV_NETMAP
	ena_netmap_reset_rx_ring(adapter, qid);
	rx_ring->initialized = false;
#endif /* DEV_NETMAP */

	/*
	 * Alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size += sizeof(struct ena_rx_buffer);

	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	size = sizeof(uint16_t) * rx_ring->ring_size;
	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset RX statistics. */
	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
	    sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* ... and create the buffer DMA maps */
	for (i = 0; i < rx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
		    &(rx_ring->rx_buffer_info[i].map));
		if (err != 0) {
			ena_log(pdev, ERR,
			    "Unable to create Rx DMA map for buffer %d\n", i);
			goto err_buf_info_unmap;
		}
	}

	/* Create LRO for the ring */
	if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
		int err = tcp_lro_init(&rx_ring->lro);
		if (err != 0) {
			ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
			    qid);
		} else {
			ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
			    qid);
			rx_ring->lro.ifp = adapter->ifp;
		}
	}

	return (0);

err_buf_info_unmap:
	while (i--) {
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;
	return (ENOMEM);
}

/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	/* Free buffer DMA maps, */
	for (int i = 0; i < rx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
		m_freem(rx_ring->rx_buffer_info[i].mbuf);
		rx_ring->rx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	/* free LRO resources, */
	tcp_lro_free(&rx_ring->lro);

	/* free allocated memory */
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
}

/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Rx Queue %u failed\n", i);
			goto err_setup_rx;
		}
	}
	return (0);

err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	device_t pdev = adapter->pdev;
	struct ena_com_buf *ena_buf;
	bus_dma_segment_t segs[1];
	int nsegs, error;
	int mlen;

	/* if previous allocated frag is not used */
	if (unlikely(rx_info->mbuf != NULL))
		return (0);

	/* Get mbuf using UMA allocator */
	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    rx_ring->rx_mbuf_sz);

	if (unlikely(rx_info->mbuf == NULL)) {
		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(rx_info->mbuf == NULL)) {
			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
			return (ENOMEM);
		}
		mlen = MCLBYTES;
	} else {
		mlen = rx_ring->rx_mbuf_sz;
	}
	/* Set mbuf length */
	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

	/* Map packets for DMA */
	ena_log(pdev, DBG, "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely((error != 0) || (nsegs != 1))) {
		ena_log(pdev, WARN,
		    "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
		goto exit;

	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = segs[0].ds_addr;
	ena_buf->len = mlen;

	ena_log(pdev, DBG, "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

	return (0);

exit:
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
	return (EFAULT);
}

static void
ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{

	if (rx_info->mbuf == NULL) {
		ena_log(adapter->pdev, WARN,
		    "Trying to free unallocated buffer\n");
		return;
	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
}

/**
 * ena_refill_rx_bufs - Refills ring with descriptors
 * @rx_ring: the ring which we want to feed with free descriptors
 * @num: number of descriptors to refill
 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
 **/
int
ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
{
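	/*
	 * Each refilled descriptor takes a free req_id, attaches a newly
	 * allocated (or netmap-backed) buffer to it and posts it to the Rx
	 * submission queue; the doorbell is written once for the whole batch.
	 */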
10879b8d05b8SZbigniew Bodek struct ena_adapter *adapter = rx_ring->adapter; 10883fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 108943fefd16SMarcin Wojtas uint16_t next_to_use, req_id; 10909b8d05b8SZbigniew Bodek uint32_t i; 10919b8d05b8SZbigniew Bodek int rc; 10929b8d05b8SZbigniew Bodek 10933fc5d816SMarcin Wojtas ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid); 10949b8d05b8SZbigniew Bodek 10959b8d05b8SZbigniew Bodek next_to_use = rx_ring->next_to_use; 10969b8d05b8SZbigniew Bodek 10979b8d05b8SZbigniew Bodek for (i = 0; i < num; i++) { 109843fefd16SMarcin Wojtas struct ena_rx_buffer *rx_info; 109943fefd16SMarcin Wojtas 11003fc5d816SMarcin Wojtas ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n", 11013fc5d816SMarcin Wojtas next_to_use); 11029b8d05b8SZbigniew Bodek 110343fefd16SMarcin Wojtas req_id = rx_ring->free_rx_ids[next_to_use]; 110443fefd16SMarcin Wojtas rx_info = &rx_ring->rx_buffer_info[req_id]; 11059a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 1106358bcc4cSMarcin Wojtas if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) 11079a0f2079SMarcin Wojtas rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info); 11089a0f2079SMarcin Wojtas else 11099a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 11109b8d05b8SZbigniew Bodek rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); 11113f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 11123fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11134e8acd84SMarcin Wojtas "failed to alloc buffer for rx queue %d\n", 11144e8acd84SMarcin Wojtas rx_ring->qid); 11159b8d05b8SZbigniew Bodek break; 11169b8d05b8SZbigniew Bodek } 11179b8d05b8SZbigniew Bodek rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, 111843fefd16SMarcin Wojtas &rx_info->ena_buf, req_id); 11190bdffe59SMarcin Wojtas if (unlikely(rc != 0)) { 11203fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11219b8d05b8SZbigniew Bodek "failed to add buffer for rx queue %d\n", 11229b8d05b8SZbigniew Bodek rx_ring->qid); 11239b8d05b8SZbigniew Bodek break; 11249b8d05b8SZbigniew Bodek } 11259b8d05b8SZbigniew Bodek next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, 11269b8d05b8SZbigniew Bodek rx_ring->ring_size); 11279b8d05b8SZbigniew Bodek } 11289b8d05b8SZbigniew Bodek 11293f9ed7abSMarcin Wojtas if (unlikely(i < num)) { 11309b8d05b8SZbigniew Bodek counter_u64_add(rx_ring->rx_stats.refil_partial, 1); 11313fc5d816SMarcin Wojtas ena_log_io(pdev, WARN, 11324e8acd84SMarcin Wojtas "refilled rx qid %d with only %d mbufs (from %d)\n", 11334e8acd84SMarcin Wojtas rx_ring->qid, i, num); 11349b8d05b8SZbigniew Bodek } 11359b8d05b8SZbigniew Bodek 11368483b844SMarcin Wojtas if (likely(i != 0)) 11379b8d05b8SZbigniew Bodek ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); 11388483b844SMarcin Wojtas 11399b8d05b8SZbigniew Bodek rx_ring->next_to_use = next_to_use; 11409b8d05b8SZbigniew Bodek return (i); 11419b8d05b8SZbigniew Bodek } 11429b8d05b8SZbigniew Bodek 11437d8c4feeSMarcin Wojtas int 114421823546SMarcin Wojtas ena_update_buf_ring_size(struct ena_adapter *adapter, 114521823546SMarcin Wojtas uint32_t new_buf_ring_size) 114621823546SMarcin Wojtas { 114721823546SMarcin Wojtas uint32_t old_buf_ring_size; 114821823546SMarcin Wojtas int rc = 0; 114921823546SMarcin Wojtas bool dev_was_up; 115021823546SMarcin Wojtas 115121823546SMarcin Wojtas old_buf_ring_size = adapter->buf_ring_size; 115221823546SMarcin Wojtas adapter->buf_ring_size = new_buf_ring_size; 115321823546SMarcin Wojtas 115421823546SMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 115521823546SMarcin Wojtas ena_down(adapter); 
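	/*
	 * Descriptive note: the runtime reconfiguration paths in this file
	 * (buf ring size, queue sizes, queue count) all follow the same
	 * pattern seen here: stop the interface, rebuild the IO rings with
	 * the new parameters and, if the device was up before, bring it back
	 * up, reverting to the old values and triggering a reset on failure.
	 */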
115621823546SMarcin Wojtas 115721823546SMarcin Wojtas /* Reconfigure buf ring for all Tx rings. */ 115821823546SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 115921823546SMarcin Wojtas ena_init_io_rings_advanced(adapter); 116021823546SMarcin Wojtas if (dev_was_up) { 116121823546SMarcin Wojtas /* 116221823546SMarcin Wojtas * If ena_up() fails, it's not because of recent buf_ring size 116321823546SMarcin Wojtas * changes. Because of that, we just want to revert old drbr 116421823546SMarcin Wojtas * value and trigger the reset because something else had to 116521823546SMarcin Wojtas * go wrong. 116621823546SMarcin Wojtas */ 116721823546SMarcin Wojtas rc = ena_up(adapter); 116821823546SMarcin Wojtas if (unlikely(rc != 0)) { 11693fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 117021823546SMarcin Wojtas "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n", 117121823546SMarcin Wojtas new_buf_ring_size, old_buf_ring_size); 117221823546SMarcin Wojtas 117321823546SMarcin Wojtas /* Revert old size and trigger the reset */ 117421823546SMarcin Wojtas adapter->buf_ring_size = old_buf_ring_size; 117521823546SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 117621823546SMarcin Wojtas ena_init_io_rings_advanced(adapter); 117721823546SMarcin Wojtas 117821823546SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, 117921823546SMarcin Wojtas adapter); 118021823546SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER); 118121823546SMarcin Wojtas 118221823546SMarcin Wojtas } 118321823546SMarcin Wojtas } 118421823546SMarcin Wojtas 118521823546SMarcin Wojtas return (rc); 118621823546SMarcin Wojtas } 118721823546SMarcin Wojtas 118821823546SMarcin Wojtas int 11897d8c4feeSMarcin Wojtas ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size, 11907d8c4feeSMarcin Wojtas uint32_t new_rx_size) 11917d8c4feeSMarcin Wojtas { 11927d8c4feeSMarcin Wojtas uint32_t old_tx_size, old_rx_size; 11937d8c4feeSMarcin Wojtas int rc = 0; 11947d8c4feeSMarcin Wojtas bool dev_was_up; 11957d8c4feeSMarcin Wojtas 11969762a033SMarcin Wojtas old_tx_size = adapter->requested_tx_ring_size; 11979762a033SMarcin Wojtas old_rx_size = adapter->requested_rx_ring_size; 11989762a033SMarcin Wojtas adapter->requested_tx_ring_size = new_tx_size; 11999762a033SMarcin Wojtas adapter->requested_rx_ring_size = new_rx_size; 12007d8c4feeSMarcin Wojtas 12017d8c4feeSMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 12027d8c4feeSMarcin Wojtas ena_down(adapter); 12037d8c4feeSMarcin Wojtas 12047d8c4feeSMarcin Wojtas /* Configure queues with new size. */ 12057d8c4feeSMarcin Wojtas ena_init_io_rings_basic(adapter); 12067d8c4feeSMarcin Wojtas if (dev_was_up) { 12077d8c4feeSMarcin Wojtas rc = ena_up(adapter); 12087d8c4feeSMarcin Wojtas if (unlikely(rc != 0)) { 12093fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 12107d8c4feeSMarcin Wojtas "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n", 12117d8c4feeSMarcin Wojtas new_tx_size, new_rx_size, old_tx_size, old_rx_size); 12127d8c4feeSMarcin Wojtas 12137d8c4feeSMarcin Wojtas /* Revert old size. */ 12149762a033SMarcin Wojtas adapter->requested_tx_ring_size = old_tx_size; 12159762a033SMarcin Wojtas adapter->requested_rx_ring_size = old_rx_size; 12167d8c4feeSMarcin Wojtas ena_init_io_rings_basic(adapter); 12177d8c4feeSMarcin Wojtas 12187d8c4feeSMarcin Wojtas /* And try again. 
*/ 12197d8c4feeSMarcin Wojtas rc = ena_up(adapter); 12207d8c4feeSMarcin Wojtas if (unlikely(rc != 0)) { 12213fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 12227d8c4feeSMarcin Wojtas "Failed to revert old queue sizes. Triggering device reset.\n"); 12237d8c4feeSMarcin Wojtas /* 12247d8c4feeSMarcin Wojtas * If we've failed again, something had to go 12257d8c4feeSMarcin Wojtas * wrong. After reset, the device should try to 12267d8c4feeSMarcin Wojtas * go up 12277d8c4feeSMarcin Wojtas */ 12287d8c4feeSMarcin Wojtas ENA_FLAG_SET_ATOMIC( 12297d8c4feeSMarcin Wojtas ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 12307d8c4feeSMarcin Wojtas ena_trigger_reset(adapter, 12317d8c4feeSMarcin Wojtas ENA_REGS_RESET_OS_TRIGGER); 12327d8c4feeSMarcin Wojtas } 12337d8c4feeSMarcin Wojtas } 12347d8c4feeSMarcin Wojtas } 12357d8c4feeSMarcin Wojtas 12367d8c4feeSMarcin Wojtas return (rc); 12377d8c4feeSMarcin Wojtas } 12387d8c4feeSMarcin Wojtas 12399b8d05b8SZbigniew Bodek static void 124056d41ad5SMarcin Wojtas ena_update_io_rings(struct ena_adapter *adapter, uint32_t num) 124156d41ad5SMarcin Wojtas { 124256d41ad5SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 124356d41ad5SMarcin Wojtas /* Force indirection table to be reinitialized */ 124456d41ad5SMarcin Wojtas ena_com_rss_destroy(adapter->ena_dev); 124556d41ad5SMarcin Wojtas 124656d41ad5SMarcin Wojtas adapter->num_io_queues = num; 124756d41ad5SMarcin Wojtas ena_init_io_rings(adapter); 124856d41ad5SMarcin Wojtas } 124956d41ad5SMarcin Wojtas 125056d41ad5SMarcin Wojtas /* Caller should sanitize new_num */ 125156d41ad5SMarcin Wojtas int 125256d41ad5SMarcin Wojtas ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num) 125356d41ad5SMarcin Wojtas { 125456d41ad5SMarcin Wojtas uint32_t old_num; 125556d41ad5SMarcin Wojtas int rc = 0; 125656d41ad5SMarcin Wojtas bool dev_was_up; 125756d41ad5SMarcin Wojtas 125856d41ad5SMarcin Wojtas dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 125956d41ad5SMarcin Wojtas old_num = adapter->num_io_queues; 126056d41ad5SMarcin Wojtas ena_down(adapter); 126156d41ad5SMarcin Wojtas 126256d41ad5SMarcin Wojtas ena_update_io_rings(adapter, new_num); 126356d41ad5SMarcin Wojtas 126456d41ad5SMarcin Wojtas if (dev_was_up) { 126556d41ad5SMarcin Wojtas rc = ena_up(adapter); 126656d41ad5SMarcin Wojtas if (unlikely(rc != 0)) { 12673fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 126856d41ad5SMarcin Wojtas "Failed to configure device with %u IO queues. " 126956d41ad5SMarcin Wojtas "Reverting to previous value: %u\n", 127056d41ad5SMarcin Wojtas new_num, old_num); 127156d41ad5SMarcin Wojtas 127256d41ad5SMarcin Wojtas ena_update_io_rings(adapter, old_num); 127356d41ad5SMarcin Wojtas 127456d41ad5SMarcin Wojtas rc = ena_up(adapter); 127556d41ad5SMarcin Wojtas if (unlikely(rc != 0)) { 12763fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 127756d41ad5SMarcin Wojtas "Failed to revert to previous setup IO " 127856d41ad5SMarcin Wojtas "queues. 
Triggering device reset.\n"); 127956d41ad5SMarcin Wojtas ENA_FLAG_SET_ATOMIC( 128056d41ad5SMarcin Wojtas ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 128156d41ad5SMarcin Wojtas ena_trigger_reset(adapter, 128256d41ad5SMarcin Wojtas ENA_REGS_RESET_OS_TRIGGER); 128356d41ad5SMarcin Wojtas } 128456d41ad5SMarcin Wojtas } 128556d41ad5SMarcin Wojtas } 128656d41ad5SMarcin Wojtas 128756d41ad5SMarcin Wojtas return (rc); 128856d41ad5SMarcin Wojtas } 128956d41ad5SMarcin Wojtas 129056d41ad5SMarcin Wojtas static void 12919b8d05b8SZbigniew Bodek ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid) 12929b8d05b8SZbigniew Bodek { 12939b8d05b8SZbigniew Bodek struct ena_ring *rx_ring = &adapter->rx_ring[qid]; 12949b8d05b8SZbigniew Bodek unsigned int i; 12959b8d05b8SZbigniew Bodek 12969b8d05b8SZbigniew Bodek for (i = 0; i < rx_ring->ring_size; i++) { 12979b8d05b8SZbigniew Bodek struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; 12989b8d05b8SZbigniew Bodek 12990bdffe59SMarcin Wojtas if (rx_info->mbuf != NULL) 13009b8d05b8SZbigniew Bodek ena_free_rx_mbuf(adapter, rx_ring, rx_info); 13019a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 13029a0f2079SMarcin Wojtas if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) && 13039a0f2079SMarcin Wojtas (adapter->ifp->if_capenable & IFCAP_NETMAP)) { 13049a0f2079SMarcin Wojtas if (rx_info->netmap_buf_idx != 0) 13059a0f2079SMarcin Wojtas ena_netmap_free_rx_slot(adapter, rx_ring, 13069a0f2079SMarcin Wojtas rx_info); 13079a0f2079SMarcin Wojtas } 13089a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 13099b8d05b8SZbigniew Bodek } 13109b8d05b8SZbigniew Bodek } 13119b8d05b8SZbigniew Bodek 13129b8d05b8SZbigniew Bodek /** 13139b8d05b8SZbigniew Bodek * ena_refill_all_rx_bufs - allocate all queues Rx buffers 13149b8d05b8SZbigniew Bodek * @adapter: network interface device structure 13159b8d05b8SZbigniew Bodek * 13169b8d05b8SZbigniew Bodek */ 13179b8d05b8SZbigniew Bodek static void 13189b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(struct ena_adapter *adapter) 13199b8d05b8SZbigniew Bodek { 13209b8d05b8SZbigniew Bodek struct ena_ring *rx_ring; 13219b8d05b8SZbigniew Bodek int i, rc, bufs_num; 13229b8d05b8SZbigniew Bodek 13237d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 13249b8d05b8SZbigniew Bodek rx_ring = &adapter->rx_ring[i]; 13259b8d05b8SZbigniew Bodek bufs_num = rx_ring->ring_size - 1; 13269b8d05b8SZbigniew Bodek rc = ena_refill_rx_bufs(rx_ring, bufs_num); 13279b8d05b8SZbigniew Bodek if (unlikely(rc != bufs_num)) 13283fc5d816SMarcin Wojtas ena_log_io(adapter->pdev, WARN, 13293fc5d816SMarcin Wojtas "refilling Queue %d failed. 
" 13304e8acd84SMarcin Wojtas "Allocated %d buffers from: %d\n", i, rc, bufs_num); 13319a0f2079SMarcin Wojtas #ifdef DEV_NETMAP 13329a0f2079SMarcin Wojtas rx_ring->initialized = true; 13339a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */ 13349b8d05b8SZbigniew Bodek } 13359b8d05b8SZbigniew Bodek } 13369b8d05b8SZbigniew Bodek 13379b8d05b8SZbigniew Bodek static void 13389b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(struct ena_adapter *adapter) 13399b8d05b8SZbigniew Bodek { 13409b8d05b8SZbigniew Bodek int i; 13419b8d05b8SZbigniew Bodek 13427d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) 13439b8d05b8SZbigniew Bodek ena_free_rx_bufs(adapter, i); 13449b8d05b8SZbigniew Bodek } 13459b8d05b8SZbigniew Bodek 13469b8d05b8SZbigniew Bodek /** 13479b8d05b8SZbigniew Bodek * ena_free_tx_bufs - Free Tx Buffers per Queue 13489b8d05b8SZbigniew Bodek * @adapter: network interface device structure 13499b8d05b8SZbigniew Bodek * @qid: queue index 13509b8d05b8SZbigniew Bodek **/ 13519b8d05b8SZbigniew Bodek static void 13529b8d05b8SZbigniew Bodek ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) 13539b8d05b8SZbigniew Bodek { 13544e8acd84SMarcin Wojtas bool print_once = true; 13559b8d05b8SZbigniew Bodek struct ena_ring *tx_ring = &adapter->tx_ring[qid]; 13569b8d05b8SZbigniew Bodek 1357416e8864SZbigniew Bodek ENA_RING_MTX_LOCK(tx_ring); 13589b8d05b8SZbigniew Bodek for (int i = 0; i < tx_ring->ring_size; i++) { 13599b8d05b8SZbigniew Bodek struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 13609b8d05b8SZbigniew Bodek 13619b8d05b8SZbigniew Bodek if (tx_info->mbuf == NULL) 13629b8d05b8SZbigniew Bodek continue; 13639b8d05b8SZbigniew Bodek 13644e8acd84SMarcin Wojtas if (print_once) { 13653fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 136630425f93SMarcin Wojtas "free uncompleted tx mbuf qid %d idx 0x%x\n", 13674e8acd84SMarcin Wojtas qid, i); 13684e8acd84SMarcin Wojtas print_once = false; 13694e8acd84SMarcin Wojtas } else { 13703fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, 137130425f93SMarcin Wojtas "free uncompleted tx mbuf qid %d idx 0x%x\n", 13724e8acd84SMarcin Wojtas qid, i); 13734e8acd84SMarcin Wojtas } 13749b8d05b8SZbigniew Bodek 1375888810f0SMarcin Wojtas bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap, 1376e8073738SMarcin Wojtas BUS_DMASYNC_POSTWRITE); 1377888810f0SMarcin Wojtas bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); 13784fa9e02dSMarcin Wojtas 13799b8d05b8SZbigniew Bodek m_free(tx_info->mbuf); 13809b8d05b8SZbigniew Bodek tx_info->mbuf = NULL; 13819b8d05b8SZbigniew Bodek } 1382416e8864SZbigniew Bodek ENA_RING_MTX_UNLOCK(tx_ring); 13839b8d05b8SZbigniew Bodek } 13849b8d05b8SZbigniew Bodek 13859b8d05b8SZbigniew Bodek static void 13869b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(struct ena_adapter *adapter) 13879b8d05b8SZbigniew Bodek { 13889b8d05b8SZbigniew Bodek 13897d8c4feeSMarcin Wojtas for (int i = 0; i < adapter->num_io_queues; i++) 13909b8d05b8SZbigniew Bodek ena_free_tx_bufs(adapter, i); 13919b8d05b8SZbigniew Bodek } 13929b8d05b8SZbigniew Bodek 13939b8d05b8SZbigniew Bodek static void 13949b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(struct ena_adapter *adapter) 13959b8d05b8SZbigniew Bodek { 13969b8d05b8SZbigniew Bodek uint16_t ena_qid; 13979b8d05b8SZbigniew Bodek int i; 13989b8d05b8SZbigniew Bodek 13997d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14009b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 14019b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 14029b8d05b8SZbigniew Bodek } 
14039b8d05b8SZbigniew Bodek } 14049b8d05b8SZbigniew Bodek 14059b8d05b8SZbigniew Bodek static void 14069b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(struct ena_adapter *adapter) 14079b8d05b8SZbigniew Bodek { 14089b8d05b8SZbigniew Bodek uint16_t ena_qid; 14099b8d05b8SZbigniew Bodek int i; 14109b8d05b8SZbigniew Bodek 14117d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14129b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 14139b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); 14149b8d05b8SZbigniew Bodek } 14159b8d05b8SZbigniew Bodek } 14169b8d05b8SZbigniew Bodek 14179b8d05b8SZbigniew Bodek static void 14189b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(struct ena_adapter *adapter) 14199b8d05b8SZbigniew Bodek { 14205cb9db07SMarcin Wojtas struct ena_que *queue; 14215cb9db07SMarcin Wojtas int i; 14225cb9db07SMarcin Wojtas 14237d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14245cb9db07SMarcin Wojtas queue = &adapter->que[i]; 14255cb9db07SMarcin Wojtas while (taskqueue_cancel(queue->cleanup_tq, 14265cb9db07SMarcin Wojtas &queue->cleanup_task, NULL)) 14275cb9db07SMarcin Wojtas taskqueue_drain(queue->cleanup_tq, 14285cb9db07SMarcin Wojtas &queue->cleanup_task); 14295cb9db07SMarcin Wojtas taskqueue_free(queue->cleanup_tq); 14305cb9db07SMarcin Wojtas } 14315cb9db07SMarcin Wojtas 14329b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(adapter); 14339b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(adapter); 14349b8d05b8SZbigniew Bodek } 14359b8d05b8SZbigniew Bodek 14369b8d05b8SZbigniew Bodek static int 14379b8d05b8SZbigniew Bodek ena_create_io_queues(struct ena_adapter *adapter) 14389b8d05b8SZbigniew Bodek { 14399b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 14409b8d05b8SZbigniew Bodek struct ena_com_create_io_ctx ctx; 14419b8d05b8SZbigniew Bodek struct ena_ring *ring; 14425cb9db07SMarcin Wojtas struct ena_que *queue; 14439b8d05b8SZbigniew Bodek uint16_t ena_qid; 14449b8d05b8SZbigniew Bodek uint32_t msix_vector; 14456d1ef2abSArtur Rojek cpuset_t *cpu_mask = NULL; 14469b8d05b8SZbigniew Bodek int rc, i; 14479b8d05b8SZbigniew Bodek 14489b8d05b8SZbigniew Bodek /* Create TX queues */ 14497d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14509b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 14519b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 14529b8d05b8SZbigniew Bodek ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 14539b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 14549762a033SMarcin Wojtas ctx.queue_size = adapter->requested_tx_ring_size; 14559b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 14569b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 1457eb4c4f4aSMarcin Wojtas ctx.numa_node = adapter->que[i].domain; 1458eb4c4f4aSMarcin Wojtas 14599b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 14600bdffe59SMarcin Wojtas if (rc != 0) { 14613fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 14629b8d05b8SZbigniew Bodek "Failed to create io TX queue #%d rc: %d\n", i, rc); 14639b8d05b8SZbigniew Bodek goto err_tx; 14649b8d05b8SZbigniew Bodek } 14659b8d05b8SZbigniew Bodek ring = &adapter->tx_ring[i]; 14669b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 14679b8d05b8SZbigniew Bodek &ring->ena_com_io_sq, 14689b8d05b8SZbigniew Bodek &ring->ena_com_io_cq); 14690bdffe59SMarcin Wojtas if (rc != 0) { 14703fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 14719b8d05b8SZbigniew Bodek "Failed to get TX queue handlers. 
TX queue num" 14729b8d05b8SZbigniew Bodek " %d rc: %d\n", i, rc); 14739b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 14749b8d05b8SZbigniew Bodek goto err_tx; 14759b8d05b8SZbigniew Bodek } 1476eb4c4f4aSMarcin Wojtas 1477eb4c4f4aSMarcin Wojtas if (ctx.numa_node >= 0) { 1478eb4c4f4aSMarcin Wojtas ena_com_update_numa_node(ring->ena_com_io_cq, 1479eb4c4f4aSMarcin Wojtas ctx.numa_node); 1480eb4c4f4aSMarcin Wojtas } 14819b8d05b8SZbigniew Bodek } 14829b8d05b8SZbigniew Bodek 14839b8d05b8SZbigniew Bodek /* Create RX queues */ 14847d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 14859b8d05b8SZbigniew Bodek msix_vector = ENA_IO_IRQ_IDX(i); 14869b8d05b8SZbigniew Bodek ena_qid = ENA_IO_RXQ_IDX(i); 14879b8d05b8SZbigniew Bodek ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 14889b8d05b8SZbigniew Bodek ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 14899762a033SMarcin Wojtas ctx.queue_size = adapter->requested_rx_ring_size; 14909b8d05b8SZbigniew Bodek ctx.msix_vector = msix_vector; 14919b8d05b8SZbigniew Bodek ctx.qid = ena_qid; 1492eb4c4f4aSMarcin Wojtas ctx.numa_node = adapter->que[i].domain; 1493eb4c4f4aSMarcin Wojtas 14949b8d05b8SZbigniew Bodek rc = ena_com_create_io_queue(ena_dev, &ctx); 14953f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 14963fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 14979b8d05b8SZbigniew Bodek "Failed to create io RX queue[%d] rc: %d\n", i, rc); 14989b8d05b8SZbigniew Bodek goto err_rx; 14999b8d05b8SZbigniew Bodek } 15009b8d05b8SZbigniew Bodek 15019b8d05b8SZbigniew Bodek ring = &adapter->rx_ring[i]; 15029b8d05b8SZbigniew Bodek rc = ena_com_get_io_handlers(ena_dev, ena_qid, 15039b8d05b8SZbigniew Bodek &ring->ena_com_io_sq, 15049b8d05b8SZbigniew Bodek &ring->ena_com_io_cq); 15053f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 15063fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 15079b8d05b8SZbigniew Bodek "Failed to get RX queue handlers. 
RX queue num" 15089b8d05b8SZbigniew Bodek " %d rc: %d\n", i, rc); 15099b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ena_qid); 15109b8d05b8SZbigniew Bodek goto err_rx; 15119b8d05b8SZbigniew Bodek } 1512eb4c4f4aSMarcin Wojtas 1513eb4c4f4aSMarcin Wojtas if (ctx.numa_node >= 0) { 1514eb4c4f4aSMarcin Wojtas ena_com_update_numa_node(ring->ena_com_io_cq, 1515eb4c4f4aSMarcin Wojtas ctx.numa_node); 1516eb4c4f4aSMarcin Wojtas } 15179b8d05b8SZbigniew Bodek } 15189b8d05b8SZbigniew Bodek 15197d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 15205cb9db07SMarcin Wojtas queue = &adapter->que[i]; 15215cb9db07SMarcin Wojtas 15226c3e93cbSGleb Smirnoff NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); 15235cb9db07SMarcin Wojtas queue->cleanup_tq = taskqueue_create_fast("ena cleanup", 15245cb9db07SMarcin Wojtas M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); 15255cb9db07SMarcin Wojtas 15266d1ef2abSArtur Rojek #ifdef RSS 15276d1ef2abSArtur Rojek cpu_mask = &queue->cpu_mask; 15286d1ef2abSArtur Rojek #endif 15296d1ef2abSArtur Rojek taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET, 15306d1ef2abSArtur Rojek cpu_mask, 15315cb9db07SMarcin Wojtas "%s queue %d cleanup", 15325cb9db07SMarcin Wojtas device_get_nameunit(adapter->pdev), i); 15335cb9db07SMarcin Wojtas } 15345cb9db07SMarcin Wojtas 15359b8d05b8SZbigniew Bodek return (0); 15369b8d05b8SZbigniew Bodek 15379b8d05b8SZbigniew Bodek err_rx: 15389b8d05b8SZbigniew Bodek while (i--) 15399b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); 15407d8c4feeSMarcin Wojtas i = adapter->num_io_queues; 15419b8d05b8SZbigniew Bodek err_tx: 15429b8d05b8SZbigniew Bodek while (i--) 15439b8d05b8SZbigniew Bodek ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); 15449b8d05b8SZbigniew Bodek 15459b8d05b8SZbigniew Bodek return (ENXIO); 15469b8d05b8SZbigniew Bodek } 15479b8d05b8SZbigniew Bodek 15489b8d05b8SZbigniew Bodek /********************************************************************* 15499b8d05b8SZbigniew Bodek * 15509b8d05b8SZbigniew Bodek * MSIX & Interrupt Service routine 15519b8d05b8SZbigniew Bodek * 15529b8d05b8SZbigniew Bodek **********************************************************************/ 15539b8d05b8SZbigniew Bodek 15549b8d05b8SZbigniew Bodek /** 15559b8d05b8SZbigniew Bodek * ena_handle_msix - MSIX Interrupt Handler for admin/async queue 15569b8d05b8SZbigniew Bodek * @arg: interrupt number 15579b8d05b8SZbigniew Bodek **/ 15589b8d05b8SZbigniew Bodek static void 15599b8d05b8SZbigniew Bodek ena_intr_msix_mgmnt(void *arg) 15609b8d05b8SZbigniew Bodek { 15619b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 15629b8d05b8SZbigniew Bodek 15639b8d05b8SZbigniew Bodek ena_com_admin_q_comp_intr_handler(adapter->ena_dev); 1564fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))) 15659b8d05b8SZbigniew Bodek ena_com_aenq_intr_handler(adapter->ena_dev, arg); 15669b8d05b8SZbigniew Bodek } 15679b8d05b8SZbigniew Bodek 15685cb9db07SMarcin Wojtas /** 15695cb9db07SMarcin Wojtas * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx 15705cb9db07SMarcin Wojtas * @arg: queue 15715cb9db07SMarcin Wojtas **/ 15725cb9db07SMarcin Wojtas static int 15735cb9db07SMarcin Wojtas ena_handle_msix(void *arg) 15745cb9db07SMarcin Wojtas { 15755cb9db07SMarcin Wojtas struct ena_que *queue = arg; 15765cb9db07SMarcin Wojtas struct ena_adapter *adapter = queue->adapter; 15775cb9db07SMarcin Wojtas if_t ifp = adapter->ifp; 15785cb9db07SMarcin Wojtas 15795cb9db07SMarcin 
Wojtas if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) 15805cb9db07SMarcin Wojtas return (FILTER_STRAY); 15815cb9db07SMarcin Wojtas 15825cb9db07SMarcin Wojtas taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); 15835cb9db07SMarcin Wojtas 15845cb9db07SMarcin Wojtas return (FILTER_HANDLED); 15855cb9db07SMarcin Wojtas } 15865cb9db07SMarcin Wojtas 15879b8d05b8SZbigniew Bodek static int 15889b8d05b8SZbigniew Bodek ena_enable_msix(struct ena_adapter *adapter) 15899b8d05b8SZbigniew Bodek { 15909b8d05b8SZbigniew Bodek device_t dev = adapter->pdev; 15918805021aSMarcin Wojtas int msix_vecs, msix_req; 15928805021aSMarcin Wojtas int i, rc = 0; 15939b8d05b8SZbigniew Bodek 1594fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 15953fc5d816SMarcin Wojtas ena_log(dev, ERR, "Error, MSI-X is already enabled\n"); 1596fd43fd2aSMarcin Wojtas return (EINVAL); 1597fd43fd2aSMarcin Wojtas } 1598fd43fd2aSMarcin Wojtas 15999b8d05b8SZbigniew Bodek /* Reserved the max msix vectors we might need */ 16007d8c4feeSMarcin Wojtas msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); 16019b8d05b8SZbigniew Bodek 1602cd5d5804SMarcin Wojtas adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), 1603cd5d5804SMarcin Wojtas M_DEVBUF, M_WAITOK | M_ZERO); 1604cd5d5804SMarcin Wojtas 16053fc5d816SMarcin Wojtas ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", 16063fc5d816SMarcin Wojtas msix_vecs); 16079b8d05b8SZbigniew Bodek 16089b8d05b8SZbigniew Bodek for (i = 0; i < msix_vecs; i++) { 16099b8d05b8SZbigniew Bodek adapter->msix_entries[i].entry = i; 16109b8d05b8SZbigniew Bodek /* Vectors must start from 1 */ 16119b8d05b8SZbigniew Bodek adapter->msix_entries[i].vector = i + 1; 16129b8d05b8SZbigniew Bodek } 16139b8d05b8SZbigniew Bodek 16148805021aSMarcin Wojtas msix_req = msix_vecs; 16159b8d05b8SZbigniew Bodek rc = pci_alloc_msix(dev, &msix_vecs); 16163f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 16173fc5d816SMarcin Wojtas ena_log(dev, ERR, 16189b8d05b8SZbigniew Bodek "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc); 16197d2544e6SMarcin Wojtas 16209b8d05b8SZbigniew Bodek rc = ENOSPC; 16217d2544e6SMarcin Wojtas goto err_msix_free; 16229b8d05b8SZbigniew Bodek } 16239b8d05b8SZbigniew Bodek 16248805021aSMarcin Wojtas if (msix_vecs != msix_req) { 16252b5b60feSMarcin Wojtas if (msix_vecs == ENA_ADMIN_MSIX_VEC) { 16263fc5d816SMarcin Wojtas ena_log(dev, ERR, 16272b5b60feSMarcin Wojtas "Not enough number of MSI-x allocated: %d\n", 16282b5b60feSMarcin Wojtas msix_vecs); 16292b5b60feSMarcin Wojtas pci_release_msi(dev); 16302b5b60feSMarcin Wojtas rc = ENOSPC; 16312b5b60feSMarcin Wojtas goto err_msix_free; 16322b5b60feSMarcin Wojtas } 16333fc5d816SMarcin Wojtas ena_log(dev, ERR, "Enable only %d MSI-x (out of %d), reduce " 16348805021aSMarcin Wojtas "the number of queues\n", msix_vecs, msix_req); 16358805021aSMarcin Wojtas } 16368805021aSMarcin Wojtas 16379b8d05b8SZbigniew Bodek adapter->msix_vecs = msix_vecs; 1638fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 16399b8d05b8SZbigniew Bodek 16407d2544e6SMarcin Wojtas return (0); 16417d2544e6SMarcin Wojtas 16427d2544e6SMarcin Wojtas err_msix_free: 16437d2544e6SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 16447d2544e6SMarcin Wojtas adapter->msix_entries = NULL; 16457d2544e6SMarcin Wojtas 16469b8d05b8SZbigniew Bodek return (rc); 16479b8d05b8SZbigniew Bodek } 16489b8d05b8SZbigniew Bodek 16499b8d05b8SZbigniew Bodek static void 16509b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(struct 
ena_adapter *adapter) 16519b8d05b8SZbigniew Bodek { 16529b8d05b8SZbigniew Bodek 16539b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, 16549b8d05b8SZbigniew Bodek ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", 16559b8d05b8SZbigniew Bodek device_get_nameunit(adapter->pdev)); 16569b8d05b8SZbigniew Bodek /* 16579b8d05b8SZbigniew Bodek * Handler is NULL on purpose, it will be set 16589b8d05b8SZbigniew Bodek * when mgmnt interrupt is acquired 16599b8d05b8SZbigniew Bodek */ 16609b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; 16619b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; 16629b8d05b8SZbigniew Bodek adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = 16639b8d05b8SZbigniew Bodek adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; 16649b8d05b8SZbigniew Bodek } 16659b8d05b8SZbigniew Bodek 166677958fcdSMarcin Wojtas static int 16679b8d05b8SZbigniew Bodek ena_setup_io_intr(struct ena_adapter *adapter) 16689b8d05b8SZbigniew Bodek { 16696d1ef2abSArtur Rojek #ifdef RSS 16706d1ef2abSArtur Rojek int num_buckets = rss_getnumbuckets(); 16716d1ef2abSArtur Rojek static int last_bind = 0; 1672eb4c4f4aSMarcin Wojtas int cur_bind; 1673eb4c4f4aSMarcin Wojtas int idx; 16746d1ef2abSArtur Rojek #endif 16759b8d05b8SZbigniew Bodek int irq_idx; 16769b8d05b8SZbigniew Bodek 167777958fcdSMarcin Wojtas if (adapter->msix_entries == NULL) 167877958fcdSMarcin Wojtas return (EINVAL); 167977958fcdSMarcin Wojtas 1680eb4c4f4aSMarcin Wojtas #ifdef RSS 1681eb4c4f4aSMarcin Wojtas if (adapter->first_bind < 0) { 1682eb4c4f4aSMarcin Wojtas adapter->first_bind = last_bind; 1683eb4c4f4aSMarcin Wojtas last_bind = (last_bind + adapter->num_io_queues) % num_buckets; 1684eb4c4f4aSMarcin Wojtas } 1685eb4c4f4aSMarcin Wojtas cur_bind = adapter->first_bind; 1686eb4c4f4aSMarcin Wojtas #endif 1687eb4c4f4aSMarcin Wojtas 16887d8c4feeSMarcin Wojtas for (int i = 0; i < adapter->num_io_queues; i++) { 16899b8d05b8SZbigniew Bodek irq_idx = ENA_IO_IRQ_IDX(i); 16909b8d05b8SZbigniew Bodek 16919b8d05b8SZbigniew Bodek snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, 16929b8d05b8SZbigniew Bodek "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); 16939b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].handler = ena_handle_msix; 16949b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].data = &adapter->que[i]; 16959b8d05b8SZbigniew Bodek adapter->irq_tbl[irq_idx].vector = 16969b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector; 16973fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n", 16989b8d05b8SZbigniew Bodek adapter->msix_entries[irq_idx].vector); 1699277f11c4SMarcin Wojtas 17006d1ef2abSArtur Rojek #ifdef RSS 17019b8d05b8SZbigniew Bodek adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = 1702eb4c4f4aSMarcin Wojtas rss_getcpu(cur_bind); 1703eb4c4f4aSMarcin Wojtas cur_bind = (cur_bind + 1) % num_buckets; 17046d1ef2abSArtur Rojek CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask); 1705eb4c4f4aSMarcin Wojtas 1706eb4c4f4aSMarcin Wojtas for (idx = 0; idx < MAXMEMDOM; ++idx) { 1707eb4c4f4aSMarcin Wojtas if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx])) 1708eb4c4f4aSMarcin Wojtas break; 1709eb4c4f4aSMarcin Wojtas } 1710eb4c4f4aSMarcin Wojtas adapter->que[i].domain = idx; 1711eb4c4f4aSMarcin Wojtas #else 1712eb4c4f4aSMarcin Wojtas adapter->que[i].domain = -1; 17136d1ef2abSArtur Rojek #endif 17149b8d05b8SZbigniew Bodek } 171577958fcdSMarcin Wojtas 171677958fcdSMarcin Wojtas return (0); 17179b8d05b8SZbigniew Bodek } 17189b8d05b8SZbigniew Bodek 
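/*
 * A descriptive sketch (not driver code) of how the two helpers above lay
 * out adapter->irq_tbl before the IRQs are actually requested:
 *
 *	irq_tbl[ENA_MGMNT_IRQ_IDX]	handler = NULL (filled in when the
 *					management IRQ is acquired), data = adapter
 *	irq_tbl[ENA_IO_IRQ_IDX(i)]	handler = ena_handle_msix,
 *					data = &adapter->que[i]
 *
 * The vector numbers are copied from adapter->msix_entries[], which
 * ena_enable_msix() numbered starting from 1.
 */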
17199b8d05b8SZbigniew Bodek static int 17209b8d05b8SZbigniew Bodek ena_request_mgmnt_irq(struct ena_adapter *adapter) 17219b8d05b8SZbigniew Bodek { 17223fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 17239b8d05b8SZbigniew Bodek struct ena_irq *irq; 17249b8d05b8SZbigniew Bodek unsigned long flags; 17259b8d05b8SZbigniew Bodek int rc, rcc; 17269b8d05b8SZbigniew Bodek 17279b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 17289b8d05b8SZbigniew Bodek 17299b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 17309b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 17319b8d05b8SZbigniew Bodek &irq->vector, flags); 17329b8d05b8SZbigniew Bodek 17333f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 17343fc5d816SMarcin Wojtas ena_log(pdev, ERR, "could not allocate irq vector: %d\n", 17353fc5d816SMarcin Wojtas irq->vector); 17367d2544e6SMarcin Wojtas return (ENXIO); 17379b8d05b8SZbigniew Bodek } 17389b8d05b8SZbigniew Bodek 17390bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 17400bdffe59SMarcin Wojtas INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, 17410bdffe59SMarcin Wojtas irq->data, &irq->cookie); 17423f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 17433fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to register " 17449b8d05b8SZbigniew Bodek "interrupt handler for irq %ju: %d\n", 17459b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 17467d2544e6SMarcin Wojtas goto err_res_free; 17479b8d05b8SZbigniew Bodek } 17489b8d05b8SZbigniew Bodek irq->requested = true; 17499b8d05b8SZbigniew Bodek 17509b8d05b8SZbigniew Bodek return (rc); 17519b8d05b8SZbigniew Bodek 17527d2544e6SMarcin Wojtas err_res_free: 17533fc5d816SMarcin Wojtas ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector); 17549b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 17559b8d05b8SZbigniew Bodek irq->vector, irq->res); 17563f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 17573fc5d816SMarcin Wojtas ena_log(pdev, ERR, "dev has no parent while " 17589b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 17599b8d05b8SZbigniew Bodek irq->res = NULL; 17609b8d05b8SZbigniew Bodek 17619b8d05b8SZbigniew Bodek return (rc); 17629b8d05b8SZbigniew Bodek } 17639b8d05b8SZbigniew Bodek 17649b8d05b8SZbigniew Bodek static int 17659b8d05b8SZbigniew Bodek ena_request_io_irq(struct ena_adapter *adapter) 17669b8d05b8SZbigniew Bodek { 17673fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 17689b8d05b8SZbigniew Bodek struct ena_irq *irq; 17699b8d05b8SZbigniew Bodek unsigned long flags = 0; 17709b8d05b8SZbigniew Bodek int rc = 0, i, rcc; 17719b8d05b8SZbigniew Bodek 1772fd43fd2aSMarcin Wojtas if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) { 17733fc5d816SMarcin Wojtas ena_log(pdev, ERR, 17744e8acd84SMarcin Wojtas "failed to request I/O IRQ: MSI-X is not enabled\n"); 17759b8d05b8SZbigniew Bodek return (EINVAL); 17769b8d05b8SZbigniew Bodek } else { 17779b8d05b8SZbigniew Bodek flags = RF_ACTIVE | RF_SHAREABLE; 17789b8d05b8SZbigniew Bodek } 17799b8d05b8SZbigniew Bodek 17809b8d05b8SZbigniew Bodek for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 17819b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 17829b8d05b8SZbigniew Bodek 17833f9ed7abSMarcin Wojtas if (unlikely(irq->requested)) 17849b8d05b8SZbigniew Bodek continue; 17859b8d05b8SZbigniew Bodek 17869b8d05b8SZbigniew Bodek irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, 17879b8d05b8SZbigniew Bodek &irq->vector, flags); 
17883f9ed7abSMarcin Wojtas if (unlikely(irq->res == NULL)) { 1789469a8407SMarcin Wojtas rc = ENOMEM; 17903fc5d816SMarcin Wojtas ena_log(pdev, ERR, "could not allocate irq vector: %d\n", 17913fc5d816SMarcin Wojtas irq->vector); 17929b8d05b8SZbigniew Bodek goto err; 17939b8d05b8SZbigniew Bodek } 17949b8d05b8SZbigniew Bodek 17950bdffe59SMarcin Wojtas rc = bus_setup_intr(adapter->pdev, irq->res, 17965cb9db07SMarcin Wojtas INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, 17975cb9db07SMarcin Wojtas irq->data, &irq->cookie); 17983f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 17993fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to register " 18009b8d05b8SZbigniew Bodek "interrupt handler for irq %ju: %d\n", 18019b8d05b8SZbigniew Bodek rman_get_start(irq->res), rc); 18029b8d05b8SZbigniew Bodek goto err; 18039b8d05b8SZbigniew Bodek } 18049b8d05b8SZbigniew Bodek irq->requested = true; 18056d1ef2abSArtur Rojek 18066d1ef2abSArtur Rojek #ifdef RSS 18076d1ef2abSArtur Rojek rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu); 18086d1ef2abSArtur Rojek if (unlikely(rc != 0)) { 18096d1ef2abSArtur Rojek ena_log(pdev, ERR, "failed to bind " 18106d1ef2abSArtur Rojek "interrupt handler for irq %ju to cpu %d: %d\n", 18116d1ef2abSArtur Rojek rman_get_start(irq->res), irq->cpu, rc); 18126d1ef2abSArtur Rojek goto err; 18136d1ef2abSArtur Rojek } 18146d1ef2abSArtur Rojek 18156d1ef2abSArtur Rojek ena_log(pdev, INFO, "queue %d - cpu %d\n", 18166d1ef2abSArtur Rojek i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); 18176d1ef2abSArtur Rojek #endif 18189b8d05b8SZbigniew Bodek } 18199b8d05b8SZbigniew Bodek 18209b8d05b8SZbigniew Bodek return (rc); 18219b8d05b8SZbigniew Bodek 18229b8d05b8SZbigniew Bodek err: 18239b8d05b8SZbigniew Bodek 18249b8d05b8SZbigniew Bodek for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { 18259b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 18269b8d05b8SZbigniew Bodek rcc = 0; 18279b8d05b8SZbigniew Bodek 18289b8d05b8SZbigniew Bodek /* Once we entered err: section and irq->requested is true we 18299b8d05b8SZbigniew Bodek free both intr and resources */ 18300bdffe59SMarcin Wojtas if (irq->requested) 18319b8d05b8SZbigniew Bodek rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 18323f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 18333fc5d816SMarcin Wojtas ena_log(pdev, ERR, "could not release irq: %d, error: %d\n", 18343fc5d816SMarcin Wojtas irq->vector, rcc); 18359b8d05b8SZbigniew Bodek 1836eb3f25b4SGordon Bergling /* If we entered err: section without irq->requested set we know 18379b8d05b8SZbigniew Bodek it was bus_alloc_resource_any() that needs cleanup, provided 18389b8d05b8SZbigniew Bodek res is not NULL. 
In case res is NULL no work in needed in 18399b8d05b8SZbigniew Bodek this iteration */ 18409b8d05b8SZbigniew Bodek rcc = 0; 18419b8d05b8SZbigniew Bodek if (irq->res != NULL) { 18429b8d05b8SZbigniew Bodek rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 18439b8d05b8SZbigniew Bodek irq->vector, irq->res); 18449b8d05b8SZbigniew Bodek } 18453f9ed7abSMarcin Wojtas if (unlikely(rcc != 0)) 18463fc5d816SMarcin Wojtas ena_log(pdev, ERR, "dev has no parent while " 18479b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 18489b8d05b8SZbigniew Bodek irq->requested = false; 18499b8d05b8SZbigniew Bodek irq->res = NULL; 18509b8d05b8SZbigniew Bodek } 18519b8d05b8SZbigniew Bodek 18529b8d05b8SZbigniew Bodek return (rc); 18539b8d05b8SZbigniew Bodek } 18549b8d05b8SZbigniew Bodek 18559b8d05b8SZbigniew Bodek static void 18569b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(struct ena_adapter *adapter) 18579b8d05b8SZbigniew Bodek { 18583fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 18599b8d05b8SZbigniew Bodek struct ena_irq *irq; 18609b8d05b8SZbigniew Bodek int rc; 18619b8d05b8SZbigniew Bodek 18629b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; 18639b8d05b8SZbigniew Bodek if (irq->requested) { 18643fc5d816SMarcin Wojtas ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 18659b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); 18663f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 18673fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to tear down irq: %d\n", 18683fc5d816SMarcin Wojtas irq->vector); 18699b8d05b8SZbigniew Bodek irq->requested = 0; 18709b8d05b8SZbigniew Bodek } 18719b8d05b8SZbigniew Bodek 18729b8d05b8SZbigniew Bodek if (irq->res != NULL) { 18733fc5d816SMarcin Wojtas ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector); 18749b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, 18759b8d05b8SZbigniew Bodek irq->vector, irq->res); 18769b8d05b8SZbigniew Bodek irq->res = NULL; 18773f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 18783fc5d816SMarcin Wojtas ena_log(pdev, ERR, "dev has no parent while " 18799b8d05b8SZbigniew Bodek "releasing res for irq: %d\n", irq->vector); 18809b8d05b8SZbigniew Bodek } 18819b8d05b8SZbigniew Bodek } 18829b8d05b8SZbigniew Bodek 18839b8d05b8SZbigniew Bodek static void 18849b8d05b8SZbigniew Bodek ena_free_io_irq(struct ena_adapter *adapter) 18859b8d05b8SZbigniew Bodek { 18863fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 18879b8d05b8SZbigniew Bodek struct ena_irq *irq; 18889b8d05b8SZbigniew Bodek int rc; 18899b8d05b8SZbigniew Bodek 18909b8d05b8SZbigniew Bodek for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { 18919b8d05b8SZbigniew Bodek irq = &adapter->irq_tbl[i]; 18929b8d05b8SZbigniew Bodek if (irq->requested) { 18933fc5d816SMarcin Wojtas ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector); 18949b8d05b8SZbigniew Bodek rc = bus_teardown_intr(adapter->pdev, irq->res, 18959b8d05b8SZbigniew Bodek irq->cookie); 18963f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 18973fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to tear down irq: %d\n", 18983fc5d816SMarcin Wojtas irq->vector); 18999b8d05b8SZbigniew Bodek } 19009b8d05b8SZbigniew Bodek irq->requested = 0; 19019b8d05b8SZbigniew Bodek } 19029b8d05b8SZbigniew Bodek 19039b8d05b8SZbigniew Bodek if (irq->res != NULL) { 19043fc5d816SMarcin Wojtas ena_log(pdev, DBG, "release resource irq: %d\n", 19059b8d05b8SZbigniew Bodek irq->vector); 19069b8d05b8SZbigniew Bodek rc = bus_release_resource(adapter->pdev, 
SYS_RES_IRQ, 19079b8d05b8SZbigniew Bodek irq->vector, irq->res); 19089b8d05b8SZbigniew Bodek irq->res = NULL; 19093f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 19103fc5d816SMarcin Wojtas ena_log(pdev, ERR, "dev has no parent" 19119b8d05b8SZbigniew Bodek " while releasing res for irq: %d\n", 19129b8d05b8SZbigniew Bodek irq->vector); 19139b8d05b8SZbigniew Bodek } 19149b8d05b8SZbigniew Bodek } 19159b8d05b8SZbigniew Bodek } 19169b8d05b8SZbigniew Bodek } 19179b8d05b8SZbigniew Bodek 19189b8d05b8SZbigniew Bodek static void 19199b8d05b8SZbigniew Bodek ena_free_irqs(struct ena_adapter* adapter) 19209b8d05b8SZbigniew Bodek { 19219b8d05b8SZbigniew Bodek 19229b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 19239b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(adapter); 19249b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 19259b8d05b8SZbigniew Bodek } 19269b8d05b8SZbigniew Bodek 19279b8d05b8SZbigniew Bodek static void 19289b8d05b8SZbigniew Bodek ena_disable_msix(struct ena_adapter *adapter) 19299b8d05b8SZbigniew Bodek { 19309b8d05b8SZbigniew Bodek 1931fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { 1932fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); 19339b8d05b8SZbigniew Bodek pci_release_msi(adapter->pdev); 1934fd43fd2aSMarcin Wojtas } 19359b8d05b8SZbigniew Bodek 19369b8d05b8SZbigniew Bodek adapter->msix_vecs = 0; 1937cd5d5804SMarcin Wojtas free(adapter->msix_entries, M_DEVBUF); 19389b8d05b8SZbigniew Bodek adapter->msix_entries = NULL; 19399b8d05b8SZbigniew Bodek } 19409b8d05b8SZbigniew Bodek 19419b8d05b8SZbigniew Bodek static void 19429b8d05b8SZbigniew Bodek ena_unmask_all_io_irqs(struct ena_adapter *adapter) 19439b8d05b8SZbigniew Bodek { 19449b8d05b8SZbigniew Bodek struct ena_com_io_cq* io_cq; 19459b8d05b8SZbigniew Bodek struct ena_eth_io_intr_reg intr_reg; 1946223c8cb1SArtur Rojek struct ena_ring *tx_ring; 19479b8d05b8SZbigniew Bodek uint16_t ena_qid; 19489b8d05b8SZbigniew Bodek int i; 19499b8d05b8SZbigniew Bodek 19509b8d05b8SZbigniew Bodek /* Unmask interrupts for all queues */ 19517d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 19529b8d05b8SZbigniew Bodek ena_qid = ENA_IO_TXQ_IDX(i); 19539b8d05b8SZbigniew Bodek io_cq = &adapter->ena_dev->io_cq_queues[ena_qid]; 19549b8d05b8SZbigniew Bodek ena_com_update_intr_reg(&intr_reg, 0, 0, true); 1955223c8cb1SArtur Rojek tx_ring = &adapter->tx_ring[i]; 1956223c8cb1SArtur Rojek counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1); 19579b8d05b8SZbigniew Bodek ena_com_unmask_intr(io_cq, &intr_reg); 19589b8d05b8SZbigniew Bodek } 19599b8d05b8SZbigniew Bodek } 19609b8d05b8SZbigniew Bodek 19619b8d05b8SZbigniew Bodek static int 19629b8d05b8SZbigniew Bodek ena_up_complete(struct ena_adapter *adapter) 19639b8d05b8SZbigniew Bodek { 19649b8d05b8SZbigniew Bodek int rc; 19659b8d05b8SZbigniew Bodek 1966fd43fd2aSMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { 19679b8d05b8SZbigniew Bodek rc = ena_rss_configure(adapter); 196856d41ad5SMarcin Wojtas if (rc != 0) { 19693fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 197056d41ad5SMarcin Wojtas "Failed to configure RSS\n"); 19719b8d05b8SZbigniew Bodek return (rc); 19729b8d05b8SZbigniew Bodek } 197356d41ad5SMarcin Wojtas } 19749b8d05b8SZbigniew Bodek 19757d2544e6SMarcin Wojtas rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu); 19763f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 19777d2544e6SMarcin Wojtas return (rc); 19787d2544e6SMarcin Wojtas 19799b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(adapter); 
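	/* Start every interface-up cycle with zeroed per-adapter HW statistics. */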
198030217e2dSMarcin Wojtas ena_reset_counters((counter_u64_t *)&adapter->hw_stats, 198130217e2dSMarcin Wojtas sizeof(adapter->hw_stats)); 19829b8d05b8SZbigniew Bodek 19839b8d05b8SZbigniew Bodek return (0); 19849b8d05b8SZbigniew Bodek } 19859b8d05b8SZbigniew Bodek 19869762a033SMarcin Wojtas static void 19879762a033SMarcin Wojtas set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, 19889762a033SMarcin Wojtas int new_rx_size) 19899762a033SMarcin Wojtas { 19909762a033SMarcin Wojtas int i; 19919762a033SMarcin Wojtas 19929762a033SMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 19939762a033SMarcin Wojtas adapter->tx_ring[i].ring_size = new_tx_size; 19949762a033SMarcin Wojtas adapter->rx_ring[i].ring_size = new_rx_size; 19959762a033SMarcin Wojtas } 19969762a033SMarcin Wojtas } 19979762a033SMarcin Wojtas 19989762a033SMarcin Wojtas static int 19999762a033SMarcin Wojtas create_queues_with_size_backoff(struct ena_adapter *adapter) 20009762a033SMarcin Wojtas { 20013fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 20029762a033SMarcin Wojtas int rc; 20039762a033SMarcin Wojtas uint32_t cur_rx_ring_size, cur_tx_ring_size; 20049762a033SMarcin Wojtas uint32_t new_rx_ring_size, new_tx_ring_size; 20059762a033SMarcin Wojtas 20069762a033SMarcin Wojtas /* 20079762a033SMarcin Wojtas * Current queue sizes might be set to smaller than the requested 20089762a033SMarcin Wojtas * ones due to past queue allocation failures. 20099762a033SMarcin Wojtas */ 20109762a033SMarcin Wojtas set_io_rings_size(adapter, adapter->requested_tx_ring_size, 20119762a033SMarcin Wojtas adapter->requested_rx_ring_size); 20129762a033SMarcin Wojtas 20139762a033SMarcin Wojtas while (1) { 20149762a033SMarcin Wojtas /* Allocate transmit descriptors */ 20159762a033SMarcin Wojtas rc = ena_setup_all_tx_resources(adapter); 20169762a033SMarcin Wojtas if (unlikely(rc != 0)) { 20173fc5d816SMarcin Wojtas ena_log(pdev, ERR, "err_setup_tx\n"); 20189762a033SMarcin Wojtas goto err_setup_tx; 20199762a033SMarcin Wojtas } 20209762a033SMarcin Wojtas 20219762a033SMarcin Wojtas /* Allocate receive descriptors */ 20229762a033SMarcin Wojtas rc = ena_setup_all_rx_resources(adapter); 20239762a033SMarcin Wojtas if (unlikely(rc != 0)) { 20243fc5d816SMarcin Wojtas ena_log(pdev, ERR, "err_setup_rx\n"); 20259762a033SMarcin Wojtas goto err_setup_rx; 20269762a033SMarcin Wojtas } 20279762a033SMarcin Wojtas 20289762a033SMarcin Wojtas /* Create IO queues for Rx & Tx */ 20299762a033SMarcin Wojtas rc = ena_create_io_queues(adapter); 20309762a033SMarcin Wojtas if (unlikely(rc != 0)) { 20313fc5d816SMarcin Wojtas ena_log(pdev, ERR, 20329762a033SMarcin Wojtas "create IO queues failed\n"); 20339762a033SMarcin Wojtas goto err_io_que; 20349762a033SMarcin Wojtas } 20359762a033SMarcin Wojtas 20369762a033SMarcin Wojtas return (0); 20379762a033SMarcin Wojtas 20389762a033SMarcin Wojtas err_io_que: 20399762a033SMarcin Wojtas ena_free_all_rx_resources(adapter); 20409762a033SMarcin Wojtas err_setup_rx: 20419762a033SMarcin Wojtas ena_free_all_tx_resources(adapter); 20429762a033SMarcin Wojtas err_setup_tx: 20439762a033SMarcin Wojtas /* 20449762a033SMarcin Wojtas * Lower the ring size if ENOMEM. Otherwise, return the 20459762a033SMarcin Wojtas * error straightaway. 
20469762a033SMarcin Wojtas */ 20479762a033SMarcin Wojtas if (unlikely(rc != ENOMEM)) { 20483fc5d816SMarcin Wojtas ena_log(pdev, ERR, 20499762a033SMarcin Wojtas "Queue creation failed with error code: %d\n", rc); 20509762a033SMarcin Wojtas return (rc); 20519762a033SMarcin Wojtas } 20529762a033SMarcin Wojtas 20539762a033SMarcin Wojtas cur_tx_ring_size = adapter->tx_ring[0].ring_size; 20549762a033SMarcin Wojtas cur_rx_ring_size = adapter->rx_ring[0].ring_size; 20559762a033SMarcin Wojtas 20563fc5d816SMarcin Wojtas ena_log(pdev, ERR, 20579762a033SMarcin Wojtas "Not enough memory to create queues with sizes TX=%d, RX=%d\n", 20589762a033SMarcin Wojtas cur_tx_ring_size, cur_rx_ring_size); 20599762a033SMarcin Wojtas 20609762a033SMarcin Wojtas new_tx_ring_size = cur_tx_ring_size; 20619762a033SMarcin Wojtas new_rx_ring_size = cur_rx_ring_size; 20629762a033SMarcin Wojtas 20639762a033SMarcin Wojtas /* 20649762a033SMarcin Wojtas * Decrease the size of a larger queue, or decrease both if they are 20659762a033SMarcin Wojtas * the same size. 20669762a033SMarcin Wojtas */ 20679762a033SMarcin Wojtas if (cur_rx_ring_size <= cur_tx_ring_size) 20689762a033SMarcin Wojtas new_tx_ring_size = cur_tx_ring_size / 2; 20699762a033SMarcin Wojtas if (cur_rx_ring_size >= cur_tx_ring_size) 20709762a033SMarcin Wojtas new_rx_ring_size = cur_rx_ring_size / 2; 20719762a033SMarcin Wojtas 20729762a033SMarcin Wojtas if (new_tx_ring_size < ENA_MIN_RING_SIZE || 20739762a033SMarcin Wojtas new_rx_ring_size < ENA_MIN_RING_SIZE) { 20743fc5d816SMarcin Wojtas ena_log(pdev, ERR, 20759762a033SMarcin Wojtas "Queue creation failed with the smallest possible queue size" 20769762a033SMarcin Wojtas "of %d for both queues. Not retrying with smaller queues\n", 20779762a033SMarcin Wojtas ENA_MIN_RING_SIZE); 20789762a033SMarcin Wojtas return (rc); 20799762a033SMarcin Wojtas } 20809762a033SMarcin Wojtas 208177160654SArtur Rojek ena_log(pdev, INFO, 208277160654SArtur Rojek "Retrying queue creation with sizes TX=%d, RX=%d\n", 208377160654SArtur Rojek new_tx_ring_size, new_rx_ring_size); 208477160654SArtur Rojek 20859762a033SMarcin Wojtas set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size); 20869762a033SMarcin Wojtas } 20879762a033SMarcin Wojtas } 20889762a033SMarcin Wojtas 208938c7b965SMarcin Wojtas int 20909b8d05b8SZbigniew Bodek ena_up(struct ena_adapter *adapter) 20919b8d05b8SZbigniew Bodek { 20929b8d05b8SZbigniew Bodek int rc = 0; 20939b8d05b8SZbigniew Bodek 209407aff471SArtur Rojek ENA_LOCK_ASSERT(); 2095cb98c439SArtur Rojek 20963f9ed7abSMarcin Wojtas if (unlikely(device_is_attached(adapter->pdev) == 0)) { 20973fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "device is not attached!\n"); 20989b8d05b8SZbigniew Bodek return (ENXIO); 20999b8d05b8SZbigniew Bodek } 21009b8d05b8SZbigniew Bodek 2101579d23aaSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2102579d23aaSMarcin Wojtas return (0); 2103579d23aaSMarcin Wojtas 21043fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "device is going UP\n"); 21059b8d05b8SZbigniew Bodek 210678554d0cSDawid Gorecki /* 210778554d0cSDawid Gorecki * ena_timer_service can use functions, which write to the admin queue. 210878554d0cSDawid Gorecki * Those calls are not protected by ENA_LOCK, and because of that, the 210978554d0cSDawid Gorecki * timer should be stopped when bringing the device up or down. 
211078554d0cSDawid Gorecki */ 211178554d0cSDawid Gorecki ENA_TIMER_DRAIN(adapter); 211278554d0cSDawid Gorecki 21139b8d05b8SZbigniew Bodek /* setup interrupts for IO queues */ 211477958fcdSMarcin Wojtas rc = ena_setup_io_intr(adapter); 211577958fcdSMarcin Wojtas if (unlikely(rc != 0)) { 21163fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n"); 211777958fcdSMarcin Wojtas goto error; 211877958fcdSMarcin Wojtas } 21199b8d05b8SZbigniew Bodek rc = ena_request_io_irq(adapter); 21203f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 21213fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "err_req_irq\n"); 212277958fcdSMarcin Wojtas goto error; 21239b8d05b8SZbigniew Bodek } 21249b8d05b8SZbigniew Bodek 21253fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, 21269762a033SMarcin Wojtas "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, " 21279762a033SMarcin Wojtas "LLQ is %s\n", 21287d8c4feeSMarcin Wojtas adapter->num_io_queues, 21299762a033SMarcin Wojtas adapter->requested_rx_ring_size, 21309762a033SMarcin Wojtas adapter->requested_tx_ring_size, 21319762a033SMarcin Wojtas (adapter->ena_dev->tx_mem_queue_type == 21329762a033SMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED"); 21337d8c4feeSMarcin Wojtas 21349762a033SMarcin Wojtas rc = create_queues_with_size_backoff(adapter); 21353f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 21363fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 21379762a033SMarcin Wojtas "error creating queues with size backoff\n"); 21389762a033SMarcin Wojtas goto err_create_queues_with_backoff; 21399b8d05b8SZbigniew Bodek } 21409b8d05b8SZbigniew Bodek 2141fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) 21429b8d05b8SZbigniew Bodek if_link_state_change(adapter->ifp, LINK_STATE_UP); 21439b8d05b8SZbigniew Bodek 21449b8d05b8SZbigniew Bodek rc = ena_up_complete(adapter); 21453f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 21469b8d05b8SZbigniew Bodek goto err_up_complete; 21479b8d05b8SZbigniew Bodek 21489b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_up, 1); 21499b8d05b8SZbigniew Bodek 21509b8d05b8SZbigniew Bodek ena_update_hwassist(adapter); 21519b8d05b8SZbigniew Bodek 21529b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, 21539b8d05b8SZbigniew Bodek IFF_DRV_OACTIVE); 21549b8d05b8SZbigniew Bodek 2155fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); 215693471047SZbigniew Bodek 215793471047SZbigniew Bodek ena_unmask_all_io_irqs(adapter); 21589b8d05b8SZbigniew Bodek 215978554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 216078554d0cSDawid Gorecki 21619b8d05b8SZbigniew Bodek return (0); 21629b8d05b8SZbigniew Bodek 21639b8d05b8SZbigniew Bodek err_up_complete: 21649b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 21659b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 21669b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 21679762a033SMarcin Wojtas err_create_queues_with_backoff: 21689b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 216977958fcdSMarcin Wojtas error: 217078554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 217178554d0cSDawid Gorecki 21729b8d05b8SZbigniew Bodek return (rc); 21739b8d05b8SZbigniew Bodek } 21749b8d05b8SZbigniew Bodek 21759b8d05b8SZbigniew Bodek static uint64_t 21769b8d05b8SZbigniew Bodek ena_get_counter(if_t ifp, ift_counter cnt) 21779b8d05b8SZbigniew Bodek { 21789b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 21799b8d05b8SZbigniew Bodek struct ena_hw_stats *stats; 21809b8d05b8SZbigniew Bodek 
21819b8d05b8SZbigniew Bodek adapter = if_getsoftc(ifp); 21829b8d05b8SZbigniew Bodek stats = &adapter->hw_stats; 21839b8d05b8SZbigniew Bodek 21849b8d05b8SZbigniew Bodek switch (cnt) { 21859b8d05b8SZbigniew Bodek case IFCOUNTER_IPACKETS: 218630217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_packets)); 21879b8d05b8SZbigniew Bodek case IFCOUNTER_OPACKETS: 218830217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_packets)); 21899b8d05b8SZbigniew Bodek case IFCOUNTER_IBYTES: 219030217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_bytes)); 21919b8d05b8SZbigniew Bodek case IFCOUNTER_OBYTES: 219230217e2dSMarcin Wojtas return (counter_u64_fetch(stats->tx_bytes)); 21939b8d05b8SZbigniew Bodek case IFCOUNTER_IQDROPS: 219430217e2dSMarcin Wojtas return (counter_u64_fetch(stats->rx_drops)); 21956c84cec3SMarcin Wojtas case IFCOUNTER_OQDROPS: 21966c84cec3SMarcin Wojtas return (counter_u64_fetch(stats->tx_drops)); 21979b8d05b8SZbigniew Bodek default: 21989b8d05b8SZbigniew Bodek return (if_get_counter_default(ifp, cnt)); 21999b8d05b8SZbigniew Bodek } 22009b8d05b8SZbigniew Bodek } 22019b8d05b8SZbigniew Bodek 22029b8d05b8SZbigniew Bodek static int 22039b8d05b8SZbigniew Bodek ena_media_change(if_t ifp) 22049b8d05b8SZbigniew Bodek { 22059b8d05b8SZbigniew Bodek /* Media Change is not supported by firmware */ 22069b8d05b8SZbigniew Bodek return (0); 22079b8d05b8SZbigniew Bodek } 22089b8d05b8SZbigniew Bodek 22099b8d05b8SZbigniew Bodek static void 22109b8d05b8SZbigniew Bodek ena_media_status(if_t ifp, struct ifmediareq *ifmr) 22119b8d05b8SZbigniew Bodek { 22129b8d05b8SZbigniew Bodek struct ena_adapter *adapter = if_getsoftc(ifp); 22133fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, "Media status update\n"); 22149b8d05b8SZbigniew Bodek 221507aff471SArtur Rojek ENA_LOCK_LOCK(); 22169b8d05b8SZbigniew Bodek 22179b8d05b8SZbigniew Bodek ifmr->ifm_status = IFM_AVALID; 22189b8d05b8SZbigniew Bodek ifmr->ifm_active = IFM_ETHER; 22199b8d05b8SZbigniew Bodek 2220fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) { 222107aff471SArtur Rojek ENA_LOCK_UNLOCK(); 22223fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "Link is down\n"); 22239b8d05b8SZbigniew Bodek return; 22249b8d05b8SZbigniew Bodek } 22259b8d05b8SZbigniew Bodek 22269b8d05b8SZbigniew Bodek ifmr->ifm_status |= IFM_ACTIVE; 2227b8ca5dbeSMarcin Wojtas ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX; 22289b8d05b8SZbigniew Bodek 222907aff471SArtur Rojek ENA_LOCK_UNLOCK(); 22309b8d05b8SZbigniew Bodek } 22319b8d05b8SZbigniew Bodek 22329b8d05b8SZbigniew Bodek static void 22339b8d05b8SZbigniew Bodek ena_init(void *arg) 22349b8d05b8SZbigniew Bodek { 22359b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)arg; 22369b8d05b8SZbigniew Bodek 2237fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { 223807aff471SArtur Rojek ENA_LOCK_LOCK(); 22399b8d05b8SZbigniew Bodek ena_up(adapter); 224007aff471SArtur Rojek ENA_LOCK_UNLOCK(); 22413d3a90f9SZbigniew Bodek } 22429b8d05b8SZbigniew Bodek } 22439b8d05b8SZbigniew Bodek 22449b8d05b8SZbigniew Bodek static int 22459b8d05b8SZbigniew Bodek ena_ioctl(if_t ifp, u_long command, caddr_t data) 22469b8d05b8SZbigniew Bodek { 22479b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 22489b8d05b8SZbigniew Bodek struct ifreq *ifr; 22499b8d05b8SZbigniew Bodek int rc; 22509b8d05b8SZbigniew Bodek 22519b8d05b8SZbigniew Bodek adapter = ifp->if_softc; 22529b8d05b8SZbigniew Bodek ifr = (struct ifreq *)data; 22539b8d05b8SZbigniew Bodek 22549b8d05b8SZbigniew Bodek /* 22559b8d05b8SZbigniew 
Bodek * Acquire the lock to prevent the up and down routines from running in parallel. 22569b8d05b8SZbigniew Bodek */ 22579b8d05b8SZbigniew Bodek rc = 0; 22589b8d05b8SZbigniew Bodek switch (command) { 22599b8d05b8SZbigniew Bodek case SIOCSIFMTU: 2260dbf2eb54SMarcin Wojtas if (ifp->if_mtu == ifr->ifr_mtu) 2261dbf2eb54SMarcin Wojtas break; 226207aff471SArtur Rojek ENA_LOCK_LOCK(); 22639b8d05b8SZbigniew Bodek ena_down(adapter); 22649b8d05b8SZbigniew Bodek 22659b8d05b8SZbigniew Bodek ena_change_mtu(ifp, ifr->ifr_mtu); 22669b8d05b8SZbigniew Bodek 22679b8d05b8SZbigniew Bodek rc = ena_up(adapter); 226807aff471SArtur Rojek ENA_LOCK_UNLOCK(); 22699b8d05b8SZbigniew Bodek break; 22709b8d05b8SZbigniew Bodek 22719b8d05b8SZbigniew Bodek case SIOCSIFFLAGS: 22720bdffe59SMarcin Wojtas if ((ifp->if_flags & IFF_UP) != 0) { 22730bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 22740bdffe59SMarcin Wojtas if ((ifp->if_flags & (IFF_PROMISC | 22750bdffe59SMarcin Wojtas IFF_ALLMULTI)) != 0) { 22763fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, 22779b8d05b8SZbigniew Bodek "ioctl promisc/allmulti\n"); 22789b8d05b8SZbigniew Bodek } 22799b8d05b8SZbigniew Bodek } else { 228007aff471SArtur Rojek ENA_LOCK_LOCK(); 22819b8d05b8SZbigniew Bodek rc = ena_up(adapter); 228207aff471SArtur Rojek ENA_LOCK_UNLOCK(); 22839b8d05b8SZbigniew Bodek } 22849b8d05b8SZbigniew Bodek } else { 22850bdffe59SMarcin Wojtas if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 228607aff471SArtur Rojek ENA_LOCK_LOCK(); 22879b8d05b8SZbigniew Bodek ena_down(adapter); 228807aff471SArtur Rojek ENA_LOCK_UNLOCK(); 2289e67c6554SZbigniew Bodek } 22909b8d05b8SZbigniew Bodek } 22919b8d05b8SZbigniew Bodek break; 22929b8d05b8SZbigniew Bodek 22939b8d05b8SZbigniew Bodek case SIOCADDMULTI: 22949b8d05b8SZbigniew Bodek case SIOCDELMULTI: 22959b8d05b8SZbigniew Bodek break; 22969b8d05b8SZbigniew Bodek 22979b8d05b8SZbigniew Bodek case SIOCSIFMEDIA: 22989b8d05b8SZbigniew Bodek case SIOCGIFMEDIA: 22999b8d05b8SZbigniew Bodek rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 23009b8d05b8SZbigniew Bodek break; 23019b8d05b8SZbigniew Bodek 23029b8d05b8SZbigniew Bodek case SIOCSIFCAP: 23039b8d05b8SZbigniew Bodek { 23049b8d05b8SZbigniew Bodek int reinit = 0; 23059b8d05b8SZbigniew Bodek 23069b8d05b8SZbigniew Bodek if (ifr->ifr_reqcap != ifp->if_capenable) { 23079b8d05b8SZbigniew Bodek ifp->if_capenable = ifr->ifr_reqcap; 23089b8d05b8SZbigniew Bodek reinit = 1; 23099b8d05b8SZbigniew Bodek } 23109b8d05b8SZbigniew Bodek 23110bdffe59SMarcin Wojtas if ((reinit != 0) && 23120bdffe59SMarcin Wojtas ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { 231307aff471SArtur Rojek ENA_LOCK_LOCK(); 23149b8d05b8SZbigniew Bodek ena_down(adapter); 23159b8d05b8SZbigniew Bodek rc = ena_up(adapter); 231607aff471SArtur Rojek ENA_LOCK_UNLOCK(); 23179b8d05b8SZbigniew Bodek } 23189b8d05b8SZbigniew Bodek } 23199b8d05b8SZbigniew Bodek 23209b8d05b8SZbigniew Bodek break; 23219b8d05b8SZbigniew Bodek default: 23229b8d05b8SZbigniew Bodek rc = ether_ioctl(ifp, command, data); 23239b8d05b8SZbigniew Bodek break; 23249b8d05b8SZbigniew Bodek } 23259b8d05b8SZbigniew Bodek 23269b8d05b8SZbigniew Bodek return (rc); 23279b8d05b8SZbigniew Bodek } 23289b8d05b8SZbigniew Bodek 23299b8d05b8SZbigniew Bodek static int 23309b8d05b8SZbigniew Bodek ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) 23319b8d05b8SZbigniew Bodek { 23329b8d05b8SZbigniew Bodek int caps = 0; 23339b8d05b8SZbigniew Bodek 23340bdffe59SMarcin Wojtas if ((feat->offload.tx & 23359b8d05b8SZbigniew Bodek
(ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 23369b8d05b8SZbigniew Bodek ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | 23370bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) 23389b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM; 23399b8d05b8SZbigniew Bodek 23400bdffe59SMarcin Wojtas if ((feat->offload.tx & 23419b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | 23420bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) 23439b8d05b8SZbigniew Bodek caps |= IFCAP_TXCSUM_IPV6; 23449b8d05b8SZbigniew Bodek 23450bdffe59SMarcin Wojtas if ((feat->offload.tx & 23460bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) 23479b8d05b8SZbigniew Bodek caps |= IFCAP_TSO4; 23489b8d05b8SZbigniew Bodek 23490bdffe59SMarcin Wojtas if ((feat->offload.tx & 23500bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) 23519b8d05b8SZbigniew Bodek caps |= IFCAP_TSO6; 23529b8d05b8SZbigniew Bodek 23530bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 23549b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | 23550bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) 23569b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM; 23579b8d05b8SZbigniew Bodek 23580bdffe59SMarcin Wojtas if ((feat->offload.rx_supported & 23590bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) 23609b8d05b8SZbigniew Bodek caps |= IFCAP_RXCSUM_IPV6; 23619b8d05b8SZbigniew Bodek 23629b8d05b8SZbigniew Bodek caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; 23639b8d05b8SZbigniew Bodek 23649b8d05b8SZbigniew Bodek return (caps); 23659b8d05b8SZbigniew Bodek } 23669b8d05b8SZbigniew Bodek 23679b8d05b8SZbigniew Bodek static void 23689b8d05b8SZbigniew Bodek ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) 23699b8d05b8SZbigniew Bodek { 23709b8d05b8SZbigniew Bodek 23719b8d05b8SZbigniew Bodek host_info->supported_network_features[0] = 23729b8d05b8SZbigniew Bodek (uint32_t)if_getcapabilities(ifp); 23739b8d05b8SZbigniew Bodek } 23749b8d05b8SZbigniew Bodek 23759b8d05b8SZbigniew Bodek static void 23769b8d05b8SZbigniew Bodek ena_update_hwassist(struct ena_adapter *adapter) 23779b8d05b8SZbigniew Bodek { 23789b8d05b8SZbigniew Bodek if_t ifp = adapter->ifp; 23799b8d05b8SZbigniew Bodek uint32_t feat = adapter->tx_offload_cap; 23809b8d05b8SZbigniew Bodek int cap = if_getcapenable(ifp); 23819b8d05b8SZbigniew Bodek int flags = 0; 23829b8d05b8SZbigniew Bodek 23839b8d05b8SZbigniew Bodek if_clearhwassist(ifp); 23849b8d05b8SZbigniew Bodek 23850bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM) != 0) { 23860bdffe59SMarcin Wojtas if ((feat & 23870bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) 23889b8d05b8SZbigniew Bodek flags |= CSUM_IP; 23890bdffe59SMarcin Wojtas if ((feat & 23909b8d05b8SZbigniew Bodek (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | 23910bdffe59SMarcin Wojtas ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) 23929b8d05b8SZbigniew Bodek flags |= CSUM_IP_UDP | CSUM_IP_TCP; 23939b8d05b8SZbigniew Bodek } 23949b8d05b8SZbigniew Bodek 23950bdffe59SMarcin Wojtas if ((cap & IFCAP_TXCSUM_IPV6) != 0) 23969b8d05b8SZbigniew Bodek flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; 23979b8d05b8SZbigniew Bodek 23980bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO4) != 0) 23999b8d05b8SZbigniew Bodek flags |= CSUM_IP_TSO; 24009b8d05b8SZbigniew Bodek 24010bdffe59SMarcin Wojtas if ((cap & IFCAP_TSO6) != 
0) 24029b8d05b8SZbigniew Bodek flags |= CSUM_IP6_TSO; 24039b8d05b8SZbigniew Bodek 24049b8d05b8SZbigniew Bodek if_sethwassistbits(ifp, flags, 0); 24059b8d05b8SZbigniew Bodek } 24069b8d05b8SZbigniew Bodek 24079b8d05b8SZbigniew Bodek static int 24089b8d05b8SZbigniew Bodek ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, 24099b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *feat) 24109b8d05b8SZbigniew Bodek { 24119b8d05b8SZbigniew Bodek if_t ifp; 24129b8d05b8SZbigniew Bodek int caps = 0; 24139b8d05b8SZbigniew Bodek 24149b8d05b8SZbigniew Bodek ifp = adapter->ifp = if_gethandle(IFT_ETHER); 24153f9ed7abSMarcin Wojtas if (unlikely(ifp == NULL)) { 24163fc5d816SMarcin Wojtas ena_log(pdev, ERR, "can not allocate ifnet structure\n"); 24179b8d05b8SZbigniew Bodek return (ENXIO); 24189b8d05b8SZbigniew Bodek } 24199b8d05b8SZbigniew Bodek if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); 24209b8d05b8SZbigniew Bodek if_setdev(ifp, pdev); 24219b8d05b8SZbigniew Bodek if_setsoftc(ifp, adapter); 24229b8d05b8SZbigniew Bodek 242392dc69a7SMarcin Wojtas if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | 242492dc69a7SMarcin Wojtas IFF_KNOWSEPOCH); 24259b8d05b8SZbigniew Bodek if_setinitfn(ifp, ena_init); 24269b8d05b8SZbigniew Bodek if_settransmitfn(ifp, ena_mq_start); 24279b8d05b8SZbigniew Bodek if_setqflushfn(ifp, ena_qflush); 24289b8d05b8SZbigniew Bodek if_setioctlfn(ifp, ena_ioctl); 24299b8d05b8SZbigniew Bodek if_setgetcounterfn(ifp, ena_get_counter); 24309b8d05b8SZbigniew Bodek 24319762a033SMarcin Wojtas if_setsendqlen(ifp, adapter->requested_tx_ring_size); 24329b8d05b8SZbigniew Bodek if_setsendqready(ifp); 24339b8d05b8SZbigniew Bodek if_setmtu(ifp, ETHERMTU); 24349b8d05b8SZbigniew Bodek if_setbaudrate(ifp, 0); 24359b8d05b8SZbigniew Bodek /* Zeroize capabilities... */ 24369b8d05b8SZbigniew Bodek if_setcapabilities(ifp, 0); 24379b8d05b8SZbigniew Bodek if_setcapenable(ifp, 0); 24389b8d05b8SZbigniew Bodek /* check hardware support */ 24399b8d05b8SZbigniew Bodek caps = ena_get_dev_offloads(feat); 24409b8d05b8SZbigniew Bodek /* ... 
and set them */ 24419b8d05b8SZbigniew Bodek if_setcapabilitiesbit(ifp, caps, 0); 24429b8d05b8SZbigniew Bodek 24439b8d05b8SZbigniew Bodek /* TSO parameters */ 24448a573700SZbigniew Bodek ifp->if_hw_tsomax = ENA_TSO_MAXSIZE - 24458a573700SZbigniew Bodek (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 24468a573700SZbigniew Bodek ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1; 24478a573700SZbigniew Bodek ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE; 24489b8d05b8SZbigniew Bodek 24499b8d05b8SZbigniew Bodek if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 24509b8d05b8SZbigniew Bodek if_setcapenable(ifp, if_getcapabilities(ifp)); 24519b8d05b8SZbigniew Bodek 24529b8d05b8SZbigniew Bodek /* 24539b8d05b8SZbigniew Bodek * Specify the media types supported by this adapter and register 24549b8d05b8SZbigniew Bodek * callbacks to update media and link information 24559b8d05b8SZbigniew Bodek */ 24569b8d05b8SZbigniew Bodek ifmedia_init(&adapter->media, IFM_IMASK, 24579b8d05b8SZbigniew Bodek ena_media_change, ena_media_status); 24589b8d05b8SZbigniew Bodek ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 24599b8d05b8SZbigniew Bodek ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 24609b8d05b8SZbigniew Bodek 24619b8d05b8SZbigniew Bodek ether_ifattach(ifp, adapter->mac_addr); 24629b8d05b8SZbigniew Bodek 24639b8d05b8SZbigniew Bodek return (0); 24649b8d05b8SZbigniew Bodek } 24659b8d05b8SZbigniew Bodek 246638c7b965SMarcin Wojtas void 24679b8d05b8SZbigniew Bodek ena_down(struct ena_adapter *adapter) 24689b8d05b8SZbigniew Bodek { 2469a195fab0SMarcin Wojtas int rc; 24709b8d05b8SZbigniew Bodek 247107aff471SArtur Rojek ENA_LOCK_ASSERT(); 2472cb98c439SArtur Rojek 2473579d23aaSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 2474579d23aaSMarcin Wojtas return; 2475579d23aaSMarcin Wojtas 247678554d0cSDawid Gorecki /* Drain timer service to avoid admin queue race condition. 
*/ 247778554d0cSDawid Gorecki ENA_TIMER_DRAIN(adapter); 24789b8d05b8SZbigniew Bodek 247978554d0cSDawid Gorecki ena_log(adapter->pdev, INFO, "device is going DOWN\n"); 24809b8d05b8SZbigniew Bodek 2481fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); 24829b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, 24839b8d05b8SZbigniew Bodek IFF_DRV_RUNNING); 24849b8d05b8SZbigniew Bodek 24859b8d05b8SZbigniew Bodek ena_free_io_irq(adapter); 24869b8d05b8SZbigniew Bodek 2487fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { 2488a195fab0SMarcin Wojtas rc = ena_com_dev_reset(adapter->ena_dev, 2489a195fab0SMarcin Wojtas adapter->reset_reason); 24903f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 24913fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 2492a195fab0SMarcin Wojtas "Device reset failed\n"); 2493a195fab0SMarcin Wojtas } 2494a195fab0SMarcin Wojtas 24959b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(adapter); 24969b8d05b8SZbigniew Bodek 24979b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(adapter); 24989b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(adapter); 24999b8d05b8SZbigniew Bodek ena_free_all_tx_resources(adapter); 25009b8d05b8SZbigniew Bodek ena_free_all_rx_resources(adapter); 25019b8d05b8SZbigniew Bodek 25029b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.interface_down, 1); 250378554d0cSDawid Gorecki 250478554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 25059b8d05b8SZbigniew Bodek } 25069b8d05b8SZbigniew Bodek 25077d8c4feeSMarcin Wojtas static uint32_t 25087d8c4feeSMarcin Wojtas ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev, 25099b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx) 25109b8d05b8SZbigniew Bodek { 25117d8c4feeSMarcin Wojtas uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 25129b8d05b8SZbigniew Bodek 25136064f289SMarcin Wojtas /* Regular queues capabilities */ 25146064f289SMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 25156064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 25166064f289SMarcin Wojtas &get_feat_ctx->max_queue_ext.max_queue_ext; 25174fa9e02dSMarcin Wojtas io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, 25184fa9e02dSMarcin Wojtas max_queue_ext->max_rx_cq_num); 25196064f289SMarcin Wojtas 25204fa9e02dSMarcin Wojtas io_tx_sq_num = max_queue_ext->max_tx_sq_num; 25214fa9e02dSMarcin Wojtas io_tx_cq_num = max_queue_ext->max_tx_cq_num; 25226064f289SMarcin Wojtas } else { 25236064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 25246064f289SMarcin Wojtas &get_feat_ctx->max_queues; 25254fa9e02dSMarcin Wojtas io_tx_sq_num = max_queues->max_sq_num; 25264fa9e02dSMarcin Wojtas io_tx_cq_num = max_queues->max_cq_num; 25274fa9e02dSMarcin Wojtas io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); 25286064f289SMarcin Wojtas } 25299b8d05b8SZbigniew Bodek 25304fa9e02dSMarcin Wojtas /* In case of LLQ use the llq fields for the tx SQ/CQ */ 25314fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 25324fa9e02dSMarcin Wojtas io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 25334fa9e02dSMarcin Wojtas 25347d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); 25357d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num); 25367d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num); 25377d8c4feeSMarcin Wojtas max_num_io_queues = 
min_t(uint32_t, max_num_io_queues, io_tx_cq_num); 2538609e6f6dSGordon Bergling /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */ 25397d8c4feeSMarcin Wojtas max_num_io_queues = min_t(uint32_t, max_num_io_queues, 25407d8c4feeSMarcin Wojtas pci_msix_count(pdev) - 1); 25416d1ef2abSArtur Rojek #ifdef RSS 25426d1ef2abSArtur Rojek max_num_io_queues = min_t(uint32_t, max_num_io_queues, 25436d1ef2abSArtur Rojek rss_getnumbuckets()); 25446d1ef2abSArtur Rojek #endif 25459b8d05b8SZbigniew Bodek 25467d8c4feeSMarcin Wojtas return (max_num_io_queues); 25479b8d05b8SZbigniew Bodek } 25489b8d05b8SZbigniew Bodek 25490bdffe59SMarcin Wojtas static int 25503fc5d816SMarcin Wojtas ena_enable_wc(device_t pdev, struct resource *res) 25514fa9e02dSMarcin Wojtas { 2552472d4784SMarcin Wojtas #if defined(__i386) || defined(__amd64) || defined(__aarch64__) 25534fa9e02dSMarcin Wojtas vm_offset_t va; 25544fa9e02dSMarcin Wojtas vm_size_t len; 25554fa9e02dSMarcin Wojtas int rc; 25564fa9e02dSMarcin Wojtas 25574fa9e02dSMarcin Wojtas va = (vm_offset_t)rman_get_virtual(res); 25584fa9e02dSMarcin Wojtas len = rman_get_size(res); 25594fa9e02dSMarcin Wojtas /* Enable write combining */ 2560472d4784SMarcin Wojtas rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING); 25614fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 25623fc5d816SMarcin Wojtas ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc); 25634fa9e02dSMarcin Wojtas return (rc); 25644fa9e02dSMarcin Wojtas } 25654fa9e02dSMarcin Wojtas 25664fa9e02dSMarcin Wojtas return (0); 25674fa9e02dSMarcin Wojtas #endif 25684fa9e02dSMarcin Wojtas return (EOPNOTSUPP); 25694fa9e02dSMarcin Wojtas } 25704fa9e02dSMarcin Wojtas 25714fa9e02dSMarcin Wojtas static int 25724fa9e02dSMarcin Wojtas ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, 25734fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq, 25744fa9e02dSMarcin Wojtas struct ena_llq_configurations *llq_default_configurations) 25754fa9e02dSMarcin Wojtas { 257690232d18SDawid Gorecki int rc; 25774fa9e02dSMarcin Wojtas uint32_t llq_feature_mask; 25784fa9e02dSMarcin Wojtas 25794fa9e02dSMarcin Wojtas llq_feature_mask = 1 << ENA_ADMIN_LLQ; 25804fa9e02dSMarcin Wojtas if (!(ena_dev->supported_features & llq_feature_mask)) { 25813fc5d816SMarcin Wojtas ena_log(pdev, WARN, 25824fa9e02dSMarcin Wojtas "LLQ is not supported. Fallback to host mode policy.\n"); 25834fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 25844fa9e02dSMarcin Wojtas return (0); 25854fa9e02dSMarcin Wojtas } 25864fa9e02dSMarcin Wojtas 258790232d18SDawid Gorecki if (ena_dev->mem_bar == NULL) { 258890232d18SDawid Gorecki ena_log(pdev, WARN, 258990232d18SDawid Gorecki "LLQ is advertised as supported but device doesn't expose mem bar.\n"); 259090232d18SDawid Gorecki ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 259190232d18SDawid Gorecki return (0); 259290232d18SDawid Gorecki } 259390232d18SDawid Gorecki 25944fa9e02dSMarcin Wojtas rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 25954fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 25963fc5d816SMarcin Wojtas ena_log(pdev, WARN, "Failed to configure the device mode. 
" 25974fa9e02dSMarcin Wojtas "Fallback to host mode policy.\n"); 25984fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 259990232d18SDawid Gorecki } 260090232d18SDawid Gorecki 26014fa9e02dSMarcin Wojtas return (0); 26024fa9e02dSMarcin Wojtas } 26034fa9e02dSMarcin Wojtas 260490232d18SDawid Gorecki static int 260590232d18SDawid Gorecki ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev) 260690232d18SDawid Gorecki { 260790232d18SDawid Gorecki struct ena_adapter *adapter = device_get_softc(pdev); 260890232d18SDawid Gorecki int rc, rid; 26094fa9e02dSMarcin Wojtas 26104fa9e02dSMarcin Wojtas /* Try to allocate resources for LLQ bar */ 26114fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_MEM_BAR); 26124fa9e02dSMarcin Wojtas adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 26134fa9e02dSMarcin Wojtas &rid, RF_ACTIVE); 26144fa9e02dSMarcin Wojtas if (unlikely(adapter->memory == NULL)) { 26153fc5d816SMarcin Wojtas ena_log(pdev, WARN, "unable to allocate LLQ bar resource. " 26164fa9e02dSMarcin Wojtas "Fallback to host mode policy.\n"); 26174fa9e02dSMarcin Wojtas ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 26184fa9e02dSMarcin Wojtas return (0); 26194fa9e02dSMarcin Wojtas } 26204fa9e02dSMarcin Wojtas 26214fa9e02dSMarcin Wojtas /* Enable write combining for better LLQ performance */ 26223fc5d816SMarcin Wojtas rc = ena_enable_wc(adapter->pdev, adapter->memory); 26234fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 26243fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to enable write combining.\n"); 26254fa9e02dSMarcin Wojtas return (rc); 26264fa9e02dSMarcin Wojtas } 26274fa9e02dSMarcin Wojtas 26284fa9e02dSMarcin Wojtas /* 26294fa9e02dSMarcin Wojtas * Save virtual address of the device's memory region 26304fa9e02dSMarcin Wojtas * for the ena_com layer. 
26314fa9e02dSMarcin Wojtas */ 26324fa9e02dSMarcin Wojtas ena_dev->mem_bar = rman_get_virtual(adapter->memory); 26334fa9e02dSMarcin Wojtas 26344fa9e02dSMarcin Wojtas return (0); 26354fa9e02dSMarcin Wojtas } 26364fa9e02dSMarcin Wojtas 26374fa9e02dSMarcin Wojtas static inline 2638beaadec9SMarcin Wojtas void set_default_llq_configurations(struct ena_llq_configurations *llq_config, 2639beaadec9SMarcin Wojtas struct ena_admin_feature_llq_desc *llq) 26404fa9e02dSMarcin Wojtas { 2641beaadec9SMarcin Wojtas 26424fa9e02dSMarcin Wojtas llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 26434fa9e02dSMarcin Wojtas llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 26444fa9e02dSMarcin Wojtas llq_config->llq_num_decs_before_header = 26454fa9e02dSMarcin Wojtas ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 2646beaadec9SMarcin Wojtas if ((llq->entry_size_ctrl_supported & 2647beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 && 2648beaadec9SMarcin Wojtas ena_force_large_llq_header) { 2649beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size = 2650beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_256B; 2651beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size_value = 256; 2652beaadec9SMarcin Wojtas } else { 2653beaadec9SMarcin Wojtas llq_config->llq_ring_entry_size = 2654beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_128B; 26554fa9e02dSMarcin Wojtas llq_config->llq_ring_entry_size_value = 128; 26564fa9e02dSMarcin Wojtas } 2657beaadec9SMarcin Wojtas } 26584fa9e02dSMarcin Wojtas 26594fa9e02dSMarcin Wojtas static int 26607d8c4feeSMarcin Wojtas ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) 26619b8d05b8SZbigniew Bodek { 26624fa9e02dSMarcin Wojtas struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 26634fa9e02dSMarcin Wojtas struct ena_com_dev *ena_dev = ctx->ena_dev; 26646064f289SMarcin Wojtas uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; 26657d8c4feeSMarcin Wojtas uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE; 26667d8c4feeSMarcin Wojtas uint32_t max_tx_queue_size; 26677d8c4feeSMarcin Wojtas uint32_t max_rx_queue_size; 26689b8d05b8SZbigniew Bodek 26694fa9e02dSMarcin Wojtas if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 26706064f289SMarcin Wojtas struct ena_admin_queue_ext_feature_fields *max_queue_ext = 26716064f289SMarcin Wojtas &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 26727d8c4feeSMarcin Wojtas max_rx_queue_size = min_t(uint32_t, 26737d8c4feeSMarcin Wojtas max_queue_ext->max_rx_cq_depth, 26746064f289SMarcin Wojtas max_queue_ext->max_rx_sq_depth); 26757d8c4feeSMarcin Wojtas max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 26764fa9e02dSMarcin Wojtas 26774fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 26784fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 26797d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 26804fa9e02dSMarcin Wojtas llq->max_llq_depth); 26814fa9e02dSMarcin Wojtas else 26827d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 26836064f289SMarcin Wojtas max_queue_ext->max_tx_sq_depth); 26844fa9e02dSMarcin Wojtas 26856064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 26866064f289SMarcin Wojtas max_queue_ext->max_per_packet_tx_descs); 26877d8c4feeSMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 26887d8c4feeSMarcin Wojtas max_queue_ext->max_per_packet_rx_descs); 26896064f289SMarcin Wojtas } else { 26906064f289SMarcin Wojtas struct ena_admin_queue_feature_desc *max_queues = 26916064f289SMarcin 
Wojtas &ctx->get_feat_ctx->max_queues; 26927d8c4feeSMarcin Wojtas max_rx_queue_size = min_t(uint32_t, 26937d8c4feeSMarcin Wojtas max_queues->max_cq_depth, 26946064f289SMarcin Wojtas max_queues->max_sq_depth); 26957d8c4feeSMarcin Wojtas max_tx_queue_size = max_queues->max_cq_depth; 26964fa9e02dSMarcin Wojtas 26974fa9e02dSMarcin Wojtas if (ena_dev->tx_mem_queue_type == 26984fa9e02dSMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) 26997d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 27004fa9e02dSMarcin Wojtas llq->max_llq_depth); 27014fa9e02dSMarcin Wojtas else 27027d8c4feeSMarcin Wojtas max_tx_queue_size = min_t(uint32_t, max_tx_queue_size, 27034fa9e02dSMarcin Wojtas max_queues->max_sq_depth); 27044fa9e02dSMarcin Wojtas 27056064f289SMarcin Wojtas ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 27067d8c4feeSMarcin Wojtas max_queues->max_packet_tx_descs); 27077d8c4feeSMarcin Wojtas ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, 27086064f289SMarcin Wojtas max_queues->max_packet_rx_descs); 27096064f289SMarcin Wojtas } 27109b8d05b8SZbigniew Bodek 27119b8d05b8SZbigniew Bodek /* round down to the nearest power of 2 */ 27127d8c4feeSMarcin Wojtas max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1); 27137d8c4feeSMarcin Wojtas max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1); 27146064f289SMarcin Wojtas 2715beaadec9SMarcin Wojtas /* 2716beaadec9SMarcin Wojtas * When forcing large headers, we multiply the entry size by 2, 2717beaadec9SMarcin Wojtas * and therefore divide the queue size by 2, leaving the amount 2718beaadec9SMarcin Wojtas * of memory used by the queues unchanged. 2719beaadec9SMarcin Wojtas */ 2720beaadec9SMarcin Wojtas if (ena_force_large_llq_header) { 2721beaadec9SMarcin Wojtas if ((llq->entry_size_ctrl_supported & 2722beaadec9SMarcin Wojtas ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 && 2723beaadec9SMarcin Wojtas ena_dev->tx_mem_queue_type == 2724beaadec9SMarcin Wojtas ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2725beaadec9SMarcin Wojtas max_tx_queue_size /= 2; 27263fc5d816SMarcin Wojtas ena_log(ctx->pdev, INFO, 2727beaadec9SMarcin Wojtas "Forcing large headers and decreasing maximum Tx queue size to %d\n", 2728beaadec9SMarcin Wojtas max_tx_queue_size); 2729beaadec9SMarcin Wojtas } else { 27303fc5d816SMarcin Wojtas ena_log(ctx->pdev, WARN, 2731beaadec9SMarcin Wojtas "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 2732beaadec9SMarcin Wojtas } 2733beaadec9SMarcin Wojtas } 2734beaadec9SMarcin Wojtas 27357d8c4feeSMarcin Wojtas tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, 27367d8c4feeSMarcin Wojtas max_tx_queue_size); 27377d8c4feeSMarcin Wojtas rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, 27387d8c4feeSMarcin Wojtas max_rx_queue_size); 27399b8d05b8SZbigniew Bodek 27407d8c4feeSMarcin Wojtas tx_queue_size = 1 << (flsl(tx_queue_size) - 1); 27417d8c4feeSMarcin Wojtas rx_queue_size = 1 << (flsl(rx_queue_size) - 1); 27427d8c4feeSMarcin Wojtas 27437d8c4feeSMarcin Wojtas ctx->max_tx_queue_size = max_tx_queue_size; 27447d8c4feeSMarcin Wojtas ctx->max_rx_queue_size = max_rx_queue_size; 27456064f289SMarcin Wojtas ctx->tx_queue_size = tx_queue_size; 27467d8c4feeSMarcin Wojtas ctx->rx_queue_size = rx_queue_size; 27476064f289SMarcin Wojtas 27486064f289SMarcin Wojtas return (0); 27499b8d05b8SZbigniew Bodek } 27509b8d05b8SZbigniew Bodek 27510bdffe59SMarcin Wojtas static void 275246021271SMarcin Wojtas ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) 27539b8d05b8SZbigniew Bodek { 
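	/*
	 * Fill the ena_admin_host_info structure (OS type, kernel and driver
	 * versions, PCI bus/device/function, number of CPUs and the supported
	 * driver features) and push it to the device with
	 * ena_com_set_host_attributes(). This is best effort: the function
	 * returns void and simply releases the host info buffer on failure.
	 */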
27549b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info; 275546021271SMarcin Wojtas uintptr_t rid; 27569b8d05b8SZbigniew Bodek int rc; 27579b8d05b8SZbigniew Bodek 27589b8d05b8SZbigniew Bodek /* Allocate only the host info */ 27599b8d05b8SZbigniew Bodek rc = ena_com_allocate_host_info(ena_dev); 27603f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 27613fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot allocate host info\n"); 27629b8d05b8SZbigniew Bodek return; 27639b8d05b8SZbigniew Bodek } 27649b8d05b8SZbigniew Bodek 27659b8d05b8SZbigniew Bodek host_info = ena_dev->host_attr.host_info; 27669b8d05b8SZbigniew Bodek 276746021271SMarcin Wojtas if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) 276846021271SMarcin Wojtas host_info->bdf = rid; 27699b8d05b8SZbigniew Bodek host_info->os_type = ENA_ADMIN_OS_FREEBSD; 27709b8d05b8SZbigniew Bodek host_info->kernel_ver = osreldate; 27719b8d05b8SZbigniew Bodek 27729b8d05b8SZbigniew Bodek sprintf(host_info->kernel_ver_str, "%d", osreldate); 27739b8d05b8SZbigniew Bodek host_info->os_dist = 0; 27749b8d05b8SZbigniew Bodek strncpy(host_info->os_dist_str, osrelease, 27759b8d05b8SZbigniew Bodek sizeof(host_info->os_dist_str) - 1); 27769b8d05b8SZbigniew Bodek 27779b8d05b8SZbigniew Bodek host_info->driver_version = 27789b8d05b8SZbigniew Bodek (DRV_MODULE_VER_MAJOR) | 27799b8d05b8SZbigniew Bodek (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 27809b8d05b8SZbigniew Bodek (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 27818ece6b25SMarcin Wojtas host_info->num_cpus = mp_ncpus; 2782c7444389SMarcin Wojtas host_info->driver_supported_features = 27836d1ef2abSArtur Rojek ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 27846d1ef2abSArtur Rojek ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 27859b8d05b8SZbigniew Bodek 27869b8d05b8SZbigniew Bodek rc = ena_com_set_host_attributes(ena_dev); 27873f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 2788a195fab0SMarcin Wojtas if (rc == EOPNOTSUPP) 27893fc5d816SMarcin Wojtas ena_log(dev, WARN, "Cannot set host attributes\n"); 27909b8d05b8SZbigniew Bodek else 27913fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot set host attributes\n"); 27929b8d05b8SZbigniew Bodek 27939b8d05b8SZbigniew Bodek goto err; 27949b8d05b8SZbigniew Bodek } 27959b8d05b8SZbigniew Bodek 27969b8d05b8SZbigniew Bodek return; 27979b8d05b8SZbigniew Bodek 27989b8d05b8SZbigniew Bodek err: 27999b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 28009b8d05b8SZbigniew Bodek } 28019b8d05b8SZbigniew Bodek 28029b8d05b8SZbigniew Bodek static int 28039b8d05b8SZbigniew Bodek ena_device_init(struct ena_adapter *adapter, device_t pdev, 28049b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) 28059b8d05b8SZbigniew Bodek { 28069b8d05b8SZbigniew Bodek struct ena_com_dev* ena_dev = adapter->ena_dev; 28079b8d05b8SZbigniew Bodek bool readless_supported; 28089b8d05b8SZbigniew Bodek uint32_t aenq_groups; 28099b8d05b8SZbigniew Bodek int dma_width; 28109b8d05b8SZbigniew Bodek int rc; 28119b8d05b8SZbigniew Bodek 28129b8d05b8SZbigniew Bodek rc = ena_com_mmio_reg_read_request_init(ena_dev); 28133f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28143fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to init mmio read less\n"); 28150bdffe59SMarcin Wojtas return (rc); 28169b8d05b8SZbigniew Bodek } 28179b8d05b8SZbigniew Bodek 28189b8d05b8SZbigniew Bodek /* 28199b8d05b8SZbigniew Bodek * The PCIe configuration space revision id indicate if mmio reg 28209b8d05b8SZbigniew Bodek * read is disabled 28219b8d05b8SZbigniew Bodek */ 
28229b8d05b8SZbigniew Bodek readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); 28239b8d05b8SZbigniew Bodek ena_com_set_mmio_read_mode(ena_dev, readless_supported); 28249b8d05b8SZbigniew Bodek 2825a195fab0SMarcin Wojtas rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 28263f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28273fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Can not reset device\n"); 28289b8d05b8SZbigniew Bodek goto err_mmio_read_less; 28299b8d05b8SZbigniew Bodek } 28309b8d05b8SZbigniew Bodek 28319b8d05b8SZbigniew Bodek rc = ena_com_validate_version(ena_dev); 28323f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28333fc5d816SMarcin Wojtas ena_log(pdev, ERR, "device version is too low\n"); 28349b8d05b8SZbigniew Bodek goto err_mmio_read_less; 28359b8d05b8SZbigniew Bodek } 28369b8d05b8SZbigniew Bodek 28379b8d05b8SZbigniew Bodek dma_width = ena_com_get_dma_width(ena_dev); 28383f9ed7abSMarcin Wojtas if (unlikely(dma_width < 0)) { 28393fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Invalid dma width value %d", dma_width); 28409b8d05b8SZbigniew Bodek rc = dma_width; 28419b8d05b8SZbigniew Bodek goto err_mmio_read_less; 28429b8d05b8SZbigniew Bodek } 28439b8d05b8SZbigniew Bodek adapter->dma_width = dma_width; 28449b8d05b8SZbigniew Bodek 28459b8d05b8SZbigniew Bodek /* ENA admin level init */ 284667ec48bbSMarcin Wojtas rc = ena_com_admin_init(ena_dev, &aenq_handlers); 28473f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28483fc5d816SMarcin Wojtas ena_log(pdev, ERR, 28499b8d05b8SZbigniew Bodek "Can not initialize ena admin queue with device\n"); 28509b8d05b8SZbigniew Bodek goto err_mmio_read_less; 28519b8d05b8SZbigniew Bodek } 28529b8d05b8SZbigniew Bodek 28539b8d05b8SZbigniew Bodek /* 28549b8d05b8SZbigniew Bodek * To enable the msix interrupts the driver needs to know the number 28559b8d05b8SZbigniew Bodek * of queues. 
So the driver uses polling mode to retrieve this 28569b8d05b8SZbigniew Bodek * information 28579b8d05b8SZbigniew Bodek */ 28589b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, true); 28599b8d05b8SZbigniew Bodek 286046021271SMarcin Wojtas ena_config_host_info(ena_dev, pdev); 28619b8d05b8SZbigniew Bodek 28629b8d05b8SZbigniew Bodek /* Get Device Attributes */ 28639b8d05b8SZbigniew Bodek rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 28643f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28653fc5d816SMarcin Wojtas ena_log(pdev, ERR, 28669b8d05b8SZbigniew Bodek "Cannot get attribute for ena device rc: %d\n", rc); 28679b8d05b8SZbigniew Bodek goto err_admin_init; 28689b8d05b8SZbigniew Bodek } 28699b8d05b8SZbigniew Bodek 2870e6de9a83SMarcin Wojtas aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 2871e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_FATAL_ERROR) | 2872e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_WARNING) | 287340621d71SMarcin Wojtas BIT(ENA_ADMIN_NOTIFICATION) | 2874e6de9a83SMarcin Wojtas BIT(ENA_ADMIN_KEEP_ALIVE); 28759b8d05b8SZbigniew Bodek 28769b8d05b8SZbigniew Bodek aenq_groups &= get_feat_ctx->aenq.supported_groups; 28779b8d05b8SZbigniew Bodek rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 28783f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 28793fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc); 28809b8d05b8SZbigniew Bodek goto err_admin_init; 28819b8d05b8SZbigniew Bodek } 28829b8d05b8SZbigniew Bodek 28839b8d05b8SZbigniew Bodek *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 28849b8d05b8SZbigniew Bodek 28850bdffe59SMarcin Wojtas return (0); 28869b8d05b8SZbigniew Bodek 28879b8d05b8SZbigniew Bodek err_admin_init: 28889b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 28899b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 28909b8d05b8SZbigniew Bodek err_mmio_read_less: 28919b8d05b8SZbigniew Bodek ena_com_mmio_reg_read_request_destroy(ena_dev); 28929b8d05b8SZbigniew Bodek 28930bdffe59SMarcin Wojtas return (rc); 28949b8d05b8SZbigniew Bodek } 28959b8d05b8SZbigniew Bodek 2896aa9c3226SMarcin Wojtas static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) 28979b8d05b8SZbigniew Bodek { 28989b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 28999b8d05b8SZbigniew Bodek int rc; 29009b8d05b8SZbigniew Bodek 29019b8d05b8SZbigniew Bodek rc = ena_enable_msix(adapter); 29023f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29033fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n"); 29040bdffe59SMarcin Wojtas return (rc); 29059b8d05b8SZbigniew Bodek } 29069b8d05b8SZbigniew Bodek 29079b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(adapter); 29089b8d05b8SZbigniew Bodek 29099b8d05b8SZbigniew Bodek rc = ena_request_mgmnt_irq(adapter); 29103f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 29113fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n"); 29129b8d05b8SZbigniew Bodek goto err_disable_msix; 29139b8d05b8SZbigniew Bodek } 29149b8d05b8SZbigniew Bodek 29159b8d05b8SZbigniew Bodek ena_com_set_admin_polling_mode(ena_dev, false); 29169b8d05b8SZbigniew Bodek 29179b8d05b8SZbigniew Bodek ena_com_admin_aenq_enable(ena_dev); 29189b8d05b8SZbigniew Bodek 29190bdffe59SMarcin Wojtas return (0); 29209b8d05b8SZbigniew Bodek 29219b8d05b8SZbigniew Bodek err_disable_msix: 29229b8d05b8SZbigniew Bodek ena_disable_msix(adapter); 29239b8d05b8SZbigniew Bodek 29240bdffe59SMarcin Wojtas return (rc); 29259b8d05b8SZbigniew Bodek } 29269b8d05b8SZbigniew Bodek 
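/*
 * Keep-alive AENQ handling: the device periodically posts an
 * ENA_ADMIN_KEEP_ALIVE event whose descriptor also carries the current
 * Rx/Tx drop counters. The handler below copies those counters into
 * hw_stats and records a timestamp; check_for_missing_keep_alive()
 * later compares that timestamp against keep_alive_timeout from the
 * timer service and triggers a device reset when it expires.
 */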
29279b8d05b8SZbigniew Bodek /* Function called on ENA_ADMIN_KEEP_ALIVE event */ 29289b8d05b8SZbigniew Bodek static void ena_keep_alive_wd(void *adapter_data, 29299b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 29309b8d05b8SZbigniew Bodek { 29319b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 293230217e2dSMarcin Wojtas struct ena_admin_aenq_keep_alive_desc *desc; 29339b8d05b8SZbigniew Bodek sbintime_t stime; 293430217e2dSMarcin Wojtas uint64_t rx_drops; 29356c84cec3SMarcin Wojtas uint64_t tx_drops; 293630217e2dSMarcin Wojtas 293730217e2dSMarcin Wojtas desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 293830217e2dSMarcin Wojtas 293930217e2dSMarcin Wojtas rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 29406c84cec3SMarcin Wojtas tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low; 294130217e2dSMarcin Wojtas counter_u64_zero(adapter->hw_stats.rx_drops); 294230217e2dSMarcin Wojtas counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); 29436c84cec3SMarcin Wojtas counter_u64_zero(adapter->hw_stats.tx_drops); 29446c84cec3SMarcin Wojtas counter_u64_add(adapter->hw_stats.tx_drops, tx_drops); 29459b8d05b8SZbigniew Bodek 29469b8d05b8SZbigniew Bodek stime = getsbinuptime(); 29479b8d05b8SZbigniew Bodek atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); 29489b8d05b8SZbigniew Bodek } 29499b8d05b8SZbigniew Bodek 29509b8d05b8SZbigniew Bodek /* Check for keep alive expiration */ 29519b8d05b8SZbigniew Bodek static void check_for_missing_keep_alive(struct ena_adapter *adapter) 29529b8d05b8SZbigniew Bodek { 29539b8d05b8SZbigniew Bodek sbintime_t timestamp, time; 29549b8d05b8SZbigniew Bodek 29559b8d05b8SZbigniew Bodek if (adapter->wd_active == 0) 29569b8d05b8SZbigniew Bodek return; 29579b8d05b8SZbigniew Bodek 295840621d71SMarcin Wojtas if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 29599b8d05b8SZbigniew Bodek return; 29609b8d05b8SZbigniew Bodek 29619b8d05b8SZbigniew Bodek timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); 29629b8d05b8SZbigniew Bodek time = getsbinuptime() - timestamp; 29639b8d05b8SZbigniew Bodek if (unlikely(time > adapter->keep_alive_timeout)) { 29643fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n"); 29659b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.wd_expired, 1); 29667926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); 29679b8d05b8SZbigniew Bodek } 2968858659f7SMarcin Wojtas } 29699b8d05b8SZbigniew Bodek 29709b8d05b8SZbigniew Bodek /* Check if admin queue is enabled */ 29719b8d05b8SZbigniew Bodek static void check_for_admin_com_state(struct ena_adapter *adapter) 29729b8d05b8SZbigniew Bodek { 29730bdffe59SMarcin Wojtas if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == 29740bdffe59SMarcin Wojtas false)) { 29753fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 29769b8d05b8SZbigniew Bodek "ENA admin queue is not in running state!\n"); 29779b8d05b8SZbigniew Bodek counter_u64_add(adapter->dev_stats.admin_q_pause, 1); 29787926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO); 29799b8d05b8SZbigniew Bodek } 2980858659f7SMarcin Wojtas } 29819b8d05b8SZbigniew Bodek 298274dba3adSMarcin Wojtas static int 2983d12f7bfcSMarcin Wojtas check_for_rx_interrupt_queue(struct ena_adapter *adapter, 2984d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring) 2985d12f7bfcSMarcin Wojtas { 2986d12f7bfcSMarcin Wojtas if (likely(rx_ring->first_interrupt)) 2987d12f7bfcSMarcin Wojtas 
return (0); 2988d12f7bfcSMarcin Wojtas 2989d12f7bfcSMarcin Wojtas if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) 2990d12f7bfcSMarcin Wojtas return (0); 2991d12f7bfcSMarcin Wojtas 2992d12f7bfcSMarcin Wojtas rx_ring->no_interrupt_event_cnt++; 2993d12f7bfcSMarcin Wojtas 2994d12f7bfcSMarcin Wojtas if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { 29953fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Potential MSIX issue on Rx side " 2996d12f7bfcSMarcin Wojtas "Queue = %d. Reset the device\n", rx_ring->qid); 29977926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT); 2998d12f7bfcSMarcin Wojtas return (EIO); 2999d12f7bfcSMarcin Wojtas } 3000d12f7bfcSMarcin Wojtas 3001d12f7bfcSMarcin Wojtas return (0); 3002d12f7bfcSMarcin Wojtas } 3003d12f7bfcSMarcin Wojtas 3004d12f7bfcSMarcin Wojtas static int 3005d12f7bfcSMarcin Wojtas check_missing_comp_in_tx_queue(struct ena_adapter *adapter, 300674dba3adSMarcin Wojtas struct ena_ring *tx_ring) 300774dba3adSMarcin Wojtas { 30083fc5d816SMarcin Wojtas device_t pdev = adapter->pdev; 300974dba3adSMarcin Wojtas struct bintime curtime, time; 301074dba3adSMarcin Wojtas struct ena_tx_buffer *tx_buf; 3011*d8aba82bSDawid Gorecki int time_since_last_cleanup; 3012*d8aba82bSDawid Gorecki int missing_tx_comp_to; 3013d12f7bfcSMarcin Wojtas sbintime_t time_offset; 301474dba3adSMarcin Wojtas uint32_t missed_tx = 0; 3015d12f7bfcSMarcin Wojtas int i, rc = 0; 301674dba3adSMarcin Wojtas 301774dba3adSMarcin Wojtas getbinuptime(&curtime); 301874dba3adSMarcin Wojtas 301974dba3adSMarcin Wojtas for (i = 0; i < tx_ring->ring_size; i++) { 302074dba3adSMarcin Wojtas tx_buf = &tx_ring->tx_buffer_info[i]; 302174dba3adSMarcin Wojtas 30220bdffe59SMarcin Wojtas if (bintime_isset(&tx_buf->timestamp) == 0) 302374dba3adSMarcin Wojtas continue; 302474dba3adSMarcin Wojtas 302574dba3adSMarcin Wojtas time = curtime; 302674dba3adSMarcin Wojtas bintime_sub(&time, &tx_buf->timestamp); 3027d12f7bfcSMarcin Wojtas time_offset = bttosbt(time); 3028d12f7bfcSMarcin Wojtas 3029d12f7bfcSMarcin Wojtas if (unlikely(!tx_ring->first_interrupt && 3030d12f7bfcSMarcin Wojtas time_offset > 2 * adapter->missing_tx_timeout)) { 3031d12f7bfcSMarcin Wojtas /* 3032d12f7bfcSMarcin Wojtas * If after graceful period interrupt is still not 3033d12f7bfcSMarcin Wojtas * received, we schedule a reset. 3034d12f7bfcSMarcin Wojtas */ 30353fc5d816SMarcin Wojtas ena_log(pdev, ERR, 3036d12f7bfcSMarcin Wojtas "Potential MSIX issue on Tx side Queue = %d. " 3037d12f7bfcSMarcin Wojtas "Reset the device\n", tx_ring->qid); 30387926bc44SMarcin Wojtas ena_trigger_reset(adapter, 30397926bc44SMarcin Wojtas ENA_REGS_RESET_MISS_INTERRUPT); 3040d12f7bfcSMarcin Wojtas return (EIO); 3041d12f7bfcSMarcin Wojtas } 304274dba3adSMarcin Wojtas 304374dba3adSMarcin Wojtas /* Check again if packet is still waiting */ 3044d12f7bfcSMarcin Wojtas if (unlikely(time_offset > adapter->missing_tx_timeout)) { 304574dba3adSMarcin Wojtas 3046*d8aba82bSDawid Gorecki if (!tx_buf->print_once) { 3047*d8aba82bSDawid Gorecki time_since_last_cleanup = TICKS_2_USEC(ticks - 3048*d8aba82bSDawid Gorecki tx_ring->tx_last_cleanup_ticks); 3049*d8aba82bSDawid Gorecki missing_tx_comp_to = 3050*d8aba82bSDawid Gorecki sbttoms(adapter->missing_tx_timeout); 30513fc5d816SMarcin Wojtas ena_log(pdev, WARN, "Found a Tx that wasn't " 3052*d8aba82bSDawid Gorecki "completed on time, qid %d, index %d." 3053*d8aba82bSDawid Gorecki "%d usecs have passed since last cleanup." 
3054*d8aba82bSDawid Gorecki "Missing Tx timeout value %d msecs.\n", 3055*d8aba82bSDawid Gorecki tx_ring->qid, i, time_since_last_cleanup, 3056*d8aba82bSDawid Gorecki missing_tx_comp_to); 3057*d8aba82bSDawid Gorecki } 305874dba3adSMarcin Wojtas 305974dba3adSMarcin Wojtas tx_buf->print_once = true; 306074dba3adSMarcin Wojtas missed_tx++; 3061d12f7bfcSMarcin Wojtas } 3062d12f7bfcSMarcin Wojtas } 306374dba3adSMarcin Wojtas 3064d12f7bfcSMarcin Wojtas if (unlikely(missed_tx > adapter->missing_tx_threshold)) { 30653fc5d816SMarcin Wojtas ena_log(pdev, ERR, 3066d12f7bfcSMarcin Wojtas "The number of lost tx completion is above the threshold " 3067d12f7bfcSMarcin Wojtas "(%d > %d). Reset the device\n", 30684e8acd84SMarcin Wojtas missed_tx, adapter->missing_tx_threshold); 30697926bc44SMarcin Wojtas ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL); 3070d12f7bfcSMarcin Wojtas rc = EIO; 307174dba3adSMarcin Wojtas } 307274dba3adSMarcin Wojtas 3073d12f7bfcSMarcin Wojtas counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx); 3074d12f7bfcSMarcin Wojtas 3075d12f7bfcSMarcin Wojtas return (rc); 307674dba3adSMarcin Wojtas } 307774dba3adSMarcin Wojtas 30789b8d05b8SZbigniew Bodek /* 30799b8d05b8SZbigniew Bodek * Check for TX which were not completed on time. 30809b8d05b8SZbigniew Bodek * Timeout is defined by "missing_tx_timeout". 30819b8d05b8SZbigniew Bodek * Reset will be performed if number of incompleted 30829b8d05b8SZbigniew Bodek * transactions exceeds "missing_tx_threshold". 30839b8d05b8SZbigniew Bodek */ 30840bdffe59SMarcin Wojtas static void 3085d12f7bfcSMarcin Wojtas check_for_missing_completions(struct ena_adapter *adapter) 30869b8d05b8SZbigniew Bodek { 30879b8d05b8SZbigniew Bodek struct ena_ring *tx_ring; 3088d12f7bfcSMarcin Wojtas struct ena_ring *rx_ring; 308974dba3adSMarcin Wojtas int i, budget, rc; 30909b8d05b8SZbigniew Bodek 30919b8d05b8SZbigniew Bodek /* Make sure the driver doesn't turn the device in other process */ 30929b8d05b8SZbigniew Bodek rmb(); 30939b8d05b8SZbigniew Bodek 3094fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 30959b8d05b8SZbigniew Bodek return; 30969b8d05b8SZbigniew Bodek 3097fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 30989b8d05b8SZbigniew Bodek return; 30999b8d05b8SZbigniew Bodek 310040621d71SMarcin Wojtas if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT) 31019b8d05b8SZbigniew Bodek return; 31029b8d05b8SZbigniew Bodek 31039b8d05b8SZbigniew Bodek budget = adapter->missing_tx_max_queues; 31049b8d05b8SZbigniew Bodek 31057d8c4feeSMarcin Wojtas for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) { 31069b8d05b8SZbigniew Bodek tx_ring = &adapter->tx_ring[i]; 3107d12f7bfcSMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 31089b8d05b8SZbigniew Bodek 3109d12f7bfcSMarcin Wojtas rc = check_missing_comp_in_tx_queue(adapter, tx_ring); 3110d12f7bfcSMarcin Wojtas if (unlikely(rc != 0)) 3111d12f7bfcSMarcin Wojtas return; 3112d12f7bfcSMarcin Wojtas 3113d12f7bfcSMarcin Wojtas rc = check_for_rx_interrupt_queue(adapter, rx_ring); 31140bdffe59SMarcin Wojtas if (unlikely(rc != 0)) 31159b8d05b8SZbigniew Bodek return; 31169b8d05b8SZbigniew Bodek 31179b8d05b8SZbigniew Bodek budget--; 3118cd5d5804SMarcin Wojtas if (budget == 0) { 31199b8d05b8SZbigniew Bodek i++; 31209b8d05b8SZbigniew Bodek break; 31219b8d05b8SZbigniew Bodek } 31229b8d05b8SZbigniew Bodek } 31239b8d05b8SZbigniew Bodek 31247d8c4feeSMarcin Wojtas adapter->next_monitored_tx_qid = i % adapter->num_io_queues; 31259b8d05b8SZbigniew Bodek } 
31269b8d05b8SZbigniew Bodek 31275cb9db07SMarcin Wojtas /* trigger rx cleanup after 2 consecutive detections */ 3128efe6ab18SMarcin Wojtas #define EMPTY_RX_REFILL 2 3129efe6ab18SMarcin Wojtas /* For the rare case where the device runs out of Rx descriptors and the 3130efe6ab18SMarcin Wojtas * msix handler failed to refill new Rx descriptors (due to a lack of memory 3131efe6ab18SMarcin Wojtas * for example). 3132efe6ab18SMarcin Wojtas * This case will lead to a deadlock: 3133efe6ab18SMarcin Wojtas * The device won't send interrupts since all the new Rx packets will be dropped 3134efe6ab18SMarcin Wojtas * The msix handler won't allocate new Rx descriptors so the device won't be 3135efe6ab18SMarcin Wojtas * able to send new packets. 3136efe6ab18SMarcin Wojtas * 3137efe6ab18SMarcin Wojtas * When such a situation is detected - execute rx cleanup task in another thread 3138efe6ab18SMarcin Wojtas */ 3139efe6ab18SMarcin Wojtas static void 3140efe6ab18SMarcin Wojtas check_for_empty_rx_ring(struct ena_adapter *adapter) 3141efe6ab18SMarcin Wojtas { 3142efe6ab18SMarcin Wojtas struct ena_ring *rx_ring; 3143efe6ab18SMarcin Wojtas int i, refill_required; 3144efe6ab18SMarcin Wojtas 3145fd43fd2aSMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 3146efe6ab18SMarcin Wojtas return; 3147efe6ab18SMarcin Wojtas 3148fd43fd2aSMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) 3149efe6ab18SMarcin Wojtas return; 3150efe6ab18SMarcin Wojtas 31517d8c4feeSMarcin Wojtas for (i = 0; i < adapter->num_io_queues; i++) { 3152efe6ab18SMarcin Wojtas rx_ring = &adapter->rx_ring[i]; 3153efe6ab18SMarcin Wojtas 31548483b844SMarcin Wojtas refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 3155efe6ab18SMarcin Wojtas if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 3156efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue++; 3157efe6ab18SMarcin Wojtas 3158efe6ab18SMarcin Wojtas if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 3159efe6ab18SMarcin Wojtas counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 3160efe6ab18SMarcin Wojtas 1); 3161efe6ab18SMarcin Wojtas 31623fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 31633fc5d816SMarcin Wojtas "Rx ring %d is stalled. 
Triggering the refill function\n", 31643fc5d816SMarcin Wojtas i); 3165efe6ab18SMarcin Wojtas 31665cb9db07SMarcin Wojtas taskqueue_enqueue(rx_ring->que->cleanup_tq, 31675cb9db07SMarcin Wojtas &rx_ring->que->cleanup_task); 3168efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3169efe6ab18SMarcin Wojtas } 3170efe6ab18SMarcin Wojtas } else { 3171efe6ab18SMarcin Wojtas rx_ring->empty_rx_queue = 0; 3172efe6ab18SMarcin Wojtas } 3173efe6ab18SMarcin Wojtas } 3174efe6ab18SMarcin Wojtas } 31759b8d05b8SZbigniew Bodek 317640621d71SMarcin Wojtas static void ena_update_hints(struct ena_adapter *adapter, 317740621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints) 317840621d71SMarcin Wojtas { 317940621d71SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 318040621d71SMarcin Wojtas 318140621d71SMarcin Wojtas if (hints->admin_completion_tx_timeout) 318240621d71SMarcin Wojtas ena_dev->admin_queue.completion_timeout = 318340621d71SMarcin Wojtas hints->admin_completion_tx_timeout * 1000; 318440621d71SMarcin Wojtas 318540621d71SMarcin Wojtas if (hints->mmio_read_timeout) 318640621d71SMarcin Wojtas /* convert to usec */ 318740621d71SMarcin Wojtas ena_dev->mmio_read.reg_read_to = 318840621d71SMarcin Wojtas hints->mmio_read_timeout * 1000; 318940621d71SMarcin Wojtas 319040621d71SMarcin Wojtas if (hints->missed_tx_completion_count_threshold_to_reset) 319140621d71SMarcin Wojtas adapter->missing_tx_threshold = 319240621d71SMarcin Wojtas hints->missed_tx_completion_count_threshold_to_reset; 319340621d71SMarcin Wojtas 319440621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout) { 319540621d71SMarcin Wojtas if (hints->missing_tx_completion_timeout == 319640621d71SMarcin Wojtas ENA_HW_HINTS_NO_TIMEOUT) 319740621d71SMarcin Wojtas adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; 319840621d71SMarcin Wojtas else 319940621d71SMarcin Wojtas adapter->missing_tx_timeout = 320040621d71SMarcin Wojtas SBT_1MS * hints->missing_tx_completion_timeout; 320140621d71SMarcin Wojtas } 320240621d71SMarcin Wojtas 320340621d71SMarcin Wojtas if (hints->driver_watchdog_timeout) { 320440621d71SMarcin Wojtas if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 320540621d71SMarcin Wojtas adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 320640621d71SMarcin Wojtas else 320740621d71SMarcin Wojtas adapter->keep_alive_timeout = 320840621d71SMarcin Wojtas SBT_1MS * hints->driver_watchdog_timeout; 320940621d71SMarcin Wojtas } 321040621d71SMarcin Wojtas } 321140621d71SMarcin Wojtas 3212f180142cSMarcin Wojtas /** 3213f180142cSMarcin Wojtas * ena_copy_eni_metrics - Get and copy ENI metrics from the HW. 3214f180142cSMarcin Wojtas * @adapter: ENA device adapter 3215f180142cSMarcin Wojtas * 3216f180142cSMarcin Wojtas * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics 3217f180142cSMarcin Wojtas * and other error codes on failure. 3218f180142cSMarcin Wojtas * 3219f180142cSMarcin Wojtas * This function can possibly cause a race with other calls to the admin queue. 3220f180142cSMarcin Wojtas * Because of that, the caller should either lock this function or make sure 3221f180142cSMarcin Wojtas * that there is no race in the current context. 
3222f180142cSMarcin Wojtas */ 3223f180142cSMarcin Wojtas static int 3224f180142cSMarcin Wojtas ena_copy_eni_metrics(struct ena_adapter *adapter) 3225f180142cSMarcin Wojtas { 3226f180142cSMarcin Wojtas static bool print_once = true; 3227f180142cSMarcin Wojtas int rc; 3228f180142cSMarcin Wojtas 3229f180142cSMarcin Wojtas rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics); 3230f180142cSMarcin Wojtas 3231f180142cSMarcin Wojtas if (rc != 0) { 3232f180142cSMarcin Wojtas if (rc == ENA_COM_UNSUPPORTED) { 3233f180142cSMarcin Wojtas if (print_once) { 32343fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 3235f180142cSMarcin Wojtas "Retrieving ENI metrics is not supported.\n"); 3236f180142cSMarcin Wojtas print_once = false; 3237f180142cSMarcin Wojtas } else { 32383fc5d816SMarcin Wojtas ena_log(adapter->pdev, DBG, 3239f180142cSMarcin Wojtas "Retrieving ENI metrics is not supported.\n"); 3240f180142cSMarcin Wojtas } 3241f180142cSMarcin Wojtas } else { 32423fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 3243f180142cSMarcin Wojtas "Failed to get ENI metrics: %d\n", rc); 3244f180142cSMarcin Wojtas } 3245f180142cSMarcin Wojtas } 3246f180142cSMarcin Wojtas 3247f180142cSMarcin Wojtas return (rc); 3248f180142cSMarcin Wojtas } 3249f180142cSMarcin Wojtas 32509b8d05b8SZbigniew Bodek static void 32519b8d05b8SZbigniew Bodek ena_timer_service(void *data) 32529b8d05b8SZbigniew Bodek { 32539b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)data; 32549b8d05b8SZbigniew Bodek struct ena_admin_host_info *host_info = 32559b8d05b8SZbigniew Bodek adapter->ena_dev->host_attr.host_info; 32569b8d05b8SZbigniew Bodek 32579b8d05b8SZbigniew Bodek check_for_missing_keep_alive(adapter); 32589b8d05b8SZbigniew Bodek 32599b8d05b8SZbigniew Bodek check_for_admin_com_state(adapter); 32609b8d05b8SZbigniew Bodek 3261d12f7bfcSMarcin Wojtas check_for_missing_completions(adapter); 32629b8d05b8SZbigniew Bodek 3263efe6ab18SMarcin Wojtas check_for_empty_rx_ring(adapter); 3264efe6ab18SMarcin Wojtas 3265f180142cSMarcin Wojtas /* 3266f180142cSMarcin Wojtas * User-controlled update of the ENI metrics. 3267f180142cSMarcin Wojtas * If the delay was set to 0, then the stats shouldn't be updated at 3268f180142cSMarcin Wojtas * all. 3269f180142cSMarcin Wojtas * Otherwise, wait 'eni_metrics_sample_interval' seconds before 3270f180142cSMarcin Wojtas * updating the stats. 3271f180142cSMarcin Wojtas * As the timer service is executed every second, it's enough to increment 3272f180142cSMarcin Wojtas * the appropriate counter each time the timer service is executed. 3273f180142cSMarcin Wojtas */ 3274f180142cSMarcin Wojtas if ((adapter->eni_metrics_sample_interval != 0) && 3275f180142cSMarcin Wojtas (++adapter->eni_metrics_sample_interval_cnt >= 3276f180142cSMarcin Wojtas adapter->eni_metrics_sample_interval)) { 3277f180142cSMarcin Wojtas /* 3278f180142cSMarcin Wojtas * There is no race with other admin queue calls, as: 327978554d0cSDawid Gorecki * - Timer service runs after attach function ends, so all 3280f180142cSMarcin Wojtas * configuration calls to the admin queue are finished. 328178554d0cSDawid Gorecki * - Timer service is temporarily stopped when bringing 328278554d0cSDawid Gorecki * the interface up or down. 3283f180142cSMarcin Wojtas * - After interface is up, the driver doesn't use (at least 3284f180142cSMarcin Wojtas * for now) other functions writing to the admin queue.
3285f180142cSMarcin Wojtas * 3286f180142cSMarcin Wojtas * It may change in the future, so in that situation, the lock 3287f180142cSMarcin Wojtas * will be needed. ENA_LOCK_*() cannot be used for that purpose, 3288f180142cSMarcin Wojtas * as callout ena_timer_service is protected by them. It could 3289f180142cSMarcin Wojtas * lead to the deadlock if callout_drain() would hold the lock 3290f180142cSMarcin Wojtas * before ena_copy_eni_metrics() was executed. It's advised to 3291f180142cSMarcin Wojtas * use separate lock in that situation which will be used only 3292f180142cSMarcin Wojtas * for the admin queue. 3293f180142cSMarcin Wojtas */ 3294f180142cSMarcin Wojtas (void)ena_copy_eni_metrics(adapter); 3295f180142cSMarcin Wojtas adapter->eni_metrics_sample_interval_cnt = 0; 3296f180142cSMarcin Wojtas } 3297f180142cSMarcin Wojtas 3298f180142cSMarcin Wojtas 32990bdffe59SMarcin Wojtas if (host_info != NULL) 33009b8d05b8SZbigniew Bodek ena_update_host_info(host_info, adapter->ifp); 33019b8d05b8SZbigniew Bodek 3302fd43fd2aSMarcin Wojtas if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { 3303d10ec3adSDawid Gorecki /* 3304d10ec3adSDawid Gorecki * Timeout when validating version indicates that the device 3305d10ec3adSDawid Gorecki * became unresponsive. If that happens skip the reset and 3306d10ec3adSDawid Gorecki * reschedule timer service, so the reset can be retried later. 3307d10ec3adSDawid Gorecki */ 3308d10ec3adSDawid Gorecki if (ena_com_validate_version(adapter->ena_dev) == 3309d10ec3adSDawid Gorecki ENA_COM_TIMER_EXPIRED) { 3310d10ec3adSDawid Gorecki ena_log(adapter->pdev, WARN, 3311d10ec3adSDawid Gorecki "FW unresponsive, skipping reset\n"); 3312d10ec3adSDawid Gorecki ENA_TIMER_RESET(adapter); 3313d10ec3adSDawid Gorecki return; 3314d10ec3adSDawid Gorecki } 33153fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, "Trigger reset is on\n"); 33169b8d05b8SZbigniew Bodek taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task); 33179b8d05b8SZbigniew Bodek return; 33189b8d05b8SZbigniew Bodek } 33199b8d05b8SZbigniew Bodek 33209b8d05b8SZbigniew Bodek /* 33219b8d05b8SZbigniew Bodek * Schedule another timeout one second from now. 
33229b8d05b8SZbigniew Bodek */ 332378554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 33249b8d05b8SZbigniew Bodek } 33259b8d05b8SZbigniew Bodek 332638c7b965SMarcin Wojtas void 332732f63fa7SMarcin Wojtas ena_destroy_device(struct ena_adapter *adapter, bool graceful) 33289b8d05b8SZbigniew Bodek { 332932f63fa7SMarcin Wojtas if_t ifp = adapter->ifp; 33309b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 33319b8d05b8SZbigniew Bodek bool dev_up; 333232f63fa7SMarcin Wojtas 333332f63fa7SMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) 333432f63fa7SMarcin Wojtas return; 333532f63fa7SMarcin Wojtas 333632f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_DOWN); 333732f63fa7SMarcin Wojtas 333878554d0cSDawid Gorecki ENA_TIMER_DRAIN(adapter); 333932f63fa7SMarcin Wojtas 334032f63fa7SMarcin Wojtas dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); 334132f63fa7SMarcin Wojtas if (dev_up) 334232f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); 334332f63fa7SMarcin Wojtas 334432f63fa7SMarcin Wojtas if (!graceful) 334532f63fa7SMarcin Wojtas ena_com_set_admin_running_state(ena_dev, false); 334632f63fa7SMarcin Wojtas 334732f63fa7SMarcin Wojtas if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) 334832f63fa7SMarcin Wojtas ena_down(adapter); 334932f63fa7SMarcin Wojtas 335032f63fa7SMarcin Wojtas /* 335132f63fa7SMarcin Wojtas * Stop the device from sending AENQ events (if the device was up, and 335232f63fa7SMarcin Wojtas * the trigger reset was on, ena_down already performs device reset) 335332f63fa7SMarcin Wojtas */ 335432f63fa7SMarcin Wojtas if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up)) 335532f63fa7SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 335632f63fa7SMarcin Wojtas 335732f63fa7SMarcin Wojtas ena_free_mgmnt_irq(adapter); 335832f63fa7SMarcin Wojtas 335932f63fa7SMarcin Wojtas ena_disable_msix(adapter); 336032f63fa7SMarcin Wojtas 3361e2735b09SMarcin Wojtas /* 3362e2735b09SMarcin Wojtas * IO rings resources should be freed because `ena_restore_device()` 3363e2735b09SMarcin Wojtas * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX 3364e2735b09SMarcin Wojtas * vectors. The amount of MSIX vectors after destroy-restore may be 3365e2735b09SMarcin Wojtas * different than before. Therefore, IO rings resources should be 3366e2735b09SMarcin Wojtas * established from scratch each time. 
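	 * The matching ena_init_io_rings() call is made later, in
	 * ena_restore_device().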
3367e2735b09SMarcin Wojtas */ 3368e2735b09SMarcin Wojtas ena_free_all_io_rings_resources(adapter); 3369e2735b09SMarcin Wojtas 337032f63fa7SMarcin Wojtas ena_com_abort_admin_commands(ena_dev); 337132f63fa7SMarcin Wojtas 337232f63fa7SMarcin Wojtas ena_com_wait_for_abort_completion(ena_dev); 337332f63fa7SMarcin Wojtas 337432f63fa7SMarcin Wojtas ena_com_admin_destroy(ena_dev); 337532f63fa7SMarcin Wojtas 337632f63fa7SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 337732f63fa7SMarcin Wojtas 337832f63fa7SMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 337932f63fa7SMarcin Wojtas 338032f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); 338132f63fa7SMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 338232f63fa7SMarcin Wojtas } 338332f63fa7SMarcin Wojtas 338432f63fa7SMarcin Wojtas static int 338532f63fa7SMarcin Wojtas ena_device_validate_params(struct ena_adapter *adapter, 338632f63fa7SMarcin Wojtas struct ena_com_dev_get_features_ctx *get_feat_ctx) 338732f63fa7SMarcin Wojtas { 338832f63fa7SMarcin Wojtas 338932f63fa7SMarcin Wojtas if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, 339032f63fa7SMarcin Wojtas ETHER_ADDR_LEN) != 0) { 33913fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n"); 339232f63fa7SMarcin Wojtas return (EINVAL); 339332f63fa7SMarcin Wojtas } 339432f63fa7SMarcin Wojtas 339532f63fa7SMarcin Wojtas if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { 33963fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 339732f63fa7SMarcin Wojtas "Error, device max mtu is smaller than ifp MTU\n"); 339832f63fa7SMarcin Wojtas return (EINVAL); 339932f63fa7SMarcin Wojtas } 340032f63fa7SMarcin Wojtas 340132f63fa7SMarcin Wojtas return 0; 340232f63fa7SMarcin Wojtas } 340332f63fa7SMarcin Wojtas 340438c7b965SMarcin Wojtas int 340532f63fa7SMarcin Wojtas ena_restore_device(struct ena_adapter *adapter) 340632f63fa7SMarcin Wojtas { 340732f63fa7SMarcin Wojtas struct ena_com_dev_get_features_ctx get_feat_ctx; 340832f63fa7SMarcin Wojtas struct ena_com_dev *ena_dev = adapter->ena_dev; 340932f63fa7SMarcin Wojtas if_t ifp = adapter->ifp; 341032f63fa7SMarcin Wojtas device_t dev = adapter->pdev; 341132f63fa7SMarcin Wojtas int wd_active; 34129b8d05b8SZbigniew Bodek int rc; 34139b8d05b8SZbigniew Bodek 341432f63fa7SMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); 341532f63fa7SMarcin Wojtas 341632f63fa7SMarcin Wojtas rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active); 341732f63fa7SMarcin Wojtas if (rc != 0) { 34183fc5d816SMarcin Wojtas ena_log(dev, ERR, "Cannot initialize device\n"); 341932f63fa7SMarcin Wojtas goto err; 342032f63fa7SMarcin Wojtas } 342132f63fa7SMarcin Wojtas /* 342232f63fa7SMarcin Wojtas * Only enable WD if it was enabled before reset, so it won't override 342332f63fa7SMarcin Wojtas * value set by the user by the sysctl. 
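	 * (adapter->wd_active == 0 here means the watchdog was disabled via
	 * sysctl before the reset, so it is intentionally left disabled.)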
342432f63fa7SMarcin Wojtas 	 */
342532f63fa7SMarcin Wojtas 	if (adapter->wd_active != 0)
342632f63fa7SMarcin Wojtas 		adapter->wd_active = wd_active;
342732f63fa7SMarcin Wojtas 
342832f63fa7SMarcin Wojtas 	rc = ena_device_validate_params(adapter, &get_feat_ctx);
342932f63fa7SMarcin Wojtas 	if (rc != 0) {
34303fc5d816SMarcin Wojtas 		ena_log(dev, ERR, "Validation of device parameters failed\n");
343132f63fa7SMarcin Wojtas 		goto err_device_destroy;
343232f63fa7SMarcin Wojtas 	}
343332f63fa7SMarcin Wojtas 
343432f63fa7SMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
343532f63fa7SMarcin Wojtas 	/* Make sure we don't have a race with the AENQ link state handler */
343632f63fa7SMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
343732f63fa7SMarcin Wojtas 		if_link_state_change(ifp, LINK_STATE_UP);
343832f63fa7SMarcin Wojtas 
3439aa9c3226SMarcin Wojtas 	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
344032f63fa7SMarcin Wojtas 	if (rc != 0) {
34413fc5d816SMarcin Wojtas 		ena_log(dev, ERR, "Enable MSI-X failed\n");
344232f63fa7SMarcin Wojtas 		goto err_device_destroy;
344332f63fa7SMarcin Wojtas 	}
344432f63fa7SMarcin Wojtas 
3445e2735b09SMarcin Wojtas 	/*
3446e2735b09SMarcin Wojtas 	 * The effective number of used MSIX vectors should be the same as
3447e2735b09SMarcin Wojtas 	 * before `ena_destroy_device()`, if possible, or closest to it if
3448e2735b09SMarcin Wojtas 	 * fewer vectors are available.
3449e2735b09SMarcin Wojtas 	 */
3450e2735b09SMarcin Wojtas 	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
3451e2735b09SMarcin Wojtas 		adapter->num_io_queues =
3452e2735b09SMarcin Wojtas 		    adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3453e2735b09SMarcin Wojtas 
3454e2735b09SMarcin Wojtas 	/* Re-initialize basic ring information */
3455e2735b09SMarcin Wojtas 	ena_init_io_rings(adapter);
3456e2735b09SMarcin Wojtas 
345732f63fa7SMarcin Wojtas 	/* If the interface was up before the reset, bring it up */
345832f63fa7SMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
345932f63fa7SMarcin Wojtas 		rc = ena_up(adapter);
346032f63fa7SMarcin Wojtas 		if (rc != 0) {
34613fc5d816SMarcin Wojtas 			ena_log(dev, ERR, "Failed to create I/O queues\n");
346232f63fa7SMarcin Wojtas 			goto err_disable_msix;
346332f63fa7SMarcin Wojtas 		}
346432f63fa7SMarcin Wojtas 	}
346532f63fa7SMarcin Wojtas 
346624392281SMarcin Wojtas 	/* Indicate that the device is running again and ready to work */
346732f63fa7SMarcin Wojtas 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
346824392281SMarcin Wojtas 
346924392281SMarcin Wojtas 	/*
347024392281SMarcin Wojtas 	 * As the AENQ handlers weren't executed during reset because
347124392281SMarcin Wojtas 	 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
347224392281SMarcin Wojtas 	 * timestamp must be updated again. That will prevent the next
347324392281SMarcin Wojtas 	 * reset caused by a missing keep alive.
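	 * (During normal operation the timestamp is refreshed by the
	 * ENA_ADMIN_KEEP_ALIVE AENQ handler, ena_keep_alive_wd().)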
347424392281SMarcin Wojtas 	 */
347524392281SMarcin Wojtas 	adapter->keep_alive_timestamp = getsbinuptime();
347678554d0cSDawid Gorecki 	ENA_TIMER_RESET(adapter);
347778554d0cSDawid Gorecki 
34787d8c4feeSMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
347932f63fa7SMarcin Wojtas 
348032f63fa7SMarcin Wojtas 	return (rc);
348132f63fa7SMarcin Wojtas 
348232f63fa7SMarcin Wojtas err_disable_msix:
348332f63fa7SMarcin Wojtas 	ena_free_mgmnt_irq(adapter);
348432f63fa7SMarcin Wojtas 	ena_disable_msix(adapter);
348532f63fa7SMarcin Wojtas err_device_destroy:
348632f63fa7SMarcin Wojtas 	ena_com_abort_admin_commands(ena_dev);
348732f63fa7SMarcin Wojtas 	ena_com_wait_for_abort_completion(ena_dev);
348832f63fa7SMarcin Wojtas 	ena_com_admin_destroy(ena_dev);
348932f63fa7SMarcin Wojtas 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
349032f63fa7SMarcin Wojtas 	ena_com_mmio_reg_read_request_destroy(ena_dev);
349132f63fa7SMarcin Wojtas err:
349232f63fa7SMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
349332f63fa7SMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
34943fc5d816SMarcin Wojtas 	ena_log(dev, ERR, "Reset attempt failed. Can not reset the device\n");
349532f63fa7SMarcin Wojtas 
349678554d0cSDawid Gorecki 	ENA_TIMER_RESET(adapter);
349778554d0cSDawid Gorecki 
349832f63fa7SMarcin Wojtas 	return (rc);
349932f63fa7SMarcin Wojtas }
350032f63fa7SMarcin Wojtas 
350132f63fa7SMarcin Wojtas static void
350232f63fa7SMarcin Wojtas ena_reset_task(void *arg, int pending)
350332f63fa7SMarcin Wojtas {
350432f63fa7SMarcin Wojtas 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
350532f63fa7SMarcin Wojtas 
350607aff471SArtur Rojek 	ENA_LOCK_LOCK();
3507433ab9b6SArtur Rojek 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
350832f63fa7SMarcin Wojtas 		ena_destroy_device(adapter, false);
350932f63fa7SMarcin Wojtas 		ena_restore_device(adapter);
3510d209ffeeSDawid Gorecki 
3511d209ffeeSDawid Gorecki 		ena_log(adapter->pdev, INFO,
3512d209ffeeSDawid Gorecki 		    "Device reset completed successfully, Driver info: %s\n",
3513d209ffeeSDawid Gorecki 		    ena_version);
3514433ab9b6SArtur Rojek 	}
351507aff471SArtur Rojek 	ENA_LOCK_UNLOCK();
35169b8d05b8SZbigniew Bodek }
35179b8d05b8SZbigniew Bodek 
35189b8d05b8SZbigniew Bodek /**
35199b8d05b8SZbigniew Bodek  * ena_attach - Device Initialization Routine
35209b8d05b8SZbigniew Bodek  * @pdev: device information struct
35219b8d05b8SZbigniew Bodek  *
35229b8d05b8SZbigniew Bodek  * Returns 0 on success, otherwise an error code on failure.
35239b8d05b8SZbigniew Bodek  *
35249b8d05b8SZbigniew Bodek  * ena_attach initializes an adapter identified by a device structure.
35259b8d05b8SZbigniew Bodek  * The OS initialization, configuration of the adapter private structure,
35269b8d05b8SZbigniew Bodek  * and a hardware reset all occur here.
35279b8d05b8SZbigniew Bodek **/ 35289b8d05b8SZbigniew Bodek static int 35299b8d05b8SZbigniew Bodek ena_attach(device_t pdev) 35309b8d05b8SZbigniew Bodek { 35319b8d05b8SZbigniew Bodek struct ena_com_dev_get_features_ctx get_feat_ctx; 35324fa9e02dSMarcin Wojtas struct ena_llq_configurations llq_config; 35336064f289SMarcin Wojtas struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 35349b8d05b8SZbigniew Bodek static int version_printed; 35359b8d05b8SZbigniew Bodek struct ena_adapter *adapter; 35369b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = NULL; 35377d8c4feeSMarcin Wojtas uint32_t max_num_io_queues; 35381c808fcdSMichal Krawczyk int msix_rid; 35394fa9e02dSMarcin Wojtas int rid, rc; 35404fa9e02dSMarcin Wojtas 35419b8d05b8SZbigniew Bodek adapter = device_get_softc(pdev); 35429b8d05b8SZbigniew Bodek adapter->pdev = pdev; 3543eb4c4f4aSMarcin Wojtas adapter->first_bind = -1; 35449b8d05b8SZbigniew Bodek 35456959869eSMarcin Wojtas /* 35466959869eSMarcin Wojtas * Set up the timer service - driver is responsible for avoiding 35476959869eSMarcin Wojtas * concurrency, as the callout won't be using any locking inside. 35486959869eSMarcin Wojtas */ 354978554d0cSDawid Gorecki ENA_TIMER_INIT(adapter); 35509b8d05b8SZbigniew Bodek adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO; 35519b8d05b8SZbigniew Bodek adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO; 35529b8d05b8SZbigniew Bodek adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES; 35539b8d05b8SZbigniew Bodek adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD; 35549b8d05b8SZbigniew Bodek 35559b8d05b8SZbigniew Bodek if (version_printed++ == 0) 35563fc5d816SMarcin Wojtas ena_log(pdev, INFO, "%s\n", ena_version); 35579b8d05b8SZbigniew Bodek 35589b8d05b8SZbigniew Bodek /* Allocate memory for ena_dev structure */ 3559cd5d5804SMarcin Wojtas ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, 3560cd5d5804SMarcin Wojtas M_WAITOK | M_ZERO); 35619b8d05b8SZbigniew Bodek 35629b8d05b8SZbigniew Bodek adapter->ena_dev = ena_dev; 35639b8d05b8SZbigniew Bodek ena_dev->dmadev = pdev; 35644fa9e02dSMarcin Wojtas 35654fa9e02dSMarcin Wojtas rid = PCIR_BAR(ENA_REG_BAR); 35664fa9e02dSMarcin Wojtas adapter->memory = NULL; 35674fa9e02dSMarcin Wojtas adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 35684fa9e02dSMarcin Wojtas &rid, RF_ACTIVE); 35694fa9e02dSMarcin Wojtas if (unlikely(adapter->registers == NULL)) { 35703fc5d816SMarcin Wojtas ena_log(pdev, ERR, 35714fa9e02dSMarcin Wojtas "unable to allocate bus resource: registers!\n"); 35724fa9e02dSMarcin Wojtas rc = ENOMEM; 35734fa9e02dSMarcin Wojtas goto err_dev_free; 35744fa9e02dSMarcin Wojtas } 35754fa9e02dSMarcin Wojtas 35761c808fcdSMichal Krawczyk /* MSIx vector table may reside on BAR0 with registers or on BAR1. 
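	 * The extra mapping below is created only when pci_msix_table_bar()
	 * reports a BAR different from the register BAR.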
*/ 35771c808fcdSMichal Krawczyk msix_rid = pci_msix_table_bar(pdev); 35781c808fcdSMichal Krawczyk if (msix_rid != rid) { 35791c808fcdSMichal Krawczyk adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, 35801c808fcdSMichal Krawczyk &msix_rid, RF_ACTIVE); 35811c808fcdSMichal Krawczyk if (unlikely(adapter->msix == NULL)) { 35823fc5d816SMarcin Wojtas ena_log(pdev, ERR, 35831c808fcdSMichal Krawczyk "unable to allocate bus resource: msix!\n"); 35841c808fcdSMichal Krawczyk rc = ENOMEM; 35851c808fcdSMichal Krawczyk goto err_pci_free; 35861c808fcdSMichal Krawczyk } 35871c808fcdSMichal Krawczyk adapter->msix_rid = msix_rid; 35881c808fcdSMichal Krawczyk } 35891c808fcdSMichal Krawczyk 35909b8d05b8SZbigniew Bodek ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, 35919b8d05b8SZbigniew Bodek M_WAITOK | M_ZERO); 35929b8d05b8SZbigniew Bodek 35939b8d05b8SZbigniew Bodek /* Store register resources */ 35949b8d05b8SZbigniew Bodek ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = 35959b8d05b8SZbigniew Bodek rman_get_bustag(adapter->registers); 35969b8d05b8SZbigniew Bodek ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = 35979b8d05b8SZbigniew Bodek rman_get_bushandle(adapter->registers); 35989b8d05b8SZbigniew Bodek 35993f9ed7abSMarcin Wojtas if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) { 36003fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to pmap registers bar\n"); 36019b8d05b8SZbigniew Bodek rc = ENXIO; 3602cd5d5804SMarcin Wojtas goto err_bus_free; 36039b8d05b8SZbigniew Bodek } 36049b8d05b8SZbigniew Bodek 36059b8d05b8SZbigniew Bodek ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 36069b8d05b8SZbigniew Bodek 3607fd43fd2aSMarcin Wojtas /* Initially clear all the flags */ 3608fd43fd2aSMarcin Wojtas ENA_FLAG_ZERO(adapter); 3609fd43fd2aSMarcin Wojtas 36109b8d05b8SZbigniew Bodek /* Device initialization */ 36119b8d05b8SZbigniew Bodek rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); 36123f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36133fc5d816SMarcin Wojtas ena_log(pdev, ERR, "ENA device init failed! 
(err: %d)\n", rc); 36149b8d05b8SZbigniew Bodek rc = ENXIO; 36159b8d05b8SZbigniew Bodek goto err_bus_free; 36169b8d05b8SZbigniew Bodek } 36179b8d05b8SZbigniew Bodek 3618beaadec9SMarcin Wojtas set_default_llq_configurations(&llq_config, &get_feat_ctx.llq); 36194fa9e02dSMarcin Wojtas 362090232d18SDawid Gorecki rc = ena_map_llq_mem_bar(pdev, ena_dev); 362190232d18SDawid Gorecki if (unlikely(rc != 0)) { 362290232d18SDawid Gorecki ena_log(pdev, ERR, "failed to map ENA mem bar"); 362390232d18SDawid Gorecki goto err_com_free; 362490232d18SDawid Gorecki } 362590232d18SDawid Gorecki 36264fa9e02dSMarcin Wojtas rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, 36274fa9e02dSMarcin Wojtas &llq_config); 36284fa9e02dSMarcin Wojtas if (unlikely(rc != 0)) { 36293fc5d816SMarcin Wojtas ena_log(pdev, ERR, "failed to set placement policy\n"); 36304fa9e02dSMarcin Wojtas goto err_com_free; 36314fa9e02dSMarcin Wojtas } 36324fa9e02dSMarcin Wojtas 36330b432b70SMarcin Wojtas if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 36340b432b70SMarcin Wojtas adapter->disable_meta_caching = 36350b432b70SMarcin Wojtas !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & 36360b432b70SMarcin Wojtas BIT(ENA_ADMIN_DISABLE_META_CACHING)); 36370b432b70SMarcin Wojtas 36389b8d05b8SZbigniew Bodek adapter->keep_alive_timestamp = getsbinuptime(); 36399b8d05b8SZbigniew Bodek 36409b8d05b8SZbigniew Bodek adapter->tx_offload_cap = get_feat_ctx.offload.tx; 36419b8d05b8SZbigniew Bodek 36429b8d05b8SZbigniew Bodek memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, 36439b8d05b8SZbigniew Bodek ETHER_ADDR_LEN); 36449b8d05b8SZbigniew Bodek 36457d8c4feeSMarcin Wojtas calc_queue_ctx.pdev = pdev; 36466064f289SMarcin Wojtas calc_queue_ctx.ena_dev = ena_dev; 36476064f289SMarcin Wojtas calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 36486064f289SMarcin Wojtas 36497d8c4feeSMarcin Wojtas /* Calculate initial and maximum IO queue number and size */ 36507d8c4feeSMarcin Wojtas max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, 36517d8c4feeSMarcin Wojtas &get_feat_ctx); 36527d8c4feeSMarcin Wojtas rc = ena_calc_io_queue_size(&calc_queue_ctx); 36537d8c4feeSMarcin Wojtas if (unlikely((rc != 0) || (max_num_io_queues <= 0))) { 36546064f289SMarcin Wojtas rc = EFAULT; 36559b8d05b8SZbigniew Bodek goto err_com_free; 36569b8d05b8SZbigniew Bodek } 36579b8d05b8SZbigniew Bodek 36589762a033SMarcin Wojtas adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size; 36599762a033SMarcin Wojtas adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size; 36607d8c4feeSMarcin Wojtas adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 36617d8c4feeSMarcin Wojtas adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 36626064f289SMarcin Wojtas adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 36636064f289SMarcin Wojtas adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 36646064f289SMarcin Wojtas 36657d8c4feeSMarcin Wojtas adapter->max_num_io_queues = max_num_io_queues; 36667d8c4feeSMarcin Wojtas 36676064f289SMarcin Wojtas adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; 36689b8d05b8SZbigniew Bodek 36697d8c4feeSMarcin Wojtas adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 36707d8c4feeSMarcin Wojtas 36717d8c4feeSMarcin Wojtas adapter->reset_reason = ENA_REGS_RESET_NORMAL; 36727d8c4feeSMarcin Wojtas 36739b8d05b8SZbigniew Bodek /* set up dma tags for rx and tx buffers */ 36749b8d05b8SZbigniew Bodek rc = ena_setup_tx_dma_tag(adapter); 36754e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 
36763fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Failed to create TX DMA tag\n"); 3677cd5d5804SMarcin Wojtas goto err_com_free; 36784e8acd84SMarcin Wojtas } 36799b8d05b8SZbigniew Bodek 36809b8d05b8SZbigniew Bodek rc = ena_setup_rx_dma_tag(adapter); 36814e8acd84SMarcin Wojtas if (unlikely(rc != 0)) { 36823fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Failed to create RX DMA tag\n"); 3683cd5d5804SMarcin Wojtas goto err_tx_tag_free; 36844e8acd84SMarcin Wojtas } 36859b8d05b8SZbigniew Bodek 3686e2735b09SMarcin Wojtas /* 3687e2735b09SMarcin Wojtas * The amount of requested MSIX vectors is equal to 3688e2735b09SMarcin Wojtas * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant 3689e2735b09SMarcin Wojtas * number of admin queue interrupts. The former is initially determined 3690e2735b09SMarcin Wojtas * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be 3691e2735b09SMarcin Wojtas * achieved if there are not enough system resources. By default, the 3692e2735b09SMarcin Wojtas * number of effectively used IO queues is the same but later on it can 3693e2735b09SMarcin Wojtas * be limited by the user using sysctl interface. 3694e2735b09SMarcin Wojtas */ 3695aa9c3226SMarcin Wojtas rc = ena_enable_msix_and_set_admin_interrupts(adapter); 36963f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) { 36973fc5d816SMarcin Wojtas ena_log(pdev, ERR, 36989b8d05b8SZbigniew Bodek "Failed to enable and set the admin interrupts\n"); 3699c115a1e2SMarcin Wojtas goto err_io_free; 3700c115a1e2SMarcin Wojtas } 3701e2735b09SMarcin Wojtas /* By default all of allocated MSIX vectors are actively used */ 3702e2735b09SMarcin Wojtas adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC; 3703e2735b09SMarcin Wojtas 3704e2735b09SMarcin Wojtas /* initialize rings basic information */ 3705e2735b09SMarcin Wojtas ena_init_io_rings(adapter); 3706c115a1e2SMarcin Wojtas 3707c115a1e2SMarcin Wojtas /* setup network interface */ 3708c115a1e2SMarcin Wojtas rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx); 3709c115a1e2SMarcin Wojtas if (unlikely(rc != 0)) { 37103fc5d816SMarcin Wojtas ena_log(pdev, ERR, "Error with network interface setup\n"); 3711c115a1e2SMarcin Wojtas goto err_msix_free; 37129b8d05b8SZbigniew Bodek } 37139b8d05b8SZbigniew Bodek 3714081169f2SZbigniew Bodek /* Initialize reset task queue */ 3715081169f2SZbigniew Bodek TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); 3716081169f2SZbigniew Bodek adapter->reset_tq = taskqueue_create("ena_reset_enqueue", 3717081169f2SZbigniew Bodek M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); 3718081169f2SZbigniew Bodek taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, 3719081169f2SZbigniew Bodek "%s rstq", device_get_nameunit(adapter->pdev)); 3720081169f2SZbigniew Bodek 37219b8d05b8SZbigniew Bodek /* Initialize statistics */ 37229b8d05b8SZbigniew Bodek ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, 37239b8d05b8SZbigniew Bodek sizeof(struct ena_stats_dev)); 372430217e2dSMarcin Wojtas ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, 372530217e2dSMarcin Wojtas sizeof(struct ena_hw_stats)); 37269b8d05b8SZbigniew Bodek ena_sysctl_add_nodes(adapter); 37279b8d05b8SZbigniew Bodek 3728d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 3729d17b7d87SMarcin Wojtas rc = ena_netmap_attach(adapter); 3730d17b7d87SMarcin Wojtas if (rc != 0) { 37313fc5d816SMarcin Wojtas ena_log(pdev, ERR, "netmap attach failed: %d\n", rc); 3732d17b7d87SMarcin Wojtas goto err_detach; 3733d17b7d87SMarcin Wojtas } 3734d17b7d87SMarcin Wojtas #endif /* 
DEV_NETMAP */ 3735d17b7d87SMarcin Wojtas 37369b8d05b8SZbigniew Bodek /* Tell the stack that the interface is not active */ 37379b8d05b8SZbigniew Bodek if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 3738fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); 37399b8d05b8SZbigniew Bodek 374078554d0cSDawid Gorecki /* Run the timer service */ 374178554d0cSDawid Gorecki ENA_TIMER_RESET(adapter); 374278554d0cSDawid Gorecki 37439b8d05b8SZbigniew Bodek return (0); 37449b8d05b8SZbigniew Bodek 3745d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 3746d17b7d87SMarcin Wojtas err_detach: 3747d17b7d87SMarcin Wojtas ether_ifdetach(adapter->ifp); 3748d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */ 3749c115a1e2SMarcin Wojtas err_msix_free: 3750c115a1e2SMarcin Wojtas ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); 3751c115a1e2SMarcin Wojtas ena_free_mgmnt_irq(adapter); 3752c115a1e2SMarcin Wojtas ena_disable_msix(adapter); 3753cd5d5804SMarcin Wojtas err_io_free: 37549b8d05b8SZbigniew Bodek ena_free_all_io_rings_resources(adapter); 37559b8d05b8SZbigniew Bodek ena_free_rx_dma_tag(adapter); 3756cd5d5804SMarcin Wojtas err_tx_tag_free: 37579b8d05b8SZbigniew Bodek ena_free_tx_dma_tag(adapter); 3758cd5d5804SMarcin Wojtas err_com_free: 37599b8d05b8SZbigniew Bodek ena_com_admin_destroy(ena_dev); 37609b8d05b8SZbigniew Bodek ena_com_delete_host_info(ena_dev); 3761cd5d5804SMarcin Wojtas ena_com_mmio_reg_read_request_destroy(ena_dev); 37629b8d05b8SZbigniew Bodek err_bus_free: 37639b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 37641c808fcdSMichal Krawczyk err_pci_free: 37659b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 37664fa9e02dSMarcin Wojtas err_dev_free: 37674fa9e02dSMarcin Wojtas free(ena_dev, M_DEVBUF); 3768cd5d5804SMarcin Wojtas 37699b8d05b8SZbigniew Bodek return (rc); 37709b8d05b8SZbigniew Bodek } 37719b8d05b8SZbigniew Bodek 37729b8d05b8SZbigniew Bodek /** 37739b8d05b8SZbigniew Bodek * ena_detach - Device Removal Routine 37749b8d05b8SZbigniew Bodek * @pdev: device information struct 37759b8d05b8SZbigniew Bodek * 37769b8d05b8SZbigniew Bodek * ena_detach is called by the device subsystem to alert the driver 37779b8d05b8SZbigniew Bodek * that it should release a PCI device. 
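 * It stops the timer and reset services, brings the interface down and
 * releases the resources acquired in ena_attach().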
37789b8d05b8SZbigniew Bodek **/ 37799b8d05b8SZbigniew Bodek static int 37809b8d05b8SZbigniew Bodek ena_detach(device_t pdev) 37819b8d05b8SZbigniew Bodek { 37829b8d05b8SZbigniew Bodek struct ena_adapter *adapter = device_get_softc(pdev); 37839b8d05b8SZbigniew Bodek struct ena_com_dev *ena_dev = adapter->ena_dev; 37849b8d05b8SZbigniew Bodek int rc; 37859b8d05b8SZbigniew Bodek 37869b8d05b8SZbigniew Bodek /* Make sure VLANS are not using driver */ 37879b8d05b8SZbigniew Bodek if (adapter->ifp->if_vlantrunk != NULL) { 37883fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n"); 37899b8d05b8SZbigniew Bodek return (EBUSY); 37909b8d05b8SZbigniew Bodek } 37919b8d05b8SZbigniew Bodek 37929151c55dSMarcin Wojtas ether_ifdetach(adapter->ifp); 37939151c55dSMarcin Wojtas 37946959869eSMarcin Wojtas /* Stop timer service */ 379507aff471SArtur Rojek ENA_LOCK_LOCK(); 379678554d0cSDawid Gorecki ENA_TIMER_DRAIN(adapter); 379707aff471SArtur Rojek ENA_LOCK_UNLOCK(); 37986959869eSMarcin Wojtas 37996959869eSMarcin Wojtas /* Release reset task */ 38009b8d05b8SZbigniew Bodek while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) 38019b8d05b8SZbigniew Bodek taskqueue_drain(adapter->reset_tq, &adapter->reset_task); 38029b8d05b8SZbigniew Bodek taskqueue_free(adapter->reset_tq); 38039b8d05b8SZbigniew Bodek 380407aff471SArtur Rojek ENA_LOCK_LOCK(); 38059b8d05b8SZbigniew Bodek ena_down(adapter); 380632f63fa7SMarcin Wojtas ena_destroy_device(adapter, true); 380707aff471SArtur Rojek ENA_LOCK_UNLOCK(); 38089b8d05b8SZbigniew Bodek 38090e7d31f6SMarcin Wojtas /* Restore unregistered sysctl queue nodes. */ 38100e7d31f6SMarcin Wojtas ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues, 38110e7d31f6SMarcin Wojtas adapter->max_num_io_queues); 38120e7d31f6SMarcin Wojtas 3813d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP 3814d17b7d87SMarcin Wojtas netmap_detach(adapter->ifp); 3815d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */ 3816d17b7d87SMarcin Wojtas 381730217e2dSMarcin Wojtas ena_free_counters((counter_u64_t *)&adapter->hw_stats, 381830217e2dSMarcin Wojtas sizeof(struct ena_hw_stats)); 38199b8d05b8SZbigniew Bodek ena_free_counters((counter_u64_t *)&adapter->dev_stats, 38209b8d05b8SZbigniew Bodek sizeof(struct ena_stats_dev)); 38219b8d05b8SZbigniew Bodek 38229b8d05b8SZbigniew Bodek rc = ena_free_rx_dma_tag(adapter); 38233f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 38243fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 38259b8d05b8SZbigniew Bodek "Unmapped RX DMA tag associations\n"); 38269b8d05b8SZbigniew Bodek 38279b8d05b8SZbigniew Bodek rc = ena_free_tx_dma_tag(adapter); 38283f9ed7abSMarcin Wojtas if (unlikely(rc != 0)) 38293fc5d816SMarcin Wojtas ena_log(adapter->pdev, WARN, 38309b8d05b8SZbigniew Bodek "Unmapped TX DMA tag associations\n"); 38319b8d05b8SZbigniew Bodek 38329b8d05b8SZbigniew Bodek ena_free_irqs(adapter); 38339b8d05b8SZbigniew Bodek 38349b8d05b8SZbigniew Bodek ena_free_pci_resources(adapter); 38359b8d05b8SZbigniew Bodek 38366d1ef2abSArtur Rojek if (adapter->rss_indir != NULL) 38376d1ef2abSArtur Rojek free(adapter->rss_indir, M_DEVBUF); 38386d1ef2abSArtur Rojek 383932f63fa7SMarcin Wojtas if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) 384032f63fa7SMarcin Wojtas ena_com_rss_destroy(ena_dev); 384132f63fa7SMarcin Wojtas 384232f63fa7SMarcin Wojtas ena_com_delete_host_info(ena_dev); 384332f63fa7SMarcin Wojtas 38449151c55dSMarcin Wojtas if_free(adapter->ifp); 38459151c55dSMarcin Wojtas 38469b8d05b8SZbigniew Bodek free(ena_dev->bus, M_DEVBUF); 
38479b8d05b8SZbigniew Bodek 38489b8d05b8SZbigniew Bodek free(ena_dev, M_DEVBUF); 38499b8d05b8SZbigniew Bodek 38509b8d05b8SZbigniew Bodek return (bus_generic_detach(pdev)); 38519b8d05b8SZbigniew Bodek } 38529b8d05b8SZbigniew Bodek 38539b8d05b8SZbigniew Bodek /****************************************************************************** 38549b8d05b8SZbigniew Bodek ******************************** AENQ Handlers ******************************* 38559b8d05b8SZbigniew Bodek *****************************************************************************/ 38569b8d05b8SZbigniew Bodek /** 38579b8d05b8SZbigniew Bodek * ena_update_on_link_change: 38589b8d05b8SZbigniew Bodek * Notify the network interface about the change in link status 38599b8d05b8SZbigniew Bodek **/ 38609b8d05b8SZbigniew Bodek static void 38619b8d05b8SZbigniew Bodek ena_update_on_link_change(void *adapter_data, 38629b8d05b8SZbigniew Bodek struct ena_admin_aenq_entry *aenq_e) 38639b8d05b8SZbigniew Bodek { 38649b8d05b8SZbigniew Bodek struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 38659b8d05b8SZbigniew Bodek struct ena_admin_aenq_link_change_desc *aenq_desc; 38669b8d05b8SZbigniew Bodek int status; 38679b8d05b8SZbigniew Bodek if_t ifp; 38689b8d05b8SZbigniew Bodek 38699b8d05b8SZbigniew Bodek aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 38709b8d05b8SZbigniew Bodek ifp = adapter->ifp; 38719b8d05b8SZbigniew Bodek status = aenq_desc->flags & 38729b8d05b8SZbigniew Bodek ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; 38739b8d05b8SZbigniew Bodek 38749b8d05b8SZbigniew Bodek if (status != 0) { 38753fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "link is UP\n"); 3876fd43fd2aSMarcin Wojtas ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter); 387732f63fa7SMarcin Wojtas if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter)) 387832f63fa7SMarcin Wojtas if_link_state_change(ifp, LINK_STATE_UP); 387932f63fa7SMarcin Wojtas } else { 38803fc5d816SMarcin Wojtas ena_log(adapter->pdev, INFO, "link is DOWN\n"); 38819b8d05b8SZbigniew Bodek if_link_state_change(ifp, LINK_STATE_DOWN); 3882fd43fd2aSMarcin Wojtas ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter); 38839b8d05b8SZbigniew Bodek } 38849b8d05b8SZbigniew Bodek } 38859b8d05b8SZbigniew Bodek 388640621d71SMarcin Wojtas static void ena_notification(void *adapter_data, 388740621d71SMarcin Wojtas struct ena_admin_aenq_entry *aenq_e) 388840621d71SMarcin Wojtas { 388940621d71SMarcin Wojtas struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 389040621d71SMarcin Wojtas struct ena_admin_ena_hw_hints *hints; 389140621d71SMarcin Wojtas 38923fc5d816SMarcin Wojtas ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, adapter->ena_dev, 389340621d71SMarcin Wojtas "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, 389440621d71SMarcin Wojtas ENA_ADMIN_NOTIFICATION); 389540621d71SMarcin Wojtas 38969eb1615fSMarcin Wojtas switch (aenq_e->aenq_common_desc.syndrome) { 389740621d71SMarcin Wojtas case ENA_ADMIN_UPDATE_HINTS: 389840621d71SMarcin Wojtas hints = 389940621d71SMarcin Wojtas (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); 390040621d71SMarcin Wojtas ena_update_hints(adapter, hints); 390140621d71SMarcin Wojtas break; 390240621d71SMarcin Wojtas default: 39033fc5d816SMarcin Wojtas ena_log(adapter->pdev, ERR, 390440621d71SMarcin Wojtas "Invalid aenq notification link state %d\n", 39059eb1615fSMarcin Wojtas aenq_e->aenq_common_desc.syndrome); 390640621d71SMarcin Wojtas } 390740621d71SMarcin Wojtas } 390840621d71SMarcin Wojtas 
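/*
 * The global ENA lock taken by the reset task and the detach path is created
 * and destroyed by the SYSINIT/SYSUNINIT handlers below, independently of any
 * particular adapter instance.
 */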
390907aff471SArtur Rojek static void
391007aff471SArtur Rojek ena_lock_init(void *arg)
391107aff471SArtur Rojek {
391207aff471SArtur Rojek 	ENA_LOCK_INIT();
391307aff471SArtur Rojek }
391407aff471SArtur Rojek SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);
391507aff471SArtur Rojek 
391607aff471SArtur Rojek static void
391707aff471SArtur Rojek ena_lock_uninit(void *arg)
391807aff471SArtur Rojek {
391907aff471SArtur Rojek 	ENA_LOCK_DESTROY();
392007aff471SArtur Rojek }
392107aff471SArtur Rojek SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);
392207aff471SArtur Rojek 
39239b8d05b8SZbigniew Bodek /**
39249b8d05b8SZbigniew Bodek  * This handler will be called for an unknown event group or unimplemented handlers
39259b8d05b8SZbigniew Bodek  **/
39269b8d05b8SZbigniew Bodek static void
3927e6de9a83SMarcin Wojtas unimplemented_aenq_handler(void *adapter_data,
39289b8d05b8SZbigniew Bodek     struct ena_admin_aenq_entry *aenq_e)
39299b8d05b8SZbigniew Bodek {
3930e6de9a83SMarcin Wojtas 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3931e6de9a83SMarcin Wojtas 
39323fc5d816SMarcin Wojtas 	ena_log(adapter->pdev, ERR,
3933e6de9a83SMarcin Wojtas 	    "Unknown event was received or event with unimplemented handler\n");
39349b8d05b8SZbigniew Bodek }
39359b8d05b8SZbigniew Bodek 
39369b8d05b8SZbigniew Bodek static struct ena_aenq_handlers aenq_handlers = {
39379b8d05b8SZbigniew Bodek 	.handlers = {
39389b8d05b8SZbigniew Bodek 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
393940621d71SMarcin Wojtas 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
39409b8d05b8SZbigniew Bodek 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
39419b8d05b8SZbigniew Bodek 	},
39429b8d05b8SZbigniew Bodek 	.unimplemented_handler = unimplemented_aenq_handler
39439b8d05b8SZbigniew Bodek };
39449b8d05b8SZbigniew Bodek 
39459b8d05b8SZbigniew Bodek /*********************************************************************
39469b8d05b8SZbigniew Bodek  *  FreeBSD Device Interface Entry Points
39479b8d05b8SZbigniew Bodek  *********************************************************************/
39489b8d05b8SZbigniew Bodek 
39499b8d05b8SZbigniew Bodek static device_method_t ena_methods[] = {
39509b8d05b8SZbigniew Bodek 	/* Device interface */
39519b8d05b8SZbigniew Bodek 	DEVMETHOD(device_probe, ena_probe),
39529b8d05b8SZbigniew Bodek 	DEVMETHOD(device_attach, ena_attach),
39539b8d05b8SZbigniew Bodek 	DEVMETHOD(device_detach, ena_detach),
39549b8d05b8SZbigniew Bodek 	DEVMETHOD_END
39559b8d05b8SZbigniew Bodek };
39569b8d05b8SZbigniew Bodek 
39579b8d05b8SZbigniew Bodek static driver_t ena_driver = {
39589b8d05b8SZbigniew Bodek 	"ena", ena_methods, sizeof(struct ena_adapter),
39599b8d05b8SZbigniew Bodek };
39609b8d05b8SZbigniew Bodek 
39611dc1476cSJohn Baldwin DRIVER_MODULE(ena, pci, ena_driver, 0, 0);
396240abe76bSWarner Losh MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
3963329e817fSWarner Losh     nitems(ena_vendor_info_array) - 1);
39649b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, pci, 1, 1, 1);
39659b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, ether, 1, 1, 1);
3966d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP
3967d17b7d87SMarcin Wojtas MODULE_DEPEND(ena, netmap, 1, 1, 1);
3968d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */
39699b8d05b8SZbigniew Bodek 
39709b8d05b8SZbigniew Bodek /*********************************************************************/
3971