xref: /freebsd/sys/dev/ena/ena.c (revision 8cd86b51be4ab0fe70bad4830e608d56db5c850f)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include "ena.h"
#include "ena_datapath.h"
#include "ena_rss.h"
#include "ena_sysctl.h"

#ifdef DEV_NETMAP
#include "ena_netmap.h"
#endif /* DEV_NETMAP */

/*********************************************************
 *  Function prototypes
 *********************************************************/
static int ena_probe(device_t);
static void ena_intr_msix_mgmnt(void *);
static void ena_free_pci_resources(struct ena_adapter *);
static int ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *,
    uint16_t);
static void ena_init_io_rings_basic(struct ena_adapter *);
static void ena_init_io_rings_advanced(struct ena_adapter *);
static void ena_init_io_rings(struct ena_adapter *);
static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void ena_free_all_io_rings_resources(struct ena_adapter *);
static int ena_setup_tx_dma_tag(struct ena_adapter *);
static int ena_free_tx_dma_tag(struct ena_adapter *);
static int ena_setup_rx_dma_tag(struct ena_adapter *);
static int ena_free_rx_dma_tag(struct ena_adapter *);
static void ena_release_all_tx_dmamap(struct ena_ring *);
static int ena_setup_tx_resources(struct ena_adapter *, int);
static void ena_free_tx_resources(struct ena_adapter *, int);
static int ena_setup_all_tx_resources(struct ena_adapter *);
static void ena_free_all_tx_resources(struct ena_adapter *);
static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int ena_setup_all_rx_resources(struct ena_adapter *);
static void ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void ena_refill_all_rx_bufs(struct ena_adapter *);
static void ena_free_all_rx_bufs(struct ena_adapter *);
static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void ena_free_all_tx_bufs(struct ena_adapter *);
static void ena_destroy_all_tx_queues(struct ena_adapter *);
static void ena_destroy_all_rx_queues(struct ena_adapter *);
static void ena_destroy_all_io_queues(struct ena_adapter *);
static int ena_create_io_queues(struct ena_adapter *);
static int ena_handle_msix(void *);
static int ena_enable_msix(struct ena_adapter *);
static void ena_setup_mgmnt_intr(struct ena_adapter *);
static int ena_setup_io_intr(struct ena_adapter *);
static int ena_request_mgmnt_irq(struct ena_adapter *);
static int ena_request_io_irq(struct ena_adapter *);
static void ena_free_mgmnt_irq(struct ena_adapter *);
static void ena_free_io_irq(struct ena_adapter *);
static void ena_free_irqs(struct ena_adapter *);
static void ena_disable_msix(struct ena_adapter *);
static void ena_unmask_all_io_irqs(struct ena_adapter *);
static int ena_up_complete(struct ena_adapter *);
static uint64_t ena_get_counter(if_t, ift_counter);
static int ena_media_change(if_t);
static void ena_media_status(if_t, struct ifmediareq *);
static void ena_init(void *);
static int ena_ioctl(if_t, u_long, caddr_t);
static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void ena_update_host_info(struct ena_admin_host_info *, if_t);
static void ena_update_hwassist(struct ena_adapter *);
static void ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int ena_enable_wc(device_t, struct resource *);
static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
    struct ena_com_dev_get_features_ctx *);
static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
static void ena_config_host_info(struct ena_com_dev *, device_t);
static int ena_attach(device_t);
static int ena_detach(device_t);
static int ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *);
static int ena_copy_eni_metrics(struct ena_adapter *);
static int ena_copy_srd_metrics(struct ena_adapter *);
static int ena_copy_customer_metrics(struct ena_adapter *);
static void ena_timer_service(void *);

static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME
    " v" ENA_DRV_MODULE_VERSION;

static ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
	/* Last entry */
	{ 0, 0, 0 }
};

struct sx ena_global_lock;

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;

void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

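/*
 * ena_dma_alloc - allocate a DMA-coherent memory region
 *
 * Creates a single-segment DMA tag bounded by the adapter's DMA width,
 * binds it to the requested memory domain, allocates zeroed coherent
 * memory of at least 'size' bytes (rounded up to a multiple of PAGE_SIZE)
 * and loads the map, storing the bus address in dma->paddr and the
 * kernel virtual address in dma->vaddr.
 */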
int
ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
    int mapflags, bus_size_t alignment, int domain)
{
	struct ena_adapter *adapter = device_get_softc(dmadev);
	device_t pdev = adapter->pdev;
	uint32_t maxsize;
	uint64_t dma_space_addr;
	int error;

	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
	if (unlikely(dma_space_addr == 0))
		dma_space_addr = BUS_SPACE_MAXADDR;

	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
	    alignment, 0,      /* alignment, bounds		*/
	    dma_space_addr,    /* lowaddr of exclusion window	*/
	    BUS_SPACE_MAXADDR, /* highaddr of exclusion window	*/
	    NULL, NULL,	       /* filter, filterarg		*/
	    maxsize,	       /* maxsize			*/
	    1,		       /* nsegments			*/
	    maxsize,	       /* maxsegsize			*/
	    BUS_DMA_ALLOCNOW,  /* flags				*/
	    NULL,	       /* lockfunc			*/
	    NULL,	       /* lockarg			*/
	    &dma->tag);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
		goto fail_tag;
	}

	error = bus_dma_tag_set_domain(dma->tag, domain);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n",
		    error);
		goto fail_map_create;
	}

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
		    (uintmax_t)size, error);
		goto fail_map_create;
	}

	dma->paddr = 0;
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    ena_dmamap_callback, &dma->paddr, mapflags);
	if (unlikely((error != 0) || (dma->paddr == 0))) {
		ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
		goto fail_map_load;
	}

	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail_map_load:
	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
	bus_dma_tag_destroy(dma->tag);
fail_tag:
	dma->tag = NULL;
	dma->vaddr = NULL;
	dma->paddr = 0;

	return (error);
}
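
/*
 * Illustrative usage sketch (not driver code): a caller that needs a
 * device-visible scratch area could use ena_dma_alloc() roughly as
 * below. The size, map flags, alignment and NUMA domain values are
 * example assumptions only, not values mandated by the driver.
 *
 *	ena_mem_handle_t mem;
 *	int rc;
 *
 *	rc = ena_dma_alloc(adapter->pdev, 4096, &mem, BUS_DMA_NOWAIT,
 *	    8, 0);
 *	if (rc != 0)
 *		return (rc);
 *
 * On success, mem.vaddr holds the zeroed kernel virtual address and
 * mem.paddr the bus address to program into the device.
 */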

static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;

	if (adapter->memory != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
	}

	if (adapter->registers != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
	}

	if (adapter->msix != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid,
		    adapter->msix);
	}
}

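/*
 * ena_probe - standard newbus probe method
 *
 * Matches the device's PCI vendor/device ID pair against
 * ena_vendor_info_array and claims the device on a match.
 */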
static int
ena_probe(device_t dev)
{
	ena_vendor_info_t *ent;
	uint16_t pci_vendor_id = 0;
	uint16_t pci_device_id = 0;

	pci_vendor_id = pci_get_vendor(dev);
	pci_device_id = pci_get_device(dev);

	ent = ena_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id)) {
			ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id,
			    pci_device_id);

			device_set_desc(dev, ENA_DEVICE_DESC);
			return (BUS_PROBE_DEFAULT);
		}

		ent++;
	}

	return (ENXIO);
}

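/*
 * ena_change_mtu - validate and apply a new MTU
 *
 * Rejects values outside of [ENA_MIN_MTU, adapter->max_mtu]; otherwise
 * programs the device through ena_com_set_dev_mtu() and, on success,
 * updates the interface MTU.
 */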
static int
ena_change_mtu(if_t ifp, int new_mtu)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	device_t pdev = adapter->pdev;
	int rc;

	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
		ena_log(pdev, ERR, "Invalid MTU setting. new_mtu: %d max mtu: %d min mtu: %d\n",
		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
		return (EINVAL);
	}

	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (likely(rc == 0)) {
		ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
		if_setmtu(ifp, new_mtu);
	} else {
		ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
	}

	return (rc);
}

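/*
 * The three helpers below treat a statistics structure as a contiguous
 * array of counter_u64_t entries, so callers pass the address of the
 * whole struct together with its size, e.g. (sketch mirroring the calls
 * made later in this file):
 *
 *	ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
 *	    sizeof(txr->tx_stats));
 */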
static inline void
ena_alloc_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		*begin = counter_u64_alloc(M_WAITOK);
}

static inline void
ena_free_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_free(*begin);
}

static inline void
ena_reset_counters(counter_u64_t *begin, int size)
{
	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

	for (; begin < end; ++begin)
		counter_u64_zero(*begin);
}

static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{
	ring->qid = qid;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	atomic_store_8(&ring->first_interrupt, 0);
	ring->no_interrupt_event_cnt = 0;
}

static void
ena_init_io_rings_basic(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	struct ena_que *que;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;

		que = &adapter->que[i];
		que->adapter = adapter;
		que->id = i;
		que->tx_ring = txr;
		que->rx_ring = rxr;

		txr->que = que;
		rxr->que = que;

		rxr->empty_rx_queue = 0;
		rxr->rx_mbuf_sz = ena_mbuf_sz;
	}
}

static void
ena_init_io_rings_advanced(struct ena_adapter *adapter)
{
	struct ena_ring *txr, *rxr;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* Allocate a buf ring */
		txr->buf_ring_size = adapter->buf_ring_size;
		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK,
		    &txr->ring_mtx);

		/* Allocate Tx statistics. */
		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
		    sizeof(txr->tx_stats));
		txr->tx_last_cleanup_ticks = ticks;

		/* Allocate Rx statistics. */
		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
		    sizeof(rxr->rx_stats));

		/* Initialize locks */
		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(adapter->pdev), i);
		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(adapter->pdev), i);

		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
	}
}

static void
ena_init_io_rings(struct ena_adapter *adapter)
{
	/*
	 * IO ring initialization can be divided into two steps:
	 *   1. Initialize variables and fields with initial values and copy
	 *      them from adapter/ena_dev (basic)
	 *   2. Allocate mutex, counters and buf_ring (advanced)
	 */
	ena_init_io_rings_basic(adapter);
	ena_init_io_rings_advanced(adapter);
}

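/*
 * ena_free_io_ring_resources - release per-queue ring bookkeeping
 *
 * Frees the Tx/Rx statistics counters, drains and frees the Tx buf_ring
 * and destroys the Tx ring mutex for queue 'qid'.
 */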
static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *txr = &adapter->tx_ring[qid];
	struct ena_ring *rxr = &adapter->rx_ring[qid];

	ena_free_counters((counter_u64_t *)&txr->tx_stats,
	    sizeof(txr->tx_stats));
	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
	    sizeof(rxr->rx_stats));

	ENA_RING_MTX_LOCK(txr);
	drbr_free(txr->br, M_DEVBUF);
	ENA_RING_MTX_UNLOCK(txr);

	mtx_destroy(&txr->ring_mtx);
}

static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_io_ring_resources(adapter, i);
}

static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Tx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
	    1, 0,				  /* alignment, bounds	     */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg	     */
	    ENA_TSO_MAXSIZE,			  /* maxsize		     */
	    adapter->max_tx_sgl_size - 1,	  /* nsegments		     */
	    ENA_TSO_MAXSIZE,			  /* maxsegsize		     */
	    0,					  /* flags		     */
	    NULL,				  /* lockfunc		     */
	    NULL,				  /* lockfuncarg	     */
	    &adapter->tx_buf_tag);

	return (ret);
}

static int
ena_free_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);

	if (likely(ret == 0))
		adapter->tx_buf_tag = NULL;

	return (ret);
}

static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Rx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
	    1, 0,				  /* alignment, bounds	     */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg	     */
	    ena_mbuf_sz,			  /* maxsize		     */
	    adapter->max_rx_sgl_size,		  /* nsegments		     */
	    ena_mbuf_sz,			  /* maxsegsize		     */
	    0,					  /* flags		     */
	    NULL,				  /* lockfunc		     */
	    NULL,				  /* lockarg		     */
	    &adapter->rx_buf_tag);

	return (ret);
}

static int
ena_free_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);

	if (likely(ret == 0))
		adapter->rx_buf_tag = NULL;

	return (ret);
}

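/*
 * ena_release_all_tx_dmamap - destroy the per-buffer Tx DMA maps
 *
 * Walks the Tx ring and destroys every bus_dmamap created for its
 * buffers, including the netmap segment maps when the interface has
 * netmap enabled.
 */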
static void
ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_tx_buffer *tx_info;
	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
	int i;
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	for (i = 0; i < tx_ring->ring_size; ++i) {
		tx_info = &tx_ring->tx_buffer_info[i];
#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_info->nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
				if (nm_info->map_seg[j] != NULL) {
					bus_dmamap_destroy(tx_tag,
					    nm_info->map_seg[j]);
					nm_info->map_seg[j] = NULL;
				}
			}
		}
#endif /* DEV_NETMAP */
		if (tx_info->dmamap != NULL) {
			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
			tx_info->dmamap = NULL;
		}
	}
}

/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	device_t pdev = adapter->pdev;
	char thread_name[MAXCOMLEN + 1];
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *tx_ring = que->tx_ring;
	cpuset_t *cpu_mask = NULL;
	int size, i, err;
#ifdef DEV_NETMAP
	bus_dmamap_t *map;
	int j;

	ena_netmap_reset_tx_ring(adapter, qid);
#endif /* DEV_NETMAP */

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->tx_buffer_info == NULL))
		return (ENOMEM);

	size = sizeof(uint16_t) * tx_ring->ring_size;
	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->free_tx_ids == NULL))
		goto err_buf_info_free;

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
		goto err_tx_ids_free;

	/* Req id stack for TX OOO completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset TX statistics. */
	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
	    sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->acum_pkts = 0;

	/* Make sure that drbr is empty */
	ENA_RING_MTX_LOCK(tx_ring);
	drbr_flush(adapter->ifp, tx_ring->br);
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* ... and create the buffer DMA maps */
	for (i = 0; i < tx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
		    &tx_ring->tx_buffer_info[i].dmamap);
		if (unlikely(err != 0)) {
			ena_log(pdev, ERR,
			    "Unable to create Tx DMA map for buffer %d\n", i);
			goto err_map_release;
		}

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
				    &map[j]);
				if (unlikely(err != 0)) {
					ena_log(pdev, ERR,
					    "Unable to create Tx DMA for buffer %d %d\n",
					    i, j);
					goto err_map_release;
				}
			}
		}
#endif /* DEV_NETMAP */
	}

	/* Allocate taskqueues */
	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
	if (unlikely(tx_ring->enqueue_tq == NULL)) {
		ena_log(pdev, ERR,
		    "Unable to create taskqueue for enqueue task\n");
		i = tx_ring->ring_size;
		goto err_map_release;
	}

	tx_ring->running = true;

#ifdef RSS
	cpu_mask = &que->cpu_mask;
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->cpu);
#else
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->id);
#endif
	taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
	    cpu_mask, "%s", thread_name);

	return (0);

err_map_release:
	ena_release_all_tx_dmamap(tx_ring);
err_tx_ids_free:
	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
err_buf_info_free:
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	return (ENOMEM);
}

/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	taskqueue_free(tx_ring->enqueue_tq);

	ENA_RING_MTX_LOCK(tx_ring);
	/* Flush buffer ring, */
	drbr_flush(adapter->ifp, tx_ring->br);

	/* Free buffer DMA maps, */
	for (int i = 0; i < tx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				if (nm_info->socket_buf_idx[j] != 0) {
					bus_dmamap_sync(adapter->tx_buf_tag,
					    nm_info->map_seg[j],
					    BUS_DMASYNC_POSTWRITE);
					ena_netmap_unload(adapter,
					    nm_info->map_seg[j]);
				}
				bus_dmamap_destroy(adapter->tx_buf_tag,
				    nm_info->map_seg[j]);
				nm_info->socket_buf_idx[j] = 0;
			}
		}
#endif /* DEV_NETMAP */

		m_freem(tx_ring->tx_buffer_info[i].mbuf);
		tx_ring->tx_buffer_info[i].mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* And free allocated memory. */
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;

	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
	tx_ring->push_buf_intermediate_buf = NULL;
}

/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Tx Queue %u failed\n", i);
			goto err_setup_tx;
		}
	}

	return (0);

err_setup_tx:
	/* Rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_tx_resources(adapter, i);
}

/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	device_t pdev = adapter->pdev;
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *rx_ring = que->rx_ring;
	int size, err, i;

	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

#ifdef DEV_NETMAP
	ena_netmap_reset_rx_ring(adapter, qid);
	rx_ring->initialized = false;
#endif /* DEV_NETMAP */

	/*
	 * Allocate an extra element so that in the Rx path
	 * we can always prefetch rx_info + 1.
	 */
	size += sizeof(struct ena_rx_buffer);

	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

	size = sizeof(uint16_t) * rx_ring->ring_size;
	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset RX statistics. */
	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
	    sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* ... and create the buffer DMA maps */
	for (i = 0; i < rx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
		    &(rx_ring->rx_buffer_info[i].map));
		if (err != 0) {
			ena_log(pdev, ERR,
			    "Unable to create Rx DMA map for buffer %d\n", i);
			goto err_buf_info_unmap;
		}
	}

	/* Create LRO for the ring */
	if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) {
		int err = tcp_lro_init(&rx_ring->lro);
		if (err != 0) {
			ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
			    qid);
		} else {
			ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
			    qid);
			rx_ring->lro.ifp = adapter->ifp;
		}
	}

	return (0);

err_buf_info_unmap:
	while (i--) {
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;
	return (ENOMEM);
}

/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	/* Free buffer DMA maps, */
	for (int i = 0; i < rx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
		m_freem(rx_ring->rx_buffer_info[i].mbuf);
		rx_ring->rx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	/* free LRO resources, */
	tcp_lro_free(&rx_ring->lro);

	/* free allocated memory */
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
}

/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Allocation for Rx Queue %u failed\n", i);
			goto err_setup_rx;
		}
	}
	return (0);

err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return (rc);
}

/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

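/*
 * ena_alloc_rx_mbuf - attach a freshly mapped mbuf to an Rx descriptor
 *
 * Allocates a jumbo cluster mbuf (falling back to a regular cluster on
 * failure), DMA-maps it with the Rx buffer tag and records the bus
 * address and length in rx_info->ena_buf.
 */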
static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	device_t pdev = adapter->pdev;
	struct ena_com_buf *ena_buf;
	bus_dma_segment_t segs[1];
	int nsegs, error;
	int mlen;

	/* If the previously allocated mbuf is not used yet, keep it */
10013f9ed7abSMarcin Wojtas 	if (unlikely(rx_info->mbuf != NULL))
10029b8d05b8SZbigniew Bodek 		return (0);
10039b8d05b8SZbigniew Bodek 
10049b8d05b8SZbigniew Bodek 	/* Get mbuf using UMA allocator */
100504cf2b88SMarcin Wojtas 	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
100604cf2b88SMarcin Wojtas 	    rx_ring->rx_mbuf_sz);
10079b8d05b8SZbigniew Bodek 
10083f9ed7abSMarcin Wojtas 	if (unlikely(rx_info->mbuf == NULL)) {
10094727bda6SMarcin Wojtas 		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
10104727bda6SMarcin Wojtas 		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
10114727bda6SMarcin Wojtas 		if (unlikely(rx_info->mbuf == NULL)) {
10129b8d05b8SZbigniew Bodek 			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
10139b8d05b8SZbigniew Bodek 			return (ENOMEM);
10149b8d05b8SZbigniew Bodek 		}
10154727bda6SMarcin Wojtas 		mlen = MCLBYTES;
10164727bda6SMarcin Wojtas 	} else {
101704cf2b88SMarcin Wojtas 		mlen = rx_ring->rx_mbuf_sz;
10184727bda6SMarcin Wojtas 	}
10199b8d05b8SZbigniew Bodek 	/* Set mbuf length*/
10204727bda6SMarcin Wojtas 	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;
10219b8d05b8SZbigniew Bodek 
10229b8d05b8SZbigniew Bodek 	/* Map packets for DMA */
102382e558eaSDawid Gorecki 	ena_log(pdev, DBG,
102482e558eaSDawid Gorecki 	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
10259b8d05b8SZbigniew Bodek 	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
10269b8d05b8SZbigniew Bodek 	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
10279b8d05b8SZbigniew Bodek 	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
10283f9ed7abSMarcin Wojtas 	if (unlikely((error != 0) || (nsegs != 1))) {
10293fc5d816SMarcin Wojtas 		ena_log(pdev, WARN,
10303fc5d816SMarcin Wojtas 		    "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
10319b8d05b8SZbigniew Bodek 		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
10329b8d05b8SZbigniew Bodek 		goto exit;
10339b8d05b8SZbigniew Bodek 	}
10349b8d05b8SZbigniew Bodek 
10359b8d05b8SZbigniew Bodek 	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
10369b8d05b8SZbigniew Bodek 
10379b8d05b8SZbigniew Bodek 	ena_buf = &rx_info->ena_buf;
10389b8d05b8SZbigniew Bodek 	ena_buf->paddr = segs[0].ds_addr;
10394727bda6SMarcin Wojtas 	ena_buf->len = mlen;
10409b8d05b8SZbigniew Bodek 
104182e558eaSDawid Gorecki 	ena_log(pdev, DBG,
104282e558eaSDawid Gorecki 	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
10439b8d05b8SZbigniew Bodek 	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);
10449b8d05b8SZbigniew Bodek 
10459b8d05b8SZbigniew Bodek 	return (0);
10469b8d05b8SZbigniew Bodek 
10479b8d05b8SZbigniew Bodek exit:
10489b8d05b8SZbigniew Bodek 	m_freem(rx_info->mbuf);
10499b8d05b8SZbigniew Bodek 	rx_info->mbuf = NULL;
10509b8d05b8SZbigniew Bodek 	return (EFAULT);
10519b8d05b8SZbigniew Bodek }
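
/*
 * Illustrative sketch only (not compiled into the driver): a caller that
 * wants to (re)populate a single descriptor follows the same pattern as the
 * refill loop in ena_refill_rx_bufs() below -- call ena_alloc_rx_mbuf() and
 * stop refilling on failure, since the function already falls back from a
 * jumbo cluster to a regular MCLBYTES cluster internally:
 *
 *	struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[req_id];
 *
 *	if (unlikely(ena_alloc_rx_mbuf(adapter, rx_ring, rx_info) != 0))
 *		return;
 *
 * A nonzero return means the mbuf allocation or the DMA mapping failed; the
 * caller simply retries on the next refill attempt. Variable names mirror
 * ena_refill_rx_bufs(); anything beyond that is hypothetical.
 */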
10529b8d05b8SZbigniew Bodek 
10539b8d05b8SZbigniew Bodek static void
10549b8d05b8SZbigniew Bodek ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
10559b8d05b8SZbigniew Bodek     struct ena_rx_buffer *rx_info)
10569b8d05b8SZbigniew Bodek {
10574e8acd84SMarcin Wojtas 	if (rx_info->mbuf == NULL) {
10583fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, WARN,
10593fc5d816SMarcin Wojtas 		    "Trying to free unallocated buffer\n");
10609b8d05b8SZbigniew Bodek 		return;
10614e8acd84SMarcin Wojtas 	}
10629b8d05b8SZbigniew Bodek 
1063e8073738SMarcin Wojtas 	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
1064e8073738SMarcin Wojtas 	    BUS_DMASYNC_POSTREAD);
10659b8d05b8SZbigniew Bodek 	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
10669b8d05b8SZbigniew Bodek 	m_freem(rx_info->mbuf);
10679b8d05b8SZbigniew Bodek 	rx_info->mbuf = NULL;
10689b8d05b8SZbigniew Bodek }
10699b8d05b8SZbigniew Bodek 
10709b8d05b8SZbigniew Bodek /**
10719b8d05b8SZbigniew Bodek  * ena_refill_rx_bufs - Refills ring with descriptors
10729b8d05b8SZbigniew Bodek  * @rx_ring: the ring which we want to feed with free descriptors
10739b8d05b8SZbigniew Bodek  * @num: number of descriptors to refill
10749b8d05b8SZbigniew Bodek  * Refills the ring with newly allocated DMA-mapped mbufs for receiving
10759b8d05b8SZbigniew Bodek  **/
107638c7b965SMarcin Wojtas int
10779b8d05b8SZbigniew Bodek ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
10789b8d05b8SZbigniew Bodek {
10799b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter = rx_ring->adapter;
10803fc5d816SMarcin Wojtas 	device_t pdev = adapter->pdev;
108143fefd16SMarcin Wojtas 	uint16_t next_to_use, req_id;
10829b8d05b8SZbigniew Bodek 	uint32_t i;
10839b8d05b8SZbigniew Bodek 	int rc;
10849b8d05b8SZbigniew Bodek 
10853fc5d816SMarcin Wojtas 	ena_log_io(pdev, DBG, "refill qid: %d\n", rx_ring->qid);
10869b8d05b8SZbigniew Bodek 
10879b8d05b8SZbigniew Bodek 	next_to_use = rx_ring->next_to_use;
10889b8d05b8SZbigniew Bodek 
10899b8d05b8SZbigniew Bodek 	for (i = 0; i < num; i++) {
109043fefd16SMarcin Wojtas 		struct ena_rx_buffer *rx_info;
109143fefd16SMarcin Wojtas 
10923fc5d816SMarcin Wojtas 		ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n",
10933fc5d816SMarcin Wojtas 		    next_to_use);
10949b8d05b8SZbigniew Bodek 
109543fefd16SMarcin Wojtas 		req_id = rx_ring->free_rx_ids[next_to_use];
109643fefd16SMarcin Wojtas 		rx_info = &rx_ring->rx_buffer_info[req_id];
10979a0f2079SMarcin Wojtas #ifdef DEV_NETMAP
1098358bcc4cSMarcin Wojtas 		if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
109982e558eaSDawid Gorecki 			rc = ena_netmap_alloc_rx_slot(adapter, rx_ring,
110082e558eaSDawid Gorecki 			    rx_info);
11019a0f2079SMarcin Wojtas 		else
11029a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */
11039b8d05b8SZbigniew Bodek 			rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
11043f9ed7abSMarcin Wojtas 		if (unlikely(rc != 0)) {
11053fc5d816SMarcin Wojtas 			ena_log_io(pdev, WARN,
11064e8acd84SMarcin Wojtas 			    "failed to alloc buffer for rx queue %d\n",
11074e8acd84SMarcin Wojtas 			    rx_ring->qid);
11089b8d05b8SZbigniew Bodek 			break;
11099b8d05b8SZbigniew Bodek 		}
11109b8d05b8SZbigniew Bodek 		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
111143fefd16SMarcin Wojtas 		    &rx_info->ena_buf, req_id);
11120bdffe59SMarcin Wojtas 		if (unlikely(rc != 0)) {
11133fc5d816SMarcin Wojtas 			ena_log_io(pdev, WARN,
11149b8d05b8SZbigniew Bodek 			    "failed to add buffer for rx queue %d\n",
11159b8d05b8SZbigniew Bodek 			    rx_ring->qid);
11169b8d05b8SZbigniew Bodek 			break;
11179b8d05b8SZbigniew Bodek 		}
11189b8d05b8SZbigniew Bodek 		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
11199b8d05b8SZbigniew Bodek 		    rx_ring->ring_size);
11209b8d05b8SZbigniew Bodek 	}
11219b8d05b8SZbigniew Bodek 
11223f9ed7abSMarcin Wojtas 	if (unlikely(i < num)) {
11239b8d05b8SZbigniew Bodek 		counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
11243fc5d816SMarcin Wojtas 		ena_log_io(pdev, WARN,
11254e8acd84SMarcin Wojtas 		    "refilled rx qid %d with only %d mbufs (from %d)\n",
11264e8acd84SMarcin Wojtas 		    rx_ring->qid, i, num);
11279b8d05b8SZbigniew Bodek 	}
11289b8d05b8SZbigniew Bodek 
11298483b844SMarcin Wojtas 	if (likely(i != 0))
11309b8d05b8SZbigniew Bodek 		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
11318483b844SMarcin Wojtas 
11329b8d05b8SZbigniew Bodek 	rx_ring->next_to_use = next_to_use;
11339b8d05b8SZbigniew Bodek 	return (i);
11349b8d05b8SZbigniew Bodek }
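
/*
 * Usage sketch (illustrative, mirroring ena_refill_all_rx_bufs() below): a
 * ring is normally topped up to ring_size - 1 buffers, and the return value
 * tells how many were actually posted:
 *
 *	int want = rx_ring->ring_size - 1;
 *	int got = ena_refill_rx_bufs(rx_ring, want);
 *
 *	if (unlikely(got != want))
 *		ena_log_io(adapter->pdev, WARN,
 *		    "posted only %d of %d RX buffers\n", got, want);
 *
 * The ring_size - 1 limit is taken from the caller below; everything else in
 * this snippet is hypothetical.
 */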
11359b8d05b8SZbigniew Bodek 
11367d8c4feeSMarcin Wojtas int
113721823546SMarcin Wojtas ena_update_buf_ring_size(struct ena_adapter *adapter,
113821823546SMarcin Wojtas     uint32_t new_buf_ring_size)
113921823546SMarcin Wojtas {
114021823546SMarcin Wojtas 	uint32_t old_buf_ring_size;
114121823546SMarcin Wojtas 	int rc = 0;
114221823546SMarcin Wojtas 	bool dev_was_up;
114321823546SMarcin Wojtas 
114421823546SMarcin Wojtas 	old_buf_ring_size = adapter->buf_ring_size;
114521823546SMarcin Wojtas 	adapter->buf_ring_size = new_buf_ring_size;
114621823546SMarcin Wojtas 
114721823546SMarcin Wojtas 	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
114821823546SMarcin Wojtas 	ena_down(adapter);
114921823546SMarcin Wojtas 
115021823546SMarcin Wojtas 	/* Reconfigure buf ring for all Tx rings. */
115121823546SMarcin Wojtas 	ena_free_all_io_rings_resources(adapter);
115221823546SMarcin Wojtas 	ena_init_io_rings_advanced(adapter);
115321823546SMarcin Wojtas 	if (dev_was_up) {
115421823546SMarcin Wojtas 		/*
115521823546SMarcin Wojtas 		 * If ena_up() fails, it's not because of recent buf_ring size
115621823546SMarcin Wojtas 		 * If ena_up() fails, it is not because of the recent buf_ring
115721823546SMarcin Wojtas 		 * size change. In that case, just revert to the old drbr
115821823546SMarcin Wojtas 		 * value and trigger the reset, because something else must
115921823546SMarcin Wojtas 		 * have gone wrong.
116021823546SMarcin Wojtas 		rc = ena_up(adapter);
116121823546SMarcin Wojtas 		if (unlikely(rc != 0)) {
11623fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, ERR,
116321823546SMarcin Wojtas 			    "Failed to configure device after setting new drbr size: %u. Reverting to old value: %u and triggering the reset\n",
116421823546SMarcin Wojtas 			    new_buf_ring_size, old_buf_ring_size);
116521823546SMarcin Wojtas 
116621823546SMarcin Wojtas 			/* Revert old size and trigger the reset */
116721823546SMarcin Wojtas 			adapter->buf_ring_size = old_buf_ring_size;
116821823546SMarcin Wojtas 			ena_free_all_io_rings_resources(adapter);
116921823546SMarcin Wojtas 			ena_init_io_rings_advanced(adapter);
117021823546SMarcin Wojtas 
117121823546SMarcin Wojtas 			ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
117221823546SMarcin Wojtas 			    adapter);
117321823546SMarcin Wojtas 			ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
117421823546SMarcin Wojtas 		}
117521823546SMarcin Wojtas 	}
117621823546SMarcin Wojtas 
117721823546SMarcin Wojtas 	return (rc);
117821823546SMarcin Wojtas }
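
/*
 * Reconfiguration sketch (illustrative only): the ena_update_*() helpers in
 * this file all share the same down -> reconfigure -> up pattern shown above
 * and are expected to run with the adapter lock held, since ena_up() asserts
 * it. A hypothetical caller (e.g. a sysctl handler) would look roughly like:
 *
 *	ENA_LOCK_LOCK();
 *	error = ena_update_buf_ring_size(adapter, new_size);
 *	ENA_LOCK_UNLOCK();
 *
 * ENA_LOCK_LOCK()/ENA_LOCK_UNLOCK() are assumed counterparts of the
 * ENA_LOCK_ASSERT() used in ena_up() and may not match the actual macro
 * names.
 */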
117921823546SMarcin Wojtas 
118021823546SMarcin Wojtas int
11817d8c4feeSMarcin Wojtas ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
11827d8c4feeSMarcin Wojtas     uint32_t new_rx_size)
11837d8c4feeSMarcin Wojtas {
11847d8c4feeSMarcin Wojtas 	uint32_t old_tx_size, old_rx_size;
11857d8c4feeSMarcin Wojtas 	int rc = 0;
11867d8c4feeSMarcin Wojtas 	bool dev_was_up;
11877d8c4feeSMarcin Wojtas 
11889762a033SMarcin Wojtas 	old_tx_size = adapter->requested_tx_ring_size;
11899762a033SMarcin Wojtas 	old_rx_size = adapter->requested_rx_ring_size;
11909762a033SMarcin Wojtas 	adapter->requested_tx_ring_size = new_tx_size;
11919762a033SMarcin Wojtas 	adapter->requested_rx_ring_size = new_rx_size;
11927d8c4feeSMarcin Wojtas 
11937d8c4feeSMarcin Wojtas 	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
11947d8c4feeSMarcin Wojtas 	ena_down(adapter);
11957d8c4feeSMarcin Wojtas 
11967d8c4feeSMarcin Wojtas 	/* Configure queues with new size. */
11977d8c4feeSMarcin Wojtas 	ena_init_io_rings_basic(adapter);
11987d8c4feeSMarcin Wojtas 	if (dev_was_up) {
11997d8c4feeSMarcin Wojtas 		rc = ena_up(adapter);
12007d8c4feeSMarcin Wojtas 		if (unlikely(rc != 0)) {
12013fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, ERR,
12027d8c4feeSMarcin Wojtas 			    "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
12037d8c4feeSMarcin Wojtas 			    new_tx_size, new_rx_size, old_tx_size, old_rx_size);
12047d8c4feeSMarcin Wojtas 
12057d8c4feeSMarcin Wojtas 			/* Revert old size. */
12069762a033SMarcin Wojtas 			adapter->requested_tx_ring_size = old_tx_size;
12079762a033SMarcin Wojtas 			adapter->requested_rx_ring_size = old_rx_size;
12087d8c4feeSMarcin Wojtas 			ena_init_io_rings_basic(adapter);
12097d8c4feeSMarcin Wojtas 
12107d8c4feeSMarcin Wojtas 			/* And try again. */
12117d8c4feeSMarcin Wojtas 			rc = ena_up(adapter);
12127d8c4feeSMarcin Wojtas 			if (unlikely(rc != 0)) {
12133fc5d816SMarcin Wojtas 				ena_log(adapter->pdev, ERR,
12147d8c4feeSMarcin Wojtas 				    "Failed to revert old queue sizes. Triggering device reset.\n");
12157d8c4feeSMarcin Wojtas 				/*
12167d8c4feeSMarcin Wojtas 				 * If we've failed again, something else must
12177d8c4feeSMarcin Wojtas 				 * have gone wrong. After the reset, the device
12187d8c4feeSMarcin Wojtas 				 * should try to go up.
12197d8c4feeSMarcin Wojtas 				 */
12207d8c4feeSMarcin Wojtas 				ENA_FLAG_SET_ATOMIC(
12217d8c4feeSMarcin Wojtas 				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
12227d8c4feeSMarcin Wojtas 				ena_trigger_reset(adapter,
12237d8c4feeSMarcin Wojtas 				    ENA_REGS_RESET_OS_TRIGGER);
12247d8c4feeSMarcin Wojtas 			}
12257d8c4feeSMarcin Wojtas 		}
12267d8c4feeSMarcin Wojtas 	}
12277d8c4feeSMarcin Wojtas 
12287d8c4feeSMarcin Wojtas 	return (rc);
12297d8c4feeSMarcin Wojtas }
12307d8c4feeSMarcin Wojtas 
12319b8d05b8SZbigniew Bodek static void
123256d41ad5SMarcin Wojtas ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
123356d41ad5SMarcin Wojtas {
123456d41ad5SMarcin Wojtas 	ena_free_all_io_rings_resources(adapter);
123556d41ad5SMarcin Wojtas 	/* Force indirection table to be reinitialized */
123656d41ad5SMarcin Wojtas 	ena_com_rss_destroy(adapter->ena_dev);
123756d41ad5SMarcin Wojtas 
123856d41ad5SMarcin Wojtas 	adapter->num_io_queues = num;
123956d41ad5SMarcin Wojtas 	ena_init_io_rings(adapter);
124056d41ad5SMarcin Wojtas }
124156d41ad5SMarcin Wojtas 
1242f9e1d947SOsama Abboud int
1243f9e1d947SOsama Abboud ena_update_base_cpu(struct ena_adapter *adapter, int new_num)
1244f9e1d947SOsama Abboud {
1245f9e1d947SOsama Abboud 	int old_num;
1246f9e1d947SOsama Abboud 	int rc = 0;
1247f9e1d947SOsama Abboud 	bool dev_was_up;
1248f9e1d947SOsama Abboud 
1249f9e1d947SOsama Abboud 	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1250f9e1d947SOsama Abboud 	old_num = adapter->irq_cpu_base;
1251f9e1d947SOsama Abboud 
1252f9e1d947SOsama Abboud 	ena_down(adapter);
1253f9e1d947SOsama Abboud 
1254f9e1d947SOsama Abboud 	adapter->irq_cpu_base = new_num;
1255f9e1d947SOsama Abboud 
1256f9e1d947SOsama Abboud 	if (dev_was_up) {
1257f9e1d947SOsama Abboud 		rc = ena_up(adapter);
1258f9e1d947SOsama Abboud 		if (unlikely(rc != 0)) {
1259f9e1d947SOsama Abboud 			ena_log(adapter->pdev, ERR,
1260f9e1d947SOsama Abboud 			    "Failed to configure device with IRQ base CPU %d. "
1261f9e1d947SOsama Abboud 			    "Reverting to previous value: %d\n",
1262f9e1d947SOsama Abboud 			    new_num, old_num);
1263f9e1d947SOsama Abboud 
1264f9e1d947SOsama Abboud 			adapter->irq_cpu_base = old_num;
1265f9e1d947SOsama Abboud 
1266f9e1d947SOsama Abboud 			rc = ena_up(adapter);
1267f9e1d947SOsama Abboud 			if (unlikely(rc != 0)) {
1268f9e1d947SOsama Abboud 				ena_log(adapter->pdev, ERR,
1269f9e1d947SOsama Abboud 				    "Failed to revert to previous setup. "
1270f9e1d947SOsama Abboud 				    "Triggering device reset.\n");
1271f9e1d947SOsama Abboud 				ENA_FLAG_SET_ATOMIC(
1272f9e1d947SOsama Abboud 				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1273f9e1d947SOsama Abboud 				ena_trigger_reset(adapter,
1274f9e1d947SOsama Abboud 				    ENA_REGS_RESET_OS_TRIGGER);
1275f9e1d947SOsama Abboud 			}
1276f9e1d947SOsama Abboud 		}
1277f9e1d947SOsama Abboud 	}
1278f9e1d947SOsama Abboud 	return (rc);
1279f9e1d947SOsama Abboud }
1280f9e1d947SOsama Abboud 
1281f9e1d947SOsama Abboud int
1282f9e1d947SOsama Abboud ena_update_cpu_stride(struct ena_adapter *adapter, uint32_t new_num)
1283f9e1d947SOsama Abboud {
1284f9e1d947SOsama Abboud 	uint32_t old_num;
1285f9e1d947SOsama Abboud 	int rc = 0;
1286f9e1d947SOsama Abboud 	bool dev_was_up;
1287f9e1d947SOsama Abboud 
1288f9e1d947SOsama Abboud 	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1289f9e1d947SOsama Abboud 	old_num = adapter->irq_cpu_stride;
1290f9e1d947SOsama Abboud 
1291f9e1d947SOsama Abboud 	ena_down(adapter);
1292f9e1d947SOsama Abboud 
1293f9e1d947SOsama Abboud 	adapter->irq_cpu_stride = new_num;
1294f9e1d947SOsama Abboud 
1295f9e1d947SOsama Abboud 	if (dev_was_up) {
1296f9e1d947SOsama Abboud 		rc = ena_up(adapter);
1297f9e1d947SOsama Abboud 		if (unlikely(rc != 0)) {
1298f9e1d947SOsama Abboud 			ena_log(adapter->pdev, ERR,
1299f9e1d947SOsama Abboud 			    "Failed to configure device with IRQ CPU stride %d. "
1300f9e1d947SOsama Abboud 			    "Reverting to previous value: %d\n",
1301f9e1d947SOsama Abboud 			    new_num, old_num);
1302f9e1d947SOsama Abboud 
1303f9e1d947SOsama Abboud 			adapter->irq_cpu_stride = old_num;
1304f9e1d947SOsama Abboud 
1305f9e1d947SOsama Abboud 			rc = ena_up(adapter);
1306f9e1d947SOsama Abboud 			if (unlikely(rc != 0)) {
1307f9e1d947SOsama Abboud 				ena_log(adapter->pdev, ERR,
1308f9e1d947SOsama Abboud 				    "Failed to revert to previous setup. "
1309f9e1d947SOsama Abboud 				    "Triggering device reset.\n");
1310f9e1d947SOsama Abboud 				ENA_FLAG_SET_ATOMIC(
1311f9e1d947SOsama Abboud 				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1312f9e1d947SOsama Abboud 				ena_trigger_reset(adapter,
1313f9e1d947SOsama Abboud 				    ENA_REGS_RESET_OS_TRIGGER);
1314f9e1d947SOsama Abboud 			}
1315f9e1d947SOsama Abboud 		}
1316f9e1d947SOsama Abboud 	}
1317f9e1d947SOsama Abboud 	return (rc);
1318f9e1d947SOsama Abboud }
1319f9e1d947SOsama Abboud 
132056d41ad5SMarcin Wojtas /* Caller should sanitize new_num */
132156d41ad5SMarcin Wojtas int
132256d41ad5SMarcin Wojtas ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
132356d41ad5SMarcin Wojtas {
132456d41ad5SMarcin Wojtas 	uint32_t old_num;
132556d41ad5SMarcin Wojtas 	int rc = 0;
132656d41ad5SMarcin Wojtas 	bool dev_was_up;
132756d41ad5SMarcin Wojtas 
132856d41ad5SMarcin Wojtas 	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
132956d41ad5SMarcin Wojtas 	old_num = adapter->num_io_queues;
133056d41ad5SMarcin Wojtas 	ena_down(adapter);
133156d41ad5SMarcin Wojtas 
133256d41ad5SMarcin Wojtas 	ena_update_io_rings(adapter, new_num);
133356d41ad5SMarcin Wojtas 
133456d41ad5SMarcin Wojtas 	if (dev_was_up) {
133556d41ad5SMarcin Wojtas 		rc = ena_up(adapter);
133656d41ad5SMarcin Wojtas 		if (unlikely(rc != 0)) {
13373fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, ERR,
133856d41ad5SMarcin Wojtas 			    "Failed to configure device with %u IO queues. "
133956d41ad5SMarcin Wojtas 			    "Reverting to previous value: %u\n",
134056d41ad5SMarcin Wojtas 			    new_num, old_num);
134156d41ad5SMarcin Wojtas 
134256d41ad5SMarcin Wojtas 			ena_update_io_rings(adapter, old_num);
134356d41ad5SMarcin Wojtas 
134456d41ad5SMarcin Wojtas 			rc = ena_up(adapter);
134556d41ad5SMarcin Wojtas 			if (unlikely(rc != 0)) {
13463fc5d816SMarcin Wojtas 				ena_log(adapter->pdev, ERR,
134756d41ad5SMarcin Wojtas 				    "Failed to revert to previous IO queue "
134856d41ad5SMarcin Wojtas 				    "setup. Triggering device reset.\n");
134956d41ad5SMarcin Wojtas 				ENA_FLAG_SET_ATOMIC(
135056d41ad5SMarcin Wojtas 				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
135156d41ad5SMarcin Wojtas 				ena_trigger_reset(adapter,
135256d41ad5SMarcin Wojtas 				    ENA_REGS_RESET_OS_TRIGGER);
135356d41ad5SMarcin Wojtas 			}
135456d41ad5SMarcin Wojtas 		}
135556d41ad5SMarcin Wojtas 	}
135656d41ad5SMarcin Wojtas 
135756d41ad5SMarcin Wojtas 	return (rc);
135856d41ad5SMarcin Wojtas }
135956d41ad5SMarcin Wojtas 
136056d41ad5SMarcin Wojtas static void
13619b8d05b8SZbigniew Bodek ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
13629b8d05b8SZbigniew Bodek {
13639b8d05b8SZbigniew Bodek 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
13649b8d05b8SZbigniew Bodek 	unsigned int i;
13659b8d05b8SZbigniew Bodek 
13669b8d05b8SZbigniew Bodek 	for (i = 0; i < rx_ring->ring_size; i++) {
13679b8d05b8SZbigniew Bodek 		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
13689b8d05b8SZbigniew Bodek 
13690bdffe59SMarcin Wojtas 		if (rx_info->mbuf != NULL)
13709b8d05b8SZbigniew Bodek 			ena_free_rx_mbuf(adapter, rx_ring, rx_info);
13719a0f2079SMarcin Wojtas #ifdef DEV_NETMAP
13729a0f2079SMarcin Wojtas 		if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
13737583c633SJustin Hibbits 		    (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) {
13749a0f2079SMarcin Wojtas 			if (rx_info->netmap_buf_idx != 0)
13759a0f2079SMarcin Wojtas 				ena_netmap_free_rx_slot(adapter, rx_ring,
13769a0f2079SMarcin Wojtas 				    rx_info);
13779a0f2079SMarcin Wojtas 		}
13789a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */
13799b8d05b8SZbigniew Bodek 	}
13809b8d05b8SZbigniew Bodek }
13819b8d05b8SZbigniew Bodek 
13829b8d05b8SZbigniew Bodek /**
13839b8d05b8SZbigniew Bodek  * ena_refill_all_rx_bufs - allocate all queues Rx buffers
13849b8d05b8SZbigniew Bodek  * @adapter: network interface device structure
13859b8d05b8SZbigniew Bodek  *
13869b8d05b8SZbigniew Bodek  */
13879b8d05b8SZbigniew Bodek static void
13889b8d05b8SZbigniew Bodek ena_refill_all_rx_bufs(struct ena_adapter *adapter)
13899b8d05b8SZbigniew Bodek {
13909b8d05b8SZbigniew Bodek 	struct ena_ring *rx_ring;
13919b8d05b8SZbigniew Bodek 	int i, rc, bufs_num;
13929b8d05b8SZbigniew Bodek 
13937d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
13949b8d05b8SZbigniew Bodek 		rx_ring = &adapter->rx_ring[i];
13959b8d05b8SZbigniew Bodek 		bufs_num = rx_ring->ring_size - 1;
13969b8d05b8SZbigniew Bodek 		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
13979b8d05b8SZbigniew Bodek 		if (unlikely(rc != bufs_num))
13983fc5d816SMarcin Wojtas 			ena_log_io(adapter->pdev, WARN,
13993fc5d816SMarcin Wojtas 			    "refilling queue %d failed. "
140082e558eaSDawid Gorecki 			    "Allocated %d buffers out of %d\n",
140182e558eaSDawid Gorecki 			    i, rc, bufs_num);
14029a0f2079SMarcin Wojtas #ifdef DEV_NETMAP
14039a0f2079SMarcin Wojtas 		rx_ring->initialized = true;
14049a0f2079SMarcin Wojtas #endif /* DEV_NETMAP */
14059b8d05b8SZbigniew Bodek 	}
14069b8d05b8SZbigniew Bodek }
14079b8d05b8SZbigniew Bodek 
14089b8d05b8SZbigniew Bodek static void
14099b8d05b8SZbigniew Bodek ena_free_all_rx_bufs(struct ena_adapter *adapter)
14109b8d05b8SZbigniew Bodek {
14119b8d05b8SZbigniew Bodek 	int i;
14129b8d05b8SZbigniew Bodek 
14137d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++)
14149b8d05b8SZbigniew Bodek 		ena_free_rx_bufs(adapter, i);
14159b8d05b8SZbigniew Bodek }
14169b8d05b8SZbigniew Bodek 
14179b8d05b8SZbigniew Bodek /**
14189b8d05b8SZbigniew Bodek  * ena_free_tx_bufs - Free Tx Buffers per Queue
14199b8d05b8SZbigniew Bodek  * @adapter: network interface device structure
14209b8d05b8SZbigniew Bodek  * @qid: queue index
14219b8d05b8SZbigniew Bodek  **/
14229b8d05b8SZbigniew Bodek static void
14239b8d05b8SZbigniew Bodek ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
14249b8d05b8SZbigniew Bodek {
14254e8acd84SMarcin Wojtas 	bool print_once = true;
14269b8d05b8SZbigniew Bodek 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
14279b8d05b8SZbigniew Bodek 
1428416e8864SZbigniew Bodek 	ENA_RING_MTX_LOCK(tx_ring);
14299b8d05b8SZbigniew Bodek 	for (int i = 0; i < tx_ring->ring_size; i++) {
14309b8d05b8SZbigniew Bodek 		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
14319b8d05b8SZbigniew Bodek 
14329b8d05b8SZbigniew Bodek 		if (tx_info->mbuf == NULL)
14339b8d05b8SZbigniew Bodek 			continue;
14349b8d05b8SZbigniew Bodek 
14354e8acd84SMarcin Wojtas 		if (print_once) {
14363fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, WARN,
143782e558eaSDawid Gorecki 			    "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
143882e558eaSDawid Gorecki 			    i);
14394e8acd84SMarcin Wojtas 			print_once = false;
14404e8acd84SMarcin Wojtas 		} else {
14413fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, DBG,
144282e558eaSDawid Gorecki 			    "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
144382e558eaSDawid Gorecki 			    i);
14444e8acd84SMarcin Wojtas 		}
14459b8d05b8SZbigniew Bodek 
1446888810f0SMarcin Wojtas 		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
1447e8073738SMarcin Wojtas 		    BUS_DMASYNC_POSTWRITE);
1448888810f0SMarcin Wojtas 		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
14494fa9e02dSMarcin Wojtas 
14509b8d05b8SZbigniew Bodek 		m_free(tx_info->mbuf);
14519b8d05b8SZbigniew Bodek 		tx_info->mbuf = NULL;
14529b8d05b8SZbigniew Bodek 	}
1453416e8864SZbigniew Bodek 	ENA_RING_MTX_UNLOCK(tx_ring);
14549b8d05b8SZbigniew Bodek }
14559b8d05b8SZbigniew Bodek 
14569b8d05b8SZbigniew Bodek static void
14579b8d05b8SZbigniew Bodek ena_free_all_tx_bufs(struct ena_adapter *adapter)
14589b8d05b8SZbigniew Bodek {
14597d8c4feeSMarcin Wojtas 	for (int i = 0; i < adapter->num_io_queues; i++)
14609b8d05b8SZbigniew Bodek 		ena_free_tx_bufs(adapter, i);
14619b8d05b8SZbigniew Bodek }
14629b8d05b8SZbigniew Bodek 
14639b8d05b8SZbigniew Bodek static void
14649b8d05b8SZbigniew Bodek ena_destroy_all_tx_queues(struct ena_adapter *adapter)
14659b8d05b8SZbigniew Bodek {
14669b8d05b8SZbigniew Bodek 	uint16_t ena_qid;
14679b8d05b8SZbigniew Bodek 	int i;
14689b8d05b8SZbigniew Bodek 
14697d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
14709b8d05b8SZbigniew Bodek 		ena_qid = ENA_IO_TXQ_IDX(i);
14719b8d05b8SZbigniew Bodek 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
14729b8d05b8SZbigniew Bodek 	}
14739b8d05b8SZbigniew Bodek }
14749b8d05b8SZbigniew Bodek 
14759b8d05b8SZbigniew Bodek static void
14769b8d05b8SZbigniew Bodek ena_destroy_all_rx_queues(struct ena_adapter *adapter)
14779b8d05b8SZbigniew Bodek {
14789b8d05b8SZbigniew Bodek 	uint16_t ena_qid;
14799b8d05b8SZbigniew Bodek 	int i;
14809b8d05b8SZbigniew Bodek 
14817d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
14829b8d05b8SZbigniew Bodek 		ena_qid = ENA_IO_RXQ_IDX(i);
14839b8d05b8SZbigniew Bodek 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
14849b8d05b8SZbigniew Bodek 	}
14859b8d05b8SZbigniew Bodek }
14869b8d05b8SZbigniew Bodek 
14879b8d05b8SZbigniew Bodek static void
14889b8d05b8SZbigniew Bodek ena_destroy_all_io_queues(struct ena_adapter *adapter)
14899b8d05b8SZbigniew Bodek {
14905cb9db07SMarcin Wojtas 	struct ena_que *queue;
14915cb9db07SMarcin Wojtas 	int i;
14925cb9db07SMarcin Wojtas 
14937d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
14945cb9db07SMarcin Wojtas 		queue = &adapter->que[i];
149582e558eaSDawid Gorecki 		while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL))
149682e558eaSDawid Gorecki 			taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task);
14975cb9db07SMarcin Wojtas 		taskqueue_free(queue->cleanup_tq);
14985cb9db07SMarcin Wojtas 	}
14995cb9db07SMarcin Wojtas 
15009b8d05b8SZbigniew Bodek 	ena_destroy_all_tx_queues(adapter);
15019b8d05b8SZbigniew Bodek 	ena_destroy_all_rx_queues(adapter);
15029b8d05b8SZbigniew Bodek }
15039b8d05b8SZbigniew Bodek 
15049b8d05b8SZbigniew Bodek static int
15059b8d05b8SZbigniew Bodek ena_create_io_queues(struct ena_adapter *adapter)
15069b8d05b8SZbigniew Bodek {
15079b8d05b8SZbigniew Bodek 	struct ena_com_dev *ena_dev = adapter->ena_dev;
15089b8d05b8SZbigniew Bodek 	struct ena_com_create_io_ctx ctx;
15099b8d05b8SZbigniew Bodek 	struct ena_ring *ring;
15105cb9db07SMarcin Wojtas 	struct ena_que *queue;
15119b8d05b8SZbigniew Bodek 	uint16_t ena_qid;
15129b8d05b8SZbigniew Bodek 	uint32_t msix_vector;
15136d1ef2abSArtur Rojek 	cpuset_t *cpu_mask = NULL;
15149b8d05b8SZbigniew Bodek 	int rc, i;
15159b8d05b8SZbigniew Bodek 
15169b8d05b8SZbigniew Bodek 	/* Create TX queues */
15177d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
15189b8d05b8SZbigniew Bodek 		msix_vector = ENA_IO_IRQ_IDX(i);
15199b8d05b8SZbigniew Bodek 		ena_qid = ENA_IO_TXQ_IDX(i);
15209b8d05b8SZbigniew Bodek 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
15219b8d05b8SZbigniew Bodek 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
15229762a033SMarcin Wojtas 		ctx.queue_size = adapter->requested_tx_ring_size;
15239b8d05b8SZbigniew Bodek 		ctx.msix_vector = msix_vector;
15249b8d05b8SZbigniew Bodek 		ctx.qid = ena_qid;
1525eb4c4f4aSMarcin Wojtas 		ctx.numa_node = adapter->que[i].domain;
1526eb4c4f4aSMarcin Wojtas 
15279b8d05b8SZbigniew Bodek 		rc = ena_com_create_io_queue(ena_dev, &ctx);
15280bdffe59SMarcin Wojtas 		if (rc != 0) {
15293fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, ERR,
15309b8d05b8SZbigniew Bodek 			    "Failed to create io TX queue #%d rc: %d\n", i, rc);
15319b8d05b8SZbigniew Bodek 			goto err_tx;
15329b8d05b8SZbigniew Bodek 		}
15339b8d05b8SZbigniew Bodek 		ring = &adapter->tx_ring[i];
15349b8d05b8SZbigniew Bodek 		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
153582e558eaSDawid Gorecki 		    &ring->ena_com_io_sq, &ring->ena_com_io_cq);
15360bdffe59SMarcin Wojtas 		if (rc != 0) {
15373fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, ERR,
15389b8d05b8SZbigniew Bodek 			    "Failed to get TX queue handlers. TX queue num"
153982e558eaSDawid Gorecki 			    " %d rc: %d\n",
154082e558eaSDawid Gorecki 			    i, rc);
15419b8d05b8SZbigniew Bodek 			ena_com_destroy_io_queue(ena_dev, ena_qid);
15429b8d05b8SZbigniew Bodek 			goto err_tx;
15439b8d05b8SZbigniew Bodek 		}
1544eb4c4f4aSMarcin Wojtas 
1545eb4c4f4aSMarcin Wojtas 		if (ctx.numa_node >= 0) {
1546eb4c4f4aSMarcin Wojtas 			ena_com_update_numa_node(ring->ena_com_io_cq,
1547eb4c4f4aSMarcin Wojtas 			    ctx.numa_node);
1548eb4c4f4aSMarcin Wojtas 		}
15499b8d05b8SZbigniew Bodek 	}
15509b8d05b8SZbigniew Bodek 
15519b8d05b8SZbigniew Bodek 	/* Create RX queues */
15527d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
15539b8d05b8SZbigniew Bodek 		msix_vector = ENA_IO_IRQ_IDX(i);
15549b8d05b8SZbigniew Bodek 		ena_qid = ENA_IO_RXQ_IDX(i);
15559b8d05b8SZbigniew Bodek 		ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
15569b8d05b8SZbigniew Bodek 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
15579762a033SMarcin Wojtas 		ctx.queue_size = adapter->requested_rx_ring_size;
15589b8d05b8SZbigniew Bodek 		ctx.msix_vector = msix_vector;
15599b8d05b8SZbigniew Bodek 		ctx.qid = ena_qid;
1560eb4c4f4aSMarcin Wojtas 		ctx.numa_node = adapter->que[i].domain;
1561eb4c4f4aSMarcin Wojtas 
15629b8d05b8SZbigniew Bodek 		rc = ena_com_create_io_queue(ena_dev, &ctx);
15633f9ed7abSMarcin Wojtas 		if (unlikely(rc != 0)) {
15643fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, ERR,
15659b8d05b8SZbigniew Bodek 			    "Failed to create io RX queue[%d] rc: %d\n", i, rc);
15669b8d05b8SZbigniew Bodek 			goto err_rx;
15679b8d05b8SZbigniew Bodek 		}
15689b8d05b8SZbigniew Bodek 
15699b8d05b8SZbigniew Bodek 		ring = &adapter->rx_ring[i];
15709b8d05b8SZbigniew Bodek 		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
157182e558eaSDawid Gorecki 		    &ring->ena_com_io_sq, &ring->ena_com_io_cq);
15723f9ed7abSMarcin Wojtas 		if (unlikely(rc != 0)) {
15733fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, ERR,
15749b8d05b8SZbigniew Bodek 			    "Failed to get RX queue handlers. RX queue num"
157582e558eaSDawid Gorecki 			    " %d rc: %d\n",
157682e558eaSDawid Gorecki 			    i, rc);
15779b8d05b8SZbigniew Bodek 			ena_com_destroy_io_queue(ena_dev, ena_qid);
15789b8d05b8SZbigniew Bodek 			goto err_rx;
15799b8d05b8SZbigniew Bodek 		}
1580eb4c4f4aSMarcin Wojtas 
1581eb4c4f4aSMarcin Wojtas 		if (ctx.numa_node >= 0) {
1582eb4c4f4aSMarcin Wojtas 			ena_com_update_numa_node(ring->ena_com_io_cq,
1583eb4c4f4aSMarcin Wojtas 			    ctx.numa_node);
1584eb4c4f4aSMarcin Wojtas 		}
15859b8d05b8SZbigniew Bodek 	}
15869b8d05b8SZbigniew Bodek 
15877d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
15885cb9db07SMarcin Wojtas 		queue = &adapter->que[i];
15895cb9db07SMarcin Wojtas 
15906c3e93cbSGleb Smirnoff 		NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
15915cb9db07SMarcin Wojtas 		queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
15925cb9db07SMarcin Wojtas 		    M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
15935cb9db07SMarcin Wojtas 
15946d1ef2abSArtur Rojek #ifdef RSS
15956d1ef2abSArtur Rojek 		cpu_mask = &queue->cpu_mask;
15966d1ef2abSArtur Rojek #endif
15976d1ef2abSArtur Rojek 		taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET,
159882e558eaSDawid Gorecki 		    cpu_mask, "%s queue %d cleanup",
15995cb9db07SMarcin Wojtas 		    device_get_nameunit(adapter->pdev), i);
16005cb9db07SMarcin Wojtas 	}
16015cb9db07SMarcin Wojtas 
16029b8d05b8SZbigniew Bodek 	return (0);
16039b8d05b8SZbigniew Bodek 
16049b8d05b8SZbigniew Bodek err_rx:
16059b8d05b8SZbigniew Bodek 	while (i--)
16069b8d05b8SZbigniew Bodek 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
16077d8c4feeSMarcin Wojtas 	i = adapter->num_io_queues;
16089b8d05b8SZbigniew Bodek err_tx:
16099b8d05b8SZbigniew Bodek 	while (i--)
16109b8d05b8SZbigniew Bodek 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
16119b8d05b8SZbigniew Bodek 
16129b8d05b8SZbigniew Bodek 	return (ENXIO);
16139b8d05b8SZbigniew Bodek }
16149b8d05b8SZbigniew Bodek 
16159b8d05b8SZbigniew Bodek /*********************************************************************
16169b8d05b8SZbigniew Bodek  *
16179b8d05b8SZbigniew Bodek  *  MSIX & Interrupt Service routine
16189b8d05b8SZbigniew Bodek  *
16199b8d05b8SZbigniew Bodek  **********************************************************************/
16209b8d05b8SZbigniew Bodek 
16219b8d05b8SZbigniew Bodek /**
16229b8d05b8SZbigniew Bodek  * ena_handle_msix - MSIX Interrupt Handler for admin/async queue
16239b8d05b8SZbigniew Bodek  * @arg: interrupt number
16249b8d05b8SZbigniew Bodek  **/
16259b8d05b8SZbigniew Bodek static void
16269b8d05b8SZbigniew Bodek ena_intr_msix_mgmnt(void *arg)
16279b8d05b8SZbigniew Bodek {
16289b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
16299b8d05b8SZbigniew Bodek 
16309b8d05b8SZbigniew Bodek 	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1631fd43fd2aSMarcin Wojtas 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
16329b8d05b8SZbigniew Bodek 		ena_com_aenq_intr_handler(adapter->ena_dev, arg);
16339b8d05b8SZbigniew Bodek }
16349b8d05b8SZbigniew Bodek 
16355cb9db07SMarcin Wojtas /**
16365cb9db07SMarcin Wojtas  * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
16375cb9db07SMarcin Wojtas  * @arg: queue
16385cb9db07SMarcin Wojtas  **/
16395cb9db07SMarcin Wojtas static int
16405cb9db07SMarcin Wojtas ena_handle_msix(void *arg)
16415cb9db07SMarcin Wojtas {
16425cb9db07SMarcin Wojtas 	struct ena_que *queue = arg;
16435cb9db07SMarcin Wojtas 	struct ena_adapter *adapter = queue->adapter;
16445cb9db07SMarcin Wojtas 	if_t ifp = adapter->ifp;
16455cb9db07SMarcin Wojtas 
16465cb9db07SMarcin Wojtas 	if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
16475cb9db07SMarcin Wojtas 		return (FILTER_STRAY);
16485cb9db07SMarcin Wojtas 
16495cb9db07SMarcin Wojtas 	taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
16505cb9db07SMarcin Wojtas 
16515cb9db07SMarcin Wojtas 	return (FILTER_HANDLED);
16525cb9db07SMarcin Wojtas }
16535cb9db07SMarcin Wojtas 
16549b8d05b8SZbigniew Bodek static int
16559b8d05b8SZbigniew Bodek ena_enable_msix(struct ena_adapter *adapter)
16569b8d05b8SZbigniew Bodek {
16579b8d05b8SZbigniew Bodek 	device_t dev = adapter->pdev;
16588805021aSMarcin Wojtas 	int msix_vecs, msix_req;
16598805021aSMarcin Wojtas 	int i, rc = 0;
16609b8d05b8SZbigniew Bodek 
1661fd43fd2aSMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
16623fc5d816SMarcin Wojtas 		ena_log(dev, ERR, "Error, MSI-X is already enabled\n");
1663fd43fd2aSMarcin Wojtas 		return (EINVAL);
1664fd43fd2aSMarcin Wojtas 	}
1665fd43fd2aSMarcin Wojtas 
16669b8d05b8SZbigniew Bodek 	/* Reserve the maximum number of MSI-X vectors we might need. */
16677d8c4feeSMarcin Wojtas 	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
16689b8d05b8SZbigniew Bodek 
1669cd5d5804SMarcin Wojtas 	adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
1670cd5d5804SMarcin Wojtas 	    M_DEVBUF, M_WAITOK | M_ZERO);
1671cd5d5804SMarcin Wojtas 
167282e558eaSDawid Gorecki 	ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);
16739b8d05b8SZbigniew Bodek 
16749b8d05b8SZbigniew Bodek 	for (i = 0; i < msix_vecs; i++) {
16759b8d05b8SZbigniew Bodek 		adapter->msix_entries[i].entry = i;
16769b8d05b8SZbigniew Bodek 		/* Vectors must start from 1 */
16779b8d05b8SZbigniew Bodek 		adapter->msix_entries[i].vector = i + 1;
16789b8d05b8SZbigniew Bodek 	}
16799b8d05b8SZbigniew Bodek 
16808805021aSMarcin Wojtas 	msix_req = msix_vecs;
16819b8d05b8SZbigniew Bodek 	rc = pci_alloc_msix(dev, &msix_vecs);
16823f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
168382e558eaSDawid Gorecki 		ena_log(dev, ERR, "Failed to enable MSI-X, vectors %d rc %d\n",
168482e558eaSDawid Gorecki 		    msix_vecs, rc);
16857d2544e6SMarcin Wojtas 
16869b8d05b8SZbigniew Bodek 		rc = ENOSPC;
16877d2544e6SMarcin Wojtas 		goto err_msix_free;
16889b8d05b8SZbigniew Bodek 	}
16899b8d05b8SZbigniew Bodek 
16908805021aSMarcin Wojtas 	if (msix_vecs != msix_req) {
16912b5b60feSMarcin Wojtas 		if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
16923fc5d816SMarcin Wojtas 			ena_log(dev, ERR,
16932b5b60feSMarcin Wojtas 			    "Not enough MSI-X vectors allocated: %d\n",
16942b5b60feSMarcin Wojtas 			    msix_vecs);
16952b5b60feSMarcin Wojtas 			pci_release_msi(dev);
16962b5b60feSMarcin Wojtas 			rc = ENOSPC;
16972b5b60feSMarcin Wojtas 			goto err_msix_free;
16982b5b60feSMarcin Wojtas 		}
169982e558eaSDawid Gorecki 		ena_log(dev, ERR,
170082e558eaSDawid Gorecki 		    "Enabled only %d MSI-X vectors (out of %d requested); "
170182e558eaSDawid Gorecki 		    "reduce the number of queues\n",
170282e558eaSDawid Gorecki 		    msix_vecs, msix_req);
17038805021aSMarcin Wojtas 	}
17048805021aSMarcin Wojtas 
17059b8d05b8SZbigniew Bodek 	adapter->msix_vecs = msix_vecs;
1706fd43fd2aSMarcin Wojtas 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
17079b8d05b8SZbigniew Bodek 
17087d2544e6SMarcin Wojtas 	return (0);
17097d2544e6SMarcin Wojtas 
17107d2544e6SMarcin Wojtas err_msix_free:
17117d2544e6SMarcin Wojtas 	free(adapter->msix_entries, M_DEVBUF);
17127d2544e6SMarcin Wojtas 	adapter->msix_entries = NULL;
17137d2544e6SMarcin Wojtas 
17149b8d05b8SZbigniew Bodek 	return (rc);
17159b8d05b8SZbigniew Bodek }
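
/*
 * Illustrative sketch (not driver code) of the pci_alloc_msix(9) contract
 * that ena_enable_msix() relies on: the vector count is passed by reference
 * and may come back smaller than requested, which is why msix_vecs is
 * re-checked against msix_req above.
 *
 *	int want = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
 *	int got = want;
 *
 *	if (pci_alloc_msix(dev, &got) == 0 && got < want)
 *		device_printf(dev, "granted %d of %d MSI-X vectors\n",
 *		    got, want);
 *
 * device_printf() is used here only for brevity; the driver itself logs
 * through ena_log().
 */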
17169b8d05b8SZbigniew Bodek 
17179b8d05b8SZbigniew Bodek static void
17189b8d05b8SZbigniew Bodek ena_setup_mgmnt_intr(struct ena_adapter *adapter)
17199b8d05b8SZbigniew Bodek {
172082e558eaSDawid Gorecki 	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE,
172182e558eaSDawid Gorecki 	    "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev));
17229b8d05b8SZbigniew Bodek 	/*
17239b8d05b8SZbigniew Bodek 	 * Handler is NULL on purpose; it will be set
17249b8d05b8SZbigniew Bodek 	 * when the management interrupt is acquired.
17259b8d05b8SZbigniew Bodek 	 */
17269b8d05b8SZbigniew Bodek 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
17279b8d05b8SZbigniew Bodek 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
17289b8d05b8SZbigniew Bodek 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
17299b8d05b8SZbigniew Bodek 	    adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
17309b8d05b8SZbigniew Bodek }
17319b8d05b8SZbigniew Bodek 
173277958fcdSMarcin Wojtas static int
17339b8d05b8SZbigniew Bodek ena_setup_io_intr(struct ena_adapter *adapter)
17349b8d05b8SZbigniew Bodek {
17356d1ef2abSArtur Rojek #ifdef RSS
17366d1ef2abSArtur Rojek 	int num_buckets = rss_getnumbuckets();
17376d1ef2abSArtur Rojek 	static int last_bind = 0;
1738eb4c4f4aSMarcin Wojtas 	int cur_bind;
1739eb4c4f4aSMarcin Wojtas 	int idx;
17406d1ef2abSArtur Rojek #endif
17419b8d05b8SZbigniew Bodek 	int irq_idx;
17429b8d05b8SZbigniew Bodek 
174377958fcdSMarcin Wojtas 	if (adapter->msix_entries == NULL)
174477958fcdSMarcin Wojtas 		return (EINVAL);
174577958fcdSMarcin Wojtas 
1746eb4c4f4aSMarcin Wojtas #ifdef RSS
1747eb4c4f4aSMarcin Wojtas 	if (adapter->first_bind < 0) {
1748eb4c4f4aSMarcin Wojtas 		adapter->first_bind = last_bind;
1749eb4c4f4aSMarcin Wojtas 		last_bind = (last_bind + adapter->num_io_queues) % num_buckets;
1750eb4c4f4aSMarcin Wojtas 	}
1751eb4c4f4aSMarcin Wojtas 	cur_bind = adapter->first_bind;
1752eb4c4f4aSMarcin Wojtas #endif
1753eb4c4f4aSMarcin Wojtas 
17547d8c4feeSMarcin Wojtas 	for (int i = 0; i < adapter->num_io_queues; i++) {
17559b8d05b8SZbigniew Bodek 		irq_idx = ENA_IO_IRQ_IDX(i);
17569b8d05b8SZbigniew Bodek 
17579b8d05b8SZbigniew Bodek 		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
17589b8d05b8SZbigniew Bodek 		    "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
17599b8d05b8SZbigniew Bodek 		adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
17609b8d05b8SZbigniew Bodek 		adapter->irq_tbl[irq_idx].data = &adapter->que[i];
17619b8d05b8SZbigniew Bodek 		adapter->irq_tbl[irq_idx].vector =
17629b8d05b8SZbigniew Bodek 		    adapter->msix_entries[irq_idx].vector;
17633fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n",
17649b8d05b8SZbigniew Bodek 		    adapter->msix_entries[irq_idx].vector);
1765277f11c4SMarcin Wojtas 
1766f9e1d947SOsama Abboud 		if (adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
1767f9e1d947SOsama Abboud 			adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1768f9e1d947SOsama Abboud 			    (unsigned)(adapter->irq_cpu_base +
1769f9e1d947SOsama Abboud 			    i * adapter->irq_cpu_stride) % (unsigned)mp_ncpus;
1770f9e1d947SOsama Abboud 			CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
1771f9e1d947SOsama Abboud 		}
1772f9e1d947SOsama Abboud 
17736d1ef2abSArtur Rojek #ifdef RSS
17749b8d05b8SZbigniew Bodek 		adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1775eb4c4f4aSMarcin Wojtas 		    rss_getcpu(cur_bind);
1776eb4c4f4aSMarcin Wojtas 		cur_bind = (cur_bind + 1) % num_buckets;
17776d1ef2abSArtur Rojek 		CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
1778eb4c4f4aSMarcin Wojtas 
1779eb4c4f4aSMarcin Wojtas 		for (idx = 0; idx < MAXMEMDOM; ++idx) {
1780eb4c4f4aSMarcin Wojtas 			if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx]))
1781eb4c4f4aSMarcin Wojtas 				break;
1782eb4c4f4aSMarcin Wojtas 		}
1783eb4c4f4aSMarcin Wojtas 		adapter->que[i].domain = idx;
1784eb4c4f4aSMarcin Wojtas #else
1785eb4c4f4aSMarcin Wojtas 		adapter->que[i].domain = -1;
17866d1ef2abSArtur Rojek #endif
17879b8d05b8SZbigniew Bodek 	}
178877958fcdSMarcin Wojtas 
178977958fcdSMarcin Wojtas 	return (0);
17909b8d05b8SZbigniew Bodek }
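
/*
 * Worked example for the CPU assignment in ena_setup_io_intr() (illustrative
 * numbers): with irq_cpu_base = 2, irq_cpu_stride = 2 and mp_ncpus = 8, the
 * IO queues are pinned as
 *
 *	queue 0 -> CPU (2 + 0 * 2) % 8 = 2
 *	queue 1 -> CPU (2 + 1 * 2) % 8 = 4
 *	queue 2 -> CPU (2 + 2 * 2) % 8 = 6
 *	queue 3 -> CPU (2 + 3 * 2) % 8 = 0
 *
 * i.e. the mapping wraps around modulo the number of CPUs. With the RSS
 * kernel option the CPU comes from rss_getcpu() instead, as in the #ifdef
 * block above.
 */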
17919b8d05b8SZbigniew Bodek 
17929b8d05b8SZbigniew Bodek static int
17939b8d05b8SZbigniew Bodek ena_request_mgmnt_irq(struct ena_adapter *adapter)
17949b8d05b8SZbigniew Bodek {
17953fc5d816SMarcin Wojtas 	device_t pdev = adapter->pdev;
17969b8d05b8SZbigniew Bodek 	struct ena_irq *irq;
17979b8d05b8SZbigniew Bodek 	unsigned long flags;
17989b8d05b8SZbigniew Bodek 	int rc, rcc;
17999b8d05b8SZbigniew Bodek 
18009b8d05b8SZbigniew Bodek 	flags = RF_ACTIVE | RF_SHAREABLE;
18019b8d05b8SZbigniew Bodek 
18029b8d05b8SZbigniew Bodek 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
18039b8d05b8SZbigniew Bodek 	irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
18049b8d05b8SZbigniew Bodek 	    &irq->vector, flags);
18059b8d05b8SZbigniew Bodek 
18063f9ed7abSMarcin Wojtas 	if (unlikely(irq->res == NULL)) {
18073fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "could not allocate irq vector: %d\n",
18083fc5d816SMarcin Wojtas 		    irq->vector);
18097d2544e6SMarcin Wojtas 		return (ENXIO);
18109b8d05b8SZbigniew Bodek 	}
18119b8d05b8SZbigniew Bodek 
18120bdffe59SMarcin Wojtas 	rc = bus_setup_intr(adapter->pdev, irq->res,
181382e558eaSDawid Gorecki 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data,
181482e558eaSDawid Gorecki 	    &irq->cookie);
18153f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
181682e558eaSDawid Gorecki 		ena_log(pdev, ERR,
181782e558eaSDawid Gorecki 		    "failed to register interrupt handler for irq %ju: %d\n",
18189b8d05b8SZbigniew Bodek 		    rman_get_start(irq->res), rc);
18197d2544e6SMarcin Wojtas 		goto err_res_free;
18209b8d05b8SZbigniew Bodek 	}
18219b8d05b8SZbigniew Bodek 	irq->requested = true;
18229b8d05b8SZbigniew Bodek 
18239b8d05b8SZbigniew Bodek 	return (rc);
18249b8d05b8SZbigniew Bodek 
18257d2544e6SMarcin Wojtas err_res_free:
18263fc5d816SMarcin Wojtas 	ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector);
182782e558eaSDawid Gorecki 	rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector,
182882e558eaSDawid Gorecki 	    irq->res);
18293f9ed7abSMarcin Wojtas 	if (unlikely(rcc != 0))
183082e558eaSDawid Gorecki 		ena_log(pdev, ERR,
183182e558eaSDawid Gorecki 		    "dev has no parent while releasing res for irq: %d\n",
183282e558eaSDawid Gorecki 		    irq->vector);
18339b8d05b8SZbigniew Bodek 	irq->res = NULL;
18349b8d05b8SZbigniew Bodek 
18359b8d05b8SZbigniew Bodek 	return (rc);
18369b8d05b8SZbigniew Bodek }
18379b8d05b8SZbigniew Bodek 
18389b8d05b8SZbigniew Bodek static int
18399b8d05b8SZbigniew Bodek ena_request_io_irq(struct ena_adapter *adapter)
18409b8d05b8SZbigniew Bodek {
18413fc5d816SMarcin Wojtas 	device_t pdev = adapter->pdev;
18429b8d05b8SZbigniew Bodek 	struct ena_irq *irq;
18439b8d05b8SZbigniew Bodek 	unsigned long flags = 0;
18449b8d05b8SZbigniew Bodek 	int rc = 0, i, rcc;
18459b8d05b8SZbigniew Bodek 
1846fd43fd2aSMarcin Wojtas 	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
18473fc5d816SMarcin Wojtas 		ena_log(pdev, ERR,
18484e8acd84SMarcin Wojtas 		    "failed to request I/O IRQ: MSI-X is not enabled\n");
18499b8d05b8SZbigniew Bodek 		return (EINVAL);
18509b8d05b8SZbigniew Bodek 	} else {
18519b8d05b8SZbigniew Bodek 		flags = RF_ACTIVE | RF_SHAREABLE;
18529b8d05b8SZbigniew Bodek 	}
18539b8d05b8SZbigniew Bodek 
18549b8d05b8SZbigniew Bodek 	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
18559b8d05b8SZbigniew Bodek 		irq = &adapter->irq_tbl[i];
18569b8d05b8SZbigniew Bodek 
18573f9ed7abSMarcin Wojtas 		if (unlikely(irq->requested))
18589b8d05b8SZbigniew Bodek 			continue;
18599b8d05b8SZbigniew Bodek 
18609b8d05b8SZbigniew Bodek 		irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
18619b8d05b8SZbigniew Bodek 		    &irq->vector, flags);
18623f9ed7abSMarcin Wojtas 		if (unlikely(irq->res == NULL)) {
1863469a8407SMarcin Wojtas 			rc = ENOMEM;
186482e558eaSDawid Gorecki 			ena_log(pdev, ERR,
186582e558eaSDawid Gorecki 			    "could not allocate irq vector: %d\n", irq->vector);
18669b8d05b8SZbigniew Bodek 			goto err;
18679b8d05b8SZbigniew Bodek 		}
18689b8d05b8SZbigniew Bodek 
18690bdffe59SMarcin Wojtas 		rc = bus_setup_intr(adapter->pdev, irq->res,
187082e558eaSDawid Gorecki 		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data,
187182e558eaSDawid Gorecki 		    &irq->cookie);
18723f9ed7abSMarcin Wojtas 		if (unlikely(rc != 0)) {
187382e558eaSDawid Gorecki 			ena_log(pdev, ERR,
187482e558eaSDawid Gorecki 			    "failed to register interrupt handler for irq %ju: %d\n",
18759b8d05b8SZbigniew Bodek 			    rman_get_start(irq->res), rc);
18769b8d05b8SZbigniew Bodek 			goto err;
18779b8d05b8SZbigniew Bodek 		}
18789b8d05b8SZbigniew Bodek 		irq->requested = true;
18796d1ef2abSArtur Rojek 
1880f9e1d947SOsama Abboud 		if (adapter->rss_enabled || adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
18816d1ef2abSArtur Rojek 			rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu);
18826d1ef2abSArtur Rojek 			if (unlikely(rc != 0)) {
188382e558eaSDawid Gorecki 				ena_log(pdev, ERR,
188482e558eaSDawid Gorecki 				    "failed to bind interrupt handler for irq %ju to cpu %d: %d\n",
18856d1ef2abSArtur Rojek 				    rman_get_start(irq->res), irq->cpu, rc);
18866d1ef2abSArtur Rojek 				goto err;
18876d1ef2abSArtur Rojek 			}
18886d1ef2abSArtur Rojek 
18896d1ef2abSArtur Rojek 			ena_log(pdev, INFO, "queue %d - cpu %d\n",
18906d1ef2abSArtur Rojek 			    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
18919b8d05b8SZbigniew Bodek 		}
1892f9e1d947SOsama Abboud 	}
18939b8d05b8SZbigniew Bodek 	return (rc);
18949b8d05b8SZbigniew Bodek 
18959b8d05b8SZbigniew Bodek err:
18969b8d05b8SZbigniew Bodek 
18979b8d05b8SZbigniew Bodek 	for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
18989b8d05b8SZbigniew Bodek 		irq = &adapter->irq_tbl[i];
18999b8d05b8SZbigniew Bodek 		rcc = 0;
19009b8d05b8SZbigniew Bodek 
19019b8d05b8SZbigniew Bodek 		/* Once we enter the err: section and irq->requested is true,
19029b8d05b8SZbigniew Bodek 		   we free both the interrupt and its resources */
1903f9e1d947SOsama Abboud 		if (irq->requested) {
190482e558eaSDawid Gorecki 			rcc = bus_teardown_intr(adapter->pdev, irq->res,
190582e558eaSDawid Gorecki 			    irq->cookie);
19063f9ed7abSMarcin Wojtas 			if (unlikely(rcc != 0))
190782e558eaSDawid Gorecki 				ena_log(pdev, ERR,
190882e558eaSDawid Gorecki 				    "could not release irq: %d, error: %d\n",
19093fc5d816SMarcin Wojtas 				    irq->vector, rcc);
1910f9e1d947SOsama Abboud 		}
19119b8d05b8SZbigniew Bodek 
1912eb3f25b4SGordon Bergling 		/* If we entered the err: section without irq->requested set,
19139b8d05b8SZbigniew Bodek 		   we know it was bus_alloc_resource_any() that needs cleanup,
19149b8d05b8SZbigniew Bodek 		   provided res is not NULL. If res is NULL, no work is needed
19159b8d05b8SZbigniew Bodek 		   in this iteration */
19169b8d05b8SZbigniew Bodek 		rcc = 0;
19179b8d05b8SZbigniew Bodek 		if (irq->res != NULL) {
19189b8d05b8SZbigniew Bodek 			rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
19199b8d05b8SZbigniew Bodek 			    irq->vector, irq->res);
19209b8d05b8SZbigniew Bodek 		}
19213f9ed7abSMarcin Wojtas 		if (unlikely(rcc != 0))
192282e558eaSDawid Gorecki 			ena_log(pdev, ERR,
192382e558eaSDawid Gorecki 			    "dev has no parent while releasing res for irq: %d\n",
192482e558eaSDawid Gorecki 			    irq->vector);
19259b8d05b8SZbigniew Bodek 		irq->requested = false;
19269b8d05b8SZbigniew Bodek 		irq->res = NULL;
19279b8d05b8SZbigniew Bodek 	}
19289b8d05b8SZbigniew Bodek 
19299b8d05b8SZbigniew Bodek 	return (rc);
19309b8d05b8SZbigniew Bodek }
19319b8d05b8SZbigniew Bodek 
19329b8d05b8SZbigniew Bodek static void
19339b8d05b8SZbigniew Bodek ena_free_mgmnt_irq(struct ena_adapter *adapter)
19349b8d05b8SZbigniew Bodek {
19353fc5d816SMarcin Wojtas 	device_t pdev = adapter->pdev;
19369b8d05b8SZbigniew Bodek 	struct ena_irq *irq;
19379b8d05b8SZbigniew Bodek 	int rc;
19389b8d05b8SZbigniew Bodek 
19399b8d05b8SZbigniew Bodek 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
19409b8d05b8SZbigniew Bodek 	if (irq->requested) {
19413fc5d816SMarcin Wojtas 		ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
19429b8d05b8SZbigniew Bodek 		rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
19433f9ed7abSMarcin Wojtas 		if (unlikely(rc != 0))
19443fc5d816SMarcin Wojtas 			ena_log(pdev, ERR, "failed to tear down irq: %d\n",
19453fc5d816SMarcin Wojtas 			    irq->vector);
19469b8d05b8SZbigniew Bodek 		irq->requested = 0;
19479b8d05b8SZbigniew Bodek 	}
19489b8d05b8SZbigniew Bodek 
19499b8d05b8SZbigniew Bodek 	if (irq->res != NULL) {
19503fc5d816SMarcin Wojtas 		ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector);
19519b8d05b8SZbigniew Bodek 		rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
19529b8d05b8SZbigniew Bodek 		    irq->vector, irq->res);
19539b8d05b8SZbigniew Bodek 		irq->res = NULL;
19543f9ed7abSMarcin Wojtas 		if (unlikely(rc != 0))
195582e558eaSDawid Gorecki 			ena_log(pdev, ERR,
195682e558eaSDawid Gorecki 			    "dev has no parent while releasing res for irq: %d\n",
195782e558eaSDawid Gorecki 			    irq->vector);
19589b8d05b8SZbigniew Bodek 	}
19599b8d05b8SZbigniew Bodek }
19609b8d05b8SZbigniew Bodek 
19619b8d05b8SZbigniew Bodek static void
19629b8d05b8SZbigniew Bodek ena_free_io_irq(struct ena_adapter *adapter)
19639b8d05b8SZbigniew Bodek {
19643fc5d816SMarcin Wojtas 	device_t pdev = adapter->pdev;
19659b8d05b8SZbigniew Bodek 	struct ena_irq *irq;
19669b8d05b8SZbigniew Bodek 	int rc;
19679b8d05b8SZbigniew Bodek 
19689b8d05b8SZbigniew Bodek 	for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
19699b8d05b8SZbigniew Bodek 		irq = &adapter->irq_tbl[i];
19709b8d05b8SZbigniew Bodek 		if (irq->requested) {
19713fc5d816SMarcin Wojtas 			ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
19729b8d05b8SZbigniew Bodek 			rc = bus_teardown_intr(adapter->pdev, irq->res,
19739b8d05b8SZbigniew Bodek 			    irq->cookie);
19743f9ed7abSMarcin Wojtas 			if (unlikely(rc != 0)) {
197582e558eaSDawid Gorecki 				ena_log(pdev, ERR,
197682e558eaSDawid Gorecki 				    "failed to tear down irq: %d\n",
19773fc5d816SMarcin Wojtas 				    irq->vector);
19789b8d05b8SZbigniew Bodek 			}
19799b8d05b8SZbigniew Bodek 			irq->requested = 0;
19809b8d05b8SZbigniew Bodek 		}
19819b8d05b8SZbigniew Bodek 
19829b8d05b8SZbigniew Bodek 		if (irq->res != NULL) {
19833fc5d816SMarcin Wojtas 			ena_log(pdev, DBG, "release resource irq: %d\n",
19849b8d05b8SZbigniew Bodek 			    irq->vector);
19859b8d05b8SZbigniew Bodek 			rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
19869b8d05b8SZbigniew Bodek 			    irq->vector, irq->res);
19879b8d05b8SZbigniew Bodek 			irq->res = NULL;
19883f9ed7abSMarcin Wojtas 			if (unlikely(rc != 0)) {
198982e558eaSDawid Gorecki 				ena_log(pdev, ERR,
199082e558eaSDawid Gorecki 				    "dev has no parent while releasing res for irq: %d\n",
19919b8d05b8SZbigniew Bodek 				    irq->vector);
19929b8d05b8SZbigniew Bodek 			}
19939b8d05b8SZbigniew Bodek 		}
19949b8d05b8SZbigniew Bodek 	}
19959b8d05b8SZbigniew Bodek }
19969b8d05b8SZbigniew Bodek 
19979b8d05b8SZbigniew Bodek static void
19989b8d05b8SZbigniew Bodek ena_free_irqs(struct ena_adapter *adapter)
19999b8d05b8SZbigniew Bodek {
20009b8d05b8SZbigniew Bodek 	ena_free_io_irq(adapter);
20019b8d05b8SZbigniew Bodek 	ena_free_mgmnt_irq(adapter);
20029b8d05b8SZbigniew Bodek 	ena_disable_msix(adapter);
20039b8d05b8SZbigniew Bodek }
20049b8d05b8SZbigniew Bodek 
20059b8d05b8SZbigniew Bodek static void
20069b8d05b8SZbigniew Bodek ena_disable_msix(struct ena_adapter *adapter)
20079b8d05b8SZbigniew Bodek {
2008fd43fd2aSMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
2009fd43fd2aSMarcin Wojtas 		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
20109b8d05b8SZbigniew Bodek 		pci_release_msi(adapter->pdev);
2011fd43fd2aSMarcin Wojtas 	}
20129b8d05b8SZbigniew Bodek 
20139b8d05b8SZbigniew Bodek 	adapter->msix_vecs = 0;
2014cd5d5804SMarcin Wojtas 	free(adapter->msix_entries, M_DEVBUF);
20159b8d05b8SZbigniew Bodek 	adapter->msix_entries = NULL;
20169b8d05b8SZbigniew Bodek }
20179b8d05b8SZbigniew Bodek 
20189b8d05b8SZbigniew Bodek static void
20199b8d05b8SZbigniew Bodek ena_unmask_all_io_irqs(struct ena_adapter *adapter)
20209b8d05b8SZbigniew Bodek {
20219b8d05b8SZbigniew Bodek 	struct ena_com_io_cq *io_cq;
20229b8d05b8SZbigniew Bodek 	struct ena_eth_io_intr_reg intr_reg;
2023223c8cb1SArtur Rojek 	struct ena_ring *tx_ring;
20249b8d05b8SZbigniew Bodek 	uint16_t ena_qid;
20259b8d05b8SZbigniew Bodek 	int i;
20269b8d05b8SZbigniew Bodek 
20279b8d05b8SZbigniew Bodek 	/* Unmask interrupts for all queues */
20287d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
20299b8d05b8SZbigniew Bodek 		ena_qid = ENA_IO_TXQ_IDX(i);
20309b8d05b8SZbigniew Bodek 		io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
203172e34ebdSOsama Abboud 		ena_com_update_intr_reg(&intr_reg, 0, 0, true, false);
2032223c8cb1SArtur Rojek 		tx_ring = &adapter->tx_ring[i];
2033223c8cb1SArtur Rojek 		counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
20349b8d05b8SZbigniew Bodek 		ena_com_unmask_intr(io_cq, &intr_reg);
20359b8d05b8SZbigniew Bodek 	}
20369b8d05b8SZbigniew Bodek }
20379b8d05b8SZbigniew Bodek 
20389b8d05b8SZbigniew Bodek static int
20399b8d05b8SZbigniew Bodek ena_up_complete(struct ena_adapter *adapter)
20409b8d05b8SZbigniew Bodek {
20419b8d05b8SZbigniew Bodek 	int rc;
20429b8d05b8SZbigniew Bodek 
2043fd43fd2aSMarcin Wojtas 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
20449b8d05b8SZbigniew Bodek 		rc = ena_rss_configure(adapter);
204556d41ad5SMarcin Wojtas 		if (rc != 0) {
20463fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, ERR,
204756d41ad5SMarcin Wojtas 			    "Failed to configure RSS\n");
20489b8d05b8SZbigniew Bodek 			return (rc);
20499b8d05b8SZbigniew Bodek 		}
205056d41ad5SMarcin Wojtas 	}
20519b8d05b8SZbigniew Bodek 
20527583c633SJustin Hibbits 	rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp));
20533f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0))
20547d2544e6SMarcin Wojtas 		return (rc);
20557d2544e6SMarcin Wojtas 
20569b8d05b8SZbigniew Bodek 	ena_refill_all_rx_bufs(adapter);
205730217e2dSMarcin Wojtas 	ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
205830217e2dSMarcin Wojtas 	    sizeof(adapter->hw_stats));
20599b8d05b8SZbigniew Bodek 
20609b8d05b8SZbigniew Bodek 	return (0);
20619b8d05b8SZbigniew Bodek }
20629b8d05b8SZbigniew Bodek 
20639762a033SMarcin Wojtas static void
206482e558eaSDawid Gorecki set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size)
20659762a033SMarcin Wojtas {
20669762a033SMarcin Wojtas 	int i;
20679762a033SMarcin Wojtas 
20689762a033SMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
20699762a033SMarcin Wojtas 		adapter->tx_ring[i].ring_size = new_tx_size;
20709762a033SMarcin Wojtas 		adapter->rx_ring[i].ring_size = new_rx_size;
20719762a033SMarcin Wojtas 	}
20729762a033SMarcin Wojtas }
20739762a033SMarcin Wojtas 
20749762a033SMarcin Wojtas static int
20759762a033SMarcin Wojtas create_queues_with_size_backoff(struct ena_adapter *adapter)
20769762a033SMarcin Wojtas {
20773fc5d816SMarcin Wojtas 	device_t pdev = adapter->pdev;
20789762a033SMarcin Wojtas 	int rc;
20799762a033SMarcin Wojtas 	uint32_t cur_rx_ring_size, cur_tx_ring_size;
20809762a033SMarcin Wojtas 	uint32_t new_rx_ring_size, new_tx_ring_size;
20819762a033SMarcin Wojtas 
20829762a033SMarcin Wojtas 	/*
20839762a033SMarcin Wojtas 	 * Current queue sizes might be set to smaller than the requested
20849762a033SMarcin Wojtas 	 * ones due to past queue allocation failures.
20859762a033SMarcin Wojtas 	 */
20869762a033SMarcin Wojtas 	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
20879762a033SMarcin Wojtas 	    adapter->requested_rx_ring_size);
20889762a033SMarcin Wojtas 
20899762a033SMarcin Wojtas 	while (1) {
20909762a033SMarcin Wojtas 		/* Allocate transmit descriptors */
20919762a033SMarcin Wojtas 		rc = ena_setup_all_tx_resources(adapter);
20929762a033SMarcin Wojtas 		if (unlikely(rc != 0)) {
20933fc5d816SMarcin Wojtas 			ena_log(pdev, ERR, "err_setup_tx\n");
20949762a033SMarcin Wojtas 			goto err_setup_tx;
20959762a033SMarcin Wojtas 		}
20969762a033SMarcin Wojtas 
20979762a033SMarcin Wojtas 		/* Allocate receive descriptors */
20989762a033SMarcin Wojtas 		rc = ena_setup_all_rx_resources(adapter);
20999762a033SMarcin Wojtas 		if (unlikely(rc != 0)) {
21003fc5d816SMarcin Wojtas 			ena_log(pdev, ERR, "err_setup_rx\n");
21019762a033SMarcin Wojtas 			goto err_setup_rx;
21029762a033SMarcin Wojtas 		}
21039762a033SMarcin Wojtas 
21049762a033SMarcin Wojtas 		/* Create IO queues for Rx & Tx */
21059762a033SMarcin Wojtas 		rc = ena_create_io_queues(adapter);
21069762a033SMarcin Wojtas 		if (unlikely(rc != 0)) {
210782e558eaSDawid Gorecki 			ena_log(pdev, ERR, "create IO queues failed\n");
21089762a033SMarcin Wojtas 			goto err_io_que;
21099762a033SMarcin Wojtas 		}
21109762a033SMarcin Wojtas 
21119762a033SMarcin Wojtas 		return (0);
21129762a033SMarcin Wojtas 
21139762a033SMarcin Wojtas err_io_que:
21149762a033SMarcin Wojtas 		ena_free_all_rx_resources(adapter);
21159762a033SMarcin Wojtas err_setup_rx:
21169762a033SMarcin Wojtas 		ena_free_all_tx_resources(adapter);
21179762a033SMarcin Wojtas err_setup_tx:
21189762a033SMarcin Wojtas 		/*
21199762a033SMarcin Wojtas 		 * Lower the ring size if ENOMEM. Otherwise, return the
21209762a033SMarcin Wojtas 		 * error straightaway.
21219762a033SMarcin Wojtas 		 */
21229762a033SMarcin Wojtas 		if (unlikely(rc != ENOMEM)) {
21233fc5d816SMarcin Wojtas 			ena_log(pdev, ERR,
21249762a033SMarcin Wojtas 			    "Queue creation failed with error code: %d\n", rc);
21259762a033SMarcin Wojtas 			return (rc);
21269762a033SMarcin Wojtas 		}
21279762a033SMarcin Wojtas 
21289762a033SMarcin Wojtas 		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
21299762a033SMarcin Wojtas 		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
21309762a033SMarcin Wojtas 
21313fc5d816SMarcin Wojtas 		ena_log(pdev, ERR,
21329762a033SMarcin Wojtas 		    "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
21339762a033SMarcin Wojtas 		    cur_tx_ring_size, cur_rx_ring_size);
21349762a033SMarcin Wojtas 
21359762a033SMarcin Wojtas 		new_tx_ring_size = cur_tx_ring_size;
21369762a033SMarcin Wojtas 		new_rx_ring_size = cur_rx_ring_size;
21379762a033SMarcin Wojtas 
21389762a033SMarcin Wojtas 		/*
213982e558eaSDawid Gorecki 		 * Decrease the size of a larger queue, or decrease both if they
214082e558eaSDawid Gorecki 		 * are the same size.
21419762a033SMarcin Wojtas 		 */
21429762a033SMarcin Wojtas 		if (cur_rx_ring_size <= cur_tx_ring_size)
21439762a033SMarcin Wojtas 			new_tx_ring_size = cur_tx_ring_size / 2;
21449762a033SMarcin Wojtas 		if (cur_rx_ring_size >= cur_tx_ring_size)
21459762a033SMarcin Wojtas 			new_rx_ring_size = cur_rx_ring_size / 2;
21469762a033SMarcin Wojtas 
21479762a033SMarcin Wojtas 		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
21489762a033SMarcin Wojtas 		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
21493fc5d816SMarcin Wojtas 			ena_log(pdev, ERR,
21509762a033SMarcin Wojtas 			    "Queue creation failed with the smallest possible queue size "
21519762a033SMarcin Wojtas 			    "of %d for both queues. Not retrying with smaller queues\n",
21529762a033SMarcin Wojtas 			    ENA_MIN_RING_SIZE);
21539762a033SMarcin Wojtas 			return (rc);
21549762a033SMarcin Wojtas 		}
21559762a033SMarcin Wojtas 
215677160654SArtur Rojek 		ena_log(pdev, INFO,
215777160654SArtur Rojek 		    "Retrying queue creation with sizes TX=%d, RX=%d\n",
215877160654SArtur Rojek 		    new_tx_ring_size, new_rx_ring_size);
215977160654SArtur Rojek 
21609762a033SMarcin Wojtas 		set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
21619762a033SMarcin Wojtas 	}
21629762a033SMarcin Wojtas }
21639762a033SMarcin Wojtas 
216438c7b965SMarcin Wojtas int
21659b8d05b8SZbigniew Bodek ena_up(struct ena_adapter *adapter)
21669b8d05b8SZbigniew Bodek {
21679b8d05b8SZbigniew Bodek 	int rc = 0;
21689b8d05b8SZbigniew Bodek 
216907aff471SArtur Rojek 	ENA_LOCK_ASSERT();
2170cb98c439SArtur Rojek 
21713f9ed7abSMarcin Wojtas 	if (unlikely(device_is_attached(adapter->pdev) == 0)) {
21723fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR, "device is not attached!\n");
21739b8d05b8SZbigniew Bodek 		return (ENXIO);
21749b8d05b8SZbigniew Bodek 	}
21759b8d05b8SZbigniew Bodek 
2176579d23aaSMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2177579d23aaSMarcin Wojtas 		return (0);
2178579d23aaSMarcin Wojtas 
21793fc5d816SMarcin Wojtas 	ena_log(adapter->pdev, INFO, "device is going UP\n");
21809b8d05b8SZbigniew Bodek 
21819b8d05b8SZbigniew Bodek 	/* setup interrupts for IO queues */
218277958fcdSMarcin Wojtas 	rc = ena_setup_io_intr(adapter);
218377958fcdSMarcin Wojtas 	if (unlikely(rc != 0)) {
21843fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n");
218577958fcdSMarcin Wojtas 		goto error;
218677958fcdSMarcin Wojtas 	}
21879b8d05b8SZbigniew Bodek 	rc = ena_request_io_irq(adapter);
21883f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
21893fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR, "err_req_irq\n");
219077958fcdSMarcin Wojtas 		goto error;
21919b8d05b8SZbigniew Bodek 	}
21929b8d05b8SZbigniew Bodek 
21933fc5d816SMarcin Wojtas 	ena_log(adapter->pdev, INFO,
219482e558eaSDawid Gorecki 	    "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, LLQ is %s\n",
21957d8c4feeSMarcin Wojtas 	    adapter->num_io_queues,
21969762a033SMarcin Wojtas 	    adapter->requested_rx_ring_size,
21979762a033SMarcin Wojtas 	    adapter->requested_tx_ring_size,
21989762a033SMarcin Wojtas 	    (adapter->ena_dev->tx_mem_queue_type ==
21999762a033SMarcin Wojtas 		ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED");
22007d8c4feeSMarcin Wojtas 
22019762a033SMarcin Wojtas 	rc = create_queues_with_size_backoff(adapter);
22023f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
22033fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR,
22049762a033SMarcin Wojtas 		    "error creating queues with size backoff\n");
22059762a033SMarcin Wojtas 		goto err_create_queues_with_backoff;
22069b8d05b8SZbigniew Bodek 	}
22079b8d05b8SZbigniew Bodek 
2208fd43fd2aSMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
22099b8d05b8SZbigniew Bodek 		if_link_state_change(adapter->ifp, LINK_STATE_UP);
22109b8d05b8SZbigniew Bodek 
22119b8d05b8SZbigniew Bodek 	rc = ena_up_complete(adapter);
22123f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0))
22139b8d05b8SZbigniew Bodek 		goto err_up_complete;
22149b8d05b8SZbigniew Bodek 
22159b8d05b8SZbigniew Bodek 	counter_u64_add(adapter->dev_stats.interface_up, 1);
22169b8d05b8SZbigniew Bodek 
22179b8d05b8SZbigniew Bodek 	ena_update_hwassist(adapter);
22189b8d05b8SZbigniew Bodek 
221982e558eaSDawid Gorecki 	if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
22209b8d05b8SZbigniew Bodek 
2221fd43fd2aSMarcin Wojtas 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);
222293471047SZbigniew Bodek 
222393471047SZbigniew Bodek 	ena_unmask_all_io_irqs(adapter);
22249b8d05b8SZbigniew Bodek 
22259b8d05b8SZbigniew Bodek 	return (0);
22269b8d05b8SZbigniew Bodek 
22279b8d05b8SZbigniew Bodek err_up_complete:
22289b8d05b8SZbigniew Bodek 	ena_destroy_all_io_queues(adapter);
22299b8d05b8SZbigniew Bodek 	ena_free_all_rx_resources(adapter);
22309b8d05b8SZbigniew Bodek 	ena_free_all_tx_resources(adapter);
22319762a033SMarcin Wojtas err_create_queues_with_backoff:
22329b8d05b8SZbigniew Bodek 	ena_free_io_irq(adapter);
223377958fcdSMarcin Wojtas error:
22349b8d05b8SZbigniew Bodek 	return (rc);
22359b8d05b8SZbigniew Bodek }
22369b8d05b8SZbigniew Bodek 
22379b8d05b8SZbigniew Bodek static uint64_t
22389b8d05b8SZbigniew Bodek ena_get_counter(if_t ifp, ift_counter cnt)
22399b8d05b8SZbigniew Bodek {
22409b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter;
22419b8d05b8SZbigniew Bodek 	struct ena_hw_stats *stats;
22429b8d05b8SZbigniew Bodek 
22439b8d05b8SZbigniew Bodek 	adapter = if_getsoftc(ifp);
22449b8d05b8SZbigniew Bodek 	stats = &adapter->hw_stats;
22459b8d05b8SZbigniew Bodek 
22469b8d05b8SZbigniew Bodek 	switch (cnt) {
22479b8d05b8SZbigniew Bodek 	case IFCOUNTER_IPACKETS:
224830217e2dSMarcin Wojtas 		return (counter_u64_fetch(stats->rx_packets));
22499b8d05b8SZbigniew Bodek 	case IFCOUNTER_OPACKETS:
225030217e2dSMarcin Wojtas 		return (counter_u64_fetch(stats->tx_packets));
22519b8d05b8SZbigniew Bodek 	case IFCOUNTER_IBYTES:
225230217e2dSMarcin Wojtas 		return (counter_u64_fetch(stats->rx_bytes));
22539b8d05b8SZbigniew Bodek 	case IFCOUNTER_OBYTES:
225430217e2dSMarcin Wojtas 		return (counter_u64_fetch(stats->tx_bytes));
22559b8d05b8SZbigniew Bodek 	case IFCOUNTER_IQDROPS:
225630217e2dSMarcin Wojtas 		return (counter_u64_fetch(stats->rx_drops));
22576c84cec3SMarcin Wojtas 	case IFCOUNTER_OQDROPS:
22586c84cec3SMarcin Wojtas 		return (counter_u64_fetch(stats->tx_drops));
22599b8d05b8SZbigniew Bodek 	default:
22609b8d05b8SZbigniew Bodek 		return (if_get_counter_default(ifp, cnt));
22619b8d05b8SZbigniew Bodek 	}
22629b8d05b8SZbigniew Bodek }
22639b8d05b8SZbigniew Bodek 
22649b8d05b8SZbigniew Bodek static int
22659b8d05b8SZbigniew Bodek ena_media_change(if_t ifp)
22669b8d05b8SZbigniew Bodek {
22679b8d05b8SZbigniew Bodek 	/* Media Change is not supported by firmware */
22689b8d05b8SZbigniew Bodek 	return (0);
22699b8d05b8SZbigniew Bodek }
22709b8d05b8SZbigniew Bodek 
22719b8d05b8SZbigniew Bodek static void
22729b8d05b8SZbigniew Bodek ena_media_status(if_t ifp, struct ifmediareq *ifmr)
22739b8d05b8SZbigniew Bodek {
22749b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter = if_getsoftc(ifp);
22753fc5d816SMarcin Wojtas 	ena_log(adapter->pdev, DBG, "Media status update\n");
22769b8d05b8SZbigniew Bodek 
227707aff471SArtur Rojek 	ENA_LOCK_LOCK();
22789b8d05b8SZbigniew Bodek 
22799b8d05b8SZbigniew Bodek 	ifmr->ifm_status = IFM_AVALID;
22809b8d05b8SZbigniew Bodek 	ifmr->ifm_active = IFM_ETHER;
22819b8d05b8SZbigniew Bodek 
2282fd43fd2aSMarcin Wojtas 	if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
228307aff471SArtur Rojek 		ENA_LOCK_UNLOCK();
22843fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, INFO, "Link is down\n");
22859b8d05b8SZbigniew Bodek 		return;
22869b8d05b8SZbigniew Bodek 	}
22879b8d05b8SZbigniew Bodek 
22889b8d05b8SZbigniew Bodek 	ifmr->ifm_status |= IFM_ACTIVE;
2289b8ca5dbeSMarcin Wojtas 	ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
22909b8d05b8SZbigniew Bodek 
229107aff471SArtur Rojek 	ENA_LOCK_UNLOCK();
22929b8d05b8SZbigniew Bodek }
22939b8d05b8SZbigniew Bodek 
22949b8d05b8SZbigniew Bodek static void
22959b8d05b8SZbigniew Bodek ena_init(void *arg)
22969b8d05b8SZbigniew Bodek {
22979b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
22989b8d05b8SZbigniew Bodek 
2299fd43fd2aSMarcin Wojtas 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
230007aff471SArtur Rojek 		ENA_LOCK_LOCK();
23019b8d05b8SZbigniew Bodek 		ena_up(adapter);
230207aff471SArtur Rojek 		ENA_LOCK_UNLOCK();
23033d3a90f9SZbigniew Bodek 	}
23049b8d05b8SZbigniew Bodek }
23059b8d05b8SZbigniew Bodek 
23069b8d05b8SZbigniew Bodek static int
23079b8d05b8SZbigniew Bodek ena_ioctl(if_t ifp, u_long command, caddr_t data)
23089b8d05b8SZbigniew Bodek {
23099b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter;
23109b8d05b8SZbigniew Bodek 	struct ifreq *ifr;
23119b8d05b8SZbigniew Bodek 	int rc;
23129b8d05b8SZbigniew Bodek 
23137583c633SJustin Hibbits 	adapter = if_getsoftc(ifp);
23149b8d05b8SZbigniew Bodek 	ifr = (struct ifreq *)data;
23159b8d05b8SZbigniew Bodek 
23169b8d05b8SZbigniew Bodek 	/*
23179b8d05b8SZbigniew Bodek 	 * Acquire the lock to prevent the up and down routines from running in parallel.
23189b8d05b8SZbigniew Bodek 	 */
23199b8d05b8SZbigniew Bodek 	rc = 0;
23209b8d05b8SZbigniew Bodek 	switch (command) {
23219b8d05b8SZbigniew Bodek 	case SIOCSIFMTU:
23227583c633SJustin Hibbits 		if (if_getmtu(ifp) == ifr->ifr_mtu)
2323dbf2eb54SMarcin Wojtas 			break;
232407aff471SArtur Rojek 		ENA_LOCK_LOCK();
23259b8d05b8SZbigniew Bodek 		ena_down(adapter);
23269b8d05b8SZbigniew Bodek 
23279b8d05b8SZbigniew Bodek 		ena_change_mtu(ifp, ifr->ifr_mtu);
23289b8d05b8SZbigniew Bodek 
23299b8d05b8SZbigniew Bodek 		rc = ena_up(adapter);
233007aff471SArtur Rojek 		ENA_LOCK_UNLOCK();
23319b8d05b8SZbigniew Bodek 		break;
23329b8d05b8SZbigniew Bodek 
23339b8d05b8SZbigniew Bodek 	case SIOCSIFFLAGS:
23347583c633SJustin Hibbits 		if ((if_getflags(ifp) & IFF_UP) != 0) {
23350bdffe59SMarcin Wojtas 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
23367583c633SJustin Hibbits 				if ((if_getflags(ifp) & (IFF_PROMISC |
23377583c633SJustin Hibbits 				    IFF_ALLMULTI)) != 0) {
23383fc5d816SMarcin Wojtas 					ena_log(adapter->pdev, INFO,
23399b8d05b8SZbigniew Bodek 					    "ioctl promisc/allmulti\n");
23409b8d05b8SZbigniew Bodek 				}
23419b8d05b8SZbigniew Bodek 			} else {
234207aff471SArtur Rojek 				ENA_LOCK_LOCK();
23439b8d05b8SZbigniew Bodek 				rc = ena_up(adapter);
234407aff471SArtur Rojek 				ENA_LOCK_UNLOCK();
23459b8d05b8SZbigniew Bodek 			}
23469b8d05b8SZbigniew Bodek 		} else {
23470bdffe59SMarcin Wojtas 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
234807aff471SArtur Rojek 				ENA_LOCK_LOCK();
23499b8d05b8SZbigniew Bodek 				ena_down(adapter);
235007aff471SArtur Rojek 				ENA_LOCK_UNLOCK();
2351e67c6554SZbigniew Bodek 			}
23529b8d05b8SZbigniew Bodek 		}
23539b8d05b8SZbigniew Bodek 		break;
23549b8d05b8SZbigniew Bodek 
23559b8d05b8SZbigniew Bodek 	case SIOCADDMULTI:
23569b8d05b8SZbigniew Bodek 	case SIOCDELMULTI:
23579b8d05b8SZbigniew Bodek 		break;
23589b8d05b8SZbigniew Bodek 
23599b8d05b8SZbigniew Bodek 	case SIOCSIFMEDIA:
23609b8d05b8SZbigniew Bodek 	case SIOCGIFMEDIA:
23619b8d05b8SZbigniew Bodek 		rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
23629b8d05b8SZbigniew Bodek 		break;
23639b8d05b8SZbigniew Bodek 
23649b8d05b8SZbigniew Bodek 	case SIOCSIFCAP:
23659b8d05b8SZbigniew Bodek 		{
23669b8d05b8SZbigniew Bodek 			int reinit = 0;
23679b8d05b8SZbigniew Bodek 
23687583c633SJustin Hibbits 			if (ifr->ifr_reqcap != if_getcapenable(ifp)) {
23697583c633SJustin Hibbits 				if_setcapenable(ifp, ifr->ifr_reqcap);
23709b8d05b8SZbigniew Bodek 				reinit = 1;
23719b8d05b8SZbigniew Bodek 			}
23729b8d05b8SZbigniew Bodek 
23730bdffe59SMarcin Wojtas 			if ((reinit != 0) &&
23740bdffe59SMarcin Wojtas 			    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
237507aff471SArtur Rojek 				ENA_LOCK_LOCK();
23769b8d05b8SZbigniew Bodek 				ena_down(adapter);
23779b8d05b8SZbigniew Bodek 				rc = ena_up(adapter);
237807aff471SArtur Rojek 				ENA_LOCK_UNLOCK();
23799b8d05b8SZbigniew Bodek 			}
23809b8d05b8SZbigniew Bodek 		}
23819b8d05b8SZbigniew Bodek 
23829b8d05b8SZbigniew Bodek 		break;
23839b8d05b8SZbigniew Bodek 	default:
23849b8d05b8SZbigniew Bodek 		rc = ether_ioctl(ifp, command, data);
23859b8d05b8SZbigniew Bodek 		break;
23869b8d05b8SZbigniew Bodek 	}
23879b8d05b8SZbigniew Bodek 
23889b8d05b8SZbigniew Bodek 	return (rc);
23899b8d05b8SZbigniew Bodek }
23909b8d05b8SZbigniew Bodek 
23919b8d05b8SZbigniew Bodek static int
23929b8d05b8SZbigniew Bodek ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
23939b8d05b8SZbigniew Bodek {
23949b8d05b8SZbigniew Bodek 	int caps = 0;
23959b8d05b8SZbigniew Bodek 
23960bdffe59SMarcin Wojtas 	if ((feat->offload.tx &
23979b8d05b8SZbigniew Bodek 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
23989b8d05b8SZbigniew Bodek 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
23990bdffe59SMarcin Wojtas 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
24009b8d05b8SZbigniew Bodek 		caps |= IFCAP_TXCSUM;
24019b8d05b8SZbigniew Bodek 
24020bdffe59SMarcin Wojtas 	if ((feat->offload.tx &
24039b8d05b8SZbigniew Bodek 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
24040bdffe59SMarcin Wojtas 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
24059b8d05b8SZbigniew Bodek 		caps |= IFCAP_TXCSUM_IPV6;
24069b8d05b8SZbigniew Bodek 
240782e558eaSDawid Gorecki 	if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
24089b8d05b8SZbigniew Bodek 		caps |= IFCAP_TSO4;
24099b8d05b8SZbigniew Bodek 
241082e558eaSDawid Gorecki 	if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
24119b8d05b8SZbigniew Bodek 		caps |= IFCAP_TSO6;
24129b8d05b8SZbigniew Bodek 
24130bdffe59SMarcin Wojtas 	if ((feat->offload.rx_supported &
24149b8d05b8SZbigniew Bodek 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
24150bdffe59SMarcin Wojtas 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
24169b8d05b8SZbigniew Bodek 		caps |= IFCAP_RXCSUM;
24179b8d05b8SZbigniew Bodek 
24180bdffe59SMarcin Wojtas 	if ((feat->offload.rx_supported &
24190bdffe59SMarcin Wojtas 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
24209b8d05b8SZbigniew Bodek 		caps |= IFCAP_RXCSUM_IPV6;
24219b8d05b8SZbigniew Bodek 
24229b8d05b8SZbigniew Bodek 	caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
24239b8d05b8SZbigniew Bodek 
24249b8d05b8SZbigniew Bodek 	return (caps);
24259b8d05b8SZbigniew Bodek }
24269b8d05b8SZbigniew Bodek 
24279b8d05b8SZbigniew Bodek static void
24289b8d05b8SZbigniew Bodek ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
24299b8d05b8SZbigniew Bodek {
243082e558eaSDawid Gorecki 	host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp);
24319b8d05b8SZbigniew Bodek }
24329b8d05b8SZbigniew Bodek 
24339b8d05b8SZbigniew Bodek static void
24349b8d05b8SZbigniew Bodek ena_update_hwassist(struct ena_adapter *adapter)
24359b8d05b8SZbigniew Bodek {
24369b8d05b8SZbigniew Bodek 	if_t ifp = adapter->ifp;
24379b8d05b8SZbigniew Bodek 	uint32_t feat = adapter->tx_offload_cap;
24389b8d05b8SZbigniew Bodek 	int cap = if_getcapenable(ifp);
24399b8d05b8SZbigniew Bodek 	int flags = 0;
24409b8d05b8SZbigniew Bodek 
24419b8d05b8SZbigniew Bodek 	if_clearhwassist(ifp);
24429b8d05b8SZbigniew Bodek 
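	/*
	 * Translate the enabled interface capabilities into the matching
	 * hwassist CSUM/TSO flags, honoring the Tx offloads advertised by
	 * the device.
	 */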
24430bdffe59SMarcin Wojtas 	if ((cap & IFCAP_TXCSUM) != 0) {
24440bdffe59SMarcin Wojtas 		if ((feat &
24450bdffe59SMarcin Wojtas 		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
24469b8d05b8SZbigniew Bodek 			flags |= CSUM_IP;
24470bdffe59SMarcin Wojtas 		if ((feat &
24489b8d05b8SZbigniew Bodek 		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
24490bdffe59SMarcin Wojtas 		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
24509b8d05b8SZbigniew Bodek 			flags |= CSUM_IP_UDP | CSUM_IP_TCP;
24519b8d05b8SZbigniew Bodek 	}
24529b8d05b8SZbigniew Bodek 
24530bdffe59SMarcin Wojtas 	if ((cap & IFCAP_TXCSUM_IPV6) != 0)
24549b8d05b8SZbigniew Bodek 		flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
24559b8d05b8SZbigniew Bodek 
24560bdffe59SMarcin Wojtas 	if ((cap & IFCAP_TSO4) != 0)
24579b8d05b8SZbigniew Bodek 		flags |= CSUM_IP_TSO;
24589b8d05b8SZbigniew Bodek 
24590bdffe59SMarcin Wojtas 	if ((cap & IFCAP_TSO6) != 0)
24609b8d05b8SZbigniew Bodek 		flags |= CSUM_IP6_TSO;
24619b8d05b8SZbigniew Bodek 
24629b8d05b8SZbigniew Bodek 	if_sethwassistbits(ifp, flags, 0);
24639b8d05b8SZbigniew Bodek }
24649b8d05b8SZbigniew Bodek 
2465aa386085SZhenlei Huang static void
24669b8d05b8SZbigniew Bodek ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
24679b8d05b8SZbigniew Bodek     struct ena_com_dev_get_features_ctx *feat)
24689b8d05b8SZbigniew Bodek {
24699b8d05b8SZbigniew Bodek 	if_t ifp;
24709b8d05b8SZbigniew Bodek 	int caps = 0;
24719b8d05b8SZbigniew Bodek 
24729b8d05b8SZbigniew Bodek 	ifp = adapter->ifp = if_gethandle(IFT_ETHER);
24739b8d05b8SZbigniew Bodek 	if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
24749b8d05b8SZbigniew Bodek 	if_setdev(ifp, pdev);
24759b8d05b8SZbigniew Bodek 	if_setsoftc(ifp, adapter);
24769b8d05b8SZbigniew Bodek 
2477a6b55ee6SGleb Smirnoff 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
24789b8d05b8SZbigniew Bodek 	if_setinitfn(ifp, ena_init);
24799b8d05b8SZbigniew Bodek 	if_settransmitfn(ifp, ena_mq_start);
24809b8d05b8SZbigniew Bodek 	if_setqflushfn(ifp, ena_qflush);
24819b8d05b8SZbigniew Bodek 	if_setioctlfn(ifp, ena_ioctl);
24829b8d05b8SZbigniew Bodek 	if_setgetcounterfn(ifp, ena_get_counter);
24839b8d05b8SZbigniew Bodek 
24849762a033SMarcin Wojtas 	if_setsendqlen(ifp, adapter->requested_tx_ring_size);
24859b8d05b8SZbigniew Bodek 	if_setsendqready(ifp);
24869b8d05b8SZbigniew Bodek 	if_setmtu(ifp, ETHERMTU);
24879b8d05b8SZbigniew Bodek 	if_setbaudrate(ifp, 0);
24889b8d05b8SZbigniew Bodek 	/* Zeroize capabilities... */
24899b8d05b8SZbigniew Bodek 	if_setcapabilities(ifp, 0);
24909b8d05b8SZbigniew Bodek 	if_setcapenable(ifp, 0);
24919b8d05b8SZbigniew Bodek 	/* check hardware support */
24929b8d05b8SZbigniew Bodek 	caps = ena_get_dev_offloads(feat);
24939b8d05b8SZbigniew Bodek 	/* ... and set them */
24949b8d05b8SZbigniew Bodek 	if_setcapabilitiesbit(ifp, caps, 0);
24959b8d05b8SZbigniew Bodek 
24969b8d05b8SZbigniew Bodek 	/* TSO parameters */
24977583c633SJustin Hibbits 	if_sethwtsomax(ifp, ENA_TSO_MAXSIZE -
24987583c633SJustin Hibbits 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
24997583c633SJustin Hibbits 	if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1);
25007583c633SJustin Hibbits 	if_sethwtsomaxsegsize(ifp, ENA_TSO_MAXSIZE);
25019b8d05b8SZbigniew Bodek 
25029b8d05b8SZbigniew Bodek 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
25039b8d05b8SZbigniew Bodek 	if_setcapenable(ifp, if_getcapabilities(ifp));
25049b8d05b8SZbigniew Bodek 
25059b8d05b8SZbigniew Bodek 	/*
25069b8d05b8SZbigniew Bodek 	 * Specify the media types supported by this adapter and register
25079b8d05b8SZbigniew Bodek 	 * callbacks to update media and link information
25089b8d05b8SZbigniew Bodek 	 */
250982e558eaSDawid Gorecki 	ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change,
251082e558eaSDawid Gorecki 	    ena_media_status);
25119b8d05b8SZbigniew Bodek 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
25129b8d05b8SZbigniew Bodek 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
25139b8d05b8SZbigniew Bodek 
25149b8d05b8SZbigniew Bodek 	ether_ifattach(ifp, adapter->mac_addr);
25159b8d05b8SZbigniew Bodek }
25169b8d05b8SZbigniew Bodek 
251738c7b965SMarcin Wojtas void
25189b8d05b8SZbigniew Bodek ena_down(struct ena_adapter *adapter)
25199b8d05b8SZbigniew Bodek {
2520a195fab0SMarcin Wojtas 	int rc;
25219b8d05b8SZbigniew Bodek 
252207aff471SArtur Rojek 	ENA_LOCK_ASSERT();
2523cb98c439SArtur Rojek 
2524579d23aaSMarcin Wojtas 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2525579d23aaSMarcin Wojtas 		return;
2526579d23aaSMarcin Wojtas 
252778554d0cSDawid Gorecki 	ena_log(adapter->pdev, INFO, "device is going DOWN\n");
25289b8d05b8SZbigniew Bodek 
2529fd43fd2aSMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
253082e558eaSDawid Gorecki 	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
25319b8d05b8SZbigniew Bodek 
25329b8d05b8SZbigniew Bodek 	ena_free_io_irq(adapter);
25339b8d05b8SZbigniew Bodek 
2534fd43fd2aSMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
253582e558eaSDawid Gorecki 		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
25363f9ed7abSMarcin Wojtas 		if (unlikely(rc != 0))
253782e558eaSDawid Gorecki 			ena_log(adapter->pdev, ERR, "Device reset failed\n");
2538a195fab0SMarcin Wojtas 	}
2539a195fab0SMarcin Wojtas 
25409b8d05b8SZbigniew Bodek 	ena_destroy_all_io_queues(adapter);
25419b8d05b8SZbigniew Bodek 
25429b8d05b8SZbigniew Bodek 	ena_free_all_tx_bufs(adapter);
25439b8d05b8SZbigniew Bodek 	ena_free_all_rx_bufs(adapter);
25449b8d05b8SZbigniew Bodek 	ena_free_all_tx_resources(adapter);
25459b8d05b8SZbigniew Bodek 	ena_free_all_rx_resources(adapter);
25469b8d05b8SZbigniew Bodek 
25479b8d05b8SZbigniew Bodek 	counter_u64_add(adapter->dev_stats.interface_down, 1);
25489b8d05b8SZbigniew Bodek }
25499b8d05b8SZbigniew Bodek 
25507d8c4feeSMarcin Wojtas static uint32_t
25517d8c4feeSMarcin Wojtas ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
25529b8d05b8SZbigniew Bodek     struct ena_com_dev_get_features_ctx *get_feat_ctx)
25539b8d05b8SZbigniew Bodek {
25547d8c4feeSMarcin Wojtas 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
25559b8d05b8SZbigniew Bodek 
25566064f289SMarcin Wojtas 	/* Regular queues capabilities */
25576064f289SMarcin Wojtas 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
25586064f289SMarcin Wojtas 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
25596064f289SMarcin Wojtas 		    &get_feat_ctx->max_queue_ext.max_queue_ext;
25604fa9e02dSMarcin Wojtas 		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
25614fa9e02dSMarcin Wojtas 		    max_queue_ext->max_rx_cq_num);
25626064f289SMarcin Wojtas 
25634fa9e02dSMarcin Wojtas 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
25644fa9e02dSMarcin Wojtas 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
25656064f289SMarcin Wojtas 	} else {
25666064f289SMarcin Wojtas 		struct ena_admin_queue_feature_desc *max_queues =
25676064f289SMarcin Wojtas 		    &get_feat_ctx->max_queues;
25684fa9e02dSMarcin Wojtas 		io_tx_sq_num = max_queues->max_sq_num;
25694fa9e02dSMarcin Wojtas 		io_tx_cq_num = max_queues->max_cq_num;
25704fa9e02dSMarcin Wojtas 		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
25716064f289SMarcin Wojtas 	}
25729b8d05b8SZbigniew Bodek 
25734fa9e02dSMarcin Wojtas 	/* In case of LLQ use the llq fields for the tx SQ/CQ */
25744fa9e02dSMarcin Wojtas 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
25754fa9e02dSMarcin Wojtas 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
25764fa9e02dSMarcin Wojtas 
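	/*
	 * Bound the number of IO queues by the number of CPUs and by the
	 * Rx/Tx SQ and CQ limits computed above.
	 */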
25777d8c4feeSMarcin Wojtas 	max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
25787d8c4feeSMarcin Wojtas 	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
25797d8c4feeSMarcin Wojtas 	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
25807d8c4feeSMarcin Wojtas 	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
2581609e6f6dSGordon Bergling 	/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
25827d8c4feeSMarcin Wojtas 	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
25837d8c4feeSMarcin Wojtas 	    pci_msix_count(pdev) - 1);
25846d1ef2abSArtur Rojek #ifdef RSS
25856d1ef2abSArtur Rojek 	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
25866d1ef2abSArtur Rojek 	    rss_getnumbuckets());
25876d1ef2abSArtur Rojek #endif
25889b8d05b8SZbigniew Bodek 
25897d8c4feeSMarcin Wojtas 	return (max_num_io_queues);
25909b8d05b8SZbigniew Bodek }
25919b8d05b8SZbigniew Bodek 
25920bdffe59SMarcin Wojtas static int
25933fc5d816SMarcin Wojtas ena_enable_wc(device_t pdev, struct resource *res)
25944fa9e02dSMarcin Wojtas {
2595472d4784SMarcin Wojtas #if defined(__i386) || defined(__amd64) || defined(__aarch64__)
25964fa9e02dSMarcin Wojtas 	vm_offset_t va;
25974fa9e02dSMarcin Wojtas 	vm_size_t len;
25984fa9e02dSMarcin Wojtas 	int rc;
25994fa9e02dSMarcin Wojtas 
26004fa9e02dSMarcin Wojtas 	va = (vm_offset_t)rman_get_virtual(res);
26014fa9e02dSMarcin Wojtas 	len = rman_get_size(res);
26024fa9e02dSMarcin Wojtas 	/* Enable write combining */
2603472d4784SMarcin Wojtas 	rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
26044fa9e02dSMarcin Wojtas 	if (unlikely(rc != 0)) {
26053fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc);
26064fa9e02dSMarcin Wojtas 		return (rc);
26074fa9e02dSMarcin Wojtas 	}
26084fa9e02dSMarcin Wojtas 
26094fa9e02dSMarcin Wojtas 	return (0);
26104fa9e02dSMarcin Wojtas #endif
26114fa9e02dSMarcin Wojtas 	return (EOPNOTSUPP);
26124fa9e02dSMarcin Wojtas }
26134fa9e02dSMarcin Wojtas 
26144fa9e02dSMarcin Wojtas static int
26154fa9e02dSMarcin Wojtas ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
26164fa9e02dSMarcin Wojtas     struct ena_admin_feature_llq_desc *llq,
26174fa9e02dSMarcin Wojtas     struct ena_llq_configurations *llq_default_configurations)
26184fa9e02dSMarcin Wojtas {
261990232d18SDawid Gorecki 	int rc;
26204fa9e02dSMarcin Wojtas 	uint32_t llq_feature_mask;
26214fa9e02dSMarcin Wojtas 
26224fa9e02dSMarcin Wojtas 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
26234fa9e02dSMarcin Wojtas 	if (!(ena_dev->supported_features & llq_feature_mask)) {
26243fc5d816SMarcin Wojtas 		ena_log(pdev, WARN,
26254fa9e02dSMarcin Wojtas 		    "LLQ is not supported. Fallback to host mode policy.\n");
26264fa9e02dSMarcin Wojtas 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
26274fa9e02dSMarcin Wojtas 		return (0);
26284fa9e02dSMarcin Wojtas 	}
26294fa9e02dSMarcin Wojtas 
263090232d18SDawid Gorecki 	if (ena_dev->mem_bar == NULL) {
263190232d18SDawid Gorecki 		ena_log(pdev, WARN,
263290232d18SDawid Gorecki 		    "LLQ is advertised as supported but device doesn't expose mem bar.\n");
263390232d18SDawid Gorecki 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
263490232d18SDawid Gorecki 		return (0);
263590232d18SDawid Gorecki 	}
263690232d18SDawid Gorecki 
26374fa9e02dSMarcin Wojtas 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
26384fa9e02dSMarcin Wojtas 	if (unlikely(rc != 0)) {
263982e558eaSDawid Gorecki 		ena_log(pdev, WARN,
264082e558eaSDawid Gorecki 		    "Failed to configure the device mode. "
26414fa9e02dSMarcin Wojtas 		    "Fallback to host mode policy.\n");
26424fa9e02dSMarcin Wojtas 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
264390232d18SDawid Gorecki 	}
264490232d18SDawid Gorecki 
26454fa9e02dSMarcin Wojtas 	return (0);
26464fa9e02dSMarcin Wojtas }
26474fa9e02dSMarcin Wojtas 
264890232d18SDawid Gorecki static int
264990232d18SDawid Gorecki ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev)
265090232d18SDawid Gorecki {
265190232d18SDawid Gorecki 	struct ena_adapter *adapter = device_get_softc(pdev);
265290232d18SDawid Gorecki 	int rc, rid;
26534fa9e02dSMarcin Wojtas 
26544fa9e02dSMarcin Wojtas 	/* Try to allocate resources for LLQ bar */
26554fa9e02dSMarcin Wojtas 	rid = PCIR_BAR(ENA_MEM_BAR);
265682e558eaSDawid Gorecki 	adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
265782e558eaSDawid Gorecki 	    RF_ACTIVE);
26584fa9e02dSMarcin Wojtas 	if (unlikely(adapter->memory == NULL)) {
265982e558eaSDawid Gorecki 		ena_log(pdev, WARN,
26603324e304SMichal Krawczyk 		    "Unable to allocate LLQ bar resource. LLQ mode won't be used.\n");
26614fa9e02dSMarcin Wojtas 		return (0);
26624fa9e02dSMarcin Wojtas 	}
26634fa9e02dSMarcin Wojtas 
26644fa9e02dSMarcin Wojtas 	/* Enable write combining for better LLQ performance */
26653fc5d816SMarcin Wojtas 	rc = ena_enable_wc(adapter->pdev, adapter->memory);
26664fa9e02dSMarcin Wojtas 	if (unlikely(rc != 0)) {
26673fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "failed to enable write combining.\n");
26684fa9e02dSMarcin Wojtas 		return (rc);
26694fa9e02dSMarcin Wojtas 	}
26704fa9e02dSMarcin Wojtas 
26714fa9e02dSMarcin Wojtas 	/*
26724fa9e02dSMarcin Wojtas 	 * Save virtual address of the device's memory region
26734fa9e02dSMarcin Wojtas 	 * for the ena_com layer.
26744fa9e02dSMarcin Wojtas 	 */
26754fa9e02dSMarcin Wojtas 	ena_dev->mem_bar = rman_get_virtual(adapter->memory);
26764fa9e02dSMarcin Wojtas 
26774fa9e02dSMarcin Wojtas 	return (0);
26784fa9e02dSMarcin Wojtas }
26794fa9e02dSMarcin Wojtas 
268082e558eaSDawid Gorecki static inline void
268182e558eaSDawid Gorecki set_default_llq_configurations(struct ena_llq_configurations *llq_config,
2682beaadec9SMarcin Wojtas     struct ena_admin_feature_llq_desc *llq)
26834fa9e02dSMarcin Wojtas {
26844fa9e02dSMarcin Wojtas 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
26854fa9e02dSMarcin Wojtas 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
26864fa9e02dSMarcin Wojtas 	llq_config->llq_num_decs_before_header =
26874fa9e02dSMarcin Wojtas 	    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
268882e558eaSDawid Gorecki 	if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) !=
268982e558eaSDawid Gorecki 	    0 && ena_force_large_llq_header) {
2690beaadec9SMarcin Wojtas 		llq_config->llq_ring_entry_size =
2691beaadec9SMarcin Wojtas 		    ENA_ADMIN_LIST_ENTRY_SIZE_256B;
2692beaadec9SMarcin Wojtas 		llq_config->llq_ring_entry_size_value = 256;
2693beaadec9SMarcin Wojtas 	} else {
2694beaadec9SMarcin Wojtas 		llq_config->llq_ring_entry_size =
2695beaadec9SMarcin Wojtas 		    ENA_ADMIN_LIST_ENTRY_SIZE_128B;
26964fa9e02dSMarcin Wojtas 		llq_config->llq_ring_entry_size_value = 128;
26974fa9e02dSMarcin Wojtas 	}
2698beaadec9SMarcin Wojtas }
26994fa9e02dSMarcin Wojtas 
27004fa9e02dSMarcin Wojtas static int
27017d8c4feeSMarcin Wojtas ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
27029b8d05b8SZbigniew Bodek {
27034fa9e02dSMarcin Wojtas 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
27044fa9e02dSMarcin Wojtas 	struct ena_com_dev *ena_dev = ctx->ena_dev;
27056064f289SMarcin Wojtas 	uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
27067d8c4feeSMarcin Wojtas 	uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
27077d8c4feeSMarcin Wojtas 	uint32_t max_tx_queue_size;
27087d8c4feeSMarcin Wojtas 	uint32_t max_rx_queue_size;
27099b8d05b8SZbigniew Bodek 
27104fa9e02dSMarcin Wojtas 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
27116064f289SMarcin Wojtas 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
27126064f289SMarcin Wojtas 		    &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
27137d8c4feeSMarcin Wojtas 		max_rx_queue_size = min_t(uint32_t,
27147d8c4feeSMarcin Wojtas 		    max_queue_ext->max_rx_cq_depth,
27156064f289SMarcin Wojtas 		    max_queue_ext->max_rx_sq_depth);
27167d8c4feeSMarcin Wojtas 		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
27174fa9e02dSMarcin Wojtas 
27184fa9e02dSMarcin Wojtas 		if (ena_dev->tx_mem_queue_type ==
27194fa9e02dSMarcin Wojtas 		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
27207d8c4feeSMarcin Wojtas 			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
27214fa9e02dSMarcin Wojtas 			    llq->max_llq_depth);
27224fa9e02dSMarcin Wojtas 		else
27237d8c4feeSMarcin Wojtas 			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
27246064f289SMarcin Wojtas 			    max_queue_ext->max_tx_sq_depth);
27254fa9e02dSMarcin Wojtas 
27266064f289SMarcin Wojtas 		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
27276064f289SMarcin Wojtas 		    max_queue_ext->max_per_packet_tx_descs);
27287d8c4feeSMarcin Wojtas 		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
27297d8c4feeSMarcin Wojtas 		    max_queue_ext->max_per_packet_rx_descs);
27306064f289SMarcin Wojtas 	} else {
27316064f289SMarcin Wojtas 		struct ena_admin_queue_feature_desc *max_queues =
27326064f289SMarcin Wojtas 		    &ctx->get_feat_ctx->max_queues;
273382e558eaSDawid Gorecki 		max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth,
27346064f289SMarcin Wojtas 		    max_queues->max_sq_depth);
27357d8c4feeSMarcin Wojtas 		max_tx_queue_size = max_queues->max_cq_depth;
27364fa9e02dSMarcin Wojtas 
27374fa9e02dSMarcin Wojtas 		if (ena_dev->tx_mem_queue_type ==
27384fa9e02dSMarcin Wojtas 		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
27397d8c4feeSMarcin Wojtas 			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
27404fa9e02dSMarcin Wojtas 			    llq->max_llq_depth);
27414fa9e02dSMarcin Wojtas 		else
27427d8c4feeSMarcin Wojtas 			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
27434fa9e02dSMarcin Wojtas 			    max_queues->max_sq_depth);
27444fa9e02dSMarcin Wojtas 
27456064f289SMarcin Wojtas 		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
27467d8c4feeSMarcin Wojtas 		    max_queues->max_packet_tx_descs);
27477d8c4feeSMarcin Wojtas 		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
27486064f289SMarcin Wojtas 		    max_queues->max_packet_rx_descs);
27496064f289SMarcin Wojtas 	}
27509b8d05b8SZbigniew Bodek 
27519b8d05b8SZbigniew Bodek 	/* round down to the nearest power of 2 */
27527d8c4feeSMarcin Wojtas 	max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
27537d8c4feeSMarcin Wojtas 	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
27546064f289SMarcin Wojtas 
2755beaadec9SMarcin Wojtas 	/*
2756beaadec9SMarcin Wojtas 	 * When forcing large headers, we multiply the entry size by 2,
2757beaadec9SMarcin Wojtas 	 * and therefore divide the queue size by 2, leaving the amount
2758beaadec9SMarcin Wojtas 	 * of memory used by the queues unchanged.
2759beaadec9SMarcin Wojtas 	 */
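	/*
	 * For example, a 1024-entry Tx queue with 128-byte LLQ entries
	 * becomes a 512-entry queue with 256-byte entries, keeping the
	 * memory footprint the same.
	 */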
2760beaadec9SMarcin Wojtas 	if (ena_force_large_llq_header) {
2761beaadec9SMarcin Wojtas 		if ((llq->entry_size_ctrl_supported &
2762beaadec9SMarcin Wojtas 		    ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 &&
2763beaadec9SMarcin Wojtas 		    ena_dev->tx_mem_queue_type ==
2764beaadec9SMarcin Wojtas 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2765beaadec9SMarcin Wojtas 			max_tx_queue_size /= 2;
27663fc5d816SMarcin Wojtas 			ena_log(ctx->pdev, INFO,
2767beaadec9SMarcin Wojtas 			    "Forcing large headers and decreasing maximum Tx queue size to %d\n",
2768beaadec9SMarcin Wojtas 			    max_tx_queue_size);
2769beaadec9SMarcin Wojtas 		} else {
27703fc5d816SMarcin Wojtas 			ena_log(ctx->pdev, WARN,
2771beaadec9SMarcin Wojtas 			    "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
2772beaadec9SMarcin Wojtas 		}
2773beaadec9SMarcin Wojtas 	}
2774beaadec9SMarcin Wojtas 
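	/*
	 * Clamp the default ring sizes to the supported range and round them
	 * down to the nearest power of 2.
	 */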
27757d8c4feeSMarcin Wojtas 	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
27767d8c4feeSMarcin Wojtas 	    max_tx_queue_size);
27777d8c4feeSMarcin Wojtas 	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
27787d8c4feeSMarcin Wojtas 	    max_rx_queue_size);
27799b8d05b8SZbigniew Bodek 
27807d8c4feeSMarcin Wojtas 	tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
27817d8c4feeSMarcin Wojtas 	rx_queue_size = 1 << (flsl(rx_queue_size) - 1);
27827d8c4feeSMarcin Wojtas 
27837d8c4feeSMarcin Wojtas 	ctx->max_tx_queue_size = max_tx_queue_size;
27847d8c4feeSMarcin Wojtas 	ctx->max_rx_queue_size = max_rx_queue_size;
27856064f289SMarcin Wojtas 	ctx->tx_queue_size = tx_queue_size;
27867d8c4feeSMarcin Wojtas 	ctx->rx_queue_size = rx_queue_size;
27876064f289SMarcin Wojtas 
27886064f289SMarcin Wojtas 	return (0);
27899b8d05b8SZbigniew Bodek }
27909b8d05b8SZbigniew Bodek 
27910bdffe59SMarcin Wojtas static void
279246021271SMarcin Wojtas ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
27939b8d05b8SZbigniew Bodek {
27949b8d05b8SZbigniew Bodek 	struct ena_admin_host_info *host_info;
279546021271SMarcin Wojtas 	uintptr_t rid;
27969b8d05b8SZbigniew Bodek 	int rc;
27979b8d05b8SZbigniew Bodek 
27989b8d05b8SZbigniew Bodek 	/* Allocate only the host info */
27999b8d05b8SZbigniew Bodek 	rc = ena_com_allocate_host_info(ena_dev);
28003f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
28013fc5d816SMarcin Wojtas 		ena_log(dev, ERR, "Cannot allocate host info\n");
28029b8d05b8SZbigniew Bodek 		return;
28039b8d05b8SZbigniew Bodek 	}
28049b8d05b8SZbigniew Bodek 
28059b8d05b8SZbigniew Bodek 	host_info = ena_dev->host_attr.host_info;
28069b8d05b8SZbigniew Bodek 
280746021271SMarcin Wojtas 	if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
280846021271SMarcin Wojtas 		host_info->bdf = rid;
28099b8d05b8SZbigniew Bodek 	host_info->os_type = ENA_ADMIN_OS_FREEBSD;
28109b8d05b8SZbigniew Bodek 	host_info->kernel_ver = osreldate;
28119b8d05b8SZbigniew Bodek 
28129b8d05b8SZbigniew Bodek 	sprintf(host_info->kernel_ver_str, "%d", osreldate);
28139b8d05b8SZbigniew Bodek 	host_info->os_dist = 0;
28149b8d05b8SZbigniew Bodek 	strncpy(host_info->os_dist_str, osrelease,
28159b8d05b8SZbigniew Bodek 	    sizeof(host_info->os_dist_str) - 1);
28169b8d05b8SZbigniew Bodek 
28178f15f8a7SDawid Gorecki 	host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) |
28188f15f8a7SDawid Gorecki 	    (ENA_DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
28198f15f8a7SDawid Gorecki 	    (ENA_DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
28208ece6b25SMarcin Wojtas 	host_info->num_cpus = mp_ncpus;
2821c7444389SMarcin Wojtas 	host_info->driver_supported_features =
28226d1ef2abSArtur Rojek 	    ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
28236d1ef2abSArtur Rojek 	    ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
28249b8d05b8SZbigniew Bodek 
28259b8d05b8SZbigniew Bodek 	rc = ena_com_set_host_attributes(ena_dev);
28263f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
2827a195fab0SMarcin Wojtas 		if (rc == EOPNOTSUPP)
28283fc5d816SMarcin Wojtas 			ena_log(dev, WARN, "Cannot set host attributes\n");
28299b8d05b8SZbigniew Bodek 		else
28303fc5d816SMarcin Wojtas 			ena_log(dev, ERR, "Cannot set host attributes\n");
28319b8d05b8SZbigniew Bodek 
28329b8d05b8SZbigniew Bodek 		goto err;
28339b8d05b8SZbigniew Bodek 	}
28349b8d05b8SZbigniew Bodek 
28359b8d05b8SZbigniew Bodek 	return;
28369b8d05b8SZbigniew Bodek 
28379b8d05b8SZbigniew Bodek err:
28389b8d05b8SZbigniew Bodek 	ena_com_delete_host_info(ena_dev);
28399b8d05b8SZbigniew Bodek }
28409b8d05b8SZbigniew Bodek 
28419b8d05b8SZbigniew Bodek static int
28429b8d05b8SZbigniew Bodek ena_device_init(struct ena_adapter *adapter, device_t pdev,
28439b8d05b8SZbigniew Bodek     struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
28449b8d05b8SZbigniew Bodek {
28453324e304SMichal Krawczyk 	struct ena_llq_configurations llq_config;
28469b8d05b8SZbigniew Bodek 	struct ena_com_dev *ena_dev = adapter->ena_dev;
28479b8d05b8SZbigniew Bodek 	bool readless_supported;
28489b8d05b8SZbigniew Bodek 	uint32_t aenq_groups;
28499b8d05b8SZbigniew Bodek 	int dma_width;
28509b8d05b8SZbigniew Bodek 	int rc;
28519b8d05b8SZbigniew Bodek 
28529b8d05b8SZbigniew Bodek 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
28533f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
28543fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "failed to init mmio read less\n");
28550bdffe59SMarcin Wojtas 		return (rc);
28569b8d05b8SZbigniew Bodek 	}
28579b8d05b8SZbigniew Bodek 
28589b8d05b8SZbigniew Bodek 	/*
28599b8d05b8SZbigniew Bodek 	 * The PCIe configuration space revision id indicates whether mmio
28609b8d05b8SZbigniew Bodek 	 * register read is disabled.
28619b8d05b8SZbigniew Bodek 	 */
28629b8d05b8SZbigniew Bodek 	readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
28639b8d05b8SZbigniew Bodek 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
28649b8d05b8SZbigniew Bodek 
2865a195fab0SMarcin Wojtas 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
28663f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
28673fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "Can not reset device\n");
28689b8d05b8SZbigniew Bodek 		goto err_mmio_read_less;
28699b8d05b8SZbigniew Bodek 	}
28709b8d05b8SZbigniew Bodek 
28719b8d05b8SZbigniew Bodek 	rc = ena_com_validate_version(ena_dev);
28723f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
28733fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "device version is too low\n");
28749b8d05b8SZbigniew Bodek 		goto err_mmio_read_less;
28759b8d05b8SZbigniew Bodek 	}
28769b8d05b8SZbigniew Bodek 
28779b8d05b8SZbigniew Bodek 	dma_width = ena_com_get_dma_width(ena_dev);
28783f9ed7abSMarcin Wojtas 	if (unlikely(dma_width < 0)) {
28793fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "Invalid dma width value %d\n", dma_width);
28809b8d05b8SZbigniew Bodek 		rc = dma_width;
28819b8d05b8SZbigniew Bodek 		goto err_mmio_read_less;
28829b8d05b8SZbigniew Bodek 	}
28839b8d05b8SZbigniew Bodek 	adapter->dma_width = dma_width;
28849b8d05b8SZbigniew Bodek 
28859b8d05b8SZbigniew Bodek 	/* ENA admin level init */
288667ec48bbSMarcin Wojtas 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
28873f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
28883fc5d816SMarcin Wojtas 		ena_log(pdev, ERR,
28899b8d05b8SZbigniew Bodek 		    "Can not initialize ena admin queue with device\n");
28909b8d05b8SZbigniew Bodek 		goto err_mmio_read_less;
28919b8d05b8SZbigniew Bodek 	}
28929b8d05b8SZbigniew Bodek 
28939b8d05b8SZbigniew Bodek 	/*
28949b8d05b8SZbigniew Bodek 	 * To enable the msix interrupts the driver needs to know the number
28959b8d05b8SZbigniew Bodek 	 * of queues. So the driver uses polling mode to retrieve this
28969b8d05b8SZbigniew Bodek 	 * information.
28979b8d05b8SZbigniew Bodek 	 */
28989b8d05b8SZbigniew Bodek 	ena_com_set_admin_polling_mode(ena_dev, true);
28999b8d05b8SZbigniew Bodek 
290046021271SMarcin Wojtas 	ena_config_host_info(ena_dev, pdev);
29019b8d05b8SZbigniew Bodek 
29029b8d05b8SZbigniew Bodek 	/* Get Device Attributes */
29039b8d05b8SZbigniew Bodek 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
29043f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
29053fc5d816SMarcin Wojtas 		ena_log(pdev, ERR,
29069b8d05b8SZbigniew Bodek 		    "Cannot get attribute for ena device rc: %d\n", rc);
29079b8d05b8SZbigniew Bodek 		goto err_admin_init;
29089b8d05b8SZbigniew Bodek 	}
29099b8d05b8SZbigniew Bodek 
2910e6de9a83SMarcin Wojtas 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2911e6de9a83SMarcin Wojtas 	    BIT(ENA_ADMIN_FATAL_ERROR) |
2912e6de9a83SMarcin Wojtas 	    BIT(ENA_ADMIN_WARNING) |
291340621d71SMarcin Wojtas 	    BIT(ENA_ADMIN_NOTIFICATION) |
2914*8cd86b51SOsama Abboud 	    BIT(ENA_ADMIN_KEEP_ALIVE) |
2915*8cd86b51SOsama Abboud 	    BIT(ENA_ADMIN_CONF_NOTIFICATIONS);
29169b8d05b8SZbigniew Bodek 
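	/* Keep only the AENQ groups that are also supported by the device. */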
29179b8d05b8SZbigniew Bodek 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
29189b8d05b8SZbigniew Bodek 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
29193f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
29203fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc);
29219b8d05b8SZbigniew Bodek 		goto err_admin_init;
29229b8d05b8SZbigniew Bodek 	}
29239b8d05b8SZbigniew Bodek 
29249b8d05b8SZbigniew Bodek 	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
29259b8d05b8SZbigniew Bodek 
29263324e304SMichal Krawczyk 	set_default_llq_configurations(&llq_config, &get_feat_ctx->llq);
29273324e304SMichal Krawczyk 
29283324e304SMichal Krawczyk 	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
29293324e304SMichal Krawczyk 	    &llq_config);
29303324e304SMichal Krawczyk 	if (unlikely(rc != 0)) {
29313324e304SMichal Krawczyk 		ena_log(pdev, ERR, "Failed to set placement policy\n");
29323324e304SMichal Krawczyk 		goto err_admin_init;
29333324e304SMichal Krawczyk 	}
29343324e304SMichal Krawczyk 
29350bdffe59SMarcin Wojtas 	return (0);
29369b8d05b8SZbigniew Bodek 
29379b8d05b8SZbigniew Bodek err_admin_init:
29389b8d05b8SZbigniew Bodek 	ena_com_delete_host_info(ena_dev);
29399b8d05b8SZbigniew Bodek 	ena_com_admin_destroy(ena_dev);
29409b8d05b8SZbigniew Bodek err_mmio_read_less:
29419b8d05b8SZbigniew Bodek 	ena_com_mmio_reg_read_request_destroy(ena_dev);
29429b8d05b8SZbigniew Bodek 
29430bdffe59SMarcin Wojtas 	return (rc);
29449b8d05b8SZbigniew Bodek }
29459b8d05b8SZbigniew Bodek 
294682e558eaSDawid Gorecki static int
294782e558eaSDawid Gorecki ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
29489b8d05b8SZbigniew Bodek {
29499b8d05b8SZbigniew Bodek 	struct ena_com_dev *ena_dev = adapter->ena_dev;
29509b8d05b8SZbigniew Bodek 	int rc;
29519b8d05b8SZbigniew Bodek 
29529b8d05b8SZbigniew Bodek 	rc = ena_enable_msix(adapter);
29533f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
29543fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n");
29550bdffe59SMarcin Wojtas 		return (rc);
29569b8d05b8SZbigniew Bodek 	}
29579b8d05b8SZbigniew Bodek 
29589b8d05b8SZbigniew Bodek 	ena_setup_mgmnt_intr(adapter);
29599b8d05b8SZbigniew Bodek 
29609b8d05b8SZbigniew Bodek 	rc = ena_request_mgmnt_irq(adapter);
29613f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
29623fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n");
29639b8d05b8SZbigniew Bodek 		goto err_disable_msix;
29649b8d05b8SZbigniew Bodek 	}
29659b8d05b8SZbigniew Bodek 
29669b8d05b8SZbigniew Bodek 	ena_com_set_admin_polling_mode(ena_dev, false);
29679b8d05b8SZbigniew Bodek 
29689b8d05b8SZbigniew Bodek 	ena_com_admin_aenq_enable(ena_dev);
29699b8d05b8SZbigniew Bodek 
29700bdffe59SMarcin Wojtas 	return (0);
29719b8d05b8SZbigniew Bodek 
29729b8d05b8SZbigniew Bodek err_disable_msix:
29739b8d05b8SZbigniew Bodek 	ena_disable_msix(adapter);
29749b8d05b8SZbigniew Bodek 
29750bdffe59SMarcin Wojtas 	return (rc);
29769b8d05b8SZbigniew Bodek }
29779b8d05b8SZbigniew Bodek 
29789b8d05b8SZbigniew Bodek /* Function called on ENA_ADMIN_KEEP_ALIVE event */
297982e558eaSDawid Gorecki static void
298082e558eaSDawid Gorecki ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
29819b8d05b8SZbigniew Bodek {
29829b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
298330217e2dSMarcin Wojtas 	struct ena_admin_aenq_keep_alive_desc *desc;
29849b8d05b8SZbigniew Bodek 	sbintime_t stime;
298530217e2dSMarcin Wojtas 	uint64_t rx_drops;
29866c84cec3SMarcin Wojtas 	uint64_t tx_drops;
298730217e2dSMarcin Wojtas 
298830217e2dSMarcin Wojtas 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
298930217e2dSMarcin Wojtas 
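	/*
	 * The device reports the drop counters as 32-bit high/low halves;
	 * reassemble them and mirror the values into the HW stats counters.
	 */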
299030217e2dSMarcin Wojtas 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
29916c84cec3SMarcin Wojtas 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
299230217e2dSMarcin Wojtas 	counter_u64_zero(adapter->hw_stats.rx_drops);
299330217e2dSMarcin Wojtas 	counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
29946c84cec3SMarcin Wojtas 	counter_u64_zero(adapter->hw_stats.tx_drops);
29956c84cec3SMarcin Wojtas 	counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
29969b8d05b8SZbigniew Bodek 
29979b8d05b8SZbigniew Bodek 	stime = getsbinuptime();
29989b8d05b8SZbigniew Bodek 	atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
29999b8d05b8SZbigniew Bodek }
30009b8d05b8SZbigniew Bodek 
30019b8d05b8SZbigniew Bodek /* Check for keep alive expiration */
300282e558eaSDawid Gorecki static void
300382e558eaSDawid Gorecki check_for_missing_keep_alive(struct ena_adapter *adapter)
30049b8d05b8SZbigniew Bodek {
30059b8d05b8SZbigniew Bodek 	sbintime_t timestamp, time;
30069b8d05b8SZbigniew Bodek 
30079b8d05b8SZbigniew Bodek 	if (adapter->wd_active == 0)
30089b8d05b8SZbigniew Bodek 		return;
30099b8d05b8SZbigniew Bodek 
301040621d71SMarcin Wojtas 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
30119b8d05b8SZbigniew Bodek 		return;
30129b8d05b8SZbigniew Bodek 
30139b8d05b8SZbigniew Bodek 	timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
30149b8d05b8SZbigniew Bodek 	time = getsbinuptime() - timestamp;
30159b8d05b8SZbigniew Bodek 	if (unlikely(time > adapter->keep_alive_timeout)) {
30163fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n");
30179b8d05b8SZbigniew Bodek 		counter_u64_add(adapter->dev_stats.wd_expired, 1);
30187926bc44SMarcin Wojtas 		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
30199b8d05b8SZbigniew Bodek 	}
3020858659f7SMarcin Wojtas }
30219b8d05b8SZbigniew Bodek 
30229b8d05b8SZbigniew Bodek /* Check if admin queue is enabled */
302382e558eaSDawid Gorecki static void
302482e558eaSDawid Gorecki check_for_admin_com_state(struct ena_adapter *adapter)
30259b8d05b8SZbigniew Bodek {
302682e558eaSDawid Gorecki 	if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) {
30273fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR,
30289b8d05b8SZbigniew Bodek 		    "ENA admin queue is not in running state!\n");
30299b8d05b8SZbigniew Bodek 		counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
30307926bc44SMarcin Wojtas 		ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
30319b8d05b8SZbigniew Bodek 	}
3032858659f7SMarcin Wojtas }
30339b8d05b8SZbigniew Bodek 
303474dba3adSMarcin Wojtas static int
3035d12f7bfcSMarcin Wojtas check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3036d12f7bfcSMarcin Wojtas     struct ena_ring *rx_ring)
3037d12f7bfcSMarcin Wojtas {
30380ac122c3SDawid Gorecki 	if (likely(atomic_load_8(&rx_ring->first_interrupt)))
3039d12f7bfcSMarcin Wojtas 		return (0);
3040d12f7bfcSMarcin Wojtas 
3041d12f7bfcSMarcin Wojtas 	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3042d12f7bfcSMarcin Wojtas 		return (0);
3043d12f7bfcSMarcin Wojtas 
3044d12f7bfcSMarcin Wojtas 	rx_ring->no_interrupt_event_cnt++;
3045d12f7bfcSMarcin Wojtas 
304682e558eaSDawid Gorecki 	if (rx_ring->no_interrupt_event_cnt ==
304782e558eaSDawid Gorecki 	    ENA_MAX_NO_INTERRUPT_ITERATIONS) {
304882e558eaSDawid Gorecki 		ena_log(adapter->pdev, ERR,
304982e558eaSDawid Gorecki 		    "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
305082e558eaSDawid Gorecki 		    rx_ring->qid);
30517926bc44SMarcin Wojtas 		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3052d12f7bfcSMarcin Wojtas 		return (EIO);
3053d12f7bfcSMarcin Wojtas 	}
3054d12f7bfcSMarcin Wojtas 
3055d12f7bfcSMarcin Wojtas 	return (0);
3056d12f7bfcSMarcin Wojtas }
3057d12f7bfcSMarcin Wojtas 
3058d12f7bfcSMarcin Wojtas static int
3059d12f7bfcSMarcin Wojtas check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
306074dba3adSMarcin Wojtas     struct ena_ring *tx_ring)
306174dba3adSMarcin Wojtas {
30621f67704eSOsama Abboud 	uint32_t missed_tx = 0, new_missed_tx = 0;
30633fc5d816SMarcin Wojtas 	device_t pdev = adapter->pdev;
306474dba3adSMarcin Wojtas 	struct bintime curtime, time;
306574dba3adSMarcin Wojtas 	struct ena_tx_buffer *tx_buf;
3066d8aba82bSDawid Gorecki 	int time_since_last_cleanup;
3067d8aba82bSDawid Gorecki 	int missing_tx_comp_to;
3068d12f7bfcSMarcin Wojtas 	sbintime_t time_offset;
3069d12f7bfcSMarcin Wojtas 	int i, rc = 0;
307074dba3adSMarcin Wojtas 
307174dba3adSMarcin Wojtas 	getbinuptime(&curtime);
307274dba3adSMarcin Wojtas 
307374dba3adSMarcin Wojtas 	for (i = 0; i < tx_ring->ring_size; i++) {
307474dba3adSMarcin Wojtas 		tx_buf = &tx_ring->tx_buffer_info[i];
307574dba3adSMarcin Wojtas 
30760bdffe59SMarcin Wojtas 		if (bintime_isset(&tx_buf->timestamp) == 0)
307774dba3adSMarcin Wojtas 			continue;
307874dba3adSMarcin Wojtas 
307974dba3adSMarcin Wojtas 		time = curtime;
308074dba3adSMarcin Wojtas 		bintime_sub(&time, &tx_buf->timestamp);
3081d12f7bfcSMarcin Wojtas 		time_offset = bttosbt(time);
3082d12f7bfcSMarcin Wojtas 
30830ac122c3SDawid Gorecki 		if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) &&
3084d12f7bfcSMarcin Wojtas 		    time_offset > 2 * adapter->missing_tx_timeout)) {
3085d12f7bfcSMarcin Wojtas 			/*
3086d12f7bfcSMarcin Wojtas 			 * If the interrupt is still not received after the
3087d12f7bfcSMarcin Wojtas 			 * grace period, schedule a reset.
3088d12f7bfcSMarcin Wojtas 			 */
30893fc5d816SMarcin Wojtas 			ena_log(pdev, ERR,
3090d12f7bfcSMarcin Wojtas 			    "Potential MSIX issue on Tx side Queue = %d. "
309182e558eaSDawid Gorecki 			    "Reset the device\n",
309282e558eaSDawid Gorecki 			    tx_ring->qid);
30937926bc44SMarcin Wojtas 			ena_trigger_reset(adapter,
30947926bc44SMarcin Wojtas 			    ENA_REGS_RESET_MISS_INTERRUPT);
3095d12f7bfcSMarcin Wojtas 			return (EIO);
3096d12f7bfcSMarcin Wojtas 		}
309774dba3adSMarcin Wojtas 
309874dba3adSMarcin Wojtas 		/* Check again if packet is still waiting */
3099d12f7bfcSMarcin Wojtas 		if (unlikely(time_offset > adapter->missing_tx_timeout)) {
310074dba3adSMarcin Wojtas 
3101f01b2cd9SArthur Kiyanovski 			if (tx_buf->print_once) {
31029272e45cSArthur Kiyanovski 				time_since_last_cleanup = TICKS_2_MSEC(ticks -
3103d8aba82bSDawid Gorecki 				    tx_ring->tx_last_cleanup_ticks);
310482e558eaSDawid Gorecki 				missing_tx_comp_to = sbttoms(
310582e558eaSDawid Gorecki 				    adapter->missing_tx_timeout);
310682e558eaSDawid Gorecki 				ena_log(pdev, WARN,
310782e558eaSDawid Gorecki 				    "Found a Tx that wasn't completed on time, qid %d, index %d. "
31089272e45cSArthur Kiyanovski 				    "%d msecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n",
3109d8aba82bSDawid Gorecki 				    tx_ring->qid, i, time_since_last_cleanup,
3110d8aba82bSDawid Gorecki 				    missing_tx_comp_to);
31111f67704eSOsama Abboud 				/* Count the newly discovered missing Tx completion */
31121f67704eSOsama Abboud 				new_missed_tx++;
3113d8aba82bSDawid Gorecki 			}
311474dba3adSMarcin Wojtas 
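			/* Report each stuck packet only once. */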
3115f01b2cd9SArthur Kiyanovski 			tx_buf->print_once = false;
311674dba3adSMarcin Wojtas 			missed_tx++;
3117d12f7bfcSMarcin Wojtas 		}
3118d12f7bfcSMarcin Wojtas 	}
31191f67704eSOsama Abboud 	/* Check whether this Tx ring's missing Tx completions exceed the threshold */
3120d12f7bfcSMarcin Wojtas 	if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
31213fc5d816SMarcin Wojtas 		ena_log(pdev, ERR,
3122d12f7bfcSMarcin Wojtas 		    "The number of lost tx completions is above the threshold "
3123d12f7bfcSMarcin Wojtas 		    "(%d > %d). Reset the device\n",
31244e8acd84SMarcin Wojtas 		    missed_tx, adapter->missing_tx_threshold);
31257926bc44SMarcin Wojtas 		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
3126d12f7bfcSMarcin Wojtas 		rc = EIO;
312774dba3adSMarcin Wojtas 	}
31281f67704eSOsama Abboud 	/* Add the newly discovered missing TX completions */
31291f67704eSOsama Abboud 	counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx);
3130d12f7bfcSMarcin Wojtas 
3131d12f7bfcSMarcin Wojtas 	return (rc);
313274dba3adSMarcin Wojtas }
313374dba3adSMarcin Wojtas 
31349b8d05b8SZbigniew Bodek /*
31359b8d05b8SZbigniew Bodek  * Check for Tx packets which were not completed on time.
31369b8d05b8SZbigniew Bodek  * The timeout is defined by "missing_tx_timeout".
31379b8d05b8SZbigniew Bodek  * A reset will be performed if the number of incomplete
31389b8d05b8SZbigniew Bodek  * transactions exceeds "missing_tx_threshold".
31399b8d05b8SZbigniew Bodek  */
31400bdffe59SMarcin Wojtas static void
3141d12f7bfcSMarcin Wojtas check_for_missing_completions(struct ena_adapter *adapter)
31429b8d05b8SZbigniew Bodek {
31439b8d05b8SZbigniew Bodek 	struct ena_ring *tx_ring;
3144d12f7bfcSMarcin Wojtas 	struct ena_ring *rx_ring;
314574dba3adSMarcin Wojtas 	int i, budget, rc;
31469b8d05b8SZbigniew Bodek 
31479b8d05b8SZbigniew Bodek 	/* Make sure the driver isn't turning the device off in another process */
31489b8d05b8SZbigniew Bodek 	rmb();
31499b8d05b8SZbigniew Bodek 
3150fd43fd2aSMarcin Wojtas 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
31519b8d05b8SZbigniew Bodek 		return;
31529b8d05b8SZbigniew Bodek 
3153fd43fd2aSMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
31549b8d05b8SZbigniew Bodek 		return;
31559b8d05b8SZbigniew Bodek 
315640621d71SMarcin Wojtas 	if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
31579b8d05b8SZbigniew Bodek 		return;
31589b8d05b8SZbigniew Bodek 
31599b8d05b8SZbigniew Bodek 	budget = adapter->missing_tx_max_queues;
31609b8d05b8SZbigniew Bodek 
31617d8c4feeSMarcin Wojtas 	for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
31629b8d05b8SZbigniew Bodek 		tx_ring = &adapter->tx_ring[i];
3163d12f7bfcSMarcin Wojtas 		rx_ring = &adapter->rx_ring[i];
31649b8d05b8SZbigniew Bodek 
3165d12f7bfcSMarcin Wojtas 		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3166d12f7bfcSMarcin Wojtas 		if (unlikely(rc != 0))
3167d12f7bfcSMarcin Wojtas 			return;
3168d12f7bfcSMarcin Wojtas 
3169d12f7bfcSMarcin Wojtas 		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
31700bdffe59SMarcin Wojtas 		if (unlikely(rc != 0))
31719b8d05b8SZbigniew Bodek 			return;
31729b8d05b8SZbigniew Bodek 
31739b8d05b8SZbigniew Bodek 		budget--;
3174cd5d5804SMarcin Wojtas 		if (budget == 0) {
31759b8d05b8SZbigniew Bodek 			i++;
31769b8d05b8SZbigniew Bodek 			break;
31779b8d05b8SZbigniew Bodek 		}
31789b8d05b8SZbigniew Bodek 	}
31799b8d05b8SZbigniew Bodek 
31807d8c4feeSMarcin Wojtas 	adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
31819b8d05b8SZbigniew Bodek }
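
/*
 * Example of the round-robin walk above (hypothetical values only): with
 * num_io_queues = 8 and missing_tx_max_queues (the budget) = 4, one timer
 * tick checks queues 0-3 and stores next_monitored_tx_qid = 4; the next tick
 * checks queues 4-7 and wraps around via 8 % 8 == 0.
 */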
31829b8d05b8SZbigniew Bodek 
31835cb9db07SMarcin Wojtas /* trigger rx cleanup after 2 consecutive detections */
3184efe6ab18SMarcin Wojtas #define EMPTY_RX_REFILL 2
3185efe6ab18SMarcin Wojtas /* For the rare case where the device runs out of Rx descriptors and the
3186efe6ab18SMarcin Wojtas  * MSI-X handler fails to refill new Rx descriptors (for example, due to
3187efe6ab18SMarcin Wojtas  * a lack of memory).
3188efe6ab18SMarcin Wojtas  * This case leads to a deadlock:
3189efe6ab18SMarcin Wojtas  * the device won't send interrupts since all the new Rx packets are dropped,
3190efe6ab18SMarcin Wojtas  * and the MSI-X handler won't allocate new Rx descriptors, so the device
3191efe6ab18SMarcin Wojtas  * won't be able to deliver new packets to the host.
3192efe6ab18SMarcin Wojtas  *
3193efe6ab18SMarcin Wojtas  * When such a situation is detected, run the Rx cleanup task in another thread.
3194efe6ab18SMarcin Wojtas  */
3195efe6ab18SMarcin Wojtas static void
3196efe6ab18SMarcin Wojtas check_for_empty_rx_ring(struct ena_adapter *adapter)
3197efe6ab18SMarcin Wojtas {
3198efe6ab18SMarcin Wojtas 	struct ena_ring *rx_ring;
3199efe6ab18SMarcin Wojtas 	int i, refill_required;
3200efe6ab18SMarcin Wojtas 
3201fd43fd2aSMarcin Wojtas 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3202efe6ab18SMarcin Wojtas 		return;
3203efe6ab18SMarcin Wojtas 
3204fd43fd2aSMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3205efe6ab18SMarcin Wojtas 		return;
3206efe6ab18SMarcin Wojtas 
32077d8c4feeSMarcin Wojtas 	for (i = 0; i < adapter->num_io_queues; i++) {
3208efe6ab18SMarcin Wojtas 		rx_ring = &adapter->rx_ring[i];
3209efe6ab18SMarcin Wojtas 
321082e558eaSDawid Gorecki 		refill_required = ena_com_free_q_entries(
321182e558eaSDawid Gorecki 		    rx_ring->ena_com_io_sq);
3212efe6ab18SMarcin Wojtas 		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3213efe6ab18SMarcin Wojtas 			rx_ring->empty_rx_queue++;
3214efe6ab18SMarcin Wojtas 
3215efe6ab18SMarcin Wojtas 			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3216efe6ab18SMarcin Wojtas 				counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3217efe6ab18SMarcin Wojtas 				    1);
3218efe6ab18SMarcin Wojtas 
32193fc5d816SMarcin Wojtas 				ena_log(adapter->pdev, WARN,
32203fc5d816SMarcin Wojtas 				    "Rx ring %d is stalled. Triggering the refill function\n",
32213fc5d816SMarcin Wojtas 				    i);
3222efe6ab18SMarcin Wojtas 
32235cb9db07SMarcin Wojtas 				taskqueue_enqueue(rx_ring->que->cleanup_tq,
32245cb9db07SMarcin Wojtas 				    &rx_ring->que->cleanup_task);
3225efe6ab18SMarcin Wojtas 				rx_ring->empty_rx_queue = 0;
3226efe6ab18SMarcin Wojtas 			}
3227efe6ab18SMarcin Wojtas 		} else {
3228efe6ab18SMarcin Wojtas 			rx_ring->empty_rx_queue = 0;
3229efe6ab18SMarcin Wojtas 		}
3230efe6ab18SMarcin Wojtas 	}
3231efe6ab18SMarcin Wojtas }
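
/*
 * Illustrative note on the stall condition above (hypothetical ring size):
 * with a ring_size of 1024, the ring is treated as empty only when
 * ena_com_free_q_entries() reports 1023 free entries (everything but one
 * descriptor), and the cleanup task is enqueued only after that state has
 * been observed on EMPTY_RX_REFILL (2) consecutive timer ticks.
 */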
32329b8d05b8SZbigniew Bodek 
323382e558eaSDawid Gorecki static void
323482e558eaSDawid Gorecki ena_update_hints(struct ena_adapter *adapter,
323540621d71SMarcin Wojtas     struct ena_admin_ena_hw_hints *hints)
323640621d71SMarcin Wojtas {
323740621d71SMarcin Wojtas 	struct ena_com_dev *ena_dev = adapter->ena_dev;
323840621d71SMarcin Wojtas 
323940621d71SMarcin Wojtas 	if (hints->admin_completion_tx_timeout)
324040621d71SMarcin Wojtas 		ena_dev->admin_queue.completion_timeout =
324140621d71SMarcin Wojtas 		    hints->admin_completion_tx_timeout * 1000;
324240621d71SMarcin Wojtas 
324340621d71SMarcin Wojtas 	if (hints->mmio_read_timeout)
324440621d71SMarcin Wojtas 		/* convert to usec */
324582e558eaSDawid Gorecki 		ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000;
324640621d71SMarcin Wojtas 
324740621d71SMarcin Wojtas 	if (hints->missed_tx_completion_count_threshold_to_reset)
324840621d71SMarcin Wojtas 		adapter->missing_tx_threshold =
324940621d71SMarcin Wojtas 		    hints->missed_tx_completion_count_threshold_to_reset;
325040621d71SMarcin Wojtas 
325140621d71SMarcin Wojtas 	if (hints->missing_tx_completion_timeout) {
325240621d71SMarcin Wojtas 		if (hints->missing_tx_completion_timeout ==
325340621d71SMarcin Wojtas 		    ENA_HW_HINTS_NO_TIMEOUT)
325440621d71SMarcin Wojtas 			adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
325540621d71SMarcin Wojtas 		else
325682e558eaSDawid Gorecki 			adapter->missing_tx_timeout = SBT_1MS *
325782e558eaSDawid Gorecki 			    hints->missing_tx_completion_timeout;
325840621d71SMarcin Wojtas 	}
325940621d71SMarcin Wojtas 
326040621d71SMarcin Wojtas 	if (hints->driver_watchdog_timeout) {
326140621d71SMarcin Wojtas 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
326240621d71SMarcin Wojtas 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
326340621d71SMarcin Wojtas 		else
326482e558eaSDawid Gorecki 			adapter->keep_alive_timeout = SBT_1MS *
326582e558eaSDawid Gorecki 			    hints->driver_watchdog_timeout;
326640621d71SMarcin Wojtas 	}
326740621d71SMarcin Wojtas }
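
/*
 * Illustrative example of the hint conversions above (hypothetical hint
 * values): a missing_tx_completion_timeout hint of 4000 ms becomes
 * SBT_1MS * 4000 in sbintime_t units, which sbttoms() converts back to 4000
 * when the timeout is reported, while an mmio_read_timeout hint of 10 ms is
 * stored as 10 * 1000 = 10000 us.
 */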
326840621d71SMarcin Wojtas 
3269f180142cSMarcin Wojtas /**
3270f180142cSMarcin Wojtas  * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
3271f180142cSMarcin Wojtas  * @adapter: ENA device adapter
3272f180142cSMarcin Wojtas  *
3273f180142cSMarcin Wojtas  * Returns 0 on success, EOPNOTSUPP if the current HW doesn't support those
3274f180142cSMarcin Wojtas  * metrics, or another error code on failure.
3275f180142cSMarcin Wojtas  *
3276f180142cSMarcin Wojtas  * This function can possibly cause a race with other calls to the admin queue.
3277f180142cSMarcin Wojtas  * Because of that, the caller should either lock this function or make sure
3278f180142cSMarcin Wojtas  * that there is no race in the current context.
3279f180142cSMarcin Wojtas  */
3280f180142cSMarcin Wojtas static int
3281f180142cSMarcin Wojtas ena_copy_eni_metrics(struct ena_adapter *adapter)
3282f180142cSMarcin Wojtas {
3283f180142cSMarcin Wojtas 	static bool print_once = true;
3284f180142cSMarcin Wojtas 	int rc;
3285f180142cSMarcin Wojtas 
3286f180142cSMarcin Wojtas 	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
3287f180142cSMarcin Wojtas 
3288f180142cSMarcin Wojtas 	if (rc != 0) {
3289f180142cSMarcin Wojtas 		if (rc == ENA_COM_UNSUPPORTED) {
3290f180142cSMarcin Wojtas 			if (print_once) {
32913fc5d816SMarcin Wojtas 				ena_log(adapter->pdev, WARN,
3292f180142cSMarcin Wojtas 				    "Retrieving ENI metrics is not supported.\n");
3293f180142cSMarcin Wojtas 				print_once = false;
3294f180142cSMarcin Wojtas 			} else {
32953fc5d816SMarcin Wojtas 				ena_log(adapter->pdev, DBG,
3296f180142cSMarcin Wojtas 				    "Retrieving ENI metrics is not supported.\n");
3297f180142cSMarcin Wojtas 			}
3298f180142cSMarcin Wojtas 		} else {
32993fc5d816SMarcin Wojtas 			ena_log(adapter->pdev, ERR,
3300f180142cSMarcin Wojtas 			    "Failed to get ENI metrics: %d\n", rc);
3301f180142cSMarcin Wojtas 		}
3302f180142cSMarcin Wojtas 	}
3303f180142cSMarcin Wojtas 
3304f180142cSMarcin Wojtas 	return (rc);
3305f180142cSMarcin Wojtas }
3306f180142cSMarcin Wojtas 
3307f97993adSOsama Abboud static int
330836d42c86SOsama Abboud ena_copy_srd_metrics(struct ena_adapter *adapter)
330936d42c86SOsama Abboud {
331036d42c86SOsama Abboud 	return (ena_com_get_ena_srd_info(adapter->ena_dev, &adapter->ena_srd_info));
331136d42c86SOsama Abboud }
331236d42c86SOsama Abboud 
331336d42c86SOsama Abboud static int
3314f97993adSOsama Abboud ena_copy_customer_metrics(struct ena_adapter *adapter)
3315f97993adSOsama Abboud {
3316f97993adSOsama Abboud 	struct ena_com_dev *dev;
3317f97993adSOsama Abboud 	u32 supported_metrics_count;
3318f97993adSOsama Abboud 	int rc, len;
3319f97993adSOsama Abboud 
3320f97993adSOsama Abboud 	dev = adapter->ena_dev;
3321f97993adSOsama Abboud 
3322f97993adSOsama Abboud 	supported_metrics_count = ena_com_get_customer_metric_count(dev);
3323f97993adSOsama Abboud 	len = supported_metrics_count * sizeof(u64);
3324f97993adSOsama Abboud 
3325f97993adSOsama Abboud 	/* Fill the data buffer */
3326f97993adSOsama Abboud 	rc = ena_com_get_customer_metrics(adapter->ena_dev,
3327f97993adSOsama Abboud 	    (char *)(adapter->customer_metrics_array), len);
3328f97993adSOsama Abboud 
3329f97993adSOsama Abboud 	return (rc);
3330f97993adSOsama Abboud }
3331f97993adSOsama Abboud 
33329b8d05b8SZbigniew Bodek static void
33339b8d05b8SZbigniew Bodek ena_timer_service(void *data)
33349b8d05b8SZbigniew Bodek {
33359b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter = (struct ena_adapter *)data;
33369b8d05b8SZbigniew Bodek 	struct ena_admin_host_info *host_info =
33379b8d05b8SZbigniew Bodek 	    adapter->ena_dev->host_attr.host_info;
33389b8d05b8SZbigniew Bodek 
33399b8d05b8SZbigniew Bodek 	check_for_missing_keep_alive(adapter);
33409b8d05b8SZbigniew Bodek 
33419b8d05b8SZbigniew Bodek 	check_for_admin_com_state(adapter);
33429b8d05b8SZbigniew Bodek 
3343d12f7bfcSMarcin Wojtas 	check_for_missing_completions(adapter);
33449b8d05b8SZbigniew Bodek 
3345efe6ab18SMarcin Wojtas 	check_for_empty_rx_ring(adapter);
3346efe6ab18SMarcin Wojtas 
3347f180142cSMarcin Wojtas 	/*
33485b925280SOsama Abboud 	 * User-controlled update of the ENA metrics.
3349f180142cSMarcin Wojtas 	 * If the interval was set to 0, the stats shouldn't be updated at
3350f180142cSMarcin Wojtas 	 * all.
33515b925280SOsama Abboud 	 * Otherwise, wait 'metrics_sample_interval' seconds before updating
3352f180142cSMarcin Wojtas 	 * the stats.
3353f180142cSMarcin Wojtas 	 * As the timer service is executed every second, it's enough to
3354f180142cSMarcin Wojtas 	 * increment the appropriate counter each time the service runs.
3355f180142cSMarcin Wojtas 	 */
33565b925280SOsama Abboud 	if ((adapter->metrics_sample_interval != 0) &&
33575b925280SOsama Abboud 	    (++adapter->metrics_sample_interval_cnt >=
33585b925280SOsama Abboud 	    adapter->metrics_sample_interval)) {
3359b899a02aSDawid Gorecki 		taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task);
33605b925280SOsama Abboud 		adapter->metrics_sample_interval_cnt = 0;
3361f180142cSMarcin Wojtas 	}
3362f180142cSMarcin Wojtas 
33640bdffe59SMarcin Wojtas 	if (host_info != NULL)
33659b8d05b8SZbigniew Bodek 		ena_update_host_info(host_info, adapter->ifp);
33669b8d05b8SZbigniew Bodek 
3367fd43fd2aSMarcin Wojtas 	if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3368d10ec3adSDawid Gorecki 		/*
3369d10ec3adSDawid Gorecki 		 * Timeout when validating version indicates that the device
3370d10ec3adSDawid Gorecki 		 * became unresponsive. If that happens, skip the reset and
3371d10ec3adSDawid Gorecki 		 * reschedule the timer service, so the reset can be retried later.
3372d10ec3adSDawid Gorecki 		 */
3373d10ec3adSDawid Gorecki 		if (ena_com_validate_version(adapter->ena_dev) ==
3374d10ec3adSDawid Gorecki 		    ENA_COM_TIMER_EXPIRED) {
3375d10ec3adSDawid Gorecki 			ena_log(adapter->pdev, WARN,
3376d10ec3adSDawid Gorecki 			    "FW unresponsive, skipping reset\n");
3377d10ec3adSDawid Gorecki 			ENA_TIMER_RESET(adapter);
3378d10ec3adSDawid Gorecki 			return;
3379d10ec3adSDawid Gorecki 		}
33803fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, WARN, "Trigger reset is on\n");
33819b8d05b8SZbigniew Bodek 		taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
33829b8d05b8SZbigniew Bodek 		return;
33839b8d05b8SZbigniew Bodek 	}
33849b8d05b8SZbigniew Bodek 
33859b8d05b8SZbigniew Bodek 	/*
33869b8d05b8SZbigniew Bodek 	 * Schedule another timeout one second from now.
33879b8d05b8SZbigniew Bodek 	 */
338878554d0cSDawid Gorecki 	ENA_TIMER_RESET(adapter);
33899b8d05b8SZbigniew Bodek }
33909b8d05b8SZbigniew Bodek 
339138c7b965SMarcin Wojtas void
339232f63fa7SMarcin Wojtas ena_destroy_device(struct ena_adapter *adapter, bool graceful)
33939b8d05b8SZbigniew Bodek {
339432f63fa7SMarcin Wojtas 	if_t ifp = adapter->ifp;
33959b8d05b8SZbigniew Bodek 	struct ena_com_dev *ena_dev = adapter->ena_dev;
33969b8d05b8SZbigniew Bodek 	bool dev_up;
339732f63fa7SMarcin Wojtas 
339832f63fa7SMarcin Wojtas 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
339932f63fa7SMarcin Wojtas 		return;
340032f63fa7SMarcin Wojtas 
3401c59a5fbdSArthur Kiyanovski 	if (!graceful)
340232f63fa7SMarcin Wojtas 		if_link_state_change(ifp, LINK_STATE_DOWN);
340332f63fa7SMarcin Wojtas 
340478554d0cSDawid Gorecki 	ENA_TIMER_DRAIN(adapter);
340532f63fa7SMarcin Wojtas 
340632f63fa7SMarcin Wojtas 	dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
340732f63fa7SMarcin Wojtas 	if (dev_up)
340832f63fa7SMarcin Wojtas 		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
340932f63fa7SMarcin Wojtas 
341032f63fa7SMarcin Wojtas 	if (!graceful)
341132f63fa7SMarcin Wojtas 		ena_com_set_admin_running_state(ena_dev, false);
341232f63fa7SMarcin Wojtas 
341332f63fa7SMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
341432f63fa7SMarcin Wojtas 		ena_down(adapter);
341532f63fa7SMarcin Wojtas 
341632f63fa7SMarcin Wojtas 	/*
341732f63fa7SMarcin Wojtas 	 * Stop the device from sending AENQ events (if the device was up and
341832f63fa7SMarcin Wojtas 	 * the trigger reset was on, ena_down has already performed the reset)
341932f63fa7SMarcin Wojtas 	 */
342032f63fa7SMarcin Wojtas 	if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
342132f63fa7SMarcin Wojtas 		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
342232f63fa7SMarcin Wojtas 
342332f63fa7SMarcin Wojtas 	ena_free_mgmnt_irq(adapter);
342432f63fa7SMarcin Wojtas 
342532f63fa7SMarcin Wojtas 	ena_disable_msix(adapter);
342632f63fa7SMarcin Wojtas 
3427e2735b09SMarcin Wojtas 	/*
3428e2735b09SMarcin Wojtas 	 * IO rings resources should be freed because `ena_restore_device()`
3429e2735b09SMarcin Wojtas 	 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
3430e2735b09SMarcin Wojtas 	 * vectors. The amount of MSIX vectors after destroy-restore may be
3431e2735b09SMarcin Wojtas 	 * different than before. Therefore, IO rings resources should be
3432e2735b09SMarcin Wojtas 	 * established from scratch each time.
3433e2735b09SMarcin Wojtas 	 */
3434e2735b09SMarcin Wojtas 	ena_free_all_io_rings_resources(adapter);
3435e2735b09SMarcin Wojtas 
343632f63fa7SMarcin Wojtas 	ena_com_abort_admin_commands(ena_dev);
343732f63fa7SMarcin Wojtas 
343832f63fa7SMarcin Wojtas 	ena_com_wait_for_abort_completion(ena_dev);
343932f63fa7SMarcin Wojtas 
344032f63fa7SMarcin Wojtas 	ena_com_admin_destroy(ena_dev);
344132f63fa7SMarcin Wojtas 
344232f63fa7SMarcin Wojtas 	ena_com_mmio_reg_read_request_destroy(ena_dev);
344332f63fa7SMarcin Wojtas 
344432f63fa7SMarcin Wojtas 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
344532f63fa7SMarcin Wojtas 
344632f63fa7SMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
344732f63fa7SMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
344832f63fa7SMarcin Wojtas }
344932f63fa7SMarcin Wojtas 
345032f63fa7SMarcin Wojtas static int
345132f63fa7SMarcin Wojtas ena_device_validate_params(struct ena_adapter *adapter,
345232f63fa7SMarcin Wojtas     struct ena_com_dev_get_features_ctx *get_feat_ctx)
345332f63fa7SMarcin Wojtas {
345432f63fa7SMarcin Wojtas 	if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
345532f63fa7SMarcin Wojtas 	    ETHER_ADDR_LEN) != 0) {
34563fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n");
345732f63fa7SMarcin Wojtas 		return (EINVAL);
345832f63fa7SMarcin Wojtas 	}
345932f63fa7SMarcin Wojtas 
346032f63fa7SMarcin Wojtas 	if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
34613fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR,
346232f63fa7SMarcin Wojtas 		    "Error, device max mtu is smaller than ifp MTU\n");
346332f63fa7SMarcin Wojtas 		return (EINVAL);
346432f63fa7SMarcin Wojtas 	}
346532f63fa7SMarcin Wojtas 
346632f63fa7SMarcin Wojtas 	return (0);
346732f63fa7SMarcin Wojtas }
346832f63fa7SMarcin Wojtas 
346938c7b965SMarcin Wojtas int
347032f63fa7SMarcin Wojtas ena_restore_device(struct ena_adapter *adapter)
347132f63fa7SMarcin Wojtas {
347232f63fa7SMarcin Wojtas 	struct ena_com_dev_get_features_ctx get_feat_ctx;
347332f63fa7SMarcin Wojtas 	struct ena_com_dev *ena_dev = adapter->ena_dev;
347432f63fa7SMarcin Wojtas 	if_t ifp = adapter->ifp;
347532f63fa7SMarcin Wojtas 	device_t dev = adapter->pdev;
347632f63fa7SMarcin Wojtas 	int wd_active;
34779b8d05b8SZbigniew Bodek 	int rc;
34789b8d05b8SZbigniew Bodek 
347932f63fa7SMarcin Wojtas 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
348032f63fa7SMarcin Wojtas 
348132f63fa7SMarcin Wojtas 	rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
348232f63fa7SMarcin Wojtas 	if (rc != 0) {
34833fc5d816SMarcin Wojtas 		ena_log(dev, ERR, "Cannot initialize device\n");
348432f63fa7SMarcin Wojtas 		goto err;
348532f63fa7SMarcin Wojtas 	}
348632f63fa7SMarcin Wojtas 	/*
348632f63fa7SMarcin Wojtas 	 * Only enable WD if it was enabled before the reset, so it won't
348732f63fa7SMarcin Wojtas 	 * override the value set by the user via sysctl.
348932f63fa7SMarcin Wojtas 	 */
349032f63fa7SMarcin Wojtas 	if (adapter->wd_active != 0)
349132f63fa7SMarcin Wojtas 		adapter->wd_active = wd_active;
349232f63fa7SMarcin Wojtas 
349332f63fa7SMarcin Wojtas 	rc = ena_device_validate_params(adapter, &get_feat_ctx);
349432f63fa7SMarcin Wojtas 	if (rc != 0) {
34953fc5d816SMarcin Wojtas 		ena_log(dev, ERR, "Validation of device parameters failed\n");
349632f63fa7SMarcin Wojtas 		goto err_device_destroy;
349732f63fa7SMarcin Wojtas 	}
349832f63fa7SMarcin Wojtas 
349932f63fa7SMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
350132f63fa7SMarcin Wojtas 	/* Make sure we don't have a race with the AENQ link state handler */
350132f63fa7SMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
350232f63fa7SMarcin Wojtas 		if_link_state_change(ifp, LINK_STATE_UP);
350332f63fa7SMarcin Wojtas 
3504aa9c3226SMarcin Wojtas 	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
350532f63fa7SMarcin Wojtas 	if (rc != 0) {
35063fc5d816SMarcin Wojtas 		ena_log(dev, ERR, "Enable MSI-X failed\n");
350732f63fa7SMarcin Wojtas 		goto err_device_destroy;
350832f63fa7SMarcin Wojtas 	}
350932f63fa7SMarcin Wojtas 
3510e2735b09SMarcin Wojtas 	/*
3511e2735b09SMarcin Wojtas 	 * The effective number of used MSI-X vectors should be the same as
3512e2735b09SMarcin Wojtas 	 * before `ena_destroy_device()`, if possible, or closest to it if
3513e2735b09SMarcin Wojtas 	 * fewer vectors are available.
3514e2735b09SMarcin Wojtas 	 */
3515e2735b09SMarcin Wojtas 	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
351682e558eaSDawid Gorecki 		adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3517e2735b09SMarcin Wojtas 
3518e2735b09SMarcin Wojtas 	/* Re-initialize rings basic information */
3519e2735b09SMarcin Wojtas 	ena_init_io_rings(adapter);
3520e2735b09SMarcin Wojtas 
352132f63fa7SMarcin Wojtas 	/* If the interface was up before the reset bring it up */
352232f63fa7SMarcin Wojtas 	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
352332f63fa7SMarcin Wojtas 		rc = ena_up(adapter);
352432f63fa7SMarcin Wojtas 		if (rc != 0) {
35253fc5d816SMarcin Wojtas 			ena_log(dev, ERR, "Failed to create I/O queues\n");
352632f63fa7SMarcin Wojtas 			goto err_disable_msix;
352732f63fa7SMarcin Wojtas 		}
352832f63fa7SMarcin Wojtas 	}
352932f63fa7SMarcin Wojtas 
353024392281SMarcin Wojtas 	/* Indicate that device is running again and ready to work */
353132f63fa7SMarcin Wojtas 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
353224392281SMarcin Wojtas 
353324392281SMarcin Wojtas 	/*
353424392281SMarcin Wojtas 	 * As the AENQ handlers weren't executed during the reset because
353524392281SMarcin Wojtas 	 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the keep alive
353624392281SMarcin Wojtas 	 * timestamp must be updated again. That will prevent the next reset
353724392281SMarcin Wojtas 	 * from being caused by a missing keep alive.
353824392281SMarcin Wojtas 	 */
353924392281SMarcin Wojtas 	adapter->keep_alive_timestamp = getsbinuptime();
354078554d0cSDawid Gorecki 	ENA_TIMER_RESET(adapter);
354178554d0cSDawid Gorecki 
35427d8c4feeSMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
354332f63fa7SMarcin Wojtas 
354432f63fa7SMarcin Wojtas 	return (rc);
354532f63fa7SMarcin Wojtas 
354632f63fa7SMarcin Wojtas err_disable_msix:
354732f63fa7SMarcin Wojtas 	ena_free_mgmnt_irq(adapter);
354832f63fa7SMarcin Wojtas 	ena_disable_msix(adapter);
354932f63fa7SMarcin Wojtas err_device_destroy:
355032f63fa7SMarcin Wojtas 	ena_com_abort_admin_commands(ena_dev);
355132f63fa7SMarcin Wojtas 	ena_com_wait_for_abort_completion(ena_dev);
355232f63fa7SMarcin Wojtas 	ena_com_admin_destroy(ena_dev);
355332f63fa7SMarcin Wojtas 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
355432f63fa7SMarcin Wojtas 	ena_com_mmio_reg_read_request_destroy(ena_dev);
355532f63fa7SMarcin Wojtas err:
355632f63fa7SMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
355732f63fa7SMarcin Wojtas 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
35583fc5d816SMarcin Wojtas 	ena_log(dev, ERR, "Reset attempt failed. Can not reset the device\n");
355932f63fa7SMarcin Wojtas 
356032f63fa7SMarcin Wojtas 	return (rc);
356132f63fa7SMarcin Wojtas }
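
/*
 * Worked example of the MSI-X vector accounting above (illustrative values
 * only): if 8 IO queues were in use before the reset but only 5 vectors are
 * granted afterwards, then with ENA_ADMIN_MSIX_VEC reserved for the admin
 * queue only (5 - ENA_ADMIN_MSIX_VEC) IO queues can be kept, so
 * adapter->num_io_queues is reduced accordingly before the rings are
 * re-initialized.
 */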
356232f63fa7SMarcin Wojtas 
356332f63fa7SMarcin Wojtas static void
3564b899a02aSDawid Gorecki ena_metrics_task(void *arg, int pending)
3565b899a02aSDawid Gorecki {
3566b899a02aSDawid Gorecki 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
3567b899a02aSDawid Gorecki 
3568b899a02aSDawid Gorecki 	ENA_LOCK_LOCK();
3569f97993adSOsama Abboud 
3570f97993adSOsama Abboud 	if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_CUSTOMER_METRICS))
3571f97993adSOsama Abboud 		(void)ena_copy_customer_metrics(adapter);
3572f97993adSOsama Abboud 	else if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS))
3573b899a02aSDawid Gorecki 		(void)ena_copy_eni_metrics(adapter);
3574f97993adSOsama Abboud 
357536d42c86SOsama Abboud 	if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO))
357636d42c86SOsama Abboud 		(void)ena_copy_srd_metrics(adapter);
357736d42c86SOsama Abboud 
3578b899a02aSDawid Gorecki 	ENA_LOCK_UNLOCK();
3579b899a02aSDawid Gorecki }
3580b899a02aSDawid Gorecki 
3581b899a02aSDawid Gorecki static void
358232f63fa7SMarcin Wojtas ena_reset_task(void *arg, int pending)
358332f63fa7SMarcin Wojtas {
358432f63fa7SMarcin Wojtas 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
358532f63fa7SMarcin Wojtas 
358607aff471SArtur Rojek 	ENA_LOCK_LOCK();
3587433ab9b6SArtur Rojek 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
358832f63fa7SMarcin Wojtas 		ena_destroy_device(adapter, false);
358932f63fa7SMarcin Wojtas 		ena_restore_device(adapter);
3590d209ffeeSDawid Gorecki 
3591d209ffeeSDawid Gorecki 		ena_log(adapter->pdev, INFO,
3592d209ffeeSDawid Gorecki 		    "Device reset completed successfully, Driver info: %s\n",
3593d209ffeeSDawid Gorecki 		    ena_version);
3594433ab9b6SArtur Rojek 	}
359507aff471SArtur Rojek 	ENA_LOCK_UNLOCK();
35969b8d05b8SZbigniew Bodek }
35979b8d05b8SZbigniew Bodek 
3598b9e80b52SOsama Abboud static void
3599b9e80b52SOsama Abboud ena_free_stats(struct ena_adapter *adapter)
3600b9e80b52SOsama Abboud {
3601b9e80b52SOsama Abboud 	ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3602b9e80b52SOsama Abboud 	    sizeof(struct ena_hw_stats));
3603b9e80b52SOsama Abboud 	ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3604b9e80b52SOsama Abboud 	    sizeof(struct ena_stats_dev));
3606b9e80b52SOsama Abboud }

36079b8d05b8SZbigniew Bodek /**
36089b8d05b8SZbigniew Bodek  * ena_attach - Device Initialization Routine
36099b8d05b8SZbigniew Bodek  * @pdev: device information struct
36109b8d05b8SZbigniew Bodek  *
36119b8d05b8SZbigniew Bodek  * Returns 0 on success, or an error code on failure.
36129b8d05b8SZbigniew Bodek  *
36139b8d05b8SZbigniew Bodek  * ena_attach initializes an adapter identified by a device structure.
36149b8d05b8SZbigniew Bodek  * The OS initialization, configuring of the adapter private structure,
36159b8d05b8SZbigniew Bodek  * and a hardware reset occur.
36169b8d05b8SZbigniew Bodek  **/
36179b8d05b8SZbigniew Bodek static int
36189b8d05b8SZbigniew Bodek ena_attach(device_t pdev)
36199b8d05b8SZbigniew Bodek {
36209b8d05b8SZbigniew Bodek 	struct ena_com_dev_get_features_ctx get_feat_ctx;
36216064f289SMarcin Wojtas 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
36229b8d05b8SZbigniew Bodek 	static int version_printed;
36239b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter;
36249b8d05b8SZbigniew Bodek 	struct ena_com_dev *ena_dev = NULL;
36257d8c4feeSMarcin Wojtas 	uint32_t max_num_io_queues;
36261c808fcdSMichal Krawczyk 	int msix_rid;
36274fa9e02dSMarcin Wojtas 	int rid, rc;
36284fa9e02dSMarcin Wojtas 
36299b8d05b8SZbigniew Bodek 	adapter = device_get_softc(pdev);
36309b8d05b8SZbigniew Bodek 	adapter->pdev = pdev;
3631eb4c4f4aSMarcin Wojtas 	adapter->first_bind = -1;
36329b8d05b8SZbigniew Bodek 
36336959869eSMarcin Wojtas 	/*
36346959869eSMarcin Wojtas 	 * Set up the timer service - the driver is responsible for avoiding
36356959869eSMarcin Wojtas 	 * concurrency, as the callout won't be using any locking inside.
36366959869eSMarcin Wojtas 	 */
363778554d0cSDawid Gorecki 	ENA_TIMER_INIT(adapter);
36388f15f8a7SDawid Gorecki 	adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO;
36398f15f8a7SDawid Gorecki 	adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO;
36408f15f8a7SDawid Gorecki 	adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES;
36418f15f8a7SDawid Gorecki 	adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD;
36429b8d05b8SZbigniew Bodek 
3643f9e1d947SOsama Abboud 	adapter->irq_cpu_base = ENA_BASE_CPU_UNSPECIFIED;
3644f9e1d947SOsama Abboud 	adapter->irq_cpu_stride = 0;
3645f9e1d947SOsama Abboud 
3646f9e1d947SOsama Abboud #ifdef RSS
3647f9e1d947SOsama Abboud 	adapter->rss_enabled = 1;
3648f9e1d947SOsama Abboud #endif
3649f9e1d947SOsama Abboud 
36509b8d05b8SZbigniew Bodek 	if (version_printed++ == 0)
36513fc5d816SMarcin Wojtas 		ena_log(pdev, INFO, "%s\n", ena_version);
36529b8d05b8SZbigniew Bodek 
36539b8d05b8SZbigniew Bodek 	/* Allocate memory for ena_dev structure */
3654cd5d5804SMarcin Wojtas 	ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
3655cd5d5804SMarcin Wojtas 	    M_WAITOK | M_ZERO);
36569b8d05b8SZbigniew Bodek 
36579b8d05b8SZbigniew Bodek 	adapter->ena_dev = ena_dev;
36589b8d05b8SZbigniew Bodek 	ena_dev->dmadev = pdev;
36594fa9e02dSMarcin Wojtas 
36604fa9e02dSMarcin Wojtas 	rid = PCIR_BAR(ENA_REG_BAR);
36614fa9e02dSMarcin Wojtas 	adapter->memory = NULL;
366282e558eaSDawid Gorecki 	adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
366382e558eaSDawid Gorecki 	    RF_ACTIVE);
36644fa9e02dSMarcin Wojtas 	if (unlikely(adapter->registers == NULL)) {
36653fc5d816SMarcin Wojtas 		ena_log(pdev, ERR,
36664fa9e02dSMarcin Wojtas 		    "unable to allocate bus resource: registers!\n");
36674fa9e02dSMarcin Wojtas 		rc = ENOMEM;
36684fa9e02dSMarcin Wojtas 		goto err_dev_free;
36694fa9e02dSMarcin Wojtas 	}
36704fa9e02dSMarcin Wojtas 
36711c808fcdSMichal Krawczyk 	/* The MSI-X vector table may reside on BAR0 with the registers or on BAR1. */
36721c808fcdSMichal Krawczyk 	msix_rid = pci_msix_table_bar(pdev);
36731c808fcdSMichal Krawczyk 	if (msix_rid != rid) {
36741c808fcdSMichal Krawczyk 		adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
36751c808fcdSMichal Krawczyk 		    &msix_rid, RF_ACTIVE);
36761c808fcdSMichal Krawczyk 		if (unlikely(adapter->msix == NULL)) {
36773fc5d816SMarcin Wojtas 			ena_log(pdev, ERR,
36781c808fcdSMichal Krawczyk 			    "unable to allocate bus resource: msix!\n");
36791c808fcdSMichal Krawczyk 			rc = ENOMEM;
36801c808fcdSMichal Krawczyk 			goto err_pci_free;
36811c808fcdSMichal Krawczyk 		}
36821c808fcdSMichal Krawczyk 		adapter->msix_rid = msix_rid;
36831c808fcdSMichal Krawczyk 	}
36841c808fcdSMichal Krawczyk 
36859b8d05b8SZbigniew Bodek 	ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
36869b8d05b8SZbigniew Bodek 	    M_WAITOK | M_ZERO);
36879b8d05b8SZbigniew Bodek 
36889b8d05b8SZbigniew Bodek 	/* Store register resources */
368982e558eaSDawid Gorecki 	((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag(
369082e558eaSDawid Gorecki 	    adapter->registers);
369182e558eaSDawid Gorecki 	((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle(
369282e558eaSDawid Gorecki 	    adapter->registers);
36939b8d05b8SZbigniew Bodek 
36943f9ed7abSMarcin Wojtas 	if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) {
36953fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "failed to pmap registers bar\n");
36969b8d05b8SZbigniew Bodek 		rc = ENXIO;
3697cd5d5804SMarcin Wojtas 		goto err_bus_free;
36989b8d05b8SZbigniew Bodek 	}
36999b8d05b8SZbigniew Bodek 
37003324e304SMichal Krawczyk 	rc = ena_map_llq_mem_bar(pdev, ena_dev);
37013324e304SMichal Krawczyk 	if (unlikely(rc != 0)) {
37023324e304SMichal Krawczyk 		ena_log(pdev, ERR, "Failed to map ENA mem bar\n");
37033324e304SMichal Krawczyk 		goto err_bus_free;
37043324e304SMichal Krawczyk 	}
37059b8d05b8SZbigniew Bodek 
3706fd43fd2aSMarcin Wojtas 	/* Initially clear all the flags */
3707fd43fd2aSMarcin Wojtas 	ENA_FLAG_ZERO(adapter);
3708fd43fd2aSMarcin Wojtas 
37099b8d05b8SZbigniew Bodek 	/* Device initialization */
37109b8d05b8SZbigniew Bodek 	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
37113f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
37123fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "ENA device init failed! (err: %d)\n", rc);
37139b8d05b8SZbigniew Bodek 		rc = ENXIO;
37149b8d05b8SZbigniew Bodek 		goto err_bus_free;
37159b8d05b8SZbigniew Bodek 	}
37169b8d05b8SZbigniew Bodek 
37170b432b70SMarcin Wojtas 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
371882e558eaSDawid Gorecki 		adapter->disable_meta_caching = !!(
371982e558eaSDawid Gorecki 		    get_feat_ctx.llq.accel_mode.u.get.supported_flags &
37200b432b70SMarcin Wojtas 		    BIT(ENA_ADMIN_DISABLE_META_CACHING));
37210b432b70SMarcin Wojtas 
37229b8d05b8SZbigniew Bodek 	adapter->keep_alive_timestamp = getsbinuptime();
37239b8d05b8SZbigniew Bodek 
37249b8d05b8SZbigniew Bodek 	adapter->tx_offload_cap = get_feat_ctx.offload.tx;
37259b8d05b8SZbigniew Bodek 
37269b8d05b8SZbigniew Bodek 	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
37279b8d05b8SZbigniew Bodek 	    ETHER_ADDR_LEN);
37289b8d05b8SZbigniew Bodek 
37297d8c4feeSMarcin Wojtas 	calc_queue_ctx.pdev = pdev;
37306064f289SMarcin Wojtas 	calc_queue_ctx.ena_dev = ena_dev;
37316064f289SMarcin Wojtas 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
37326064f289SMarcin Wojtas 
37337d8c4feeSMarcin Wojtas 	/* Calculate initial and maximum IO queue number and size */
37347d8c4feeSMarcin Wojtas 	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
37357d8c4feeSMarcin Wojtas 	    &get_feat_ctx);
37367d8c4feeSMarcin Wojtas 	rc = ena_calc_io_queue_size(&calc_queue_ctx);
37377d8c4feeSMarcin Wojtas 	if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
37386064f289SMarcin Wojtas 		rc = EFAULT;
37399b8d05b8SZbigniew Bodek 		goto err_com_free;
37409b8d05b8SZbigniew Bodek 	}
37419b8d05b8SZbigniew Bodek 
37429762a033SMarcin Wojtas 	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
37439762a033SMarcin Wojtas 	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
37447d8c4feeSMarcin Wojtas 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
37457d8c4feeSMarcin Wojtas 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
37466064f289SMarcin Wojtas 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
37476064f289SMarcin Wojtas 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
37486064f289SMarcin Wojtas 
37497d8c4feeSMarcin Wojtas 	adapter->max_num_io_queues = max_num_io_queues;
37507d8c4feeSMarcin Wojtas 
37516064f289SMarcin Wojtas 	adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;
37529b8d05b8SZbigniew Bodek 
37537d8c4feeSMarcin Wojtas 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
37547d8c4feeSMarcin Wojtas 
37557d8c4feeSMarcin Wojtas 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
37567d8c4feeSMarcin Wojtas 
37579b8d05b8SZbigniew Bodek 	/* set up dma tags for rx and tx buffers */
37589b8d05b8SZbigniew Bodek 	rc = ena_setup_tx_dma_tag(adapter);
37594e8acd84SMarcin Wojtas 	if (unlikely(rc != 0)) {
37603fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "Failed to create TX DMA tag\n");
3761cd5d5804SMarcin Wojtas 		goto err_com_free;
37624e8acd84SMarcin Wojtas 	}
37639b8d05b8SZbigniew Bodek 
37649b8d05b8SZbigniew Bodek 	rc = ena_setup_rx_dma_tag(adapter);
37654e8acd84SMarcin Wojtas 	if (unlikely(rc != 0)) {
37663fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "Failed to create RX DMA tag\n");
3767cd5d5804SMarcin Wojtas 		goto err_tx_tag_free;
37684e8acd84SMarcin Wojtas 	}
37699b8d05b8SZbigniew Bodek 
3770e2735b09SMarcin Wojtas 	/*
3771e2735b09SMarcin Wojtas 	 * The amount of requested MSIX vectors is equal to
3772e2735b09SMarcin Wojtas 	 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
3773e2735b09SMarcin Wojtas 	 * by HW capabilities (see `ena_calc_max_io_queue_num()`) but may not be
3774e2735b09SMarcin Wojtas 	 * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be
3775e2735b09SMarcin Wojtas 	 * achieved if there are not enough system resources. By default, the
3776e2735b09SMarcin Wojtas 	 * number of effectively used IO queues is the same but later on it can
3777e2735b09SMarcin Wojtas 	 * be limited by the user using sysctl interface.
3778e2735b09SMarcin Wojtas 	 */
3779aa9c3226SMarcin Wojtas 	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
37803f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0)) {
37813fc5d816SMarcin Wojtas 		ena_log(pdev, ERR,
37829b8d05b8SZbigniew Bodek 		    "Failed to enable and set the admin interrupts\n");
3783c115a1e2SMarcin Wojtas 		goto err_io_free;
3784c115a1e2SMarcin Wojtas 	}
3785e2735b09SMarcin Wojtas 	/* By default, all of the allocated MSI-X vectors are actively used */
3786e2735b09SMarcin Wojtas 	adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3787e2735b09SMarcin Wojtas 
3788e2735b09SMarcin Wojtas 	/* initialize rings basic information */
3789e2735b09SMarcin Wojtas 	ena_init_io_rings(adapter);
3790c115a1e2SMarcin Wojtas 
3791f97993adSOsama Abboud 	rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
3792f97993adSOsama Abboud 	if (rc) {
3793f97993adSOsama Abboud 		ena_log(pdev, ERR, "Failed to allocate customer metrics buffer.\n");
3794f97993adSOsama Abboud 		goto err_msix_free;
3795f97993adSOsama Abboud 	}
3796f97993adSOsama Abboud 
3797f97993adSOsama Abboud 	rc = ena_sysctl_allocate_customer_metrics_buffer(adapter);
3798f97993adSOsama Abboud 	if (unlikely(rc)) {
3799f97993adSOsama Abboud 		ena_log(pdev, ERR, "Failed to allocate sysctl customer metrics buffer.\n");
3800f97993adSOsama Abboud 		goto err_metrics_buffer_destroy;
3801f97993adSOsama Abboud 	}
3802f97993adSOsama Abboud 
3803b9e80b52SOsama Abboud 	/* Initialize statistics */
3804b9e80b52SOsama Abboud 	ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3805b9e80b52SOsama Abboud 	    sizeof(struct ena_stats_dev));
3806b9e80b52SOsama Abboud 	ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
3807b9e80b52SOsama Abboud 	    sizeof(struct ena_hw_stats));
3808b9e80b52SOsama Abboud 	ena_sysctl_add_nodes(adapter);
3809b9e80b52SOsama Abboud 
3810c115a1e2SMarcin Wojtas 	/* setup network interface */
3811aa386085SZhenlei Huang 	ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
38129b8d05b8SZbigniew Bodek 
3813081169f2SZbigniew Bodek 	/* Initialize reset task queue */
3814081169f2SZbigniew Bodek 	TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3815081169f2SZbigniew Bodek 	adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3816081169f2SZbigniew Bodek 	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
381782e558eaSDawid Gorecki 	taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq",
381882e558eaSDawid Gorecki 	    device_get_nameunit(adapter->pdev));
3819081169f2SZbigniew Bodek 
3820b899a02aSDawid Gorecki 	/* Initialize metrics task queue */
3821b899a02aSDawid Gorecki 	TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter);
3822b899a02aSDawid Gorecki 	adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue",
3823b899a02aSDawid Gorecki 	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq);
382482e558eaSDawid Gorecki 	taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq",
382582e558eaSDawid Gorecki 	    device_get_nameunit(adapter->pdev));
3826b899a02aSDawid Gorecki 
3827d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP
3828d17b7d87SMarcin Wojtas 	rc = ena_netmap_attach(adapter);
3829d17b7d87SMarcin Wojtas 	if (rc != 0) {
38303fc5d816SMarcin Wojtas 		ena_log(pdev, ERR, "netmap attach failed: %d\n", rc);
3831d17b7d87SMarcin Wojtas 		goto err_detach;
3832d17b7d87SMarcin Wojtas 	}
3833d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */
3834d17b7d87SMarcin Wojtas 
38359b8d05b8SZbigniew Bodek 	/* Tell the stack that the interface is not active */
38369b8d05b8SZbigniew Bodek 	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3837fd43fd2aSMarcin Wojtas 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
38389b8d05b8SZbigniew Bodek 
383978554d0cSDawid Gorecki 	/* Run the timer service */
384078554d0cSDawid Gorecki 	ENA_TIMER_RESET(adapter);
384178554d0cSDawid Gorecki 
38429b8d05b8SZbigniew Bodek 	return (0);
38439b8d05b8SZbigniew Bodek 
3844d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP
3845d17b7d87SMarcin Wojtas err_detach:
3846d17b7d87SMarcin Wojtas 	ether_ifdetach(adapter->ifp);
3847f97993adSOsama Abboud 	free(adapter->customer_metrics_array, M_DEVBUF);
38485517ca84SOsama Abboud #endif /* DEV_NETMAP */
3849f97993adSOsama Abboud err_metrics_buffer_destroy:
3850f97993adSOsama Abboud 	ena_com_delete_customer_metrics_buffer(ena_dev);
3851c115a1e2SMarcin Wojtas err_msix_free:
3852b9e80b52SOsama Abboud 	ena_free_stats(adapter);
3853c115a1e2SMarcin Wojtas 	ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
3854c115a1e2SMarcin Wojtas 	ena_free_mgmnt_irq(adapter);
3855c115a1e2SMarcin Wojtas 	ena_disable_msix(adapter);
3856cd5d5804SMarcin Wojtas err_io_free:
38579b8d05b8SZbigniew Bodek 	ena_free_all_io_rings_resources(adapter);
38589b8d05b8SZbigniew Bodek 	ena_free_rx_dma_tag(adapter);
3859cd5d5804SMarcin Wojtas err_tx_tag_free:
38609b8d05b8SZbigniew Bodek 	ena_free_tx_dma_tag(adapter);
3861cd5d5804SMarcin Wojtas err_com_free:
38629b8d05b8SZbigniew Bodek 	ena_com_admin_destroy(ena_dev);
38639b8d05b8SZbigniew Bodek 	ena_com_delete_host_info(ena_dev);
3864cd5d5804SMarcin Wojtas 	ena_com_mmio_reg_read_request_destroy(ena_dev);
38659b8d05b8SZbigniew Bodek err_bus_free:
38669b8d05b8SZbigniew Bodek 	free(ena_dev->bus, M_DEVBUF);
38671c808fcdSMichal Krawczyk err_pci_free:
38689b8d05b8SZbigniew Bodek 	ena_free_pci_resources(adapter);
38694fa9e02dSMarcin Wojtas err_dev_free:
38704fa9e02dSMarcin Wojtas 	free(ena_dev, M_DEVBUF);
3871cd5d5804SMarcin Wojtas 
38729b8d05b8SZbigniew Bodek 	return (rc);
38739b8d05b8SZbigniew Bodek }
38749b8d05b8SZbigniew Bodek 
38759b8d05b8SZbigniew Bodek /**
38769b8d05b8SZbigniew Bodek  * ena_detach - Device Removal Routine
38779b8d05b8SZbigniew Bodek  * @pdev: device information struct
38789b8d05b8SZbigniew Bodek  *
38799b8d05b8SZbigniew Bodek  * ena_detach is called by the device subsystem to alert the driver
38809b8d05b8SZbigniew Bodek  * that it should release a PCI device.
38819b8d05b8SZbigniew Bodek  **/
38829b8d05b8SZbigniew Bodek static int
38839b8d05b8SZbigniew Bodek ena_detach(device_t pdev)
38849b8d05b8SZbigniew Bodek {
38859b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter = device_get_softc(pdev);
38869b8d05b8SZbigniew Bodek 	struct ena_com_dev *ena_dev = adapter->ena_dev;
38879b8d05b8SZbigniew Bodek 	int rc;
38889b8d05b8SZbigniew Bodek 
38899b8d05b8SZbigniew Bodek 	/* Make sure VLANs are not using the driver */
38907583c633SJustin Hibbits 	if (if_vlantrunkinuse(adapter->ifp)) {
38913fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n");
38929b8d05b8SZbigniew Bodek 		return (EBUSY);
38939b8d05b8SZbigniew Bodek 	}
38949b8d05b8SZbigniew Bodek 
38959151c55dSMarcin Wojtas 	ether_ifdetach(adapter->ifp);
38969151c55dSMarcin Wojtas 
38976959869eSMarcin Wojtas 	/* Stop timer service */
389807aff471SArtur Rojek 	ENA_LOCK_LOCK();
389978554d0cSDawid Gorecki 	ENA_TIMER_DRAIN(adapter);
390007aff471SArtur Rojek 	ENA_LOCK_UNLOCK();
39016959869eSMarcin Wojtas 
3902b899a02aSDawid Gorecki 	/* Release metrics task */
3903b899a02aSDawid Gorecki 	while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL))
3904b899a02aSDawid Gorecki 		taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task);
3905b899a02aSDawid Gorecki 	taskqueue_free(adapter->metrics_tq);
3906b899a02aSDawid Gorecki 
39076959869eSMarcin Wojtas 	/* Release reset task */
39089b8d05b8SZbigniew Bodek 	while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
39099b8d05b8SZbigniew Bodek 		taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
39109b8d05b8SZbigniew Bodek 	taskqueue_free(adapter->reset_tq);
39119b8d05b8SZbigniew Bodek 
391207aff471SArtur Rojek 	ENA_LOCK_LOCK();
39139b8d05b8SZbigniew Bodek 	ena_down(adapter);
391432f63fa7SMarcin Wojtas 	ena_destroy_device(adapter, true);
391507aff471SArtur Rojek 	ENA_LOCK_UNLOCK();
39169b8d05b8SZbigniew Bodek 
39170e7d31f6SMarcin Wojtas 	/* Restore unregistered sysctl queue nodes. */
39180e7d31f6SMarcin Wojtas 	ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues,
39190e7d31f6SMarcin Wojtas 	    adapter->max_num_io_queues);
39200e7d31f6SMarcin Wojtas 
3921d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP
3922d17b7d87SMarcin Wojtas 	netmap_detach(adapter->ifp);
3923d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */
3924d17b7d87SMarcin Wojtas 
3925b9e80b52SOsama Abboud 	ena_free_stats(adapter);
39269b8d05b8SZbigniew Bodek 
39279b8d05b8SZbigniew Bodek 	rc = ena_free_rx_dma_tag(adapter);
39283f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0))
39293fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, WARN,
39309b8d05b8SZbigniew Bodek 		    "Unmapped RX DMA tag associations\n");
39319b8d05b8SZbigniew Bodek 
39329b8d05b8SZbigniew Bodek 	rc = ena_free_tx_dma_tag(adapter);
39333f9ed7abSMarcin Wojtas 	if (unlikely(rc != 0))
39343fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, WARN,
39359b8d05b8SZbigniew Bodek 		    "Unmapped TX DMA tag associations\n");
39369b8d05b8SZbigniew Bodek 
39379b8d05b8SZbigniew Bodek 	ena_free_irqs(adapter);
39389b8d05b8SZbigniew Bodek 
39399b8d05b8SZbigniew Bodek 	ena_free_pci_resources(adapter);
39409b8d05b8SZbigniew Bodek 
39416d1ef2abSArtur Rojek 	if (adapter->rss_indir != NULL)
39426d1ef2abSArtur Rojek 		free(adapter->rss_indir, M_DEVBUF);
39436d1ef2abSArtur Rojek 
394432f63fa7SMarcin Wojtas 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
394532f63fa7SMarcin Wojtas 		ena_com_rss_destroy(ena_dev);
394632f63fa7SMarcin Wojtas 
394732f63fa7SMarcin Wojtas 	ena_com_delete_host_info(ena_dev);
394832f63fa7SMarcin Wojtas 
3949f97993adSOsama Abboud 	free(adapter->customer_metrics_array, M_DEVBUF);
3950f97993adSOsama Abboud 
3951f97993adSOsama Abboud 	ena_com_delete_customer_metrics_buffer(ena_dev);
3952f97993adSOsama Abboud 
39539151c55dSMarcin Wojtas 	if_free(adapter->ifp);
39549151c55dSMarcin Wojtas 
39559b8d05b8SZbigniew Bodek 	free(ena_dev->bus, M_DEVBUF);
39569b8d05b8SZbigniew Bodek 
39579b8d05b8SZbigniew Bodek 	free(ena_dev, M_DEVBUF);
39589b8d05b8SZbigniew Bodek 
39599b8d05b8SZbigniew Bodek 	return (bus_generic_detach(pdev));
39609b8d05b8SZbigniew Bodek }
39619b8d05b8SZbigniew Bodek 
39629b8d05b8SZbigniew Bodek /******************************************************************************
39639b8d05b8SZbigniew Bodek  ******************************** AENQ Handlers *******************************
39649b8d05b8SZbigniew Bodek  *****************************************************************************/
39659b8d05b8SZbigniew Bodek /**
39669b8d05b8SZbigniew Bodek  * ena_update_on_link_change:
39679b8d05b8SZbigniew Bodek  * Notify the network interface about the change in link status
39689b8d05b8SZbigniew Bodek  **/
39699b8d05b8SZbigniew Bodek static void
39709b8d05b8SZbigniew Bodek ena_update_on_link_change(void *adapter_data,
39719b8d05b8SZbigniew Bodek     struct ena_admin_aenq_entry *aenq_e)
39729b8d05b8SZbigniew Bodek {
39739b8d05b8SZbigniew Bodek 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
39749b8d05b8SZbigniew Bodek 	struct ena_admin_aenq_link_change_desc *aenq_desc;
39759b8d05b8SZbigniew Bodek 	int status;
39769b8d05b8SZbigniew Bodek 	if_t ifp;
39779b8d05b8SZbigniew Bodek 
39789b8d05b8SZbigniew Bodek 	aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
39799b8d05b8SZbigniew Bodek 	ifp = adapter->ifp;
39809b8d05b8SZbigniew Bodek 	status = aenq_desc->flags &
39819b8d05b8SZbigniew Bodek 	    ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
39829b8d05b8SZbigniew Bodek 
39839b8d05b8SZbigniew Bodek 	if (status != 0) {
39843fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, INFO, "link is UP\n");
3985fd43fd2aSMarcin Wojtas 		ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
398632f63fa7SMarcin Wojtas 		if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
398732f63fa7SMarcin Wojtas 			if_link_state_change(ifp, LINK_STATE_UP);
398832f63fa7SMarcin Wojtas 	} else {
39893fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, INFO, "link is DOWN\n");
39909b8d05b8SZbigniew Bodek 		if_link_state_change(ifp, LINK_STATE_DOWN);
3991fd43fd2aSMarcin Wojtas 		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
39929b8d05b8SZbigniew Bodek 	}
39939b8d05b8SZbigniew Bodek }
39949b8d05b8SZbigniew Bodek 
399582e558eaSDawid Gorecki static void
399682e558eaSDawid Gorecki ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
399740621d71SMarcin Wojtas {
399840621d71SMarcin Wojtas 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
399940621d71SMarcin Wojtas 	struct ena_admin_ena_hw_hints *hints;
400040621d71SMarcin Wojtas 
400182e558eaSDawid Gorecki 	ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
400282e558eaSDawid Gorecki 	    adapter->ena_dev, "Invalid group(%x) expected %x\n",
400382e558eaSDawid Gorecki 	    aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);
400440621d71SMarcin Wojtas 
40059eb1615fSMarcin Wojtas 	switch (aenq_e->aenq_common_desc.syndrome) {
400640621d71SMarcin Wojtas 	case ENA_ADMIN_UPDATE_HINTS:
400740621d71SMarcin Wojtas 		hints =
400840621d71SMarcin Wojtas 		    (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
400940621d71SMarcin Wojtas 		ena_update_hints(adapter, hints);
401040621d71SMarcin Wojtas 		break;
401140621d71SMarcin Wojtas 	default:
40123fc5d816SMarcin Wojtas 		ena_log(adapter->pdev, ERR,
401340621d71SMarcin Wojtas 		    "Invalid aenq notification link state %d\n",
40149eb1615fSMarcin Wojtas 		    aenq_e->aenq_common_desc.syndrome);
401540621d71SMarcin Wojtas 	}
401640621d71SMarcin Wojtas }
401740621d71SMarcin Wojtas 
401807aff471SArtur Rojek static void
401907aff471SArtur Rojek ena_lock_init(void *arg)
402007aff471SArtur Rojek {
402107aff471SArtur Rojek 	ENA_LOCK_INIT();
402207aff471SArtur Rojek }
402307aff471SArtur Rojek SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);
402407aff471SArtur Rojek 
402507aff471SArtur Rojek static void
402607aff471SArtur Rojek ena_lock_uninit(void *arg)
402707aff471SArtur Rojek {
402807aff471SArtur Rojek 	ENA_LOCK_DESTROY();
402907aff471SArtur Rojek }
403007aff471SArtur Rojek SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);
403107aff471SArtur Rojek 
40329b8d05b8SZbigniew Bodek /**
40339b8d05b8SZbigniew Bodek  * This handler will be called for an unknown event group or unimplemented handlers
40349b8d05b8SZbigniew Bodek  **/
40359b8d05b8SZbigniew Bodek static void
4036e6de9a83SMarcin Wojtas unimplemented_aenq_handler(void *adapter_data,
40379b8d05b8SZbigniew Bodek     struct ena_admin_aenq_entry *aenq_e)
40389b8d05b8SZbigniew Bodek {
4039e6de9a83SMarcin Wojtas 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4040e6de9a83SMarcin Wojtas 
40413fc5d816SMarcin Wojtas 	ena_log(adapter->pdev, ERR,
4042e6de9a83SMarcin Wojtas 	    "Unknown event was received or event with unimplemented handler\n");
40439b8d05b8SZbigniew Bodek }
40449b8d05b8SZbigniew Bodek 
4045*8cd86b51SOsama Abboud static void
4046*8cd86b51SOsama Abboud ena_conf_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
4047*8cd86b51SOsama Abboud {
4048*8cd86b51SOsama Abboud 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4049*8cd86b51SOsama Abboud 	struct ena_admin_aenq_conf_notifications_desc *desc;
4050*8cd86b51SOsama Abboud 	u64 bitmap, bit;
4051*8cd86b51SOsama Abboud 
4052*8cd86b51SOsama Abboud 	desc = (struct ena_admin_aenq_conf_notifications_desc *)aenq_e;
4053*8cd86b51SOsama Abboud 	bitmap = desc->notifications_bitmap;
4054*8cd86b51SOsama Abboud 
4055*8cd86b51SOsama Abboud 	if (bitmap == 0) {
4056*8cd86b51SOsama Abboud 		ena_log(adapter->pdev, INFO,
4057*8cd86b51SOsama Abboud 		    "Empty configuration notification bitmap\n");
4058*8cd86b51SOsama Abboud 		return;
4059*8cd86b51SOsama Abboud 	}
4060*8cd86b51SOsama Abboud 
4061*8cd86b51SOsama Abboud 	for (bit = ffsll(bitmap); bit != 0; bit = ffsll(bitmap)) {
4062*8cd86b51SOsama Abboud 		bit--;
4063*8cd86b51SOsama Abboud 		ena_log(adapter->pdev, INFO,
4064*8cd86b51SOsama Abboud 		    "Sub-optimal configuration notification code: %" PRIu64 " Refer to AWS ENA documentation for additional details and mitigation options.\n",
4065*8cd86b51SOsama Abboud 		    bit + 1);
4066*8cd86b51SOsama Abboud 		/* Clear the processed bit */
4067*8cd86b51SOsama Abboud 		bitmap &= ~(1UL << bit);
4068*8cd86b51SOsama Abboud 	}
4069*8cd86b51SOsama Abboud }
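
/*
 * Worked example of the bitmap decoding above (illustrative only): for a
 * notifications_bitmap of 0x5 (bits 0 and 2 set), ffsll() returns 1 and then
 * 3, so the loop logs configuration notification codes 1 and 3, clearing each
 * processed bit until the bitmap is empty.
 */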
4070*8cd86b51SOsama Abboud 
40719b8d05b8SZbigniew Bodek static struct ena_aenq_handlers aenq_handlers = {
40729b8d05b8SZbigniew Bodek     .handlers = {
40739b8d05b8SZbigniew Bodek 	    [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
407440621d71SMarcin Wojtas 	    [ENA_ADMIN_NOTIFICATION] = ena_notification,
40759b8d05b8SZbigniew Bodek 	    [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
4076*8cd86b51SOsama Abboud 	    [ENA_ADMIN_CONF_NOTIFICATIONS] = ena_conf_notification,
40779b8d05b8SZbigniew Bodek     },
40789b8d05b8SZbigniew Bodek     .unimplemented_handler = unimplemented_aenq_handler
40799b8d05b8SZbigniew Bodek };
40809b8d05b8SZbigniew Bodek 
40819b8d05b8SZbigniew Bodek /*********************************************************************
40829b8d05b8SZbigniew Bodek  *  FreeBSD Device Interface Entry Points
40839b8d05b8SZbigniew Bodek  *********************************************************************/
40849b8d05b8SZbigniew Bodek 
408582e558eaSDawid Gorecki static device_method_t ena_methods[] = { /* Device interface */
40869b8d05b8SZbigniew Bodek 	DEVMETHOD(device_probe, ena_probe),
40879b8d05b8SZbigniew Bodek 	DEVMETHOD(device_attach, ena_attach),
408882e558eaSDawid Gorecki 	DEVMETHOD(device_detach, ena_detach),
	DEVMETHOD_END
40899b8d05b8SZbigniew Bodek };
40909b8d05b8SZbigniew Bodek 
40919b8d05b8SZbigniew Bodek static driver_t ena_driver = {
409282e558eaSDawid Gorecki 	"ena",
409382e558eaSDawid Gorecki 	ena_methods,
409482e558eaSDawid Gorecki 	sizeof(struct ena_adapter),
40959b8d05b8SZbigniew Bodek };
40969b8d05b8SZbigniew Bodek 
40971dc1476cSJohn Baldwin DRIVER_MODULE(ena, pci, ena_driver, 0, 0);
409840abe76bSWarner Losh MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
4099329e817fSWarner Losh     nitems(ena_vendor_info_array) - 1);
41009b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, pci, 1, 1, 1);
41019b8d05b8SZbigniew Bodek MODULE_DEPEND(ena, ether, 1, 1, 1);
4102d17b7d87SMarcin Wojtas #ifdef DEV_NETMAP
4103d17b7d87SMarcin Wojtas MODULE_DEPEND(ena, netmap, 1, 1, 1);
4104d17b7d87SMarcin Wojtas #endif /* DEV_NETMAP */
41059b8d05b8SZbigniew Bodek 
41069b8d05b8SZbigniew Bodek /*********************************************************************/
4107