xref: /freebsd/sys/dev/ena/ena.c (revision a33ec635d1f6d574d54e6f6d74766d070183be4c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 #include "opt_rss.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/eventhandler.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/module.h>
43 #include <sys/rman.h>
44 #include <sys/smp.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
49 #include <sys/time.h>
50 
51 #include <vm/vm.h>
52 #include <vm/pmap.h>
53 
54 #include <machine/atomic.h>
55 #include <machine/bus.h>
56 #include <machine/in_cksum.h>
57 #include <machine/resource.h>
58 
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
61 
62 #include <net/bpf.h>
63 #include <net/ethernet.h>
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/if_var.h>
70 #include <net/if_vlan_var.h>
71 #include <netinet/in.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/if_ether.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip6.h>
76 #include <netinet/tcp.h>
77 #include <netinet/udp.h>
78 
79 #include "ena.h"
80 #include "ena_datapath.h"
81 #include "ena_rss.h"
82 #include "ena_sysctl.h"
83 
84 #ifdef DEV_NETMAP
85 #include "ena_netmap.h"
86 #endif /* DEV_NETMAP */
87 
88 /*********************************************************
89  *  Function prototypes
90  *********************************************************/
91 static int ena_probe(device_t);
92 static void ena_intr_msix_mgmnt(void *);
93 static void ena_free_pci_resources(struct ena_adapter *);
94 static int ena_change_mtu(if_t, int);
95 static inline void ena_alloc_counters(counter_u64_t *, int);
96 static inline void ena_free_counters(counter_u64_t *, int);
97 static inline void ena_reset_counters(counter_u64_t *, int);
98 static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *,
99     uint16_t);
100 static void ena_init_io_rings_basic(struct ena_adapter *);
101 static void ena_init_io_rings_advanced(struct ena_adapter *);
102 static void ena_init_io_rings(struct ena_adapter *);
103 static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
104 static void ena_free_all_io_rings_resources(struct ena_adapter *);
105 static int ena_setup_tx_dma_tag(struct ena_adapter *);
106 static int ena_free_tx_dma_tag(struct ena_adapter *);
107 static int ena_setup_rx_dma_tag(struct ena_adapter *);
108 static int ena_free_rx_dma_tag(struct ena_adapter *);
109 static void ena_release_all_tx_dmamap(struct ena_ring *);
110 static int ena_setup_tx_resources(struct ena_adapter *, int);
111 static void ena_free_tx_resources(struct ena_adapter *, int);
112 static int ena_setup_all_tx_resources(struct ena_adapter *);
113 static void ena_free_all_tx_resources(struct ena_adapter *);
114 static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
115 static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
116 static int ena_setup_all_rx_resources(struct ena_adapter *);
117 static void ena_free_all_rx_resources(struct ena_adapter *);
118 static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
119     struct ena_rx_buffer *);
120 static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
121     struct ena_rx_buffer *);
122 static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
123 static void ena_refill_all_rx_bufs(struct ena_adapter *);
124 static void ena_free_all_rx_bufs(struct ena_adapter *);
125 static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
126 static void ena_free_all_tx_bufs(struct ena_adapter *);
127 static void ena_destroy_all_tx_queues(struct ena_adapter *);
128 static void ena_destroy_all_rx_queues(struct ena_adapter *);
129 static void ena_destroy_all_io_queues(struct ena_adapter *);
130 static int ena_create_io_queues(struct ena_adapter *);
131 static int ena_handle_msix(void *);
132 static int ena_enable_msix(struct ena_adapter *);
133 static void ena_setup_mgmnt_intr(struct ena_adapter *);
134 static int ena_setup_io_intr(struct ena_adapter *);
135 static int ena_request_mgmnt_irq(struct ena_adapter *);
136 static int ena_request_io_irq(struct ena_adapter *);
137 static void ena_free_mgmnt_irq(struct ena_adapter *);
138 static void ena_free_io_irq(struct ena_adapter *);
139 static void ena_free_irqs(struct ena_adapter *);
140 static void ena_disable_msix(struct ena_adapter *);
141 static void ena_unmask_all_io_irqs(struct ena_adapter *);
142 static int ena_up_complete(struct ena_adapter *);
143 static uint64_t ena_get_counter(if_t, ift_counter);
144 static int ena_media_change(if_t);
145 static void ena_media_status(if_t, struct ifmediareq *);
146 static void ena_init(void *);
147 static int ena_ioctl(if_t, u_long, caddr_t);
148 static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
149 static void ena_update_host_info(struct ena_admin_host_info *, if_t);
150 static void ena_update_hwassist(struct ena_adapter *);
151 static void ena_setup_ifnet(device_t, struct ena_adapter *,
152     struct ena_com_dev_get_features_ctx *);
153 static int ena_enable_wc(device_t, struct resource *);
154 static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
155     struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
156 static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
157 static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
158     struct ena_com_dev_get_features_ctx *);
159 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
160 static void ena_config_host_info(struct ena_com_dev *, device_t);
161 static int ena_attach(device_t);
162 static int ena_detach(device_t);
163 static int ena_device_init(struct ena_adapter *, device_t,
164     struct ena_com_dev_get_features_ctx *, int *);
165 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
166 static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
167 static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *);
168 static int ena_copy_eni_metrics(struct ena_adapter *);
169 static int ena_copy_srd_metrics(struct ena_adapter *);
170 static int ena_copy_customer_metrics(struct ena_adapter *);
171 static void ena_timer_service(void *);
172 static enum ena_regs_reset_reason_types check_cdesc_in_tx_cq(struct ena_adapter *,
173     struct ena_ring *);
174 
175 
/* Full driver identification string: device name, module name and version. */
static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME
    " v" ENA_DRV_MODULE_VERSION;

/*
 * PCI vendor/device IDs handled by this driver; matched in ena_probe().
 * The table is terminated by an all-zero sentinel entry.
 */
static ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
	/* Last entry */
	{ 0, 0, 0 }
};

/*
 * Driver-wide shared/exclusive lock.
 * NOTE(review): presumably serializes driver-global operations across all
 * adapter instances — confirm against the lock's users elsewhere in the file.
 */
struct sx ena_global_lock;

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;
194 
195 void
196 ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
197 {
198 	if (error != 0)
199 		return;
200 	*(bus_addr_t *)arg = segs[0].ds_addr;
201 }
202 
/*
 * Allocate a DMA-coherent, zeroed memory region of at least 'size' bytes
 * (rounded up to whole pages) mapped as a single physically contiguous
 * segment, honoring the device's addressable DMA width.
 *
 * On success, dma->tag/map/vaddr/paddr are valid and the map has been
 * synced with PREREAD|PREWRITE; returns 0.  On failure, all handle fields
 * are reset and a bus_dma error code is returned.
 */
int
ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
    int mapflags, bus_size_t alignment, int domain)
{
	struct ena_adapter *adapter = device_get_softc(dmadev);
	device_t pdev = adapter->pdev;
	uint32_t maxsize;
	uint64_t dma_space_addr;
	int error;

	/* Round the requested size up to a whole number of pages. */
	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	/* Restrict DMA addresses to what the device can actually address. */
	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
	if (unlikely(dma_space_addr == 0))
		dma_space_addr = BUS_SPACE_MAXADDR;

	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
	    alignment, 0,      /* alignment, bounds 		*/
	    dma_space_addr,    /* lowaddr of exclusion window	*/
	    BUS_SPACE_MAXADDR, /* highaddr of exclusion window	*/
	    NULL, NULL,	       /* filter, filterarg 		*/
	    maxsize,	       /* maxsize 			*/
	    1,		       /* nsegments 			*/
	    maxsize,	       /* maxsegsize 			*/
	    BUS_DMA_ALLOCNOW,  /* flags 			*/
	    NULL,	       /* lockfunc 			*/
	    NULL,	       /* lockarg 			*/
	    &dma->tag);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
		goto fail_tag;
	}

	/* Pin the allocation to the requested NUMA domain. */
	error = bus_dma_tag_set_domain(dma->tag, domain);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n",
		    error);
		goto fail_map_create;
	}

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
		    (uintmax_t)size, error);
		goto fail_map_create;
	}

	/* paddr is filled in by ena_dmamap_callback; 0 means load failed. */
	dma->paddr = 0;
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    ena_dmamap_callback, &dma->paddr, mapflags);
	if (unlikely((error != 0) || (dma->paddr == 0))) {
		ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
		goto fail_map_load;
	}

	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail_map_load:
	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
	bus_dma_tag_destroy(dma->tag);
fail_tag:
	/* Leave the handle in a well-defined "empty" state on failure. */
	dma->tag = NULL;
	dma->vaddr = NULL;
	dma->paddr = 0;

	return (error);
}
275 
/*
 * Release the PCI BAR mappings and the MSI-X table resource acquired during
 * attach.  Each resource is checked for NULL, so this is safe to call with
 * any subset of them unallocated (e.g. on a partial attach failure).
 */
static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;

	/* Device memory BAR (mapped only for some placement policies). */
	if (adapter->memory != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
	}

	/* Register BAR. */
	if (adapter->registers != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY,
		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
	}

	/* MSI-X table resource. */
	if (adapter->msix != NULL) {
		bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid,
		    adapter->msix);
	}
}
296 
297 static int
298 ena_probe(device_t dev)
299 {
300 	ena_vendor_info_t *ent;
301 	uint16_t pci_vendor_id = 0;
302 	uint16_t pci_device_id = 0;
303 
304 	pci_vendor_id = pci_get_vendor(dev);
305 	pci_device_id = pci_get_device(dev);
306 
307 	ent = ena_vendor_info_array;
308 	while (ent->vendor_id != 0) {
309 		if ((pci_vendor_id == ent->vendor_id) &&
310 		    (pci_device_id == ent->device_id)) {
311 			ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id,
312 			    pci_device_id);
313 
314 			device_set_desc(dev, ENA_DEVICE_DESC);
315 			return (BUS_PROBE_DEFAULT);
316 		}
317 
318 		ent++;
319 	}
320 
321 	return (ENXIO);
322 }
323 
324 static int
325 ena_change_mtu(if_t ifp, int new_mtu)
326 {
327 	struct ena_adapter *adapter = if_getsoftc(ifp);
328 	device_t pdev = adapter->pdev;
329 	int rc;
330 
331 	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
332 		ena_log(pdev, ERR, "Invalid MTU setting. new_mtu: %d max mtu: %d min mtu: %d\n",
333 		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
334 		return (EINVAL);
335 	}
336 
337 	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
338 	if (likely(rc == 0)) {
339 		ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
340 		if_setmtu(ifp, new_mtu);
341 	} else {
342 		ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
343 	}
344 
345 	return (rc);
346 }
347 
348 static inline void
349 ena_alloc_counters(counter_u64_t *begin, int size)
350 {
351 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
352 
353 	for (; begin < end; ++begin)
354 		*begin = counter_u64_alloc(M_WAITOK);
355 }
356 
357 static inline void
358 ena_free_counters(counter_u64_t *begin, int size)
359 {
360 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
361 
362 	for (; begin < end; ++begin)
363 		counter_u64_free(*begin);
364 }
365 
366 static inline void
367 ena_reset_counters(counter_u64_t *begin, int size)
368 {
369 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
370 
371 	for (; begin < end; ++begin)
372 		counter_u64_zero(*begin);
373 }
374 
/*
 * Initialize the fields shared by Tx and Rx rings: the queue id,
 * back-pointers to the adapter and ena_com device, and the per-ring
 * interrupt bookkeeping state.
 */
static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{
	ring->qid = qid;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	/* Atomically cleared so readers on other CPUs see a consistent value. */
	atomic_store_8(&ring->first_interrupt, 0);
	ring->no_interrupt_event_cnt = 0;
}
385 
/*
 * First stage of I/O ring initialization: fill in plain fields copied from
 * the adapter/ena_dev state and wire up the ring <-> queue back-pointers.
 * No memory is allocated here (see ena_init_io_rings_advanced()).
 */
static void
ena_init_io_rings_basic(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	struct ena_que *que;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;

		/* Queue i bundles Tx ring i and Rx ring i together. */
		que = &adapter->que[i];
		que->adapter = adapter;
		que->id = i;
		que->tx_ring = txr;
		que->rx_ring = rxr;

		txr->que = que;
		rxr->que = que;

		rxr->empty_rx_queue = 0;
		/* Current global Rx mbuf size (may be tuned elsewhere). */
		rxr->rx_mbuf_sz = ena_mbuf_sz;
	}
}
421 
/*
 * Second stage of I/O ring initialization: allocate the per-ring resources
 * that require memory allocation - the Tx buf_ring, Tx/Rx statistics
 * counters and the Tx ring mutex.
 */
static void
ena_init_io_rings_advanced(struct ena_adapter *adapter)
{
	struct ena_ring *txr, *rxr;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* Allocate a buf ring */
		txr->buf_ring_size = adapter->buf_ring_size;
		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK,
		    &txr->ring_mtx);

		/* Allocate Tx statistics. */
		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
		    sizeof(txr->tx_stats));
		txr->tx_last_cleanup_ticks = ticks;

		/* Allocate Rx statistics. */
		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
		    sizeof(rxr->rx_stats));

		/* Initialize locks */
		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(adapter->pdev), i);
		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(adapter->pdev), i);

		/*
		 * Only the Tx ring mutex is initialized here; the Rx ring
		 * only gets its name filled in above.
		 */
		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
	}
}
455 
/*
 * Initialize all I/O rings.  The work is split in two phases: first fill in
 * plain fields copied from the adapter/ena_dev state (basic), then allocate
 * the resources that need it - mutexes, counters and buf_rings (advanced).
 */
static void
ena_init_io_rings(struct ena_adapter *adapter)
{
	ena_init_io_rings_basic(adapter);
	ena_init_io_rings_advanced(adapter);
}
468 
/*
 * Free the resources allocated by ena_init_io_rings_advanced() for one
 * queue: statistics counters, the Tx buf_ring and the Tx ring mutex.
 */
static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *txr = &adapter->tx_ring[qid];
	struct ena_ring *rxr = &adapter->rx_ring[qid];

	ena_free_counters((counter_u64_t *)&txr->tx_stats,
	    sizeof(txr->tx_stats));
	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
	    sizeof(rxr->rx_stats));

	/* Free the buf_ring under its lock, then destroy the lock itself. */
	ENA_RING_MTX_LOCK(txr);
	drbr_free(txr->br, M_DEVBUF);
	ENA_RING_MTX_UNLOCK(txr);

	mtx_destroy(&txr->ring_mtx);
}
486 
487 static void
488 ena_free_all_io_rings_resources(struct ena_adapter *adapter)
489 {
490 	int i;
491 
492 	for (i = 0; i < adapter->num_io_queues; i++)
493 		ena_free_io_ring_resources(adapter, i);
494 }
495 
/*
 * Create the DMA tag used for mapping Tx packet buffers, bounded by the
 * device's DMA width and the TSO limits.
 *
 * NOTE(review): nsegments is max_tx_sgl_size - 1 — presumably one entry is
 * reserved (e.g. for a header descriptor); confirm against the datapath.
 */
static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Tx buffers */
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
	    1, 0,				  /* alignment, bounds 	     */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg 	     */
	    ENA_TSO_MAXSIZE,			  /* maxsize 		     */
	    adapter->max_tx_sgl_size - 1,	  /* nsegments 		     */
	    ENA_TSO_MAXSIZE,			  /* maxsegsize 	     */
	    0,					  /* flags 		     */
	    NULL,				  /* lockfunc 		     */
	    NULL,				  /* lockfuncarg 	     */
	    &adapter->tx_buf_tag);

	return (ret);
}
517 
518 static int
519 ena_free_tx_dma_tag(struct ena_adapter *adapter)
520 {
521 	int ret;
522 
523 	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
524 
525 	if (likely(ret == 0))
526 		adapter->tx_buf_tag = NULL;
527 
528 	return (ret);
529 }
530 
/*
 * Create the DMA tag used for mapping Rx packet buffers; segment sizes are
 * bounded by the current Rx mbuf size and the device's Rx SGL limit.
 */
static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
	int ret;

	/* Create DMA tag for Rx buffers*/
	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
	    1, 0,				  /* alignment, bounds 	     */
	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
	    NULL, NULL,				  /* filter, filterarg 	     */
	    ena_mbuf_sz,			  /* maxsize 		     */
	    adapter->max_rx_sgl_size,		  /* nsegments 		     */
	    ena_mbuf_sz,			  /* maxsegsize 	     */
	    0,					  /* flags 		     */
	    NULL,				  /* lockfunc 		     */
	    NULL,				  /* lockarg 		     */
	    &adapter->rx_buf_tag);

	return (ret);
}
552 
553 static int
554 ena_free_rx_dma_tag(struct ena_adapter *adapter)
555 {
556 	int ret;
557 
558 	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
559 
560 	if (likely(ret == 0))
561 		adapter->rx_buf_tag = NULL;
562 
563 	return (ret);
564 }
565 
/*
 * Validate the outcome of a Tx completion req_id lookup.
 *
 * @tx_ring: ring on which the completion arrived
 * @req_id: request id reported by the device
 * @tx_req_id_rc: return code of the preceding lookup
 *
 * On lookup failure a device reset is triggered with a reason matching the
 * error (ENA_COM_FAULT -> malformed descriptor, ENA_COM_INVAL -> invalid
 * req_id, anything else -> generic invalid-req-id) and EFAULT is returned.
 * Returns 0 when the lookup succeeded.
 */
int
validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;

	if (unlikely(tx_req_id_rc != 0)) {
		if (tx_req_id_rc == ENA_COM_FAULT) {
			reset_reason = ENA_REGS_RESET_TX_DESCRIPTOR_MALFORMED;
			ena_log(adapter->pdev, ERR,
			    "TX descriptor malformed. req_id %hu qid %hu\n",
			    req_id, tx_ring->qid);
		} else if (tx_req_id_rc == ENA_COM_INVAL) {
			ena_log_nm(adapter->pdev, WARN,
			    "Invalid req_id %hu in qid %hu\n",
			    req_id, tx_ring->qid);
			counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
		}

		ena_trigger_reset(adapter, reset_reason);
		return (EFAULT);
	}

	return (0);
}
591 
/*
 * Destroy every per-buffer Tx DMA map of the ring, including the netmap
 * per-segment maps when netmap is enabled.  Map slots are NULL-checked and
 * cleared, so this is safe to call on a partially initialized ring and is
 * idempotent.
 */
static void
ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_tx_buffer *tx_info;
	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
	int i;
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	for (i = 0; i < tx_ring->ring_size; ++i) {
		tx_info = &tx_ring->tx_buffer_info[i];
#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_info->nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
				if (nm_info->map_seg[j] != NULL) {
					bus_dmamap_destroy(tx_tag,
					    nm_info->map_seg[j]);
					nm_info->map_seg[j] = NULL;
				}
			}
		}
#endif /* DEV_NETMAP */
		if (tx_info->dmamap != NULL) {
			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
			tx_info->dmamap = NULL;
		}
	}
}
624 
625 /**
626  * ena_setup_tx_resources - allocate Tx resources (Descriptors)
627  * @adapter: network interface device structure
628  * @qid: queue index
629  *
630  * Returns 0 on success, otherwise on failure.
631  **/
632 static int
633 ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
634 {
635 	device_t pdev = adapter->pdev;
636 	char thread_name[MAXCOMLEN + 1];
637 	struct ena_que *que = &adapter->que[qid];
638 	struct ena_ring *tx_ring = que->tx_ring;
639 	cpuset_t *cpu_mask = NULL;
640 	int size, i, err;
641 #ifdef DEV_NETMAP
642 	bus_dmamap_t *map;
643 	int j;
644 
645 	ena_netmap_reset_tx_ring(adapter, qid);
646 #endif /* DEV_NETMAP */
647 
648 	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
649 
650 	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
651 	if (unlikely(tx_ring->tx_buffer_info == NULL))
652 		return (ENOMEM);
653 
654 	size = sizeof(uint16_t) * tx_ring->ring_size;
655 	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
656 	if (unlikely(tx_ring->free_tx_ids == NULL))
657 		goto err_buf_info_free;
658 
659 	size = tx_ring->tx_max_header_size;
660 	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
661 	    M_NOWAIT | M_ZERO);
662 	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
663 		goto err_tx_ids_free;
664 
665 	/* Req id stack for TX OOO completions */
666 	for (i = 0; i < tx_ring->ring_size; i++)
667 		tx_ring->free_tx_ids[i] = i;
668 
669 	/* Reset TX statistics. */
670 	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
671 	    sizeof(tx_ring->tx_stats));
672 
673 	tx_ring->next_to_use = 0;
674 	tx_ring->next_to_clean = 0;
675 	tx_ring->acum_pkts = 0;
676 
677 	/* Make sure that drbr is empty */
678 	ENA_RING_MTX_LOCK(tx_ring);
679 	drbr_flush(adapter->ifp, tx_ring->br);
680 	ENA_RING_MTX_UNLOCK(tx_ring);
681 
682 	/* ... and create the buffer DMA maps */
683 	for (i = 0; i < tx_ring->ring_size; i++) {
684 		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
685 		    &tx_ring->tx_buffer_info[i].dmamap);
686 		if (unlikely(err != 0)) {
687 			ena_log(pdev, ERR,
688 			    "Unable to create Tx DMA map for buffer %d\n", i);
689 			goto err_map_release;
690 		}
691 
692 #ifdef DEV_NETMAP
693 		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
694 			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
695 			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
696 				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
697 				    &map[j]);
698 				if (unlikely(err != 0)) {
699 					ena_log(pdev, ERR,
700 					    "Unable to create Tx DMA for buffer %d %d\n",
701 					    i, j);
702 					goto err_map_release;
703 				}
704 			}
705 		}
706 #endif /* DEV_NETMAP */
707 	}
708 
709 	/* Allocate taskqueues */
710 	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
711 	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
712 	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
713 	if (unlikely(tx_ring->enqueue_tq == NULL)) {
714 		ena_log(pdev, ERR,
715 		    "Unable to create taskqueue for enqueue task\n");
716 		i = tx_ring->ring_size;
717 		goto err_map_release;
718 	}
719 
720 	tx_ring->running = true;
721 
722 #ifdef RSS
723 	cpu_mask = &que->cpu_mask;
724 	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
725 	    device_get_nameunit(adapter->pdev), que->cpu);
726 #else
727 	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
728 	    device_get_nameunit(adapter->pdev), que->id);
729 #endif
730 	taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
731 	    cpu_mask, "%s", thread_name);
732 
733 	return (0);
734 
735 err_map_release:
736 	ena_release_all_tx_dmamap(tx_ring);
737 err_tx_ids_free:
738 	free(tx_ring->free_tx_ids, M_DEVBUF);
739 	tx_ring->free_tx_ids = NULL;
740 err_buf_info_free:
741 	free(tx_ring->tx_buffer_info, M_DEVBUF);
742 	tx_ring->tx_buffer_info = NULL;
743 
744 	return (ENOMEM);
745 }
746 
/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources: the deferred-start taskqueue, the
 * buf_ring contents, per-buffer DMA maps (incl. netmap segment maps),
 * pending mbufs and the bookkeeping arrays.
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	/*
	 * Cancel the enqueue task; if it was already running, drain until it
	 * finishes, then retry the cancel until nothing is left pending.
	 */
	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	taskqueue_free(tx_ring->enqueue_tq);

	ENA_RING_MTX_LOCK(tx_ring);
	/* Flush buffer ring, */
	drbr_flush(adapter->ifp, tx_ring->br);

	/* Free buffer DMA maps, */
	for (int i = 0; i < tx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				/* Unload only segments that still hold a buffer. */
				if (nm_info->socket_buf_idx[j] != 0) {
					bus_dmamap_sync(adapter->tx_buf_tag,
					    nm_info->map_seg[j],
					    BUS_DMASYNC_POSTWRITE);
					ena_netmap_unload(adapter,
					    nm_info->map_seg[j]);
				}
				bus_dmamap_destroy(adapter->tx_buf_tag,
				    nm_info->map_seg[j]);
				nm_info->socket_buf_idx[j] = 0;
			}
		}
#endif /* DEV_NETMAP */

		/* m_freem(NULL) is a no-op, so unfilled slots are fine. */
		m_freem(tx_ring->tx_buffer_info[i].mbuf);
		tx_ring->tx_buffer_info[i].mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* And free allocated memory. */
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;

	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
	tx_ring->push_buf_intermediate_buf = NULL;
}
814 
815 /**
816  * ena_setup_all_tx_resources - allocate all queues Tx resources
817  * @adapter: network interface device structure
818  *
819  * Returns 0 on success, otherwise on failure.
820  **/
821 static int
822 ena_setup_all_tx_resources(struct ena_adapter *adapter)
823 {
824 	int i, rc;
825 
826 	for (i = 0; i < adapter->num_io_queues; i++) {
827 		rc = ena_setup_tx_resources(adapter, i);
828 		if (rc != 0) {
829 			ena_log(adapter->pdev, ERR,
830 			    "Allocation for Tx Queue %u failed\n", i);
831 			goto err_setup_tx;
832 		}
833 	}
834 
835 	return (0);
836 
837 err_setup_tx:
838 	/* Rewind the index freeing the rings as we go */
839 	while (i--)
840 		ena_free_tx_resources(adapter, i);
841 	return (rc);
842 }
843 
844 /**
845  * ena_free_all_tx_resources - Free Tx Resources for All Queues
846  * @adapter: network interface device structure
847  *
848  * Free all transmit software resources
849  **/
850 static void
851 ena_free_all_tx_resources(struct ena_adapter *adapter)
852 {
853 	int i;
854 
855 	for (i = 0; i < adapter->num_io_queues; i++)
856 		ena_free_tx_resources(adapter, i);
857 }
858 
859 /**
860  * ena_setup_rx_resources - allocate Rx resources (Descriptors)
861  * @adapter: network interface device structure
862  * @qid: queue index
863  *
864  * Returns 0 on success, otherwise on failure.
865  **/
866 static int
867 ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
868 {
869 	device_t pdev = adapter->pdev;
870 	struct ena_que *que = &adapter->que[qid];
871 	struct ena_ring *rx_ring = que->rx_ring;
872 	int size, err, i;
873 
874 	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
875 
876 #ifdef DEV_NETMAP
877 	ena_netmap_reset_rx_ring(adapter, qid);
878 	rx_ring->initialized = false;
879 #endif /* DEV_NETMAP */
880 
881 	/*
882 	 * Alloc extra element so in rx path
883 	 * we can always prefetch rx_info + 1
884 	 */
885 	size += sizeof(struct ena_rx_buffer);
886 
887 	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
888 
889 	size = sizeof(uint16_t) * rx_ring->ring_size;
890 	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
891 
892 	for (i = 0; i < rx_ring->ring_size; i++)
893 		rx_ring->free_rx_ids[i] = i;
894 
895 	/* Reset RX statistics. */
896 	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
897 	    sizeof(rx_ring->rx_stats));
898 
899 	rx_ring->next_to_clean = 0;
900 	rx_ring->next_to_use = 0;
901 
902 	/* ... and create the buffer DMA maps */
903 	for (i = 0; i < rx_ring->ring_size; i++) {
904 		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
905 		    &(rx_ring->rx_buffer_info[i].map));
906 		if (err != 0) {
907 			ena_log(pdev, ERR,
908 			    "Unable to create Rx DMA map for buffer %d\n", i);
909 			goto err_buf_info_unmap;
910 		}
911 	}
912 
913 	/* Create LRO for the ring */
914 	if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) {
915 		int err = tcp_lro_init(&rx_ring->lro);
916 		if (err != 0) {
917 			ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
918 			    qid);
919 		} else {
920 			ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
921 			    qid);
922 			rx_ring->lro.ifp = adapter->ifp;
923 		}
924 	}
925 
926 	return (0);
927 
928 err_buf_info_unmap:
929 	while (i--) {
930 		bus_dmamap_destroy(adapter->rx_buf_tag,
931 		    rx_ring->rx_buffer_info[i].map);
932 	}
933 
934 	free(rx_ring->free_rx_ids, M_DEVBUF);
935 	rx_ring->free_rx_ids = NULL;
936 	free(rx_ring->rx_buffer_info, M_DEVBUF);
937 	rx_ring->rx_buffer_info = NULL;
938 	return (ENOMEM);
939 }
940 
/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources: pending mbufs, per-buffer DMA maps,
 * the LRO context and the bookkeeping arrays.
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	/* Free buffer DMA maps, */
	for (int i = 0; i < rx_ring->ring_size; i++) {
		/* Sync for CPU access, then free the mbuf (NULL is a no-op). */
		bus_dmamap_sync(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
		m_freem(rx_ring->rx_buffer_info[i].mbuf);
		rx_ring->rx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	/* free LRO resources, */
	tcp_lro_free(&rx_ring->lro);

	/* free allocated memory */
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
}
975 
976 /**
977  * ena_setup_all_rx_resources - allocate all queues Rx resources
978  * @adapter: network interface device structure
979  *
980  * Returns 0 on success, otherwise on failure.
981  **/
982 static int
983 ena_setup_all_rx_resources(struct ena_adapter *adapter)
984 {
985 	int i, rc = 0;
986 
987 	for (i = 0; i < adapter->num_io_queues; i++) {
988 		rc = ena_setup_rx_resources(adapter, i);
989 		if (rc != 0) {
990 			ena_log(adapter->pdev, ERR,
991 			    "Allocation for Rx Queue %u failed\n", i);
992 			goto err_setup_rx;
993 		}
994 	}
995 	return (0);
996 
997 err_setup_rx:
998 	/* rewind the index freeing the rings as we go */
999 	while (i--)
1000 		ena_free_rx_resources(adapter, i);
1001 	return (rc);
1002 }
1003 
1004 /**
1005  * ena_free_all_rx_resources - Free Rx resources for all queues
1006  * @adapter: network interface device structure
1007  *
1008  * Free all receive software resources
1009  **/
1010 static void
1011 ena_free_all_rx_resources(struct ena_adapter *adapter)
1012 {
1013 	int i;
1014 
1015 	for (i = 0; i < adapter->num_io_queues; i++)
1016 		ena_free_rx_resources(adapter, i);
1017 }
1018 
/*
 * Allocate an mbuf for one Rx descriptor slot and DMA-map it.
 *
 * A jumbo cluster of rx_ring->rx_mbuf_sz bytes is tried first, with a
 * fall-back to a standard MCLBYTES cluster.  On success the bus address
 * and length are published in rx_info->ena_buf.
 *
 * Returns 0 on success (also when the slot already holds an mbuf),
 * ENOMEM when no mbuf could be allocated, EFAULT when DMA mapping failed.
 */
static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	device_t pdev = adapter->pdev;
	struct ena_com_buf *ena_buf;
	bus_dma_segment_t segs[1];
	int nsegs, error;
	int mlen;

	/* If a previously allocated mbuf is still unused, keep it. */
	if (unlikely(rx_info->mbuf != NULL))
		return (0);

	/* Get mbuf using UMA allocator */
	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    rx_ring->rx_mbuf_sz);

	if (unlikely(rx_info->mbuf == NULL)) {
		/* Jumbo cluster failed; fall back to a standard cluster. */
		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(rx_info->mbuf == NULL)) {
			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
			return (ENOMEM);
		}
		mlen = MCLBYTES;
	} else {
		mlen = rx_ring->rx_mbuf_sz;
	}
	/* Set mbuf length*/
	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

	/* Map packets for DMA */
	ena_log(pdev, DBG,
	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	/* The buffer must fit into a single DMA segment. */
	if (unlikely((error != 0) || (nsegs != 1))) {
		ena_log(pdev, WARN,
		    "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
		goto exit;
	}

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	/* Publish the mapping so it can be handed to the device. */
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = segs[0].ds_addr;
	ena_buf->len = mlen;

	ena_log(pdev, DBG,
	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

	return (0);

exit:
	/* Mapping failed - release the mbuf so the slot stays empty. */
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
	return (EFAULT);
}
1081 
1082 static void
1083 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1084     struct ena_rx_buffer *rx_info)
1085 {
1086 	if (rx_info->mbuf == NULL) {
1087 		ena_log(adapter->pdev, WARN,
1088 		    "Trying to free unallocated buffer\n");
1089 		return;
1090 	}
1091 
1092 	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
1093 	    BUS_DMASYNC_POSTREAD);
1094 	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
1095 	m_freem(rx_info->mbuf);
1096 	rx_info->mbuf = NULL;
1097 }
1098 
1099 /**
1100  * ena_refill_rx_bufs - Refills ring with descriptors
1101  * @rx_ring: the ring which we want to feed with free descriptors
1102  * @num: number of descriptors to refill
1103  * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1104  **/
1105 int
1106 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1107 {
1108 	struct ena_adapter *adapter = rx_ring->adapter;
1109 	device_t pdev = adapter->pdev;
1110 	uint16_t next_to_use, req_id;
1111 	uint32_t i;
1112 	int rc;
1113 
1114 	ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid);
1115 
1116 	next_to_use = rx_ring->next_to_use;
1117 
1118 	for (i = 0; i < num; i++) {
1119 		struct ena_rx_buffer *rx_info;
1120 
1121 		ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n",
1122 		    next_to_use);
1123 
1124 		req_id = rx_ring->free_rx_ids[next_to_use];
1125 		rx_info = &rx_ring->rx_buffer_info[req_id];
1126 #ifdef DEV_NETMAP
1127 		if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
1128 			rc = ena_netmap_alloc_rx_slot(adapter, rx_ring,
1129 			    rx_info);
1130 		else
1131 #endif /* DEV_NETMAP */
1132 			rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1133 		if (unlikely(rc != 0)) {
1134 			ena_log_io(pdev, WARN,
1135 			    "failed to alloc buffer for rx queue %d\n",
1136 			    rx_ring->qid);
1137 			break;
1138 		}
1139 		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1140 		    &rx_info->ena_buf, req_id);
1141 		if (unlikely(rc != 0)) {
1142 			ena_log_io(pdev, WARN,
1143 			    "failed to add buffer for rx queue %d\n",
1144 			    rx_ring->qid);
1145 			break;
1146 		}
1147 		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1148 		    rx_ring->ring_size);
1149 	}
1150 
1151 	if (unlikely(i < num)) {
1152 		counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1153 		ena_log_io(pdev, WARN,
1154 		    "refilled rx qid %d with only %d mbufs (from %d)\n",
1155 		    rx_ring->qid, i, num);
1156 	}
1157 
1158 	if (likely(i != 0))
1159 		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1160 
1161 	rx_ring->next_to_use = next_to_use;
1162 	return (i);
1163 }
1164 
/*
 * Apply a new Tx buf ring (drbr) size.
 *
 * Stops the interface, rebuilds the advanced I/O ring resources with the
 * new size and restarts the interface if it was previously running.  If
 * the restart fails, the old size is restored and a device reset is
 * triggered.  Returns 0 on success or the error from ena_up().
 */
int
ena_update_buf_ring_size(struct ena_adapter *adapter,
    uint32_t new_buf_ring_size)
{
	uint32_t old_buf_ring_size;
	int rc = 0;
	bool dev_was_up;

	/* Remember the old size in case ena_up() fails below. */
	old_buf_ring_size = adapter->buf_ring_size;
	adapter->buf_ring_size = new_buf_ring_size;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	ena_down(adapter);

	/* Reconfigure buf ring for all Tx rings. */
	ena_free_all_io_rings_resources(adapter);
	ena_init_io_rings_advanced(adapter);
	if (dev_was_up) {
		/*
		 * If ena_up() fails, it's not because of recent buf_ring size
		 * changes. Because of that, we just want to revert old drbr
		 * value and trigger the reset because something else had to
		 * go wrong.
		 */
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
			    new_buf_ring_size, old_buf_ring_size);

			/* Revert old size and trigger the reset */
			adapter->buf_ring_size = old_buf_ring_size;
			ena_free_all_io_rings_resources(adapter);
			ena_init_io_rings_advanced(adapter);

			ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
			    adapter);
			ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
		}
	}

	return (rc);
}
1208 
/*
 * Apply new Tx/Rx ring sizes.
 *
 * Stops the interface, reconfigures the basic ring parameters and brings
 * the interface back up if it was running.  On failure the old sizes are
 * restored and ena_up() is retried; if that also fails a device reset is
 * triggered.  Returns 0 on success or the error from ena_up().
 */
int
ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
    uint32_t new_rx_size)
{
	uint32_t old_tx_size, old_rx_size;
	int rc = 0;
	bool dev_was_up;

	/* Remember the old sizes so a failed ena_up() can be reverted. */
	old_tx_size = adapter->requested_tx_ring_size;
	old_rx_size = adapter->requested_rx_ring_size;
	adapter->requested_tx_ring_size = new_tx_size;
	adapter->requested_rx_ring_size = new_rx_size;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	ena_down(adapter);

	/* Configure queues with new size. */
	ena_init_io_rings_basic(adapter);
	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
			    new_tx_size, new_rx_size, old_tx_size, old_rx_size);

			/* Revert old size. */
			adapter->requested_tx_ring_size = old_tx_size;
			adapter->requested_rx_ring_size = old_rx_size;
			ena_init_io_rings_basic(adapter);

			/* And try again. */
			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				ena_log(adapter->pdev, ERR,
				    "Failed to revert old queue sizes. Triggering device reset.\n");
				/*
				 * If we've failed again, something had to go
				 * wrong. After reset, the device should try to
				 * go up
				 */
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}

	return (rc);
}
1259 
/*
 * Rebuild all I/O rings for a new queue count.  The RSS configuration is
 * destroyed so the indirection table gets re-created to match the new
 * number of queues.  Callers bring the device down first (see
 * ena_update_io_queue_nb()).
 */
static void
ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
{
	ena_free_all_io_rings_resources(adapter);
	/* Force indirection table to be reinitialized */
	ena_com_rss_destroy(adapter->ena_dev);

	adapter->num_io_queues = num;
	ena_init_io_rings(adapter);
}
1270 
/*
 * Apply a new base CPU for I/O IRQ affinity.  Restarts the interface if
 * it was up; on failure the previous value is restored and ena_up() is
 * retried, falling back to a device reset when that also fails.
 * Returns 0 on success or the error from ena_up().
 */
int
ena_update_base_cpu(struct ena_adapter *adapter, int new_num)
{
	int old_num;
	int rc = 0;
	bool dev_was_up;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	old_num = adapter->irq_cpu_base;

	ena_down(adapter);

	adapter->irq_cpu_base = new_num;

	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device %d IRQ base CPU. "
			    "Reverting to previous value: %d\n",
			    new_num, old_num);

			adapter->irq_cpu_base = old_num;

			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				ena_log(adapter->pdev, ERR,
				    "Failed to revert to previous setup."
				    "Triggering device reset.\n");
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}
	return (rc);
}
1309 
/*
 * Apply a new CPU stride for I/O IRQ affinity.  Restarts the interface
 * if it was up; on failure the previous value is restored and ena_up()
 * is retried, falling back to a device reset when that also fails.
 * Returns 0 on success or the error from ena_up().
 */
int
ena_update_cpu_stride(struct ena_adapter *adapter, uint32_t new_num)
{
	uint32_t old_num;
	int rc = 0;
	bool dev_was_up;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	old_num = adapter->irq_cpu_stride;

	ena_down(adapter);

	adapter->irq_cpu_stride = new_num;

	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device %d IRQ CPU stride. "
			    "Reverting to previous value: %d\n",
			    new_num, old_num);

			adapter->irq_cpu_stride = old_num;

			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				ena_log(adapter->pdev, ERR,
				    "Failed to revert to previous setup."
				    "Triggering device reset.\n");
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}
	return (rc);
}
1348 
/*
 * Apply a new number of I/O queues.  Caller should sanitize new_num.
 * Restarts the interface if it was up; on failure the old count is
 * restored and ena_up() retried, falling back to a device reset when
 * that also fails.  Returns 0 on success or the error from ena_up().
 */
int
ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
{
	uint32_t old_num;
	int rc = 0;
	bool dev_was_up;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	old_num = adapter->num_io_queues;
	ena_down(adapter);

	ena_update_io_rings(adapter, new_num);

	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device with %u IO queues. "
			    "Reverting to previous value: %u\n",
			    new_num, old_num);

			ena_update_io_rings(adapter, old_num);

			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				ena_log(adapter->pdev, ERR,
				    "Failed to revert to previous setup IO "
				    "queues. Triggering device reset.\n");
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}

	return (rc);
}
1388 
/*
 * Release all Rx buffers attached to ring @qid: regular mbufs are
 * unmapped and freed, and when netmap is enabled on a live interface the
 * netmap-owned slots are given back as well.
 */
static void
ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	unsigned int i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->mbuf != NULL)
			ena_free_rx_mbuf(adapter, rx_ring, rx_info);
#ifdef DEV_NETMAP
		/* Skip netmap slot cleanup when the interface is dying. */
		if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
		    (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) {
			if (rx_info->netmap_buf_idx != 0)
				ena_netmap_free_rx_slot(adapter, rx_ring,
				    rx_info);
		}
#endif /* DEV_NETMAP */
	}
}
1410 
1411 /**
1412  * ena_refill_all_rx_bufs - allocate all queues Rx buffers
1413  * @adapter: network interface device structure
1414  *
1415  */
1416 static void
1417 ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1418 {
1419 	struct ena_ring *rx_ring;
1420 	int i, rc, bufs_num;
1421 
1422 	for (i = 0; i < adapter->num_io_queues; i++) {
1423 		rx_ring = &adapter->rx_ring[i];
1424 		bufs_num = rx_ring->ring_size - 1;
1425 		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1426 		if (unlikely(rc != bufs_num))
1427 			ena_log_io(adapter->pdev, WARN,
1428 			    "refilling Queue %d failed. "
1429 			    "Allocated %d buffers from: %d\n",
1430 			    i, rc, bufs_num);
1431 #ifdef DEV_NETMAP
1432 		rx_ring->initialized = true;
1433 #endif /* DEV_NETMAP */
1434 	}
1435 }
1436 
1437 static void
1438 ena_free_all_rx_bufs(struct ena_adapter *adapter)
1439 {
1440 	int i;
1441 
1442 	for (i = 0; i < adapter->num_io_queues; i++)
1443 		ena_free_rx_bufs(adapter, i);
1444 }
1445 
1446 /**
1447  * ena_free_tx_bufs - Free Tx Buffers per Queue
1448  * @adapter: network interface device structure
1449  * @qid: queue index
1450  **/
1451 static void
1452 ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
1453 {
1454 	bool print_once = true;
1455 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1456 
1457 	ENA_RING_MTX_LOCK(tx_ring);
1458 	for (int i = 0; i < tx_ring->ring_size; i++) {
1459 		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1460 
1461 		if (tx_info->mbuf == NULL)
1462 			continue;
1463 
1464 		if (print_once) {
1465 			ena_log(adapter->pdev, WARN,
1466 			    "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
1467 			    i);
1468 			print_once = false;
1469 		} else {
1470 			ena_log(adapter->pdev, DBG,
1471 			    "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
1472 			    i);
1473 		}
1474 
1475 		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
1476 		    BUS_DMASYNC_POSTWRITE);
1477 		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
1478 
1479 		m_free(tx_info->mbuf);
1480 		tx_info->mbuf = NULL;
1481 	}
1482 	ENA_RING_MTX_UNLOCK(tx_ring);
1483 }
1484 
1485 static void
1486 ena_free_all_tx_bufs(struct ena_adapter *adapter)
1487 {
1488 	for (int i = 0; i < adapter->num_io_queues; i++)
1489 		ena_free_tx_bufs(adapter, i);
1490 }
1491 
1492 static void
1493 ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1494 {
1495 	uint16_t ena_qid;
1496 	int i;
1497 
1498 	for (i = 0; i < adapter->num_io_queues; i++) {
1499 		ena_qid = ENA_IO_TXQ_IDX(i);
1500 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1501 	}
1502 }
1503 
1504 static void
1505 ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1506 {
1507 	uint16_t ena_qid;
1508 	int i;
1509 
1510 	for (i = 0; i < adapter->num_io_queues; i++) {
1511 		ena_qid = ENA_IO_RXQ_IDX(i);
1512 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1513 	}
1514 }
1515 
1516 static void
1517 ena_destroy_all_io_queues(struct ena_adapter *adapter)
1518 {
1519 	struct ena_que *queue;
1520 	int i;
1521 
1522 	for (i = 0; i < adapter->num_io_queues; i++) {
1523 		queue = &adapter->que[i];
1524 		while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL))
1525 			taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task);
1526 		taskqueue_free(queue->cleanup_tq);
1527 	}
1528 
1529 	ena_destroy_all_tx_queues(adapter);
1530 	ena_destroy_all_rx_queues(adapter);
1531 }
1532 
/*
 * Create the device-side Tx and Rx I/O queues and the per-queue cleanup
 * taskqueues.  Tx and Rx queues of index i are created with the same
 * MSI-X vector, ENA_IO_IRQ_IDX(i).  On failure every queue created so
 * far is destroyed and ENXIO is returned.
 */
static int
ena_create_io_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *ring;
	struct ena_que *queue;
	uint16_t ena_qid;
	uint32_t msix_vector;
	cpuset_t *cpu_mask = NULL;
	int rc, i;

	/* Create TX queues */
	for (i = 0; i < adapter->num_io_queues; i++) {
		msix_vector = ENA_IO_IRQ_IDX(i);
		ena_qid = ENA_IO_TXQ_IDX(i);
		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
		ctx.queue_size = adapter->requested_tx_ring_size;
		ctx.msix_vector = msix_vector;
		ctx.qid = ena_qid;
		ctx.numa_node = adapter->que[i].domain;

		rc = ena_com_create_io_queue(ena_dev, &ctx);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Failed to create io TX queue #%d rc: %d\n", i, rc);
			goto err_tx;
		}
		ring = &adapter->tx_ring[i];
		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
		    &ring->ena_com_io_sq, &ring->ena_com_io_cq);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Failed to get TX queue handlers. TX queue num"
			    " %d rc: %d\n",
			    i, rc);
			ena_com_destroy_io_queue(ena_dev, ena_qid);
			goto err_tx;
		}

		if (ctx.numa_node >= 0) {
			ena_com_update_numa_node(ring->ena_com_io_cq,
			    ctx.numa_node);
		}
	}

	/* Create RX queues */
	for (i = 0; i < adapter->num_io_queues; i++) {
		msix_vector = ENA_IO_IRQ_IDX(i);
		ena_qid = ENA_IO_RXQ_IDX(i);
		ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
		ctx.queue_size = adapter->requested_rx_ring_size;
		ctx.msix_vector = msix_vector;
		ctx.qid = ena_qid;
		ctx.numa_node = adapter->que[i].domain;

		rc = ena_com_create_io_queue(ena_dev, &ctx);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to create io RX queue[%d] rc: %d\n", i, rc);
			goto err_rx;
		}

		ring = &adapter->rx_ring[i];
		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
		    &ring->ena_com_io_sq, &ring->ena_com_io_cq);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to get RX queue handlers. RX queue num"
			    " %d rc: %d\n",
			    i, rc);
			ena_com_destroy_io_queue(ena_dev, ena_qid);
			goto err_rx;
		}

		if (ctx.numa_node >= 0) {
			ena_com_update_numa_node(ring->ena_com_io_cq,
			    ctx.numa_node);
		}
	}

	/* Spawn one cleanup taskqueue per queue pair. */
	for (i = 0; i < adapter->num_io_queues; i++) {
		queue = &adapter->que[i];

		NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
		queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
		    M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);

#ifdef RSS
		cpu_mask = &queue->cpu_mask;
#endif
		taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET,
		    cpu_mask, "%s queue %d cleanup",
		    device_get_nameunit(adapter->pdev), i);
	}

	return (0);

err_rx:
	/* Destroy the Rx queues created before the failure... */
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
	/* ...then fall through and destroy all Tx queues. */
	i = adapter->num_io_queues;
err_tx:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return (ENXIO);
}
1643 
1644 /*********************************************************************
1645  *
1646  *  MSIX & Interrupt Service routine
1647  *
1648  **********************************************************************/
1649 
1650 /**
1651  * ena_handle_msix - MSIX Interrupt Handler for admin/async queue
1652  * @arg: interrupt number
1653  **/
1654 static void
1655 ena_intr_msix_mgmnt(void *arg)
1656 {
1657 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
1658 
1659 	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1660 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
1661 		ena_com_aenq_intr_handler(adapter->ena_dev, arg);
1662 }
1663 
1664 /**
1665  * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
1666  * @arg: queue
1667  **/
1668 static int
1669 ena_handle_msix(void *arg)
1670 {
1671 	struct ena_que *queue = arg;
1672 	struct ena_adapter *adapter = queue->adapter;
1673 	if_t ifp = adapter->ifp;
1674 
1675 	if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1676 		return (FILTER_STRAY);
1677 
1678 	taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
1679 
1680 	return (FILTER_HANDLED);
1681 }
1682 
/*
 * Allocate and enable MSI-X vectors: the management vector plus one per
 * I/O queue, up to the device maximum.  The system may grant fewer
 * vectors than requested; the driver proceeds with a reduced count
 * unless only the admin vector(s) could be allocated.  Returns 0 on
 * success, EINVAL when MSI-X is already enabled, ENOSPC on allocation
 * failure.
 */
static int
ena_enable_msix(struct ena_adapter *adapter)
{
	device_t dev = adapter->pdev;
	int msix_vecs, msix_req;
	int i, rc = 0;

	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
		ena_log(dev, ERR, "Error, MSI-X is already enabled\n");
		return (EINVAL);
	}

	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);

	adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);

	for (i = 0; i < msix_vecs; i++) {
		adapter->msix_entries[i].entry = i;
		/* Vectors must start from 1 */
		adapter->msix_entries[i].vector = i + 1;
	}

	/* pci_alloc_msix() may lower msix_vecs to what is available. */
	msix_req = msix_vecs;
	rc = pci_alloc_msix(dev, &msix_vecs);
	if (unlikely(rc != 0)) {
		ena_log(dev, ERR, "Failed to enable MSIX, vectors %d rc %d\n",
		    msix_vecs, rc);

		rc = ENOSPC;
		goto err_msix_free;
	}

	if (msix_vecs != msix_req) {
		/* Without at least one I/O vector we cannot operate. */
		if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
			ena_log(dev, ERR,
			    "Not enough number of MSI-x allocated: %d\n",
			    msix_vecs);
			pci_release_msi(dev);
			rc = ENOSPC;
			goto err_msix_free;
		}
		ena_log(dev, ERR,
		    "Enable only %d MSI-x (out of %d), reduce "
		    "the number of queues\n",
		    msix_vecs, msix_req);
	}

	adapter->msix_vecs = msix_vecs;
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);

	return (0);

err_msix_free:
	free(adapter->msix_entries, M_DEVBUF);
	adapter->msix_entries = NULL;

	return (rc);
}
1745 
1746 static void
1747 ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1748 {
1749 	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE,
1750 	    "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev));
1751 	/*
1752 	 * Handler is NULL on purpose, it will be set
1753 	 * when mgmnt interrupt is acquired
1754 	 */
1755 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
1756 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1757 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1758 	    adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
1759 }
1760 
/*
 * Fill the interrupt table entries for all I/O queues: name, handler,
 * MSI-X vector and, when configured, the CPU each queue should be bound
 * to.  Returns EINVAL if MSI-X entries have not been allocated yet.
 */
static int
ena_setup_io_intr(struct ena_adapter *adapter)
{
#ifdef RSS
	int num_buckets = rss_getnumbuckets();
	static int last_bind = 0;
	int cur_bind;
	int idx;
#endif
	int irq_idx;

	if (adapter->msix_entries == NULL)
		return (EINVAL);

#ifdef RSS
	/*
	 * Spread consecutive adapters over the RSS buckets: continue the
	 * binding from where the previous adapter stopped (last_bind is
	 * shared, static state).
	 */
	if (adapter->first_bind < 0) {
		adapter->first_bind = last_bind;
		last_bind = (last_bind + adapter->num_io_queues) % num_buckets;
	}
	cur_bind = adapter->first_bind;
#endif

	for (int i = 0; i < adapter->num_io_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
		    "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
		adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
		adapter->irq_tbl[irq_idx].data = &adapter->que[i];
		adapter->irq_tbl[irq_idx].vector =
		    adapter->msix_entries[irq_idx].vector;
		ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n",
		    adapter->msix_entries[irq_idx].vector);

		/* Explicit base/stride CPU affinity was configured. */
		if (adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
			adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
			    (unsigned)(adapter->irq_cpu_base +
			    i * adapter->irq_cpu_stride) % (unsigned)mp_ncpus;
			CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
		}

#ifdef RSS
		/* With RSS the binding follows the RSS bucket CPUs instead. */
		adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
		    rss_getcpu(cur_bind);
		cur_bind = (cur_bind + 1) % num_buckets;
		CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);

		/* Record the memory domain the chosen CPU belongs to. */
		for (idx = 0; idx < MAXMEMDOM; ++idx) {
			if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx]))
				break;
		}
		adapter->que[i].domain = idx;
#else
		adapter->que[i].domain = -1;
#endif
	}

	return (0);
}
1820 
/*
 * Allocate the management interrupt resource and attach the admin/AENQ
 * handler (ena_intr_msix_mgmnt).  On failure the allocated resource is
 * released before returning.  Returns 0 on success, ENXIO or the
 * bus_setup_intr() error otherwise.
 */
static int
ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	struct ena_irq *irq;
	unsigned long flags;
	int rc, rcc;

	flags = RF_ACTIVE | RF_SHAREABLE;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
	    &irq->vector, flags);

	if (unlikely(irq->res == NULL)) {
		ena_log(pdev, ERR, "could not allocate irq vector: %d\n",
		    irq->vector);
		return (ENXIO);
	}

	/* The handler is attached directly; irq->handler stays NULL here. */
	rc = bus_setup_intr(adapter->pdev, irq->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data,
	    &irq->cookie);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "failed to register interrupt handler for irq %ju: %d\n",
		    rman_get_start(irq->res), rc);
		goto err_res_free;
	}
	irq->requested = true;

	return (rc);

err_res_free:
	ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector);
	rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector,
	    irq->res);
	if (unlikely(rcc != 0))
		ena_log(pdev, ERR,
		    "dev has no parent while releasing res for irq: %d\n",
		    irq->vector);
	irq->res = NULL;

	return (rc);
}
1866 
/*
 * Allocate resources and attach handlers for every I/O interrupt vector,
 * optionally binding each to its designated CPU.  Requires MSI-X to be
 * enabled.  On any failure, every vector touched so far is torn down and
 * its resource released.  Returns 0 on success.
 */
static int
ena_request_io_irq(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	struct ena_irq *irq;
	unsigned long flags = 0;
	int rc = 0, i, rcc;

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
		ena_log(pdev, ERR,
		    "failed to request I/O IRQ: MSI-X is not enabled\n");
		return (EINVAL);
	} else {
		flags = RF_ACTIVE | RF_SHAREABLE;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];

		/* Skip vectors that are already set up. */
		if (unlikely(irq->requested))
			continue;

		irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
		    &irq->vector, flags);
		if (unlikely(irq->res == NULL)) {
			rc = ENOMEM;
			ena_log(pdev, ERR,
			    "could not allocate irq vector: %d\n", irq->vector);
			goto err;
		}

		rc = bus_setup_intr(adapter->pdev, irq->res,
		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data,
		    &irq->cookie);
		if (unlikely(rc != 0)) {
			ena_log(pdev, ERR,
			    "failed to register interrupt handler for irq %ju: %d\n",
			    rman_get_start(irq->res), rc);
			goto err;
		}
		irq->requested = true;

		/* Bind the vector to the CPU chosen by ena_setup_io_intr(). */
		if (adapter->rss_enabled || adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
			rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu);
			if (unlikely(rc != 0)) {
				ena_log(pdev, ERR,
				    "failed to bind interrupt handler for irq %ju to cpu %d: %d\n",
				    rman_get_start(irq->res), irq->cpu, rc);
				goto err;
			}

			ena_log(pdev, INFO, "queue %d - cpu %d\n",
			    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
		}
	}
	return (rc);

err:

	for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
		irq = &adapter->irq_tbl[i];
		rcc = 0;

		/* Once we entered err: section and irq->requested is true we
		   free both intr and resources */
		if (irq->requested) {
			rcc = bus_teardown_intr(adapter->pdev, irq->res,
			    irq->cookie);
			if (unlikely(rcc != 0))
				ena_log(pdev, ERR,
				    "could not release irq: %d, error: %d\n",
				    irq->vector, rcc);
		}

		/* If we entered err: section without irq->requested set we know
		   it was bus_alloc_resource_any() that needs cleanup, provided
		   res is not NULL. In case res is NULL no work in needed in
		   this iteration */
		rcc = 0;
		if (irq->res != NULL) {
			rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
			    irq->vector, irq->res);
		}
		if (unlikely(rcc != 0))
			ena_log(pdev, ERR,
			    "dev has no parent while releasing res for irq: %d\n",
			    irq->vector);
		irq->requested = false;
		irq->res = NULL;
	}

	return (rc);
}
1960 
1961 static void
1962 ena_free_mgmnt_irq(struct ena_adapter *adapter)
1963 {
1964 	device_t pdev = adapter->pdev;
1965 	struct ena_irq *irq;
1966 	int rc;
1967 
1968 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1969 	if (irq->requested) {
1970 		ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
1971 		rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1972 		if (unlikely(rc != 0))
1973 			ena_log(pdev, ERR, "failed to tear down irq: %d\n",
1974 			    irq->vector);
1975 		irq->requested = 0;
1976 	}
1977 
1978 	if (irq->res != NULL) {
1979 		ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector);
1980 		rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1981 		    irq->vector, irq->res);
1982 		irq->res = NULL;
1983 		if (unlikely(rc != 0))
1984 			ena_log(pdev, ERR,
1985 			    "dev has no parent while releasing res for irq: %d\n",
1986 			    irq->vector);
1987 	}
1988 }
1989 
1990 static void
1991 ena_free_io_irq(struct ena_adapter *adapter)
1992 {
1993 	device_t pdev = adapter->pdev;
1994 	struct ena_irq *irq;
1995 	int rc;
1996 
1997 	for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1998 		irq = &adapter->irq_tbl[i];
1999 		if (irq->requested) {
2000 			ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
2001 			rc = bus_teardown_intr(adapter->pdev, irq->res,
2002 			    irq->cookie);
2003 			if (unlikely(rc != 0)) {
2004 				ena_log(pdev, ERR,
2005 				    "failed to tear down irq: %d\n",
2006 				    irq->vector);
2007 			}
2008 			irq->requested = 0;
2009 		}
2010 
2011 		if (irq->res != NULL) {
2012 			ena_log(pdev, DBG, "release resource irq: %d\n",
2013 			    irq->vector);
2014 			rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
2015 			    irq->vector, irq->res);
2016 			irq->res = NULL;
2017 			if (unlikely(rc != 0)) {
2018 				ena_log(pdev, ERR,
2019 				    "dev has no parent while releasing res for irq: %d\n",
2020 				    irq->vector);
2021 			}
2022 		}
2023 	}
2024 }
2025 
/*
 * Release all interrupt state for the adapter.
 *
 * The IO vectors are torn down first, then the management vector, and
 * only afterwards are the MSI-X vectors themselves released back to the
 * PCI layer — IRQ resources must be freed before MSI-X is disabled.
 */
static void
ena_free_irqs(struct ena_adapter *adapter)
{
	ena_free_io_irq(adapter);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
}
2033 
/*
 * Return the MSI-X vectors to the PCI layer (if they were enabled) and
 * free the driver's vector bookkeeping.
 *
 * The enabled-flag is cleared before pci_release_msi() so a concurrent
 * observer of the flag never sees MSI-X marked enabled after release.
 * The entries table is freed unconditionally; free(NULL) is a no-op.
 */
static void
ena_disable_msix(struct ena_adapter *adapter)
{
	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
		pci_release_msi(adapter->pdev);
	}

	adapter->msix_vecs = 0;
	free(adapter->msix_entries, M_DEVBUF);
	adapter->msix_entries = NULL;
}
2046 
2047 static void
2048 ena_unmask_all_io_irqs(struct ena_adapter *adapter)
2049 {
2050 	struct ena_com_io_cq *io_cq;
2051 	struct ena_eth_io_intr_reg intr_reg;
2052 	struct ena_ring *tx_ring;
2053 	uint16_t ena_qid;
2054 	int i;
2055 
2056 	/* Unmask interrupts for all queues */
2057 	for (i = 0; i < adapter->num_io_queues; i++) {
2058 		ena_qid = ENA_IO_TXQ_IDX(i);
2059 		io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
2060 		ena_com_update_intr_reg(&intr_reg, 0, 0, true, false);
2061 		tx_ring = &adapter->tx_ring[i];
2062 		counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
2063 		ena_com_unmask_intr(io_cq, &intr_reg);
2064 	}
2065 }
2066 
/*
 * Finish bringing the interface up after the IO queues exist:
 * re-apply RSS (when active), push the current MTU to the device,
 * pre-fill every RX ring with buffers and reset the HW statistics.
 *
 * Returns 0 on success or the error from RSS/MTU configuration.
 */
static int
ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
		rc = ena_rss_configure(adapter);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure RSS\n");
			return (rc);
		}
	}

	/* Re-program the device with the MTU currently set on the ifnet. */
	rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp));
	if (unlikely(rc != 0))
		return (rc);

	ena_refill_all_rx_bufs(adapter);
	/* Start each "up" period with fresh HW counters. */
	ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(adapter->hw_stats));

	return (0);
}
2091 
2092 static void
2093 set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size)
2094 {
2095 	int i;
2096 
2097 	for (i = 0; i < adapter->num_io_queues; i++) {
2098 		adapter->tx_ring[i].ring_size = new_tx_size;
2099 		adapter->rx_ring[i].ring_size = new_rx_size;
2100 	}
2101 }
2102 
2103 static int
2104 create_queues_with_size_backoff(struct ena_adapter *adapter)
2105 {
2106 	device_t pdev = adapter->pdev;
2107 	int rc;
2108 	uint32_t cur_rx_ring_size, cur_tx_ring_size;
2109 	uint32_t new_rx_ring_size, new_tx_ring_size;
2110 
2111 	/*
2112 	 * Current queue sizes might be set to smaller than the requested
2113 	 * ones due to past queue allocation failures.
2114 	 */
2115 	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2116 	    adapter->requested_rx_ring_size);
2117 
2118 	while (1) {
2119 		/* Allocate transmit descriptors */
2120 		rc = ena_setup_all_tx_resources(adapter);
2121 		if (unlikely(rc != 0)) {
2122 			ena_log(pdev, ERR, "err_setup_tx\n");
2123 			goto err_setup_tx;
2124 		}
2125 
2126 		/* Allocate receive descriptors */
2127 		rc = ena_setup_all_rx_resources(adapter);
2128 		if (unlikely(rc != 0)) {
2129 			ena_log(pdev, ERR, "err_setup_rx\n");
2130 			goto err_setup_rx;
2131 		}
2132 
2133 		/* Create IO queues for Rx & Tx */
2134 		rc = ena_create_io_queues(adapter);
2135 		if (unlikely(rc != 0)) {
2136 			ena_log(pdev, ERR, "create IO queues failed\n");
2137 			goto err_io_que;
2138 		}
2139 
2140 		return (0);
2141 
2142 err_io_que:
2143 		ena_free_all_rx_resources(adapter);
2144 err_setup_rx:
2145 		ena_free_all_tx_resources(adapter);
2146 err_setup_tx:
2147 		/*
2148 		 * Lower the ring size if ENOMEM. Otherwise, return the
2149 		 * error straightaway.
2150 		 */
2151 		if (unlikely(rc != ENOMEM)) {
2152 			ena_log(pdev, ERR,
2153 			    "Queue creation failed with error code: %d\n", rc);
2154 			return (rc);
2155 		}
2156 
2157 		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2158 		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2159 
2160 		ena_log(pdev, ERR,
2161 		    "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2162 		    cur_tx_ring_size, cur_rx_ring_size);
2163 
2164 		new_tx_ring_size = cur_tx_ring_size;
2165 		new_rx_ring_size = cur_rx_ring_size;
2166 
2167 		/*
2168 		 * Decrease the size of a larger queue, or decrease both if they
2169 		 * are the same size.
2170 		 */
2171 		if (cur_rx_ring_size <= cur_tx_ring_size)
2172 			new_tx_ring_size = cur_tx_ring_size / 2;
2173 		if (cur_rx_ring_size >= cur_tx_ring_size)
2174 			new_rx_ring_size = cur_rx_ring_size / 2;
2175 
2176 		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2177 		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
2178 			ena_log(pdev, ERR,
2179 			    "Queue creation failed with the smallest possible queue size"
2180 			    "of %d for both queues. Not retrying with smaller queues\n",
2181 			    ENA_MIN_RING_SIZE);
2182 			return (rc);
2183 		}
2184 
2185 		ena_log(pdev, INFO,
2186 		    "Retrying queue creation with sizes TX=%d, RX=%d\n",
2187 		    new_tx_ring_size, new_rx_ring_size);
2188 
2189 		set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
2190 	}
2191 }
2192 
/*
 * Bring the interface up: set up and request IO interrupts, create the
 * IO queues (with size backoff), finish configuration and unmask all IO
 * interrupts.  Must be called with the ENA lock held.
 *
 * Returns 0 on success (or if the device is already up), ENXIO when the
 * device is not attached, or the first error hit during bring-up; on
 * error all partially-created state is unwound.
 */
int
ena_up(struct ena_adapter *adapter)
{
	int rc = 0;

	ENA_LOCK_ASSERT();

	if (unlikely(device_is_attached(adapter->pdev) == 0)) {
		ena_log(adapter->pdev, ERR, "device is not attached!\n");
		return (ENXIO);
	}

	/* Already up — nothing to do. */
	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return (0);

	ena_log(adapter->pdev, INFO, "device is going UP\n");

	/* setup interrupts for IO queues */
	rc = ena_setup_io_intr(adapter);
	if (unlikely(rc != 0)) {
		ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n");
		goto error;
	}
	rc = ena_request_io_irq(adapter);
	if (unlikely(rc != 0)) {
		ena_log(adapter->pdev, ERR, "err_req_irq\n");
		goto error;
	}

	ena_log(adapter->pdev, INFO,
	    "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, LLQ is %s\n",
	    adapter->num_io_queues,
	    adapter->requested_rx_ring_size,
	    adapter->requested_tx_ring_size,
	    (adapter->ena_dev->tx_mem_queue_type ==
		ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED");

	/* Retries with smaller rings on ENOMEM. */
	rc = create_queues_with_size_backoff(adapter);
	if (unlikely(rc != 0)) {
		ena_log(adapter->pdev, ERR,
		    "error creating queues with size backoff\n");
		goto err_create_queues_with_backoff;
	}

	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
		if_link_state_change(adapter->ifp, LINK_STATE_UP);

	/* RSS, MTU, RX buffer refill, counter reset. */
	rc = ena_up_complete(adapter);
	if (unlikely(rc != 0))
		goto err_up_complete;

	counter_u64_add(adapter->dev_stats.interface_up, 1);

	ena_update_hwassist(adapter);

	if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);

	/* Only now allow the queues to raise interrupts. */
	ena_unmask_all_io_irqs(adapter);

	return (0);

err_up_complete:
	ena_destroy_all_io_queues(adapter);
	ena_free_all_rx_resources(adapter);
	ena_free_all_tx_resources(adapter);
err_create_queues_with_backoff:
	ena_free_io_irq(adapter);
error:
	return (rc);
}
2265 
2266 static uint64_t
2267 ena_get_counter(if_t ifp, ift_counter cnt)
2268 {
2269 	struct ena_adapter *adapter;
2270 	struct ena_hw_stats *stats;
2271 
2272 	adapter = if_getsoftc(ifp);
2273 	stats = &adapter->hw_stats;
2274 
2275 	switch (cnt) {
2276 	case IFCOUNTER_IPACKETS:
2277 		return (counter_u64_fetch(stats->rx_packets));
2278 	case IFCOUNTER_OPACKETS:
2279 		return (counter_u64_fetch(stats->tx_packets));
2280 	case IFCOUNTER_IBYTES:
2281 		return (counter_u64_fetch(stats->rx_bytes));
2282 	case IFCOUNTER_OBYTES:
2283 		return (counter_u64_fetch(stats->tx_bytes));
2284 	case IFCOUNTER_IQDROPS:
2285 		return (counter_u64_fetch(stats->rx_drops));
2286 	case IFCOUNTER_OQDROPS:
2287 		return (counter_u64_fetch(stats->tx_drops));
2288 	default:
2289 		return (if_get_counter_default(ifp, cnt));
2290 	}
2291 }
2292 
/*
 * ifmedia(4) media-change callback.  The ENA firmware does not let the
 * host select media parameters, so this is a no-op reporting success.
 */
static int
ena_media_change(if_t ifp)
{
	/* Media Change is not supported by firmware */
	return (0);
}
2299 
2300 static void
2301 ena_media_status(if_t ifp, struct ifmediareq *ifmr)
2302 {
2303 	struct ena_adapter *adapter = if_getsoftc(ifp);
2304 	ena_log(adapter->pdev, DBG, "Media status update\n");
2305 
2306 	ENA_LOCK_LOCK();
2307 
2308 	ifmr->ifm_status = IFM_AVALID;
2309 	ifmr->ifm_active = IFM_ETHER;
2310 
2311 	if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
2312 		ENA_LOCK_UNLOCK();
2313 		ena_log(adapter->pdev, INFO, "Link is down\n");
2314 		return;
2315 	}
2316 
2317 	ifmr->ifm_status |= IFM_ACTIVE;
2318 	ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
2319 
2320 	ENA_LOCK_UNLOCK();
2321 }
2322 
2323 static void
2324 ena_init(void *arg)
2325 {
2326 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
2327 
2328 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
2329 		ENA_LOCK_LOCK();
2330 		ena_up(adapter);
2331 		ENA_LOCK_UNLOCK();
2332 	}
2333 }
2334 
/*
 * ifnet ioctl handler.
 *
 * MTU, flag and capability changes that require reconfiguration cycle
 * the interface (ena_down()/ena_up()) under the ENA lock so up/down
 * paths never run concurrently.  Unhandled commands fall through to
 * ether_ioctl().  Returns 0 or an errno from the invoked operation.
 */
static int
ena_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct ena_adapter *adapter;
	struct ifreq *ifr;
	int rc;

	adapter = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	/*
	 * Acquiring lock to prevent from running up and down routines parallel.
	 */
	rc = 0;
	switch (command) {
	case SIOCSIFMTU:
		/* No-op when the MTU is unchanged. */
		if (if_getmtu(ifp) == ifr->ifr_mtu)
			break;
		ENA_LOCK_LOCK();
		ena_down(adapter);

		ena_change_mtu(ifp, ifr->ifr_mtu);

		rc = ena_up(adapter);
		ENA_LOCK_UNLOCK();
		break;

	case SIOCSIFFLAGS:
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/*
				 * Promisc/allmulti are only logged here;
				 * no device reconfiguration takes place.
				 */
				if ((if_getflags(ifp) & (IFF_PROMISC |
				    IFF_ALLMULTI)) != 0) {
					ena_log(adapter->pdev, INFO,
					    "ioctl promisc/allmulti\n");
				}
			} else {
				ENA_LOCK_LOCK();
				rc = ena_up(adapter);
				ENA_LOCK_UNLOCK();
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				ENA_LOCK_LOCK();
				ena_down(adapter);
				ENA_LOCK_UNLOCK();
			}
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast filter changes need no device action here. */
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;

	case SIOCSIFCAP:
		{
			int reinit = 0;

			if (ifr->ifr_reqcap != if_getcapenable(ifp)) {
				if_setcapenable(ifp, ifr->ifr_reqcap);
				reinit = 1;
			}

			/* Capability changes take effect via a full restart. */
			if ((reinit != 0) &&
			    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
				ENA_LOCK_LOCK();
				ena_down(adapter);
				rc = ena_up(adapter);
				ENA_LOCK_UNLOCK();
			}
		}

		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return (rc);
}
2419 
2420 static int
2421 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2422 {
2423 	int caps = 0;
2424 
2425 	if ((feat->offload.tx &
2426 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2427 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2428 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2429 		caps |= IFCAP_TXCSUM;
2430 
2431 	if ((feat->offload.tx &
2432 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2433 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2434 		caps |= IFCAP_TXCSUM_IPV6;
2435 
2436 	if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2437 		caps |= IFCAP_TSO4;
2438 
2439 	if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2440 		caps |= IFCAP_TSO6;
2441 
2442 	if ((feat->offload.rx_supported &
2443 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2444 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2445 		caps |= IFCAP_RXCSUM;
2446 
2447 	if ((feat->offload.rx_supported &
2448 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2449 		caps |= IFCAP_RXCSUM_IPV6;
2450 
2451 	caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
2452 
2453 	return (caps);
2454 }
2455 
/*
 * Mirror the interface's capability bits into the admin host-info
 * structure as the first supported-network-features word.
 */
static void
ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
{
	host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp);
}
2461 
2462 static void
2463 ena_update_hwassist(struct ena_adapter *adapter)
2464 {
2465 	if_t ifp = adapter->ifp;
2466 	uint32_t feat = adapter->tx_offload_cap;
2467 	int cap = if_getcapenable(ifp);
2468 	int flags = 0;
2469 
2470 	if_clearhwassist(ifp);
2471 
2472 	if ((cap & IFCAP_TXCSUM) != 0) {
2473 		if ((feat &
2474 		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
2475 			flags |= CSUM_IP;
2476 		if ((feat &
2477 		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2478 		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
2479 			flags |= CSUM_IP_UDP | CSUM_IP_TCP;
2480 	}
2481 
2482 	if ((cap & IFCAP_TXCSUM_IPV6) != 0)
2483 		flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
2484 
2485 	if ((cap & IFCAP_TSO4) != 0)
2486 		flags |= CSUM_IP_TSO;
2487 
2488 	if ((cap & IFCAP_TSO6) != 0)
2489 		flags |= CSUM_IP6_TSO;
2490 
2491 	if_sethwassistbits(ifp, flags, 0);
2492 }
2493 
/*
 * Allocate and configure the ifnet for this adapter: install the driver
 * callbacks, set queue/MTU/TSO parameters, derive capabilities from the
 * device's advertised offloads, register the (fixed) media and attach
 * the Ethernet interface with the device MAC address.
 */
static void
ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
    struct ena_com_dev_get_features_ctx *feat)
{
	if_t ifp;
	int caps = 0;

	ifp = adapter->ifp = if_gethandle(IFT_ETHER);
	if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
	if_setdev(ifp, pdev);
	if_setsoftc(ifp, adapter);

	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, ena_init);
	if_settransmitfn(ifp, ena_mq_start);
	if_setqflushfn(ifp, ena_qflush);
	if_setioctlfn(ifp, ena_ioctl);
	if_setgetcounterfn(ifp, ena_get_counter);

	if_setsendqlen(ifp, adapter->requested_tx_ring_size);
	if_setsendqready(ifp);
	if_setmtu(ifp, ETHERMTU);
	if_setbaudrate(ifp, 0);
	/* Zeroize capabilities... */
	if_setcapabilities(ifp, 0);
	if_setcapenable(ifp, 0);
	/* check hardware support */
	caps = ena_get_dev_offloads(feat);
	/* ... and set them */
	if_setcapabilitiesbit(ifp, caps, 0);

	/* TSO parameters */
	if_sethwtsomax(ifp, ENA_TSO_MAXSIZE -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
	/* One SG entry is reserved for the packet header. */
	if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1);
	if_sethwtsomaxsegsize(ifp, ENA_TSO_MAXSIZE);

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	/* Enable everything the hardware supports by default. */
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change,
	    ena_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, adapter->mac_addr);
}
2545 
/*
 * Bring the interface down: stop new transmissions, free IO interrupts,
 * optionally reset the device (when a reset was triggered), destroy the
 * IO queues and free all buffers and ring resources.
 *
 * Must be called with the ENA lock held; a no-op when the device is not
 * up.  The teardown order matters: IRQs go first so no handler runs
 * while the queues are being destroyed.
 */
void
ena_down(struct ena_adapter *adapter)
{
	int rc;

	ENA_LOCK_ASSERT();

	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return;

	ena_log(adapter->pdev, INFO, "device is going DOWN\n");

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	ena_free_io_irq(adapter);

	/* Perform the device reset requested by the reset task, if any. */
	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (unlikely(rc != 0))
			ena_log(adapter->pdev, ERR, "Device reset failed\n");
	}

	ena_destroy_all_io_queues(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_tx_resources(adapter);
	ena_free_all_rx_resources(adapter);

	counter_u64_add(adapter->dev_stats.interface_down, 1);
}
2578 
2579 static uint32_t
2580 ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
2581     struct ena_com_dev_get_features_ctx *get_feat_ctx)
2582 {
2583 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
2584 
2585 	/* Regular queues capabilities */
2586 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2587 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2588 		    &get_feat_ctx->max_queue_ext.max_queue_ext;
2589 		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
2590 		    max_queue_ext->max_rx_cq_num);
2591 
2592 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
2593 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2594 	} else {
2595 		struct ena_admin_queue_feature_desc *max_queues =
2596 		    &get_feat_ctx->max_queues;
2597 		io_tx_sq_num = max_queues->max_sq_num;
2598 		io_tx_cq_num = max_queues->max_cq_num;
2599 		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
2600 	}
2601 
2602 	/* In case of LLQ use the llq fields for the tx SQ/CQ */
2603 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2604 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
2605 
2606 	max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
2607 	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
2608 	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
2609 	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
2610 	/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
2611 	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
2612 	    pci_msix_count(pdev) - 1);
2613 #ifdef RSS
2614 	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
2615 	    rss_getnumbuckets());
2616 #endif
2617 
2618 	return (max_num_io_queues);
2619 }
2620 
2621 static int
2622 ena_enable_wc(device_t pdev, struct resource *res)
2623 {
2624 #if defined(__i386) || defined(__amd64) || defined(__aarch64__)
2625 	vm_offset_t va;
2626 	vm_size_t len;
2627 	int rc;
2628 
2629 	va = (vm_offset_t)rman_get_virtual(res);
2630 	len = rman_get_size(res);
2631 	/* Enable write combining */
2632 	rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
2633 	if (unlikely(rc != 0)) {
2634 		ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc);
2635 		return (rc);
2636 	}
2637 
2638 	return (0);
2639 #endif
2640 	return (EOPNOTSUPP);
2641 }
2642 
2643 static int
2644 ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
2645     struct ena_admin_feature_llq_desc *llq,
2646     struct ena_llq_configurations *llq_default_configurations)
2647 {
2648 	int rc;
2649 	uint32_t llq_feature_mask;
2650 
2651 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
2652 	if (!(ena_dev->supported_features & llq_feature_mask)) {
2653 		ena_log(pdev, WARN,
2654 		    "LLQ is not supported. Fallback to host mode policy.\n");
2655 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2656 		return (0);
2657 	}
2658 
2659 	if (ena_dev->mem_bar == NULL) {
2660 		ena_log(pdev, WARN,
2661 		    "LLQ is advertised as supported but device doesn't expose mem bar.\n");
2662 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2663 		return (0);
2664 	}
2665 
2666 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
2667 	if (unlikely(rc != 0)) {
2668 		ena_log(pdev, WARN,
2669 		    "Failed to configure the device mode. "
2670 		    "Fallback to host mode policy.\n");
2671 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2672 	}
2673 
2674 	return (0);
2675 }
2676 
2677 static int
2678 ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev)
2679 {
2680 	struct ena_adapter *adapter = device_get_softc(pdev);
2681 	int rc, rid;
2682 
2683 	/* Try to allocate resources for LLQ bar */
2684 	rid = PCIR_BAR(ENA_MEM_BAR);
2685 	adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
2686 	    RF_ACTIVE);
2687 	if (unlikely(adapter->memory == NULL)) {
2688 		ena_log(pdev, WARN,
2689 		    "Unable to allocate LLQ bar resource. LLQ mode won't be used.\n");
2690 		return (0);
2691 	}
2692 
2693 	/* Enable write combining for better LLQ performance */
2694 	rc = ena_enable_wc(adapter->pdev, adapter->memory);
2695 	if (unlikely(rc != 0)) {
2696 		ena_log(pdev, ERR, "failed to enable write combining.\n");
2697 		return (rc);
2698 	}
2699 
2700 	/*
2701 	 * Save virtual address of the device's memory region
2702 	 * for the ena_com layer.
2703 	 */
2704 	ena_dev->mem_bar = rman_get_virtual(adapter->memory);
2705 
2706 	return (0);
2707 }
2708 
2709 static inline void
2710 set_default_llq_configurations(struct ena_llq_configurations *llq_config,
2711     struct ena_admin_feature_llq_desc *llq)
2712 {
2713 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
2714 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
2715 	llq_config->llq_num_decs_before_header =
2716 	    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
2717 	if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) !=
2718 	    0 && ena_force_large_llq_header) {
2719 		llq_config->llq_ring_entry_size =
2720 		    ENA_ADMIN_LIST_ENTRY_SIZE_256B;
2721 		llq_config->llq_ring_entry_size_value = 256;
2722 	} else {
2723 		llq_config->llq_ring_entry_size =
2724 		    ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2725 		llq_config->llq_ring_entry_size_value = 128;
2726 	}
2727 }
2728 
/*
 * Derive the default and maximum TX/RX queue sizes and SGL sizes from
 * the device's advertised limits, storing the results in ctx.
 *
 * Maximum sizes come from the SQ/CQ depth limits (LLQ depth caps the TX
 * side in device-placement mode); both maxima and the defaults are
 * rounded down to powers of two, and the defaults are clamped to
 * [ENA_MIN_RING_SIZE, max].  Always returns 0.
 */
static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
	uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	/* Read depth limits, preferring the extended capability layout. */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
		    &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = min_t(uint32_t,
		    max_queue_ext->max_rx_cq_depth,
		    max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		/* In LLQ mode the TX SQ depth is capped by the LLQ depth. */
		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    max_queue_ext->max_tx_sq_depth);

		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queue_ext->max_per_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queue_ext->max_per_packet_rx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
		    &ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth,
		    max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    max_queues->max_sq_depth);

		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queues->max_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queues->max_packet_rx_descs);
	}

	/* round down to the nearest power of 2 */
	max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);

	/*
	 * When forcing large headers, we multiply the entry size by 2,
	 * and therefore divide the queue size by 2, leaving the amount
	 * of memory used by the queues unchanged.
	 */
	if (ena_force_large_llq_header) {
		if ((llq->entry_size_ctrl_supported &
		    ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 &&
		    ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size /= 2;
			ena_log(ctx->pdev, INFO,
			    "Forcing large headers and decreasing maximum Tx queue size to %d\n",
			    max_tx_queue_size);
		} else {
			ena_log(ctx->pdev, WARN,
			    "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	/* Clamp the defaults into the valid range... */
	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
	    max_tx_queue_size);
	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
	    max_rx_queue_size);

	/* ... and round them down to powers of two as well. */
	tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
	rx_queue_size = 1 << (flsl(rx_queue_size) - 1);

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;
	ctx->tx_queue_size = tx_queue_size;
	ctx->rx_queue_size = rx_queue_size;

	return (0);
}
2819 
2820 static void
2821 ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
2822 {
2823 	struct ena_admin_host_info *host_info;
2824 	uintptr_t rid;
2825 	int rc;
2826 
2827 	/* Allocate only the host info */
2828 	rc = ena_com_allocate_host_info(ena_dev);
2829 	if (unlikely(rc != 0)) {
2830 		ena_log(dev, ERR, "Cannot allocate host info\n");
2831 		return;
2832 	}
2833 
2834 	host_info = ena_dev->host_attr.host_info;
2835 
2836 	if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
2837 		host_info->bdf = rid;
2838 	host_info->os_type = ENA_ADMIN_OS_FREEBSD;
2839 	host_info->kernel_ver = osreldate;
2840 
2841 	sprintf(host_info->kernel_ver_str, "%d", osreldate);
2842 	host_info->os_dist = 0;
2843 	strncpy(host_info->os_dist_str, osrelease,
2844 	    sizeof(host_info->os_dist_str) - 1);
2845 
2846 	host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) |
2847 	    (ENA_DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2848 	    (ENA_DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
2849 	host_info->num_cpus = mp_ncpus;
2850 	host_info->driver_supported_features =
2851 	    ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
2852 	    ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
2853 
2854 	rc = ena_com_set_host_attributes(ena_dev);
2855 	if (unlikely(rc != 0)) {
2856 		if (rc == EOPNOTSUPP)
2857 			ena_log(dev, WARN, "Cannot set host attributes\n");
2858 		else
2859 			ena_log(dev, ERR, "Cannot set host attributes\n");
2860 
2861 		goto err;
2862 	}
2863 
2864 	return;
2865 
2866 err:
2867 	ena_com_delete_host_info(ena_dev);
2868 }
2869 
/*
 * Low-level device initialization performed at attach/reset time:
 * set up MMIO register reads, reset and validate the device, initialize
 * the admin queue (in polling mode, since MSI-X is not yet configured),
 * fetch device features, configure AENQ groups and choose the TX queue
 * placement policy.
 *
 * On success returns 0 with *wd_active reporting whether the keep-alive
 * watchdog AENQ group is available; on failure all partially-initialized
 * admin/MMIO state is destroyed and the error is returned.
 */
static int
ena_device_init(struct ena_adapter *adapter, device_t pdev,
    struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
{
	struct ena_llq_configurations llq_config;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool readless_supported;
	uint32_t aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "failed to init mmio read less\n");
		return (rc);
	}

	/*
	 * The PCIe configuration space revision id indicate if mmio reg
	 * read is disabled
	 */
	readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "device version is too low\n");
		goto err_mmio_read_less;
	}

	/* Negative values from ena_com_get_dma_width() are errors. */
	dma_width = ena_com_get_dma_width(ena_dev);
	if (unlikely(dma_width < 0)) {
		ena_log(pdev, ERR, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}
	adapter->dma_width = dma_width;

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/*
	 * To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev, pdev);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Cannot get attribute for ena device rc: %d\n", rc);
		goto err_admin_init;
	}

	/* Request all AENQ groups, limited to what the device supports. */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
	    BIT(ENA_ADMIN_FATAL_ERROR) |
	    BIT(ENA_ADMIN_WARNING) |
	    BIT(ENA_ADMIN_NOTIFICATION) |
	    BIT(ENA_ADMIN_KEEP_ALIVE) |
	    BIT(ENA_ADMIN_CONF_NOTIFICATIONS);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;
	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc);
		goto err_admin_init;
	}

	/* Watchdog is only useful if the device sends keep-alive events. */
	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	set_default_llq_configurations(&llq_config, &get_feat_ctx->llq);

	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
	    &llq_config);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to set placement policy\n");
		goto err_admin_init;
	}

	return (0);

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return (rc);
}
2974 
2975 static int
2976 ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
2977 {
2978 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2979 	int rc;
2980 
2981 	rc = ena_enable_msix(adapter);
2982 	if (unlikely(rc != 0)) {
2983 		ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n");
2984 		return (rc);
2985 	}
2986 
2987 	ena_setup_mgmnt_intr(adapter);
2988 
2989 	rc = ena_request_mgmnt_irq(adapter);
2990 	if (unlikely(rc != 0)) {
2991 		ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n");
2992 		goto err_disable_msix;
2993 	}
2994 
2995 	ena_com_set_admin_polling_mode(ena_dev, false);
2996 
2997 	ena_com_admin_aenq_enable(ena_dev);
2998 
2999 	return (0);
3000 
3001 err_disable_msix:
3002 	ena_disable_msix(adapter);
3003 
3004 	return (rc);
3005 }
3006 
3007 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
3008 static void
3009 ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
3010 {
3011 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3012 	struct ena_admin_aenq_keep_alive_desc *desc;
3013 	sbintime_t stime;
3014 	uint64_t rx_drops;
3015 	uint64_t tx_drops;
3016 
3017 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3018 
3019 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3020 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
3021 	counter_u64_zero(adapter->hw_stats.rx_drops);
3022 	counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
3023 	counter_u64_zero(adapter->hw_stats.tx_drops);
3024 	counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
3025 
3026 	stime = getsbinuptime();
3027 	atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
3028 }
3029 
3030 /* Check for keep alive expiration */
3031 static void
3032 check_for_missing_keep_alive(struct ena_adapter *adapter)
3033 {
3034 	sbintime_t timestamp, time;
3035 	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3036 
3037 	if (adapter->wd_active == 0)
3038 		return;
3039 
3040 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3041 		return;
3042 
3043 	timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
3044 	time = getsbinuptime() - timestamp;
3045 	if (unlikely(time > adapter->keep_alive_timeout)) {
3046 		ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n");
3047 		if (ena_com_aenq_has_keep_alive(adapter->ena_dev))
3048 			reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT;
3049 
3050 		ena_trigger_reset(adapter, reset_reason);
3051 	}
3052 }
3053 
3054 /* Check if admin queue is enabled */
3055 static void
3056 check_for_admin_com_state(struct ena_adapter *adapter)
3057 {
3058 	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_ADMIN_TO;
3059 	if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) {
3060 		ena_log(adapter->pdev, ERR,
3061 		    "ENA admin queue is not in running state!\n");
3062 		counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
3063 		if (ena_com_get_missing_admin_interrupt(adapter->ena_dev))
3064 			reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT;
3065 
3066 		ena_trigger_reset(adapter, reset_reason);
3067 	}
3068 }
3069 
3070 static int
3071 check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3072     struct ena_ring *rx_ring)
3073 {
3074 	if (likely(atomic_load_8(&rx_ring->first_interrupt)))
3075 		return (0);
3076 
3077 	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3078 		return (0);
3079 
3080 	rx_ring->no_interrupt_event_cnt++;
3081 
3082 	if (rx_ring->no_interrupt_event_cnt ==
3083 	    ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3084 		ena_log(adapter->pdev, ERR,
3085 		    "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3086 		    rx_ring->qid);
3087 		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3088 		return (EIO);
3089 	}
3090 
3091 	return (0);
3092 }
3093 
3094 static enum ena_regs_reset_reason_types
3095 check_cdesc_in_tx_cq(struct ena_adapter *adapter,
3096     struct ena_ring *tx_ring)
3097 {
3098 	device_t pdev = adapter->pdev;
3099 	int rc;
3100 	u16 req_id;
3101 
3102 	rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id);
3103 	/* TX CQ is empty */
3104 	if (rc == ENA_COM_TRY_AGAIN) {
3105 		ena_log(pdev, ERR,
3106 		    "No completion descriptors found in CQ %d\n",
3107 		    tx_ring->qid);
3108 		return ENA_REGS_RESET_MISS_TX_CMPL;
3109 	}
3110 
3111 	/* TX CQ has cdescs */
3112 	ena_log(pdev, ERR,
3113 	    "Completion descriptors found in CQ %d",
3114 	    tx_ring->qid);
3115 
3116 	return ENA_REGS_RESET_MISS_INTERRUPT;
3117 }
3118 
/*
 * Scan tx_ring for Tx buffers whose completion has been outstanding longer
 * than adapter->missing_tx_timeout and schedule a device reset when the
 * count of such buffers exceeds adapter->missing_tx_threshold.
 *
 * Returns 0 when the ring looks healthy, EIO when a reset was triggered.
 */
static int
check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
    struct ena_ring *tx_ring)
{
	uint32_t missed_tx = 0, new_missed_tx = 0;
	device_t pdev = adapter->pdev;
	struct bintime curtime, time;
	struct ena_tx_buffer *tx_buf;
	int time_since_last_cleanup;
	int missing_tx_comp_to;
	sbintime_t time_offset;
	int i, rc = 0;
	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
	bool cleanup_scheduled, cleanup_running;

	getbinuptime(&curtime);

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];

		/* Slot with a cleared timestamp holds no in-flight packet. */
		if (bintime_isset(&tx_buf->timestamp) == 0)
			continue;

		/* Age of this Tx buffer since it was handed to the device. */
		time = curtime;
		bintime_sub(&time, &tx_buf->timestamp);
		time_offset = bttosbt(time);

		if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) &&
		    time_offset > 2 * adapter->missing_tx_timeout)) {
			/*
			 * If after graceful period interrupt is still not
			 * received, we schedule a reset.
			 */
			ena_log(pdev, ERR,
			    "Potential MSIX issue on Tx side Queue = %d. "
			    "Reset the device\n",
			    tx_ring->qid);
			ena_trigger_reset(adapter,
			    ENA_REGS_RESET_MISS_INTERRUPT);
			return (EIO);
		}

		/* Check again if packet is still waiting */
		if (unlikely(time_offset > adapter->missing_tx_timeout)) {

			/*
			 * print_once is still set only the first time this
			 * buffer is seen overdue; it doubles as the marker
			 * for counting a completion as newly missed.
			 */
			if (tx_buf->print_once) {
				time_since_last_cleanup = TICKS_2_MSEC(ticks -
				    tx_ring->tx_last_cleanup_ticks);
				missing_tx_comp_to = sbttoms(
				    adapter->missing_tx_timeout);
				ena_log(pdev, WARN,
				    "Found a Tx that wasn't completed on time, qid %d, index %d. "
				    "%d msecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n",
				    tx_ring->qid, i, time_since_last_cleanup,
				    missing_tx_comp_to);
				/* Add new TX completions which are missed */
				new_missed_tx++;
			}

			tx_buf->print_once = false;
			missed_tx++;
		}
	}
	/* Checking if this TX ring missing TX completions have passed the threshold */
	if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
		ena_log(pdev, ERR,
		    "The number of lost tx completion is above the threshold "
		    "(%d > %d). Reset the device\n",
		    missed_tx, adapter->missing_tx_threshold);
		/* Set the reset flag to prevent ena_cleanup() from running */
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
		/* Need to make sure that ENA_FLAG_TRIGGER_RESET is visible to ena_cleanup() and
		 * that cleanup_running is visible to check_missing_comp_in_tx_queue() to
		 * prevent the case of accessing CQ concurrently with check_cdesc_in_tx_cq()
		 */
		mb();
		cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending));
		cleanup_running = !!(atomic_load_8((&tx_ring->cleanup_running)));
		/*
		 * Only peek at the CQ when no cleanup is scheduled or
		 * running, so the CQ is not read concurrently.
		 */
		if (!(cleanup_scheduled || cleanup_running))
			reset_reason = check_cdesc_in_tx_cq(adapter, tx_ring);

		adapter->reset_reason = reset_reason;
		rc = EIO;
	}
	/* Add the newly discovered missing TX completions */
	counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx);

	return (rc);
}
3208 
3209 /*
3210  * Check for TX which were not completed on time.
3211  * Timeout is defined by "missing_tx_timeout".
3212  * Reset will be performed if number of incompleted
3213  * transactions exceeds "missing_tx_threshold".
3214  */
static void
check_for_missing_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	int i, budget, rc;

	/* Make sure the driver doesn't turn the device in other process */
	rmb();

	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return;

	/* A reset is already pending; no point in scanning further. */
	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
		return;

	if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	/* Bound the number of queue pairs examined per service pass. */
	budget = adapter->missing_tx_max_queues;

	/* Resume the scan where the previous pass left off. */
	for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		rx_ring = &adapter->rx_ring[i];

		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
		if (unlikely(rc != 0))
			return;

		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
		if (unlikely(rc != 0))
			return;

		budget--;
		if (budget == 0) {
			/* Advance past this queue before stopping. */
			i++;
			break;
		}
	}

	/* Wrap around so all queues are eventually covered (round-robin). */
	adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
}
3257 
3258 /* trigger rx cleanup after 2 consecutive detections */
3259 #define EMPTY_RX_REFILL 2
3260 /* For the rare case where the device runs out of Rx descriptors and the
3261  * msix handler failed to refill new Rx descriptors (due to a lack of memory
3262  * for example).
3263  * This case will lead to a deadlock:
3264  * The device won't send interrupts since all the new Rx packets will be dropped
3265  * The msix handler won't allocate new Rx descriptors so the device won't be
3266  * able to send new packets.
3267  *
3268  * When such a situation is detected - execute rx cleanup task in another thread
3269  */
3270 static void
3271 check_for_empty_rx_ring(struct ena_adapter *adapter)
3272 {
3273 	struct ena_ring *rx_ring;
3274 	int i, refill_required;
3275 
3276 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3277 		return;
3278 
3279 	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3280 		return;
3281 
3282 	for (i = 0; i < adapter->num_io_queues; i++) {
3283 		rx_ring = &adapter->rx_ring[i];
3284 
3285 		refill_required = ena_com_free_q_entries(
3286 		    rx_ring->ena_com_io_sq);
3287 		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3288 			rx_ring->empty_rx_queue++;
3289 
3290 			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3291 				counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3292 				    1);
3293 
3294 				ena_log(adapter->pdev, WARN,
3295 				    "Rx ring %d is stalled. Triggering the refill function\n",
3296 				    i);
3297 
3298 				taskqueue_enqueue(rx_ring->que->cleanup_tq,
3299 				    &rx_ring->que->cleanup_task);
3300 				rx_ring->empty_rx_queue = 0;
3301 			}
3302 		} else {
3303 			rx_ring->empty_rx_queue = 0;
3304 		}
3305 	}
3306 }
3307 
3308 static void
3309 ena_update_hints(struct ena_adapter *adapter,
3310     struct ena_admin_ena_hw_hints *hints)
3311 {
3312 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3313 
3314 	if (hints->admin_completion_tx_timeout)
3315 		ena_dev->admin_queue.completion_timeout =
3316 		    hints->admin_completion_tx_timeout * 1000;
3317 
3318 	if (hints->mmio_read_timeout)
3319 		/* convert to usec */
3320 		ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000;
3321 
3322 	if (hints->missed_tx_completion_count_threshold_to_reset)
3323 		adapter->missing_tx_threshold =
3324 		    hints->missed_tx_completion_count_threshold_to_reset;
3325 
3326 	if (hints->missing_tx_completion_timeout) {
3327 		if (hints->missing_tx_completion_timeout ==
3328 		    ENA_HW_HINTS_NO_TIMEOUT)
3329 			adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3330 		else
3331 			adapter->missing_tx_timeout = SBT_1MS *
3332 			    hints->missing_tx_completion_timeout;
3333 	}
3334 
3335 	if (hints->driver_watchdog_timeout) {
3336 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3337 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3338 		else
3339 			adapter->keep_alive_timeout = SBT_1MS *
3340 			    hints->driver_watchdog_timeout;
3341 	}
3342 }
3343 
3344 /**
3345  * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
3346  * @adapter: ENA device adapter
3347  *
3348  * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics
3349  * and other error codes on failure.
3350  *
3351  * This function can possibly cause a race with other calls to the admin queue.
3352  * Because of that, the caller should either lock this function or make sure
3353  * that there is no race in the current context.
3354  */
3355 static int
3356 ena_copy_eni_metrics(struct ena_adapter *adapter)
3357 {
3358 	static bool print_once = true;
3359 	int rc;
3360 
3361 	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
3362 
3363 	if (rc != 0) {
3364 		if (rc == ENA_COM_UNSUPPORTED) {
3365 			if (print_once) {
3366 				ena_log(adapter->pdev, WARN,
3367 				    "Retrieving ENI metrics is not supported.\n");
3368 				print_once = false;
3369 			} else {
3370 				ena_log(adapter->pdev, DBG,
3371 				    "Retrieving ENI metrics is not supported.\n");
3372 			}
3373 		} else {
3374 			ena_log(adapter->pdev, ERR,
3375 			    "Failed to get ENI metrics: %d\n", rc);
3376 		}
3377 	}
3378 
3379 	return (rc);
3380 }
3381 
3382 static int
3383 ena_copy_srd_metrics(struct ena_adapter *adapter)
3384 {
3385 	return ena_com_get_ena_srd_info(adapter->ena_dev, &adapter->ena_srd_info);
3386 }
3387 
3388 static int
3389 ena_copy_customer_metrics(struct ena_adapter *adapter)
3390 {
3391 	struct ena_com_dev *dev;
3392 	u32 supported_metrics_count;
3393 	int rc, len;
3394 
3395 	dev = adapter->ena_dev;
3396 
3397 	supported_metrics_count = ena_com_get_customer_metric_count(dev);
3398 	len = supported_metrics_count * sizeof(u64);
3399 
3400 	/* Fill the data buffer */
3401 	rc = ena_com_get_customer_metrics(adapter->ena_dev,
3402 	    (char *)(adapter->customer_metrics_array), len);
3403 
3404 	return (rc);
3405 }
3406 
/*
 * Periodic (one second) callout: run the watchdog checks, update host
 * info, kick off metrics collection and, when a reset was requested,
 * hand off to the reset taskqueue instead of rescheduling.
 */
static void
ena_timer_service(void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	struct ena_admin_host_info *host_info =
	    adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_completions(adapter);

	check_for_empty_rx_ring(adapter);

	/*
	 * User controller update of the ENA metrics.
	 * If the delay was set to 0, then the stats shouldn't be updated at
	 * all.
	 * Otherwise, wait 'metrics_sample_interval' seconds, before
	 * updating stats.
	 * As timer service is executed every second, it's enough to increment
	 * appropriate counter each time the timer service is executed.
	 */
	if ((adapter->metrics_sample_interval != 0) &&
	    (++adapter->metrics_sample_interval_cnt >=
	    adapter->metrics_sample_interval)) {
		taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task);
		adapter->metrics_sample_interval_cnt = 0;
	}


	if (host_info != NULL)
		ena_update_host_info(host_info, adapter->ifp);

	if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
		/*
		 * Timeout when validating version indicates that the device
		 * became unresponsive. If that happens skip the reset and
		 * reschedule timer service, so the reset can be retried later.
		 */
		if (ena_com_validate_version(adapter->ena_dev) ==
		    ENA_COM_TIMER_EXPIRED) {
			ena_log(adapter->pdev, WARN,
			    "FW unresponsive, skipping reset\n");
			ENA_TIMER_RESET(adapter);
			return;
		}
		ena_log(adapter->pdev, WARN, "Trigger reset is on\n");
		taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
		/* The reset task reschedules the timer when it is done. */
		return;
	}

	/*
	 * Schedule another timeout one second from now.
	 */
	ENA_TIMER_RESET(adapter);
}
3465 
/*
 * Tear the device down prior to a reset or detach.
 *
 * @graceful: true when the device is known to be responsive (e.g. detach);
 *            false during an error-triggered reset, in which case the link
 *            is forced down and the admin queue is marked not-running first.
 *
 * The teardown order below is deliberate; do not reorder the calls.
 */
void
ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
	if_t ifp = adapter->ifp;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool dev_up;

	/* Nothing to do if the device was never brought to a running state. */
	if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
		return;

	if (!graceful)
		if_link_state_change(ifp, LINK_STATE_DOWN);

	/* Stop the periodic timer so it won't race with the teardown. */
	ENA_TIMER_DRAIN(adapter);

	/* Remember whether to bring the interface back up after restore. */
	dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	if (dev_up)
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);

	if (!graceful)
		ena_com_set_admin_running_state(ena_dev, false);

	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		ena_down(adapter);

	/*
	 * Stop the device from sending AENQ events (if the device was up, and
	 * the trigger reset was on, ena_down already performs device reset)
	 */
	if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	/*
	 * IO rings resources should be freed because `ena_restore_device()`
	 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
	 * vectors. The amount of MSIX vectors after destroy-restore may be
	 * different than before. Therefore, IO rings resources should be
	 * established from scratch each time.
	 */
	ena_free_all_io_rings_resources(adapter);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
}
3524 
3525 static int
3526 ena_device_validate_params(struct ena_adapter *adapter,
3527     struct ena_com_dev_get_features_ctx *get_feat_ctx)
3528 {
3529 	if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
3530 	    ETHER_ADDR_LEN) != 0) {
3531 		ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n");
3532 		return (EINVAL);
3533 	}
3534 
3535 	if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
3536 		ena_log(adapter->pdev, ERR,
3537 		    "Error, device max mtu is smaller than ifp MTU\n");
3538 		return (EINVAL);
3539 	}
3540 
3541 	return 0;
3542 }
3543 
/*
 * Bring the device back up after ena_destroy_device().
 *
 * Re-runs device initialization, validates that the device still matches
 * the pre-reset configuration, re-enables MSI-X and, when the interface
 * was up before the reset, re-creates the IO path.
 *
 * Returns 0 on success; on failure the device is left torn down and the
 * error code from the failing step is returned.
 */
int
ena_restore_device(struct ena_adapter *adapter)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	if_t ifp = adapter->ifp;
	device_t dev = adapter->pdev;
	int wd_active;
	int rc;

	ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);

	rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
	if (rc != 0) {
		ena_log(dev, ERR, "Cannot initialize device\n");
		goto err;
	}
	/*
	 * Only enable WD if it was enabled before reset, so it won't override
	 * value set by the user by the sysctl.
	 */
	if (adapter->wd_active != 0)
		adapter->wd_active = wd_active;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc != 0) {
		ena_log(dev, ERR, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	/* Make sure we don't have a race with AENQ Links state handler */
	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
		if_link_state_change(ifp, LINK_STATE_UP);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc != 0) {
		ena_log(dev, ERR, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}

	/*
	 * Effective value of used MSIX vectors should be the same as before
	 * `ena_destroy_device()`, if possible, or closest to it if less vectors
	 * are available.
	 */
	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
		adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;

	/* Re-initialize rings basic information */
	ena_init_io_rings(adapter);

	/* If the interface was up before the reset bring it up */
	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
		rc = ena_up(adapter);
		if (rc != 0) {
			ena_log(dev, ERR, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	/* Indicate that device is running again and ready to work */
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);

	/*
	 * As the AENQ handlers weren't executed during reset because
	 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
	 * timestamp must be updated again. That will prevent the next
	 * reset caused by missing keep alive.
	 */
	adapter->keep_alive_timestamp = getsbinuptime();
	ENA_TIMER_RESET(adapter);

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);

	return (rc);

err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	ena_log(dev, ERR, "Reset attempt failed. Can not reset the device\n");

	return (rc);
}
3637 
3638 static void
3639 ena_metrics_task(void *arg, int pending)
3640 {
3641 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
3642 
3643 	ENA_LOCK_LOCK();
3644 
3645 	if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_CUSTOMER_METRICS))
3646 		(void)ena_copy_customer_metrics(adapter);
3647 	else if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS))
3648 		(void)ena_copy_eni_metrics(adapter);
3649 
3650 	if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO))
3651 		(void)ena_copy_srd_metrics(adapter);
3652 
3653 	ENA_LOCK_UNLOCK();
3654 }
3655 
3656 static void
3657 ena_reset_task(void *arg, int pending)
3658 {
3659 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
3660 
3661 	ENA_LOCK_LOCK();
3662 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3663 		ena_increment_reset_counter(adapter);
3664 		ena_destroy_device(adapter, false);
3665 		ena_restore_device(adapter);
3666 
3667 		ena_log(adapter->pdev, INFO,
3668 		    "Device reset completed successfully, Driver info: %s\n",
3669 		    ena_version);
3670 	}
3671 	ENA_LOCK_UNLOCK();
3672 }
3673 
3674 static void
3675 ena_free_stats(struct ena_adapter *adapter)
3676 {
3677 	ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3678 	    sizeof(struct ena_hw_stats));
3679 	ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3680 	    sizeof(struct ena_stats_dev));
3681 
3682 }
3683 /**
3684  * ena_attach - Device Initialization Routine
3685  * @pdev: device information struct
3686  *
3687  * Returns 0 on success, otherwise on failure.
3688  *
3689  * ena_attach initializes an adapter identified by a device structure.
3690  * The OS initialization, configuring of the adapter private structure,
3691  * and a hardware reset occur.
3692  **/
static int
ena_attach(device_t pdev)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	static int version_printed;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev = NULL;
	uint32_t max_num_io_queues;
	int msix_rid;
	int rid, rc;

	adapter = device_get_softc(pdev);
	adapter->pdev = pdev;
	adapter->first_bind = -1;

	/*
	 * Set up the timer service - driver is responsible for avoiding
	 * concurrency, as the callout won't be using any locking inside.
	 */
	ENA_TIMER_INIT(adapter);
	adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO;
	adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO;
	adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES;
	adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD;

	adapter->irq_cpu_base = ENA_BASE_CPU_UNSPECIFIED;
	adapter->irq_cpu_stride = 0;

#ifdef RSS
	adapter->rss_enabled = 1;
#endif

	/* Print the driver version only for the first attached instance. */
	if (version_printed++ == 0)
		ena_log(pdev, INFO, "%s\n", ena_version);

	/* Allocate memory for ena_dev structure */
	ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	adapter->ena_dev = ena_dev;
	ena_dev->dmadev = pdev;

	/* Map the register BAR. */
	rid = PCIR_BAR(ENA_REG_BAR);
	adapter->memory = NULL;
	adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (unlikely(adapter->registers == NULL)) {
		ena_log(pdev, ERR,
		    "unable to allocate bus resource: registers!\n");
		rc = ENOMEM;
		goto err_dev_free;
	}

	/* MSIx vector table may reside on BAR0 with registers or on BAR1. */
	msix_rid = pci_msix_table_bar(pdev);
	if (msix_rid != rid) {
		adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
		    &msix_rid, RF_ACTIVE);
		if (unlikely(adapter->msix == NULL)) {
			ena_log(pdev, ERR,
			    "unable to allocate bus resource: msix!\n");
			rc = ENOMEM;
			goto err_pci_free;
		}
		adapter->msix_rid = msix_rid;
	}

	ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/* Store register resources */
	((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag(
	    adapter->registers);
	((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle(
	    adapter->registers);

	if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) {
		ena_log(pdev, ERR, "failed to pmap registers bar\n");
		rc = ENXIO;
		goto err_bus_free;
	}

	rc = ena_map_llq_mem_bar(pdev, ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to map ENA mem bar");
		goto err_bus_free;
	}

	ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;

	/* Initially clear all the flags */
	ENA_FLAG_ZERO(adapter);

	/* Device initialization */
	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "ENA device init failed! (err: %d)\n", rc);
		rc = ENXIO;
		goto err_bus_free;
	}

	/* With LLQ the device may allow skipping the Tx meta descriptors. */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		adapter->disable_meta_caching = !!(
		    get_feat_ctx.llq.accel_mode.u.get.supported_flags &
		    BIT(ENA_ADMIN_DISABLE_META_CACHING));

	/* Prime the watchdog so it won't fire before the first AENQ event. */
	adapter->keep_alive_timestamp = getsbinuptime();

	adapter->tx_offload_cap = get_feat_ctx.offload.tx;

	/* Cache the MAC address reported by the device. */
	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
	    ETHER_ADDR_LEN);

	calc_queue_ctx.pdev = pdev;
	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;

	/* Calculate initial and maximum IO queue number and size */
	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
	    &get_feat_ctx);
	rc = ena_calc_io_queue_size(&calc_queue_ctx);
	if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
		rc = EFAULT;
		goto err_com_free;
	}

	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;

	adapter->max_num_io_queues = max_num_io_queues;

	adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;

	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	/* set up dma tags for rx and tx buffers */
	rc = ena_setup_tx_dma_tag(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to create TX DMA tag\n");
		goto err_com_free;
	}

	rc = ena_setup_rx_dma_tag(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to create RX DMA tag\n");
		goto err_tx_tag_free;
	}

	/*
	 * The amount of requested MSIX vectors is equal to
	 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
	 * number of admin queue interrupts. The former is initially determined
	 * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be
	 * achieved if there are not enough system resources. By default, the
	 * number of effectively used IO queues is the same but later on it can
	 * be limited by the user using sysctl interface.
	 */
	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Failed to enable and set the admin interrupts\n");
		goto err_io_free;
	}
	/* By default all of allocated MSIX vectors are actively used */
	adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;

	/* initialize rings basic information */
	ena_init_io_rings(adapter);

	rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
	if (rc) {
		ena_log(pdev, ERR, "Failed to allocate customer metrics buffer.\n");
		goto err_msix_free;
	}

	rc = ena_sysctl_allocate_customer_metrics_buffer(adapter);
	if (unlikely(rc)){
		ena_log(pdev, ERR, "Failed to allocate sysctl customer metrics buffer.\n");
		goto err_metrics_buffer_destroy;
	}

	/* Initialize statistics */
	ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
	    sizeof(struct ena_stats_dev));
	ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(struct ena_hw_stats));
	ena_sysctl_add_nodes(adapter);

	/* setup network interface */
	ena_setup_ifnet(pdev, adapter, &get_feat_ctx);

	/* Initialize reset task queue */
	TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
	adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
	taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq",
	    device_get_nameunit(adapter->pdev));

	/* Initialize metrics task queue */
	TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter);
	adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue",
	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq);
	taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq",
	    device_get_nameunit(adapter->pdev));

#ifdef DEV_NETMAP
	rc = ena_netmap_attach(adapter);
	if (rc != 0) {
		ena_log(pdev, ERR, "netmap attach failed: %d\n", rc);
		goto err_detach;
	}
#endif /* DEV_NETMAP */

	/* Tell the stack that the interface is not active */
	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);

	/* Run the timer service */
	ENA_TIMER_RESET(adapter);

	return (0);

	/* Error unwinding: each label releases what was acquired above it. */
#ifdef DEV_NETMAP
err_detach:
	ether_ifdetach(adapter->ifp);
	free(adapter->customer_metrics_array, M_DEVBUF);
#endif /* DEV_NETMAP */
err_metrics_buffer_destroy:
	ena_com_delete_customer_metrics_buffer(ena_dev);
err_msix_free:
	ena_free_stats(adapter);
	ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_io_free:
	ena_free_all_io_rings_resources(adapter);
	ena_free_rx_dma_tag(adapter);
err_tx_tag_free:
	ena_free_tx_dma_tag(adapter);
err_com_free:
	ena_com_admin_destroy(ena_dev);
	ena_com_delete_host_info(ena_dev);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err_bus_free:
	free(ena_dev->bus, M_DEVBUF);
err_pci_free:
	ena_free_pci_resources(adapter);
err_dev_free:
	free(ena_dev, M_DEVBUF);

	return (rc);
}
3952 
3953 /**
3954  * ena_detach - Device Removal Routine
3955  * @pdev: device information struct
3956  *
3957  * ena_detach is called by the device subsystem to alert the driver
3958  * that it should release a PCI device.
3959  **/
3960 static int
3961 ena_detach(device_t pdev)
3962 {
3963 	struct ena_adapter *adapter = device_get_softc(pdev);
3964 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3965 	int rc;
3966 
3967 	/* Make sure VLANS are not using driver */
3968 	if (if_vlantrunkinuse(adapter->ifp)) {
3969 		ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n");
3970 		return (EBUSY);
3971 	}
3972 
3973 	ether_ifdetach(adapter->ifp);
3974 
3975 	/* Stop timer service */
3976 	ENA_LOCK_LOCK();
3977 	ENA_TIMER_DRAIN(adapter);
3978 	ENA_LOCK_UNLOCK();
3979 
3980 	/* Release metrics task */
3981 	while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL))
3982 		taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task);
3983 	taskqueue_free(adapter->metrics_tq);
3984 
3985 	/* Release reset task */
3986 	while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
3987 		taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
3988 	taskqueue_free(adapter->reset_tq);
3989 
3990 	ENA_LOCK_LOCK();
3991 	ena_down(adapter);
3992 	ena_destroy_device(adapter, true);
3993 	ENA_LOCK_UNLOCK();
3994 
3995 	/* Restore unregistered sysctl queue nodes. */
3996 	ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues,
3997 	    adapter->max_num_io_queues);
3998 
3999 #ifdef DEV_NETMAP
4000 	netmap_detach(adapter->ifp);
4001 #endif /* DEV_NETMAP */
4002 
4003 	ena_free_stats(adapter);
4004 
4005 	rc = ena_free_rx_dma_tag(adapter);
4006 	if (unlikely(rc != 0))
4007 		ena_log(adapter->pdev, WARN,
4008 		    "Unmapped RX DMA tag associations\n");
4009 
4010 	rc = ena_free_tx_dma_tag(adapter);
4011 	if (unlikely(rc != 0))
4012 		ena_log(adapter->pdev, WARN,
4013 		    "Unmapped TX DMA tag associations\n");
4014 
4015 	ena_free_irqs(adapter);
4016 
4017 	ena_free_pci_resources(adapter);
4018 
4019 	if (adapter->rss_indir != NULL)
4020 		free(adapter->rss_indir, M_DEVBUF);
4021 
4022 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
4023 		ena_com_rss_destroy(ena_dev);
4024 
4025 	ena_com_delete_host_info(ena_dev);
4026 
4027 	free(adapter->customer_metrics_array, M_DEVBUF);
4028 
4029 	ena_com_delete_customer_metrics_buffer(ena_dev);
4030 
4031 	if_free(adapter->ifp);
4032 
4033 	free(ena_dev->bus, M_DEVBUF);
4034 
4035 	free(ena_dev, M_DEVBUF);
4036 
4037 	return (bus_generic_detach(pdev));
4038 }
4039 
4040 /******************************************************************************
4041  ******************************** AENQ Handlers *******************************
4042  *****************************************************************************/
4043 /**
4044  * ena_update_on_link_change:
4045  * Notify the network interface about the change in link status
4046  **/
4047 static void
4048 ena_update_on_link_change(void *adapter_data,
4049     struct ena_admin_aenq_entry *aenq_e)
4050 {
4051 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4052 	struct ena_admin_aenq_link_change_desc *aenq_desc;
4053 	int status;
4054 	if_t ifp;
4055 
4056 	aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
4057 	ifp = adapter->ifp;
4058 	status = aenq_desc->flags &
4059 	    ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
4060 
4061 	if (status != 0) {
4062 		ena_log(adapter->pdev, INFO, "link is UP\n");
4063 		ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
4064 		if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
4065 			if_link_state_change(ifp, LINK_STATE_UP);
4066 	} else {
4067 		ena_log(adapter->pdev, INFO, "link is DOWN\n");
4068 		if_link_state_change(ifp, LINK_STATE_DOWN);
4069 		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
4070 	}
4071 }
4072 
4073 static void
4074 ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
4075 {
4076 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4077 	struct ena_admin_ena_hw_hints *hints;
4078 
4079 	ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4080 	    adapter->ena_dev, "Invalid group(%x) expected %x\n",
4081 	    aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);
4082 
4083 	switch (aenq_e->aenq_common_desc.syndrome) {
4084 	case ENA_ADMIN_UPDATE_HINTS:
4085 		hints =
4086 		    (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
4087 		ena_update_hints(adapter, hints);
4088 		break;
4089 	default:
4090 		ena_log(adapter->pdev, ERR,
4091 		    "Invalid aenq notification link state %d\n",
4092 		    aenq_e->aenq_common_desc.syndrome);
4093 	}
4094 }
4095 
/*
 * Initialize the global driver lock; registered via SYSINIT to run at
 * SI_SUB_LOCK, i.e. before any device attach can take the lock.
 */
static void
ena_lock_init(void *arg)
{
	ENA_LOCK_INIT();
}
SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);
4102 
/* Destroy the global driver lock on module/system teardown (SYSUNINIT). */
static void
ena_lock_uninit(void *arg)
{
	ENA_LOCK_DESTROY();
}
SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);
4109 
4110 /**
4111  * This handler will called for unknown event group or unimplemented handlers
4112  **/
static void
unimplemented_aenq_handler(void *adapter_data,
    struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	/* Catch-all installed as aenq_handlers.unimplemented_handler below. */
	ena_log(adapter->pdev, ERR,
	    "Unknown event was received or event with unimplemented handler\n");
}
4122 
4123 static void ena_conf_notification(void *adapter_data,
4124     struct ena_admin_aenq_entry *aenq_e)
4125 {
4126 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4127 	struct ena_admin_aenq_conf_notifications_desc *desc;
4128 	u64 bitmap, bit;
4129 
4130 	desc = (struct ena_admin_aenq_conf_notifications_desc *)aenq_e;
4131 	bitmap = desc->notifications_bitmap;
4132 
4133 	if (bitmap == 0) {
4134 		ena_log(adapter->pdev, INFO,
4135 		    "Empty configuration notification bitmap\n");
4136 		return;
4137 	}
4138 
4139 	for (bit = ffsll(bitmap); bit != 0; bit = ffsll(bitmap)) {
4140 		bit--;
4141 		ena_log(adapter->pdev, INFO,
4142 		    "Sub-optimal configuration notification code: %" PRIu64 " Refer to AWS ENA documentation for additional details and mitigation options.\n",
4143 		    bit + 1);
4144 		// Clear the processed bit
4145 		bitmap &= ~(1UL << bit);
4146 	}
4147 }
4148 
/*
 * AENQ dispatch table: maps each asynchronous event group raised by the
 * device to its handler; unimplemented_aenq_handler catches the rest.
 */
static struct ena_aenq_handlers aenq_handlers = {
    .handlers = {
	    [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
	    [ENA_ADMIN_NOTIFICATION] = ena_notification,
	    [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	    [ENA_ADMIN_CONF_NOTIFICATIONS] = ena_conf_notification,
    },
    .unimplemented_handler = unimplemented_aenq_handler
};
4158 
4159 /*********************************************************************
4160  *  FreeBSD Device Interface Entry Points
4161  *********************************************************************/
4162 
4163 static device_method_t ena_methods[] = { /* Device interface */
4164 	DEVMETHOD(device_probe, ena_probe),
4165 	DEVMETHOD(device_attach, ena_attach),
4166 	DEVMETHOD(device_detach, ena_detach), DEVMETHOD_END
4167 };
4168 
/* newbus driver declaration: name, method table, and softc size. */
static driver_t ena_driver = {
	"ena",
	ena_methods,
	sizeof(struct ena_adapter),
};
4174 
DRIVER_MODULE(ena, pci, ena_driver, 0, 0);
/*
 * Export the PCI ID table so devmatch(8) can autoload the module; the
 * last entry of ena_vendor_info_array is excluded (nitems() - 1),
 * presumably a terminating sentinel.
 */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
    nitems(ena_vendor_info_array) - 1);
MODULE_DEPEND(ena, pci, 1, 1, 1);
MODULE_DEPEND(ena, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ena, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
4183 
4184 /*********************************************************************/
4185