xref: /freebsd/sys/dev/ena/ena.c (revision 99282790b7d01ec3c4072621d46a0d7302517ad4)
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/kthread.h>
39 #include <sys/malloc.h>
40 #include <sys/mbuf.h>
41 #include <sys/module.h>
42 #include <sys/rman.h>
43 #include <sys/smp.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 #include <sys/taskqueue.h>
48 #include <sys/time.h>
49 #include <sys/eventhandler.h>
50 
51 #include <machine/bus.h>
52 #include <machine/resource.h>
53 #include <machine/in_cksum.h>
54 
55 #include <net/bpf.h>
56 #include <net/ethernet.h>
57 #include <net/if.h>
58 #include <net/if_var.h>
59 #include <net/if_arp.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_types.h>
63 #include <net/if_vlan_var.h>
64 
65 #include <netinet/in_systm.h>
66 #include <netinet/in.h>
67 #include <netinet/if_ether.h>
68 #include <netinet/ip.h>
69 #include <netinet/ip6.h>
70 #include <netinet/tcp.h>
71 #include <netinet/udp.h>
72 
73 #include <dev/pci/pcivar.h>
74 #include <dev/pci/pcireg.h>
75 
76 #include <vm/vm.h>
77 #include <vm/pmap.h>
78 
79 #include "ena_datapath.h"
80 #include "ena.h"
81 #include "ena_sysctl.h"
82 
83 #ifdef DEV_NETMAP
84 #include "ena_netmap.h"
85 #endif /* DEV_NETMAP */
86 
87 /*********************************************************
88  *  Function prototypes
89  *********************************************************/
90 static int	ena_probe(device_t);
91 static void	ena_intr_msix_mgmnt(void *);
92 static void	ena_free_pci_resources(struct ena_adapter *);
93 static int	ena_change_mtu(if_t, int);
94 static inline void ena_alloc_counters(counter_u64_t *, int);
95 static inline void ena_free_counters(counter_u64_t *, int);
96 static inline void ena_reset_counters(counter_u64_t *, int);
97 static void	ena_init_io_rings_common(struct ena_adapter *,
98     struct ena_ring *, uint16_t);
99 static void	ena_init_io_rings_basic(struct ena_adapter *);
100 static void	ena_init_io_rings_advanced(struct ena_adapter *);
101 static void	ena_init_io_rings(struct ena_adapter *);
102 static void	ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
103 static void	ena_free_all_io_rings_resources(struct ena_adapter *);
104 static int	ena_setup_tx_dma_tag(struct ena_adapter *);
105 static int	ena_free_tx_dma_tag(struct ena_adapter *);
106 static int	ena_setup_rx_dma_tag(struct ena_adapter *);
107 static int	ena_free_rx_dma_tag(struct ena_adapter *);
108 static void	ena_release_all_tx_dmamap(struct ena_ring *);
109 static int	ena_setup_tx_resources(struct ena_adapter *, int);
110 static void	ena_free_tx_resources(struct ena_adapter *, int);
111 static int	ena_setup_all_tx_resources(struct ena_adapter *);
112 static void	ena_free_all_tx_resources(struct ena_adapter *);
113 static int	ena_setup_rx_resources(struct ena_adapter *, unsigned int);
114 static void	ena_free_rx_resources(struct ena_adapter *, unsigned int);
115 static int	ena_setup_all_rx_resources(struct ena_adapter *);
116 static void	ena_free_all_rx_resources(struct ena_adapter *);
117 static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
118     struct ena_rx_buffer *);
119 static void	ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
120     struct ena_rx_buffer *);
121 static void	ena_free_rx_bufs(struct ena_adapter *, unsigned int);
122 static void	ena_refill_all_rx_bufs(struct ena_adapter *);
123 static void	ena_free_all_rx_bufs(struct ena_adapter *);
124 static void	ena_free_tx_bufs(struct ena_adapter *, unsigned int);
125 static void	ena_free_all_tx_bufs(struct ena_adapter *);
126 static void	ena_destroy_all_tx_queues(struct ena_adapter *);
127 static void	ena_destroy_all_rx_queues(struct ena_adapter *);
128 static void	ena_destroy_all_io_queues(struct ena_adapter *);
129 static int	ena_create_io_queues(struct ena_adapter *);
130 static int	ena_handle_msix(void *);
131 static int	ena_enable_msix(struct ena_adapter *);
132 static void	ena_setup_mgmnt_intr(struct ena_adapter *);
133 static int	ena_setup_io_intr(struct ena_adapter *);
134 static int	ena_request_mgmnt_irq(struct ena_adapter *);
135 static int	ena_request_io_irq(struct ena_adapter *);
136 static void	ena_free_mgmnt_irq(struct ena_adapter *);
137 static void	ena_free_io_irq(struct ena_adapter *);
138 static void	ena_free_irqs(struct ena_adapter*);
139 static void	ena_disable_msix(struct ena_adapter *);
140 static void	ena_unmask_all_io_irqs(struct ena_adapter *);
141 static int	ena_rss_configure(struct ena_adapter *);
142 static int	ena_up_complete(struct ena_adapter *);
143 static uint64_t	ena_get_counter(if_t, ift_counter);
144 static int	ena_media_change(if_t);
145 static void	ena_media_status(if_t, struct ifmediareq *);
146 static void	ena_init(void *);
147 static int	ena_ioctl(if_t, u_long, caddr_t);
148 static int	ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
149 static void	ena_update_host_info(struct ena_admin_host_info *, if_t);
150 static void	ena_update_hwassist(struct ena_adapter *);
151 static int	ena_setup_ifnet(device_t, struct ena_adapter *,
152     struct ena_com_dev_get_features_ctx *);
153 static int	ena_enable_wc(struct resource *);
154 static int	ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
155     struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
156 static uint32_t	ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
157     struct ena_com_dev_get_features_ctx *);
158 static int	ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
159 static int	ena_rss_init_default(struct ena_adapter *);
160 static void	ena_rss_init_default_deferred(void *);
161 static void	ena_config_host_info(struct ena_com_dev *, device_t);
162 static int	ena_attach(device_t);
163 static int	ena_detach(device_t);
164 static int	ena_device_init(struct ena_adapter *, device_t,
165     struct ena_com_dev_get_features_ctx *, int *);
166 static int	ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
167 static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
168 static void	unimplemented_aenq_handler(void *,
169     struct ena_admin_aenq_entry *);
170 static void	ena_timer_service(void *);
171 
172 static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;
173 
174 static ena_vendor_info_t ena_vendor_info_array[] = {
175     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
176     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0},
177     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
178     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0},
179     /* Last entry */
180     { 0, 0, 0 }
181 };
182 
183 /*
184  * Contains pointers to event handlers, e.g. link state change.
185  */
186 static struct ena_aenq_handlers aenq_handlers;
187 
188 void
189 ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
190 {
191 	if (error != 0)
192 		return;
193 	*(bus_addr_t *) arg = segs[0].ds_addr;
194 }
195 
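/*
 * Allocate a DMA-coherent buffer of at least 'size' bytes, zero it and load
 * its bus address into dma->paddr.  On failure the handle is cleared and an
 * errno value is returned.
 */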
196 int
197 ena_dma_alloc(device_t dmadev, bus_size_t size,
198     ena_mem_handle_t *dma, int mapflags)
199 {
200 	struct ena_adapter* adapter = device_get_softc(dmadev);
201 	uint32_t maxsize;
202 	uint64_t dma_space_addr;
203 	int error;
204 
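	/* Round the allocation size up to a whole number of pages. */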
205 	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
206 
207 	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
208 	if (unlikely(dma_space_addr == 0))
209 		dma_space_addr = BUS_SPACE_MAXADDR;
210 
211 	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
212 	    8, 0,	      /* alignment, bounds 		*/
213 	    dma_space_addr,   /* lowaddr of exclusion window	*/
214 	    BUS_SPACE_MAXADDR,/* highaddr of exclusion window	*/
215 	    NULL, NULL,	      /* filter, filterarg 		*/
216 	    maxsize,	      /* maxsize 			*/
217 	    1,		      /* nsegments 			*/
218 	    maxsize,	      /* maxsegsize 			*/
219 	    BUS_DMA_ALLOCNOW, /* flags 				*/
220 	    NULL,	      /* lockfunc 			*/
221 	    NULL,	      /* lockarg 			*/
222 	    &dma->tag);
223 	if (unlikely(error != 0)) {
224 		ena_trace(ENA_ALERT, "bus_dma_tag_create failed: %d\n", error);
225 		goto fail_tag;
226 	}
227 
228 	error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
229 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
230 	if (unlikely(error != 0)) {
231 		ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n",
232 		    (uintmax_t)size, error);
233 		goto fail_map_create;
234 	}
235 
236 	dma->paddr = 0;
237 	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
238 	    size, ena_dmamap_callback, &dma->paddr, mapflags);
239 	if (unlikely((error != 0) || (dma->paddr == 0))) {
240 		ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error);
241 		goto fail_map_load;
242 	}
243 
244 	bus_dmamap_sync(dma->tag, dma->map,
245 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
246 
247 	return (0);
248 
249 fail_map_load:
250 	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
251 fail_map_create:
252 	bus_dma_tag_destroy(dma->tag);
253 fail_tag:
254 	dma->tag = NULL;
255 	dma->vaddr = NULL;
256 	dma->paddr = 0;
257 
258 	return (error);
259 }
260 
261 /*
262  * This function should generate a unique key for the whole driver.
263  * If the key was already generated in a previous call (for example,
264  * for another adapter), then that key should be returned instead.
265  */
266 void
267 ena_rss_key_fill(void *key, size_t size)
268 {
269 	static bool key_generated;
270 	static uint8_t default_key[ENA_HASH_KEY_SIZE];
271 
272 	KASSERT(size <= ENA_HASH_KEY_SIZE, ("Requested more bytes than ENA RSS key can hold"));
273 
274 	if (!key_generated) {
275 		arc4random_buf(default_key, ENA_HASH_KEY_SIZE);
276 		key_generated = true;
277 	}
278 
279 	memcpy(key, default_key, size);
280 }
281 
282 static void
283 ena_free_pci_resources(struct ena_adapter *adapter)
284 {
285 	device_t pdev = adapter->pdev;
286 
287 	if (adapter->memory != NULL) {
288 		bus_release_resource(pdev, SYS_RES_MEMORY,
289 		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
290 	}
291 
292 	if (adapter->registers != NULL) {
293 		bus_release_resource(pdev, SYS_RES_MEMORY,
294 		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
295 	}
296 }
297 
298 static int
299 ena_probe(device_t dev)
300 {
301 	ena_vendor_info_t *ent;
302 	char		adapter_name[60];
303 	uint16_t	pci_vendor_id = 0;
304 	uint16_t	pci_device_id = 0;
305 
306 	pci_vendor_id = pci_get_vendor(dev);
307 	pci_device_id = pci_get_device(dev);
308 
309 	ent = ena_vendor_info_array;
310 	while (ent->vendor_id != 0) {
311 		if ((pci_vendor_id == ent->vendor_id) &&
312 		    (pci_device_id == ent->device_id)) {
313 			ena_trace(ENA_DBG, "vendor=%x device=%x\n",
314 			    pci_vendor_id, pci_device_id);
315 
316 			sprintf(adapter_name, DEVICE_DESC);
317 			device_set_desc_copy(dev, adapter_name);
318 			return (BUS_PROBE_DEFAULT);
319 		}
320 
321 		ent++;
322 
323 	}
324 
325 	return (ENXIO);
326 }
327 
328 static int
329 ena_change_mtu(if_t ifp, int new_mtu)
330 {
331 	struct ena_adapter *adapter = if_getsoftc(ifp);
332 	int rc;
333 
334 	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
335 		device_printf(adapter->pdev, "Invalid MTU setting. "
336 		    "new_mtu: %d max mtu: %d min mtu: %d\n",
337 		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
338 		return (EINVAL);
339 	}
340 
341 	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
342 	if (likely(rc == 0)) {
343 		ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu);
344 		if_setmtu(ifp, new_mtu);
345 	} else {
346 		device_printf(adapter->pdev, "Failed to set MTU to %d\n",
347 		    new_mtu);
348 	}
349 
350 	return (rc);
351 }
352 
353 static inline void
354 ena_alloc_counters(counter_u64_t *begin, int size)
355 {
356 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
357 
358 	for (; begin < end; ++begin)
359 		*begin = counter_u64_alloc(M_WAITOK);
360 }
361 
362 static inline void
363 ena_free_counters(counter_u64_t *begin, int size)
364 {
365 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
366 
367 	for (; begin < end; ++begin)
368 		counter_u64_free(*begin);
369 }
370 
371 static inline void
372 ena_reset_counters(counter_u64_t *begin, int size)
373 {
374 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
375 
376 	for (; begin < end; ++begin)
377 		counter_u64_zero(*begin);
378 }
379 
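/* Initialize the state shared by the Tx and Rx ring of IO queue 'qid'. */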
380 static void
381 ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
382     uint16_t qid)
383 {
384 
385 	ring->qid = qid;
386 	ring->adapter = adapter;
387 	ring->ena_dev = adapter->ena_dev;
388 	ring->first_interrupt = false;
389 	ring->no_interrupt_event_cnt = 0;
390 }
391 
392 static void
393 ena_init_io_rings_basic(struct ena_adapter *adapter)
394 {
395 	struct ena_com_dev *ena_dev;
396 	struct ena_ring *txr, *rxr;
397 	struct ena_que *que;
398 	int i;
399 
400 	ena_dev = adapter->ena_dev;
401 
402 	for (i = 0; i < adapter->num_io_queues; i++) {
403 		txr = &adapter->tx_ring[i];
404 		rxr = &adapter->rx_ring[i];
405 
406 		/* TX/RX common ring state */
407 		ena_init_io_rings_common(adapter, txr, i);
408 		ena_init_io_rings_common(adapter, rxr, i);
409 
410 		/* TX specific ring state */
411 		txr->tx_max_header_size = ena_dev->tx_max_header_size;
412 		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
413 
414 		que = &adapter->que[i];
415 		que->adapter = adapter;
416 		que->id = i;
417 		que->tx_ring = txr;
418 		que->rx_ring = rxr;
419 
420 		txr->que = que;
421 		rxr->que = que;
422 
423 		rxr->empty_rx_queue = 0;
424 		rxr->rx_mbuf_sz = ena_mbuf_sz;
425 	}
426 }
427 
428 static void
429 ena_init_io_rings_advanced(struct ena_adapter *adapter)
430 {
431 	struct ena_ring *txr, *rxr;
432 	int i;
433 
434 	for (i = 0; i < adapter->num_io_queues; i++) {
435 		txr = &adapter->tx_ring[i];
436 		rxr = &adapter->rx_ring[i];
437 
438 		/* Allocate a buf ring */
439 		txr->buf_ring_size = adapter->buf_ring_size;
440 		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF,
441 		    M_WAITOK, &txr->ring_mtx);
442 
443 		/* Allocate Tx statistics. */
444 		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
445 		    sizeof(txr->tx_stats));
446 
447 		/* Allocate Rx statistics. */
448 		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
449 		    sizeof(rxr->rx_stats));
450 
451 		/* Initialize locks */
452 		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
453 		    device_get_nameunit(adapter->pdev), i);
454 		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
455 		    device_get_nameunit(adapter->pdev), i);
456 
457 		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
458 	}
459 }
460 
461 static void
462 ena_init_io_rings(struct ena_adapter *adapter)
463 {
464 	/*
465 	 * IO rings initialization can be divided into the 2 steps:
466 	 *   1. Initialize variables and fields with initial values and copy
467 	 *      them from adapter/ena_dev (basic)
468 	 *   2. Allocate mutex, counters and buf_ring (advanced)
469 	 */
470 	ena_init_io_rings_basic(adapter);
471 	ena_init_io_rings_advanced(adapter);
472 }
473 
474 static void
475 ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
476 {
477 	struct ena_ring *txr = &adapter->tx_ring[qid];
478 	struct ena_ring *rxr = &adapter->rx_ring[qid];
479 
480 	ena_free_counters((counter_u64_t *)&txr->tx_stats,
481 	    sizeof(txr->tx_stats));
482 	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
483 	    sizeof(rxr->rx_stats));
484 
485 	ENA_RING_MTX_LOCK(txr);
486 	drbr_free(txr->br, M_DEVBUF);
487 	ENA_RING_MTX_UNLOCK(txr);
488 
489 	mtx_destroy(&txr->ring_mtx);
490 }
491 
492 static void
493 ena_free_all_io_rings_resources(struct ena_adapter *adapter)
494 {
495 	int i;
496 
497 	for (i = 0; i < adapter->num_io_queues; i++)
498 		ena_free_io_ring_resources(adapter, i);
499 
500 }
501 
502 static int
503 ena_setup_tx_dma_tag(struct ena_adapter *adapter)
504 {
505 	int ret;
506 
507 	/* Create DMA tag for Tx buffers */
508 	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
509 	    1, 0,				  /* alignment, bounds 	     */
510 	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
511 	    BUS_SPACE_MAXADDR, 			  /* highaddr of excl window */
512 	    NULL, NULL,				  /* filter, filterarg 	     */
513 	    ENA_TSO_MAXSIZE,			  /* maxsize 		     */
514 	    adapter->max_tx_sgl_size - 1,	  /* nsegments 		     */
515 	    ENA_TSO_MAXSIZE,			  /* maxsegsize 	     */
516 	    0,					  /* flags 		     */
517 	    NULL,				  /* lockfunc 		     */
518 	    NULL,				  /* lockfuncarg 	     */
519 	    &adapter->tx_buf_tag);
520 
521 	return (ret);
522 }
523 
524 static int
525 ena_free_tx_dma_tag(struct ena_adapter *adapter)
526 {
527 	int ret;
528 
529 	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
530 
531 	if (likely(ret == 0))
532 		adapter->tx_buf_tag = NULL;
533 
534 	return (ret);
535 }
536 
537 static int
538 ena_setup_rx_dma_tag(struct ena_adapter *adapter)
539 {
540 	int ret;
541 
542 	/* Create DMA tag for Rx buffers */
543 	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
544 	    1, 0,				  /* alignment, bounds 	     */
545 	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
546 	    BUS_SPACE_MAXADDR, 			  /* highaddr of excl window */
547 	    NULL, NULL,				  /* filter, filterarg 	     */
548 	    ena_mbuf_sz,			  /* maxsize 		     */
549 	    adapter->max_rx_sgl_size,		  /* nsegments 		     */
550 	    ena_mbuf_sz,			  /* maxsegsize 	     */
551 	    0,					  /* flags 		     */
552 	    NULL,				  /* lockfunc 		     */
553 	    NULL,				  /* lockarg 		     */
554 	    &adapter->rx_buf_tag);
555 
556 	return (ret);
557 }
558 
559 static int
560 ena_free_rx_dma_tag(struct ena_adapter *adapter)
561 {
562 	int ret;
563 
564 	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
565 
566 	if (likely(ret == 0))
567 		adapter->rx_buf_tag = NULL;
568 
569 	return (ret);
570 }
571 
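/* Destroy the Tx buffer DMA maps (including netmap segment maps) of a ring. */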
572 static void
573 ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
574 {
575 	struct ena_adapter *adapter = tx_ring->adapter;
576 	struct ena_tx_buffer *tx_info;
577 	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
578 	int i;
579 #ifdef DEV_NETMAP
580 	struct ena_netmap_tx_info *nm_info;
581 	int j;
582 #endif /* DEV_NETMAP */
583 
584 	for (i = 0; i < tx_ring->ring_size; ++i) {
585 		tx_info = &tx_ring->tx_buffer_info[i];
586 #ifdef DEV_NETMAP
587 		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
588 			nm_info = &tx_info->nm_info;
589 			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
590 				if (nm_info->map_seg[j] != NULL) {
591 					bus_dmamap_destroy(tx_tag,
592 					    nm_info->map_seg[j]);
593 					nm_info->map_seg[j] = NULL;
594 				}
595 			}
596 		}
597 #endif /* DEV_NETMAP */
598 		if (tx_info->dmamap != NULL) {
599 			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
600 			tx_info->dmamap = NULL;
601 		}
602 	}
603 }
604 
605 /**
606  * ena_setup_tx_resources - allocate Tx resources (Descriptors)
607  * @adapter: network interface device structure
608  * @qid: queue index
609  *
610  * Returns 0 on success, or a nonzero error code on failure.
611  **/
612 static int
613 ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
614 {
615 	struct ena_que *que = &adapter->que[qid];
616 	struct ena_ring *tx_ring = que->tx_ring;
617 	int size, i, err;
618 #ifdef DEV_NETMAP
619 	bus_dmamap_t *map;
620 	int j;
621 
622 	ena_netmap_reset_tx_ring(adapter, qid);
623 #endif /* DEV_NETMAP */
624 
625 	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
626 
627 	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
628 	if (unlikely(tx_ring->tx_buffer_info == NULL))
629 		return (ENOMEM);
630 
631 	size = sizeof(uint16_t) * tx_ring->ring_size;
632 	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
633 	if (unlikely(tx_ring->free_tx_ids == NULL))
634 		goto err_buf_info_free;
635 
636 	size = tx_ring->tx_max_header_size;
637 	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
638 	    M_NOWAIT | M_ZERO);
639 	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
640 		goto err_tx_ids_free;
641 
642 	/* Req id stack for TX OOO completions */
643 	for (i = 0; i < tx_ring->ring_size; i++)
644 		tx_ring->free_tx_ids[i] = i;
645 
646 	/* Reset TX statistics. */
647 	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
648 	    sizeof(tx_ring->tx_stats));
649 
650 	tx_ring->next_to_use = 0;
651 	tx_ring->next_to_clean = 0;
652 	tx_ring->acum_pkts = 0;
653 
654 	/* Make sure that drbr is empty */
655 	ENA_RING_MTX_LOCK(tx_ring);
656 	drbr_flush(adapter->ifp, tx_ring->br);
657 	ENA_RING_MTX_UNLOCK(tx_ring);
658 
659 	/* ... and create the buffer DMA maps */
660 	for (i = 0; i < tx_ring->ring_size; i++) {
661 		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
662 		    &tx_ring->tx_buffer_info[i].dmamap);
663 		if (unlikely(err != 0)) {
664 			ena_trace(ENA_ALERT,
665 			    "Unable to create Tx DMA map for buffer %d\n",
666 			    i);
667 			goto err_map_release;
668 		}
669 
670 #ifdef DEV_NETMAP
671 		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
672 			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
673 			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
674 				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
675 				    &map[j]);
676 				if (unlikely(err != 0)) {
677 					ena_trace(ENA_ALERT, "Unable to create "
678 					    "Tx DMA for buffer %d %d\n", i, j);
679 					goto err_map_release;
680 				}
681 			}
682 		}
683 #endif /* DEV_NETMAP */
684 	}
685 
686 	/* Allocate taskqueues */
687 	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
688 	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
689 	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
690 	if (unlikely(tx_ring->enqueue_tq == NULL)) {
691 		ena_trace(ENA_ALERT,
692 		    "Unable to create taskqueue for enqueue task\n");
693 		i = tx_ring->ring_size;
694 		goto err_map_release;
695 	}
696 
697 	tx_ring->running = true;
698 
699 	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
700 	    "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);
701 
702 	return (0);
703 
704 err_map_release:
705 	ena_release_all_tx_dmamap(tx_ring);
706 err_tx_ids_free:
707 	free(tx_ring->free_tx_ids, M_DEVBUF);
708 	tx_ring->free_tx_ids = NULL;
709 err_buf_info_free:
710 	free(tx_ring->tx_buffer_info, M_DEVBUF);
711 	tx_ring->tx_buffer_info = NULL;
712 
713 	return (ENOMEM);
714 }
715 
716 /**
717  * ena_free_tx_resources - Free Tx Resources per Queue
718  * @adapter: network interface device structure
719  * @qid: queue index
720  *
721  * Free all transmit software resources
722  **/
723 static void
724 ena_free_tx_resources(struct ena_adapter *adapter, int qid)
725 {
726 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
727 #ifdef DEV_NETMAP
728 	struct ena_netmap_tx_info *nm_info;
729 	int j;
730 #endif /* DEV_NETMAP */
731 
732 	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
733 	    NULL))
734 		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
735 
736 	taskqueue_free(tx_ring->enqueue_tq);
737 
738 	ENA_RING_MTX_LOCK(tx_ring);
739 	/* Flush buffer ring, */
740 	drbr_flush(adapter->ifp, tx_ring->br);
741 
742 	/* Free buffer DMA maps, */
743 	for (int i = 0; i < tx_ring->ring_size; i++) {
744 		bus_dmamap_sync(adapter->tx_buf_tag,
745 		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
746 		bus_dmamap_unload(adapter->tx_buf_tag,
747 		    tx_ring->tx_buffer_info[i].dmamap);
748 		bus_dmamap_destroy(adapter->tx_buf_tag,
749 		    tx_ring->tx_buffer_info[i].dmamap);
750 
751 #ifdef DEV_NETMAP
752 		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
753 			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
754 			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
755 				if (nm_info->socket_buf_idx[j] != 0) {
756 					bus_dmamap_sync(adapter->tx_buf_tag,
757 					    nm_info->map_seg[j],
758 					    BUS_DMASYNC_POSTWRITE);
759 					ena_netmap_unload(adapter,
760 					    nm_info->map_seg[j]);
761 				}
762 				bus_dmamap_destroy(adapter->tx_buf_tag,
763 				    nm_info->map_seg[j]);
764 				nm_info->socket_buf_idx[j] = 0;
765 			}
766 		}
767 #endif /* DEV_NETMAP */
768 
769 		m_freem(tx_ring->tx_buffer_info[i].mbuf);
770 		tx_ring->tx_buffer_info[i].mbuf = NULL;
771 	}
772 	ENA_RING_MTX_UNLOCK(tx_ring);
773 
774 	/* And free allocated memory. */
775 	free(tx_ring->tx_buffer_info, M_DEVBUF);
776 	tx_ring->tx_buffer_info = NULL;
777 
778 	free(tx_ring->free_tx_ids, M_DEVBUF);
779 	tx_ring->free_tx_ids = NULL;
780 
781 	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
782 	tx_ring->push_buf_intermediate_buf = NULL;
783 }
784 
785 /**
786  * ena_setup_all_tx_resources - allocate Tx resources for all queues
787  * @adapter: network interface device structure
788  *
789  * Returns 0 on success, or a nonzero error code on failure.
790  **/
791 static int
792 ena_setup_all_tx_resources(struct ena_adapter *adapter)
793 {
794 	int i, rc;
795 
796 	for (i = 0; i < adapter->num_io_queues; i++) {
797 		rc = ena_setup_tx_resources(adapter, i);
798 		if (rc != 0) {
799 			device_printf(adapter->pdev,
800 			    "Allocation for Tx Queue %u failed\n", i);
801 			goto err_setup_tx;
802 		}
803 	}
804 
805 	return (0);
806 
807 err_setup_tx:
808 	/* Rewind the index freeing the rings as we go */
809 	while (i--)
810 		ena_free_tx_resources(adapter, i);
811 	return (rc);
812 }
813 
814 /**
815  * ena_free_all_tx_resources - Free Tx Resources for All Queues
816  * @adapter: network interface device structure
817  *
818  * Free all transmit software resources
819  **/
820 static void
821 ena_free_all_tx_resources(struct ena_adapter *adapter)
822 {
823 	int i;
824 
825 	for (i = 0; i < adapter->num_io_queues; i++)
826 		ena_free_tx_resources(adapter, i);
827 }
828 
829 /**
830  * ena_setup_rx_resources - allocate Rx resources (Descriptors)
831  * @adapter: network interface device structure
832  * @qid: queue index
833  *
834  * Returns 0 on success, or a nonzero error code on failure.
835  **/
836 static int
837 ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
838 {
839 	struct ena_que *que = &adapter->que[qid];
840 	struct ena_ring *rx_ring = que->rx_ring;
841 	int size, err, i;
842 
843 	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
844 
845 #ifdef DEV_NETMAP
846 	ena_netmap_reset_rx_ring(adapter, qid);
847 	rx_ring->initialized = false;
848 #endif /* DEV_NETMAP */
849 
850 	/*
851 	 * Allocate an extra element so that on the Rx path we can
852 	 * always safely prefetch rx_info + 1.
853 	 */
854 	size += sizeof(struct ena_rx_buffer);
855 
856 	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
857 
858 	size = sizeof(uint16_t) * rx_ring->ring_size;
859 	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
860 
861 	for (i = 0; i < rx_ring->ring_size; i++)
862 		rx_ring->free_rx_ids[i] = i;
863 
864 	/* Reset RX statistics. */
865 	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
866 	    sizeof(rx_ring->rx_stats));
867 
868 	rx_ring->next_to_clean = 0;
869 	rx_ring->next_to_use = 0;
870 
871 	/* ... and create the buffer DMA maps */
872 	for (i = 0; i < rx_ring->ring_size; i++) {
873 		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
874 		    &(rx_ring->rx_buffer_info[i].map));
875 		if (err != 0) {
876 			ena_trace(ENA_ALERT,
877 			    "Unable to create Rx DMA map for buffer %d\n", i);
878 			goto err_buf_info_unmap;
879 		}
880 	}
881 
882 	/* Create LRO for the ring */
883 	if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
884 		int err = tcp_lro_init(&rx_ring->lro);
885 		if (err != 0) {
886 			device_printf(adapter->pdev,
887 			    "LRO[%d] Initialization failed!\n", qid);
888 		} else {
889 			ena_trace(ENA_INFO,
890 			    "RX Soft LRO[%d] Initialized\n", qid);
891 			rx_ring->lro.ifp = adapter->ifp;
892 		}
893 	}
894 
895 	return (0);
896 
897 err_buf_info_unmap:
898 	while (i--) {
899 		bus_dmamap_destroy(adapter->rx_buf_tag,
900 		    rx_ring->rx_buffer_info[i].map);
901 	}
902 
903 	free(rx_ring->free_rx_ids, M_DEVBUF);
904 	rx_ring->free_rx_ids = NULL;
905 	free(rx_ring->rx_buffer_info, M_DEVBUF);
906 	rx_ring->rx_buffer_info = NULL;
907 	return (ENOMEM);
908 }
909 
910 /**
911  * ena_free_rx_resources - Free Rx Resources
912  * @adapter: network interface device structure
913  * @qid: queue index
914  *
915  * Free all receive software resources
916  **/
917 static void
918 ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
919 {
920 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
921 
922 	/* Free buffer DMA maps, */
923 	for (int i = 0; i < rx_ring->ring_size; i++) {
924 		bus_dmamap_sync(adapter->rx_buf_tag,
925 		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
926 		m_freem(rx_ring->rx_buffer_info[i].mbuf);
927 		rx_ring->rx_buffer_info[i].mbuf = NULL;
928 		bus_dmamap_unload(adapter->rx_buf_tag,
929 		    rx_ring->rx_buffer_info[i].map);
930 		bus_dmamap_destroy(adapter->rx_buf_tag,
931 		    rx_ring->rx_buffer_info[i].map);
932 	}
933 
934 	/* free LRO resources, */
935 	tcp_lro_free(&rx_ring->lro);
936 
937 	/* free allocated memory */
938 	free(rx_ring->rx_buffer_info, M_DEVBUF);
939 	rx_ring->rx_buffer_info = NULL;
940 
941 	free(rx_ring->free_rx_ids, M_DEVBUF);
942 	rx_ring->free_rx_ids = NULL;
943 }
944 
945 /**
946  * ena_setup_all_rx_resources - allocate Rx resources for all queues
947  * @adapter: network interface device structure
948  *
949  * Returns 0 on success, or a nonzero error code on failure.
950  **/
951 static int
952 ena_setup_all_rx_resources(struct ena_adapter *adapter)
953 {
954 	int i, rc = 0;
955 
956 	for (i = 0; i < adapter->num_io_queues; i++) {
957 		rc = ena_setup_rx_resources(adapter, i);
958 		if (rc != 0) {
959 			device_printf(adapter->pdev,
960 			    "Allocation for Rx Queue %u failed\n", i);
961 			goto err_setup_rx;
962 		}
963 	}
964 	return (0);
965 
966 err_setup_rx:
967 	/* rewind the index freeing the rings as we go */
968 	while (i--)
969 		ena_free_rx_resources(adapter, i);
970 	return (rc);
971 }
972 
973 /**
974  * ena_free_all_rx_resources - Free Rx resources for all queues
975  * @adapter: network interface device structure
976  *
977  * Free all receive software resources
978  **/
979 static void
980 ena_free_all_rx_resources(struct ena_adapter *adapter)
981 {
982 	int i;
983 
984 	for (i = 0; i < adapter->num_io_queues; i++)
985 		ena_free_rx_resources(adapter, i);
986 }
987 
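/*
 * Allocate and DMA-map a single Rx mbuf.  A jumbo cluster of rx_mbuf_sz bytes
 * is preferred; a standard cluster is used as a fallback if that fails.
 */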
988 static inline int
989 ena_alloc_rx_mbuf(struct ena_adapter *adapter,
990     struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
991 {
992 	struct ena_com_buf *ena_buf;
993 	bus_dma_segment_t segs[1];
994 	int nsegs, error;
995 	int mlen;
996 
997 	/* If the previously allocated mbuf has not been used yet, keep it. */
998 	if (unlikely(rx_info->mbuf != NULL))
999 		return (0);
1000 
1001 	/* Get mbuf using UMA allocator */
1002 	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1003 	    rx_ring->rx_mbuf_sz);
1004 
1005 	if (unlikely(rx_info->mbuf == NULL)) {
1006 		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
1007 		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1008 		if (unlikely(rx_info->mbuf == NULL)) {
1009 			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1010 			return (ENOMEM);
1011 		}
1012 		mlen = MCLBYTES;
1013 	} else {
1014 		mlen = rx_ring->rx_mbuf_sz;
1015 	}
1016 	/* Set mbuf length */
1017 	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;
1018 
1019 	/* Map packets for DMA */
1020 	ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
1021 	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
1022 	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
1023 	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
1024 	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1025 	if (unlikely((error != 0) || (nsegs != 1))) {
1026 		ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, "
1027 		    "nsegs: %d\n", error, nsegs);
1028 		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
1029 		goto exit;
1030 
1031 	}
1032 
1033 	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
1034 
1035 	ena_buf = &rx_info->ena_buf;
1036 	ena_buf->paddr = segs[0].ds_addr;
1037 	ena_buf->len = mlen;
1038 
1039 	ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
1040 	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
1041 	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);
1042 
1043 	return (0);
1044 
1045 exit:
1046 	m_freem(rx_info->mbuf);
1047 	rx_info->mbuf = NULL;
1048 	return (EFAULT);
1049 }
1050 
1051 static void
1052 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1053     struct ena_rx_buffer *rx_info)
1054 {
1055 
1056 	if (rx_info->mbuf == NULL) {
1057 		ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n");
1058 		return;
1059 	}
1060 
1061 	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
1062 	    BUS_DMASYNC_POSTREAD);
1063 	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
1064 	m_freem(rx_info->mbuf);
1065 	rx_info->mbuf = NULL;
1066 }
1067 
1068 /**
1069  * ena_refill_rx_bufs - Refills ring with descriptors
1070  * @rx_ring: the ring which we want to feed with free descriptors
1071  * @num: number of descriptors to refill
1072  * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1073  **/
1074 int
1075 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1076 {
1077 	struct ena_adapter *adapter = rx_ring->adapter;
1078 	uint16_t next_to_use, req_id;
1079 	uint32_t i;
1080 	int rc;
1081 
1082 	ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n",
1083 	    rx_ring->qid);
1084 
1085 	next_to_use = rx_ring->next_to_use;
1086 
1087 	for (i = 0; i < num; i++) {
1088 		struct ena_rx_buffer *rx_info;
1089 
1090 		ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC,
1091 		    "RX buffer - next to use: %d\n", next_to_use);
1092 
1093 		req_id = rx_ring->free_rx_ids[next_to_use];
1094 		rx_info = &rx_ring->rx_buffer_info[req_id];
1095 #ifdef DEV_NETMAP
1096 		if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
1097 			rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info);
1098 		else
1099 #endif /* DEV_NETMAP */
1100 			rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1101 		if (unlikely(rc != 0)) {
1102 			ena_trace(ENA_WARNING,
1103 			    "failed to alloc buffer for rx queue %d\n",
1104 			    rx_ring->qid);
1105 			break;
1106 		}
1107 		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1108 		    &rx_info->ena_buf, req_id);
1109 		if (unlikely(rc != 0)) {
1110 			ena_trace(ENA_WARNING,
1111 			    "failed to add buffer for rx queue %d\n",
1112 			    rx_ring->qid);
1113 			break;
1114 		}
1115 		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1116 		    rx_ring->ring_size);
1117 	}
1118 
1119 	if (unlikely(i < num)) {
1120 		counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1121 		ena_trace(ENA_WARNING,
1122 		     "refilled rx qid %d with only %d mbufs (from %d)\n",
1123 		     rx_ring->qid, i, num);
1124 	}
1125 
1126 	if (likely(i != 0))
1127 		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1128 
1129 	rx_ring->next_to_use = next_to_use;
1130 	return (i);
1131 }
1132 
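/*
 * Change the drbr (buf_ring) size of all Tx rings.  The interface is brought
 * down for the reconfiguration and brought back up if it was running before.
 */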
1133 int
1134 ena_update_buf_ring_size(struct ena_adapter *adapter,
1135     uint32_t new_buf_ring_size)
1136 {
1137 	uint32_t old_buf_ring_size;
1138 	int rc = 0;
1139 	bool dev_was_up;
1140 
1141 	ENA_LOCK_LOCK(adapter);
1142 
1143 	old_buf_ring_size = adapter->buf_ring_size;
1144 	adapter->buf_ring_size = new_buf_ring_size;
1145 
1146 	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1147 	ena_down(adapter);
1148 
1149 	/* Reconfigure buf ring for all Tx rings. */
1150 	ena_free_all_io_rings_resources(adapter);
1151 	ena_init_io_rings_advanced(adapter);
1152 	if (dev_was_up) {
1153 		/*
1154 		 * If ena_up() fails, it's not because of the recent buf_ring
1155 		 * size change. In that case, we just want to revert to the old
1156 		 * drbr value and trigger the reset, because something else
1157 		 * must have gone wrong.
1158 		 */
1159 		rc = ena_up(adapter);
1160 		if (unlikely(rc != 0)) {
1161 			device_printf(adapter->pdev,
1162 			    "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
1163 			    new_buf_ring_size, old_buf_ring_size);
1164 
1165 			/* Revert old size and trigger the reset */
1166 			adapter->buf_ring_size = old_buf_ring_size;
1167 			ena_free_all_io_rings_resources(adapter);
1168 			ena_init_io_rings_advanced(adapter);
1169 
1170 			ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
1171 			    adapter);
1172 			ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
1173 
1174 		}
1175 	}
1176 
1177 	ENA_LOCK_UNLOCK(adapter);
1178 
1179 	return (rc);
1180 }
1181 
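/*
 * Change the requested Tx/Rx ring sizes.  If bringing the interface back up
 * with the new sizes fails, the old values are restored and, if that fails
 * too, a device reset is triggered.
 */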
1182 int
1183 ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
1184     uint32_t new_rx_size)
1185 {
1186 	uint32_t old_tx_size, old_rx_size;
1187 	int rc = 0;
1188 	bool dev_was_up;
1189 
1190 	ENA_LOCK_LOCK(adapter);
1191 
1192 	old_tx_size = adapter->requested_tx_ring_size;
1193 	old_rx_size = adapter->requested_rx_ring_size;
1194 	adapter->requested_tx_ring_size = new_tx_size;
1195 	adapter->requested_rx_ring_size = new_rx_size;
1196 
1197 	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1198 	ena_down(adapter);
1199 
1200 	/* Configure queues with new size. */
1201 	ena_init_io_rings_basic(adapter);
1202 	if (dev_was_up) {
1203 		rc = ena_up(adapter);
1204 		if (unlikely(rc != 0)) {
1205 			device_printf(adapter->pdev,
1206 			    "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
1207 			    new_tx_size, new_rx_size, old_tx_size, old_rx_size);
1208 
1209 			/* Revert old size. */
1210 			adapter->requested_tx_ring_size = old_tx_size;
1211 			adapter->requested_rx_ring_size = old_rx_size;
1212 			ena_init_io_rings_basic(adapter);
1213 
1214 			/* And try again. */
1215 			rc = ena_up(adapter);
1216 			if (unlikely(rc != 0)) {
1217 				device_printf(adapter->pdev,
1218 				    "Failed to revert old queue sizes. Triggering device reset.\n");
1219 				/*
1220 				 * If we've failed again, something else must
1221 				 * have gone wrong. After the reset, the device
1222 				 * should try to come up again.
1223 				 */
1224 				ENA_FLAG_SET_ATOMIC(
1225 				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1226 				ena_trigger_reset(adapter,
1227 				    ENA_REGS_RESET_OS_TRIGGER);
1228 			}
1229 		}
1230 	}
1231 
1232 	ENA_LOCK_UNLOCK(adapter);
1233 
1234 	return (rc);
1235 }
1236 
1237 static void
1238 ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
1239 {
1240 	ena_free_all_io_rings_resources(adapter);
1241 	/* Force indirection table to be reinitialized */
1242 	ena_com_rss_destroy(adapter->ena_dev);
1243 
1244 	adapter->num_io_queues = num;
1245 	ena_init_io_rings(adapter);
1246 }
1247 
1248 /* Caller should sanitize new_num */
1249 int
1250 ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
1251 {
1252 	uint32_t old_num;
1253 	int rc = 0;
1254 	bool dev_was_up;
1255 
1256 	ENA_LOCK_LOCK(adapter);
1257 
1258 	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1259 	old_num = adapter->num_io_queues;
1260 	ena_down(adapter);
1261 
1262 	ena_update_io_rings(adapter, new_num);
1263 
1264 	if (dev_was_up) {
1265 		rc = ena_up(adapter);
1266 		if (unlikely(rc != 0)) {
1267 			device_printf(adapter->pdev,
1268 			    "Failed to configure device with %u IO queues. "
1269 			    "Reverting to previous value: %u\n",
1270 			    new_num, old_num);
1271 
1272 			ena_update_io_rings(adapter, old_num);
1273 
1274 			rc = ena_up(adapter);
1275 			if (unlikely(rc != 0)) {
1276 				device_printf(adapter->pdev,
1277 				    "Failed to revert to previous setup IO "
1278 				    "queues. Triggering device reset.\n");
1279 				ENA_FLAG_SET_ATOMIC(
1280 				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1281 				ena_trigger_reset(adapter,
1282 				    ENA_REGS_RESET_OS_TRIGGER);
1283 			}
1284 		}
1285 	}
1286 
1287 	ENA_LOCK_UNLOCK(adapter);
1288 
1289 	return (rc);
1290 }
1291 
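/* Free all Rx mbufs (and netmap slots) still held by the given Rx ring. */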
1292 static void
1293 ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
1294 {
1295 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1296 	unsigned int i;
1297 
1298 	for (i = 0; i < rx_ring->ring_size; i++) {
1299 		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1300 
1301 		if (rx_info->mbuf != NULL)
1302 			ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1303 #ifdef DEV_NETMAP
1304 		if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
1305 		    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
1306 			if (rx_info->netmap_buf_idx != 0)
1307 				ena_netmap_free_rx_slot(adapter, rx_ring,
1308 				    rx_info);
1309 		}
1310 #endif /* DEV_NETMAP */
1311 	}
1312 }
1313 
1314 /**
1315  * ena_refill_all_rx_bufs - allocate Rx buffers for all queues
1316  * @adapter: network interface device structure
1317  *
1318  */
1319 static void
1320 ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1321 {
1322 	struct ena_ring *rx_ring;
1323 	int i, rc, bufs_num;
1324 
1325 	for (i = 0; i < adapter->num_io_queues; i++) {
1326 		rx_ring = &adapter->rx_ring[i];
1327 		bufs_num = rx_ring->ring_size - 1;
1328 		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1329 		if (unlikely(rc != bufs_num))
1330 			ena_trace(ENA_WARNING, "refilling Queue %d failed. "
1331 			    "Allocated %d buffers from: %d\n", i, rc, bufs_num);
1332 #ifdef DEV_NETMAP
1333 		rx_ring->initialized = true;
1334 #endif /* DEV_NETMAP */
1335 	}
1336 }
1337 
1338 static void
1339 ena_free_all_rx_bufs(struct ena_adapter *adapter)
1340 {
1341 	int i;
1342 
1343 	for (i = 0; i < adapter->num_io_queues; i++)
1344 		ena_free_rx_bufs(adapter, i);
1345 }
1346 
1347 /**
1348  * ena_free_tx_bufs - Free Tx Buffers per Queue
1349  * @adapter: network interface device structure
1350  * @qid: queue index
1351  **/
1352 static void
1353 ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
1354 {
1355 	bool print_once = true;
1356 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1357 
1358 	ENA_RING_MTX_LOCK(tx_ring);
1359 	for (int i = 0; i < tx_ring->ring_size; i++) {
1360 		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1361 
1362 		if (tx_info->mbuf == NULL)
1363 			continue;
1364 
1365 		if (print_once) {
1366 			device_printf(adapter->pdev,
1367 			    "free uncompleted tx mbuf qid %d idx 0x%x\n",
1368 			    qid, i);
1369 			print_once = false;
1370 		} else {
1371 			ena_trace(ENA_DBG,
1372 			    "free uncompleted tx mbuf qid %d idx 0x%x\n",
1373 			     qid, i);
1374 		}
1375 
1376 		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
1377 		    BUS_DMASYNC_POSTWRITE);
1378 		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
1379 
1380 		m_free(tx_info->mbuf);
1381 		tx_info->mbuf = NULL;
1382 	}
1383 	ENA_RING_MTX_UNLOCK(tx_ring);
1384 }
1385 
1386 static void
1387 ena_free_all_tx_bufs(struct ena_adapter *adapter)
1388 {
1389 
1390 	for (int i = 0; i < adapter->num_io_queues; i++)
1391 		ena_free_tx_bufs(adapter, i);
1392 }
1393 
1394 static void
1395 ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1396 {
1397 	uint16_t ena_qid;
1398 	int i;
1399 
1400 	for (i = 0; i < adapter->num_io_queues; i++) {
1401 		ena_qid = ENA_IO_TXQ_IDX(i);
1402 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1403 	}
1404 }
1405 
1406 static void
1407 ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1408 {
1409 	uint16_t ena_qid;
1410 	int i;
1411 
1412 	for (i = 0; i < adapter->num_io_queues; i++) {
1413 		ena_qid = ENA_IO_RXQ_IDX(i);
1414 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1415 	}
1416 }
1417 
1418 static void
1419 ena_destroy_all_io_queues(struct ena_adapter *adapter)
1420 {
1421 	struct ena_que *queue;
1422 	int i;
1423 
1424 	for (i = 0; i < adapter->num_io_queues; i++) {
1425 		queue = &adapter->que[i];
1426 		while (taskqueue_cancel(queue->cleanup_tq,
1427 		    &queue->cleanup_task, NULL))
1428 			taskqueue_drain(queue->cleanup_tq,
1429 			    &queue->cleanup_task);
1430 		taskqueue_free(queue->cleanup_tq);
1431 	}
1432 
1433 	ena_destroy_all_tx_queues(adapter);
1434 	ena_destroy_all_rx_queues(adapter);
1435 }
1436 
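/*
 * Create the device-side Tx and Rx IO queues and the per-queue cleanup
 * taskqueues used by the MSI-X handlers.
 */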
1437 static int
1438 ena_create_io_queues(struct ena_adapter *adapter)
1439 {
1440 	struct ena_com_dev *ena_dev = adapter->ena_dev;
1441 	struct ena_com_create_io_ctx ctx;
1442 	struct ena_ring *ring;
1443 	struct ena_que *queue;
1444 	uint16_t ena_qid;
1445 	uint32_t msix_vector;
1446 	int rc, i;
1447 
1448 	/* Create TX queues */
1449 	for (i = 0; i < adapter->num_io_queues; i++) {
1450 		msix_vector = ENA_IO_IRQ_IDX(i);
1451 		ena_qid = ENA_IO_TXQ_IDX(i);
1452 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1453 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1454 		ctx.queue_size = adapter->requested_tx_ring_size;
1455 		ctx.msix_vector = msix_vector;
1456 		ctx.qid = ena_qid;
1457 		rc = ena_com_create_io_queue(ena_dev, &ctx);
1458 		if (rc != 0) {
1459 			device_printf(adapter->pdev,
1460 			    "Failed to create io TX queue #%d rc: %d\n", i, rc);
1461 			goto err_tx;
1462 		}
1463 		ring = &adapter->tx_ring[i];
1464 		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1465 		    &ring->ena_com_io_sq,
1466 		    &ring->ena_com_io_cq);
1467 		if (rc != 0) {
1468 			device_printf(adapter->pdev,
1469 			    "Failed to get TX queue handlers. TX queue num"
1470 			    " %d rc: %d\n", i, rc);
1471 			ena_com_destroy_io_queue(ena_dev, ena_qid);
1472 			goto err_tx;
1473 		}
1474 	}
1475 
1476 	/* Create RX queues */
1477 	for (i = 0; i < adapter->num_io_queues; i++) {
1478 		msix_vector = ENA_IO_IRQ_IDX(i);
1479 		ena_qid = ENA_IO_RXQ_IDX(i);
1480 		ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1481 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1482 		ctx.queue_size = adapter->requested_rx_ring_size;
1483 		ctx.msix_vector = msix_vector;
1484 		ctx.qid = ena_qid;
1485 		rc = ena_com_create_io_queue(ena_dev, &ctx);
1486 		if (unlikely(rc != 0)) {
1487 			device_printf(adapter->pdev,
1488 			    "Failed to create io RX queue[%d] rc: %d\n", i, rc);
1489 			goto err_rx;
1490 		}
1491 
1492 		ring = &adapter->rx_ring[i];
1493 		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1494 		    &ring->ena_com_io_sq,
1495 		    &ring->ena_com_io_cq);
1496 		if (unlikely(rc != 0)) {
1497 			device_printf(adapter->pdev,
1498 			    "Failed to get RX queue handlers. RX queue num"
1499 			    " %d rc: %d\n", i, rc);
1500 			ena_com_destroy_io_queue(ena_dev, ena_qid);
1501 			goto err_rx;
1502 		}
1503 	}
1504 
1505 	for (i = 0; i < adapter->num_io_queues; i++) {
1506 		queue = &adapter->que[i];
1507 
1508 		NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
1509 		queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
1510 		    M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
1511 
1512 		taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET,
1513 		    "%s queue %d cleanup",
1514 		    device_get_nameunit(adapter->pdev), i);
1515 	}
1516 
1517 	return (0);
1518 
1519 err_rx:
1520 	while (i--)
1521 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1522 	i = adapter->num_io_queues;
1523 err_tx:
1524 	while (i--)
1525 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1526 
1527 	return (ENXIO);
1528 }
1529 
1530 /*********************************************************************
1531  *
1532  *  MSIX & Interrupt Service routine
1533  *
1534  **********************************************************************/
1535 
1536 /**
1537  * ena_intr_msix_mgmnt - MSIX Interrupt Handler for admin/async queue
1538  * @arg: network adapter
1539  **/
1540 static void
1541 ena_intr_msix_mgmnt(void *arg)
1542 {
1543 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
1544 
1545 	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1546 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
1547 		ena_com_aenq_intr_handler(adapter->ena_dev, arg);
1548 }
1549 
1550 /**
1551  * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
1552  * @arg: queue
1553  **/
1554 static int
1555 ena_handle_msix(void *arg)
1556 {
1557 	struct ena_que *queue = arg;
1558 	struct ena_adapter *adapter = queue->adapter;
1559 	if_t ifp = adapter->ifp;
1560 
1561 	if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1562 		return (FILTER_STRAY);
1563 
1564 	taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
1565 
1566 	return (FILTER_HANDLED);
1567 }
1568 
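/*
 * Allocate MSI-X vectors: one for the management interrupt plus one per IO
 * queue pair.  The driver can operate with fewer vectors than requested.
 */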
1569 static int
1570 ena_enable_msix(struct ena_adapter *adapter)
1571 {
1572 	device_t dev = adapter->pdev;
1573 	int msix_vecs, msix_req;
1574 	int i, rc = 0;
1575 
1576 	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1577 		device_printf(dev, "Error, MSI-X is already enabled\n");
1578 		return (EINVAL);
1579 	}
1580 
1581 	/* Reserve the maximum number of MSI-X vectors we might need */
1582 	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1583 
1584 	adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
1585 	    M_DEVBUF, M_WAITOK | M_ZERO);
1586 
1587 	ena_trace(ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);
1588 
1589 	for (i = 0; i < msix_vecs; i++) {
1590 		adapter->msix_entries[i].entry = i;
1591 		/* Vectors must start from 1 */
1592 		adapter->msix_entries[i].vector = i + 1;
1593 	}
1594 
1595 	msix_req = msix_vecs;
1596 	rc = pci_alloc_msix(dev, &msix_vecs);
1597 	if (unlikely(rc != 0)) {
1598 		device_printf(dev,
1599 		    "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
1600 
1601 		rc = ENOSPC;
1602 		goto err_msix_free;
1603 	}
1604 
1605 	if (msix_vecs != msix_req) {
1606 		if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
1607 			device_printf(dev,
1608 			    "Not enough number of MSI-x allocated: %d\n",
1609 			    msix_vecs);
1610 			pci_release_msi(dev);
1611 			rc = ENOSPC;
1612 			goto err_msix_free;
1613 		}
1614 		device_printf(dev, "Enable only %d MSI-x (out of %d), reduce "
1615 		    "the number of queues\n", msix_vecs, msix_req);
1616 	}
1617 
1618 	adapter->msix_vecs = msix_vecs;
1619 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1620 
1621 	return (0);
1622 
1623 err_msix_free:
1624 	free(adapter->msix_entries, M_DEVBUF);
1625 	adapter->msix_entries = NULL;
1626 
1627 	return (rc);
1628 }
1629 
1630 static void
1631 ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1632 {
1633 
1634 	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1635 	    ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1636 	    device_get_nameunit(adapter->pdev));
1637 	/*
1638 	 * The handler is NULL on purpose; it will be set
1639 	 * when the management interrupt is acquired.
1640 	 */
1641 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
1642 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1643 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1644 	    adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
1645 }
1646 
1647 static int
1648 ena_setup_io_intr(struct ena_adapter *adapter)
1649 {
1650 	static int last_bind_cpu = -1;
1651 	int irq_idx;
1652 
1653 	if (adapter->msix_entries == NULL)
1654 		return (EINVAL);
1655 
1656 	for (int i = 0; i < adapter->num_io_queues; i++) {
1657 		irq_idx = ENA_IO_IRQ_IDX(i);
1658 
1659 		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1660 		    "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
1661 		adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
1662 		adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1663 		adapter->irq_tbl[irq_idx].vector =
1664 		    adapter->msix_entries[irq_idx].vector;
1665 		ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n",
1666 		    adapter->msix_entries[irq_idx].vector);
1667 
1668 		/*
1669 		 * We want to bind rings to the corresponding cpu
1670 		 * using something similar to the RSS round-robin technique.
1671 		 */
1672 		if (unlikely(last_bind_cpu < 0))
1673 			last_bind_cpu = CPU_FIRST();
1674 		adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1675 		    last_bind_cpu;
1676 		last_bind_cpu = CPU_NEXT(last_bind_cpu);
1677 	}
1678 
1679 	return (0);
1680 }
1681 
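/* Allocate and set up the management (admin/AENQ) interrupt. */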
1682 static int
1683 ena_request_mgmnt_irq(struct ena_adapter *adapter)
1684 {
1685 	struct ena_irq *irq;
1686 	unsigned long flags;
1687 	int rc, rcc;
1688 
1689 	flags = RF_ACTIVE | RF_SHAREABLE;
1690 
1691 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1692 	irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1693 	    &irq->vector, flags);
1694 
1695 	if (unlikely(irq->res == NULL)) {
1696 		device_printf(adapter->pdev, "could not allocate "
1697 		    "irq vector: %d\n", irq->vector);
1698 		return (ENXIO);
1699 	}
1700 
1701 	rc = bus_setup_intr(adapter->pdev, irq->res,
1702 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt,
1703 	    irq->data, &irq->cookie);
1704 	if (unlikely(rc != 0)) {
1705 		device_printf(adapter->pdev, "failed to register "
1706 		    "interrupt handler for irq %ju: %d\n",
1707 		    rman_get_start(irq->res), rc);
1708 		goto err_res_free;
1709 	}
1710 	irq->requested = true;
1711 
1712 	return (rc);
1713 
1714 err_res_free:
1715 	ena_trace(ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n",
1716 	    irq->vector);
1717 	rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1718 	    irq->vector, irq->res);
1719 	if (unlikely(rcc != 0))
1720 		device_printf(adapter->pdev, "dev has no parent while "
1721 		    "releasing res for irq: %d\n", irq->vector);
1722 	irq->res = NULL;
1723 
1724 	return (rc);
1725 }
1726 
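/*
 * Allocate and set up an interrupt for every IO queue vector.  On failure all
 * IRQs requested so far are torn down again.
 */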
1727 static int
1728 ena_request_io_irq(struct ena_adapter *adapter)
1729 {
1730 	struct ena_irq *irq;
1731 	unsigned long flags = 0;
1732 	int rc = 0, i, rcc;
1733 
1734 	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
1735 		device_printf(adapter->pdev,
1736 		    "failed to request I/O IRQ: MSI-X is not enabled\n");
1737 		return (EINVAL);
1738 	} else {
1739 		flags = RF_ACTIVE | RF_SHAREABLE;
1740 	}
1741 
1742 	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1743 		irq = &adapter->irq_tbl[i];
1744 
1745 		if (unlikely(irq->requested))
1746 			continue;
1747 
1748 		irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1749 		    &irq->vector, flags);
1750 		if (unlikely(irq->res == NULL)) {
1751 			rc = ENOMEM;
1752 			device_printf(adapter->pdev, "could not allocate "
1753 			    "irq vector: %d\n", irq->vector);
1754 			goto err;
1755 		}
1756 
1757 		rc = bus_setup_intr(adapter->pdev, irq->res,
1758 		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL,
1759 		    irq->data, &irq->cookie);
1760 		 if (unlikely(rc != 0)) {
1761 			device_printf(adapter->pdev, "failed to register "
1762 			    "interrupt handler for irq %ju: %d\n",
1763 			    rman_get_start(irq->res), rc);
1764 			goto err;
1765 		}
1766 		irq->requested = true;
1767 
1768 		ena_trace(ENA_INFO, "queue %d - cpu %d\n",
1769 		    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
1770 	}
1771 
1772 	return (rc);
1773 
1774 err:
1775 
1776 	for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
1777 		irq = &adapter->irq_tbl[i];
1778 		rcc = 0;
1779 
1780 		/* Once we have entered the err: section and irq->requested is
1781 		   true, free both the interrupt and its resources. */
1782 		if (irq->requested)
1783 			rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1784 		if (unlikely(rcc != 0))
1785 			device_printf(adapter->pdev, "could not release"
1786 			    " irq: %d, error: %d\n", irq->vector, rcc);
1787 
1788 		/* If we entered the err: section without irq->requested set, we
1789 		   know it was bus_alloc_resource_any() that needs cleanup,
1790 		   provided res is not NULL. If res is NULL, no work is needed
1791 		   in this iteration. */
1792 		rcc = 0;
1793 		if (irq->res != NULL) {
1794 			rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1795 			    irq->vector, irq->res);
1796 		}
1797 		if (unlikely(rcc != 0))
1798 			device_printf(adapter->pdev, "dev has no parent while "
1799 			    "releasing res for irq: %d\n", irq->vector);
1800 		irq->requested = false;
1801 		irq->res = NULL;
1802 	}
1803 
1804 	return (rc);
1805 }
1806 
1807 static void
1808 ena_free_mgmnt_irq(struct ena_adapter *adapter)
1809 {
1810 	struct ena_irq *irq;
1811 	int rc;
1812 
1813 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1814 	if (irq->requested) {
1815 		ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
1816 		    irq->vector);
1817 		rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1818 		if (unlikely(rc != 0))
1819 			device_printf(adapter->pdev, "failed to tear "
1820 			    "down irq: %d\n", irq->vector);
		irq->requested = false;
1822 	}
1823 
1824 	if (irq->res != NULL) {
1825 		ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n",
1826 		    irq->vector);
1827 		rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1828 		    irq->vector, irq->res);
1829 		irq->res = NULL;
1830 		if (unlikely(rc != 0))
1831 			device_printf(adapter->pdev, "dev has no parent while "
1832 			    "releasing res for irq: %d\n", irq->vector);
1833 	}
1834 }
1835 
1836 static void
1837 ena_free_io_irq(struct ena_adapter *adapter)
1838 {
1839 	struct ena_irq *irq;
1840 	int rc;
1841 
1842 	for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1843 		irq = &adapter->irq_tbl[i];
1844 		if (irq->requested) {
1845 			ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n",
1846 			    irq->vector);
1847 			rc = bus_teardown_intr(adapter->pdev, irq->res,
1848 			    irq->cookie);
1849 			if (unlikely(rc != 0)) {
1850 				device_printf(adapter->pdev, "failed to tear "
1851 				    "down irq: %d\n", irq->vector);
1852 			}
			irq->requested = false;
1854 		}
1855 
1856 		if (irq->res != NULL) {
1857 			ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n",
1858 			    irq->vector);
1859 			rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1860 			    irq->vector, irq->res);
1861 			irq->res = NULL;
1862 			if (unlikely(rc != 0)) {
1863 				device_printf(adapter->pdev, "dev has no parent"
1864 				    " while releasing res for irq: %d\n",
1865 				    irq->vector);
1866 			}
1867 		}
1868 	}
1869 }
1870 
1871 static void
1872 ena_free_irqs(struct ena_adapter* adapter)
1873 {
1874 
1875 	ena_free_io_irq(adapter);
1876 	ena_free_mgmnt_irq(adapter);
1877 	ena_disable_msix(adapter);
1878 }
1879 
1880 static void
1881 ena_disable_msix(struct ena_adapter *adapter)
1882 {
1883 
1884 	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1885 		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1886 		pci_release_msi(adapter->pdev);
1887 	}
1888 
1889 	adapter->msix_vecs = 0;
1890 	if (adapter->msix_entries != NULL)
1891 		free(adapter->msix_entries, M_DEVBUF);
1892 	adapter->msix_entries = NULL;
1893 }
1894 
1895 static void
1896 ena_unmask_all_io_irqs(struct ena_adapter *adapter)
1897 {
	struct ena_com_io_cq *io_cq;
1899 	struct ena_eth_io_intr_reg intr_reg;
1900 	uint16_t ena_qid;
1901 	int i;
1902 
1903 	/* Unmask interrupts for all queues */
1904 	for (i = 0; i < adapter->num_io_queues; i++) {
1905 		ena_qid = ENA_IO_TXQ_IDX(i);
1906 		io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1907 		ena_com_update_intr_reg(&intr_reg, 0, 0, true);
1908 		ena_com_unmask_intr(io_cq, &intr_reg);
1909 	}
1910 }
1911 
1912 /* Configure the Rx forwarding */
1913 static int
1914 ena_rss_configure(struct ena_adapter *adapter)
1915 {
1916 	struct ena_com_dev *ena_dev = adapter->ena_dev;
1917 	int rc;
1918 
1919 	/* In case the RSS table was destroyed */
1920 	if (!ena_dev->rss.tbl_log_size) {
1921 		rc = ena_rss_init_default(adapter);
1922 		if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
1923 			device_printf(adapter->pdev,
1924 			    "WARNING: RSS was not properly re-initialized,"
1925 			    " it will affect bandwidth\n");
1926 			ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
1927 			return (rc);
1928 		}
1929 	}
1930 
1931 	/* Set indirect table */
1932 	rc = ena_com_indirect_table_set(ena_dev);
1933 	if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1934 		return (rc);
1935 
1936 	/* Configure hash function (if supported) */
1937 	rc = ena_com_set_hash_function(ena_dev);
1938 	if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1939 		return (rc);
1940 
1941 	/* Configure hash inputs (if supported) */
1942 	rc = ena_com_set_hash_ctrl(ena_dev);
1943 	if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1944 		return (rc);
1945 
1946 	return (0);
1947 }
1948 
1949 static int
1950 ena_up_complete(struct ena_adapter *adapter)
1951 {
1952 	int rc;
1953 
1954 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
1955 		rc = ena_rss_configure(adapter);
1956 		if (rc != 0) {
1957 			device_printf(adapter->pdev,
1958 			    "Failed to configure RSS\n");
1959 			return (rc);
1960 		}
1961 	}
1962 
1963 	rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
1964 	if (unlikely(rc != 0))
1965 		return (rc);
1966 
1967 	ena_refill_all_rx_bufs(adapter);
1968 	ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
1969 	    sizeof(adapter->hw_stats));
1970 
1971 	return (0);
1972 }
1973 
1974 static void
1975 set_io_rings_size(struct ena_adapter *adapter, int new_tx_size,
1976     int new_rx_size)
1977 {
1978 	int i;
1979 
1980 	for (i = 0; i < adapter->num_io_queues; i++) {
1981 		adapter->tx_ring[i].ring_size = new_tx_size;
1982 		adapter->rx_ring[i].ring_size = new_rx_size;
1983 	}
1984 }
1985 
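/*
 * Try to create the IO queues; on ENOMEM, shrink the ring sizes and retry
 * until allocation succeeds or a ring would drop below ENA_MIN_RING_SIZE.
 */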
1986 static int
1987 create_queues_with_size_backoff(struct ena_adapter *adapter)
1988 {
1989 	int rc;
1990 	uint32_t cur_rx_ring_size, cur_tx_ring_size;
1991 	uint32_t new_rx_ring_size, new_tx_ring_size;
1992 
1993 	/*
1994 	 * Current queue sizes might be set to smaller than the requested
1995 	 * ones due to past queue allocation failures.
1996 	 */
1997 	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
1998 	    adapter->requested_rx_ring_size);
1999 
2000 	while (1) {
2001 		/* Allocate transmit descriptors */
2002 		rc = ena_setup_all_tx_resources(adapter);
2003 		if (unlikely(rc != 0)) {
2004 			ena_trace(ENA_ALERT, "err_setup_tx\n");
2005 			goto err_setup_tx;
2006 		}
2007 
2008 		/* Allocate receive descriptors */
2009 		rc = ena_setup_all_rx_resources(adapter);
2010 		if (unlikely(rc != 0)) {
2011 			ena_trace(ENA_ALERT, "err_setup_rx\n");
2012 			goto err_setup_rx;
2013 		}
2014 
2015 		/* Create IO queues for Rx & Tx */
2016 		rc = ena_create_io_queues(adapter);
2017 		if (unlikely(rc != 0)) {
2018 			ena_trace(ENA_ALERT,
2019 			    "create IO queues failed\n");
2020 			goto err_io_que;
2021 		}
2022 
2023 		return (0);
2024 
2025 err_io_que:
2026 		ena_free_all_rx_resources(adapter);
2027 err_setup_rx:
2028 		ena_free_all_tx_resources(adapter);
2029 err_setup_tx:
2030 		/*
2031 		 * Lower the ring size if ENOMEM. Otherwise, return the
2032 		 * error straightaway.
2033 		 */
2034 		if (unlikely(rc != ENOMEM)) {
2035 			ena_trace(ENA_ALERT,
2036 			    "Queue creation failed with error code: %d\n", rc);
2037 			return (rc);
2038 		}
2039 
2040 		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2041 		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2042 
2043 		device_printf(adapter->pdev,
2044 		    "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2045 		    cur_tx_ring_size, cur_rx_ring_size);
2046 
2047 		new_tx_ring_size = cur_tx_ring_size;
2048 		new_rx_ring_size = cur_rx_ring_size;
2049 
2050 		/*
2051 		 * Decrease the size of a larger queue, or decrease both if they are
2052 		 * the same size.
2053 		 */
2054 		if (cur_rx_ring_size <= cur_tx_ring_size)
2055 			new_tx_ring_size = cur_tx_ring_size / 2;
2056 		if (cur_rx_ring_size >= cur_tx_ring_size)
2057 			new_rx_ring_size = cur_rx_ring_size / 2;
2058 
2059 		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2060 		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
			device_printf(adapter->pdev,
			    "Queue creation failed with the smallest possible queue size "
			    "of %d for both queues. Not retrying with smaller queues\n",
			    ENA_MIN_RING_SIZE);
2065 			return (rc);
2066 		}
2067 
2068 		set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
2069 	}
2070 }
2071 
2072 int
2073 ena_up(struct ena_adapter *adapter)
2074 {
2075 	int rc = 0;
2076 
2077 	if (unlikely(device_is_attached(adapter->pdev) == 0)) {
2078 		device_printf(adapter->pdev, "device is not attached!\n");
2079 		return (ENXIO);
2080 	}
2081 
2082 	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2083 		return (0);
2084 
2085 	device_printf(adapter->pdev, "device is going UP\n");
2086 
2087 	/* setup interrupts for IO queues */
2088 	rc = ena_setup_io_intr(adapter);
2089 	if (unlikely(rc != 0)) {
2090 		ena_trace(ENA_ALERT, "error setting up IO interrupt\n");
2091 		goto error;
2092 	}
2093 	rc = ena_request_io_irq(adapter);
2094 	if (unlikely(rc != 0)) {
2095 		ena_trace(ENA_ALERT, "err_req_irq\n");
2096 		goto error;
2097 	}
2098 
2099 	device_printf(adapter->pdev,
2100 	    "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, "
2101 	    "LLQ is %s\n",
2102 	    adapter->num_io_queues,
2103 	    adapter->requested_rx_ring_size,
2104 	    adapter->requested_tx_ring_size,
2105 	    (adapter->ena_dev->tx_mem_queue_type ==
2106 	        ENA_ADMIN_PLACEMENT_POLICY_DEV) ?  "ENABLED" : "DISABLED");
2107 
2108 	rc = create_queues_with_size_backoff(adapter);
2109 	if (unlikely(rc != 0)) {
2110 		ena_trace(ENA_ALERT,
2111 		    "error creating queues with size backoff\n");
2112 		goto err_create_queues_with_backoff;
2113 	}
2114 
2115 	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
2116 		if_link_state_change(adapter->ifp, LINK_STATE_UP);
2117 
2118 	rc = ena_up_complete(adapter);
2119 	if (unlikely(rc != 0))
2120 		goto err_up_complete;
2121 
2122 	counter_u64_add(adapter->dev_stats.interface_up, 1);
2123 
2124 	ena_update_hwassist(adapter);
2125 
	if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2128 
	/*
	 * Activate the timer service only if the device is running. If this
	 * flag is not set, it means that the driver is being reset and the
	 * timer service will be activated afterwards.
	 */
	if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) {
		callout_reset_sbt(&adapter->timer_service, SBT_1S,
		    SBT_1S, ena_timer_service, (void *)adapter, 0);
	}
2136 	}
2137 
2138 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2139 
2140 	ena_unmask_all_io_irqs(adapter);
2141 
2142 	return (0);
2143 
2144 err_up_complete:
2145 	ena_destroy_all_io_queues(adapter);
2146 	ena_free_all_rx_resources(adapter);
2147 	ena_free_all_tx_resources(adapter);
2148 err_create_queues_with_backoff:
2149 	ena_free_io_irq(adapter);
2150 error:
2151 	return (rc);
2152 }
2153 
2154 static uint64_t
2155 ena_get_counter(if_t ifp, ift_counter cnt)
2156 {
2157 	struct ena_adapter *adapter;
2158 	struct ena_hw_stats *stats;
2159 
2160 	adapter = if_getsoftc(ifp);
2161 	stats = &adapter->hw_stats;
2162 
2163 	switch (cnt) {
2164 	case IFCOUNTER_IPACKETS:
2165 		return (counter_u64_fetch(stats->rx_packets));
2166 	case IFCOUNTER_OPACKETS:
2167 		return (counter_u64_fetch(stats->tx_packets));
2168 	case IFCOUNTER_IBYTES:
2169 		return (counter_u64_fetch(stats->rx_bytes));
2170 	case IFCOUNTER_OBYTES:
2171 		return (counter_u64_fetch(stats->tx_bytes));
2172 	case IFCOUNTER_IQDROPS:
2173 		return (counter_u64_fetch(stats->rx_drops));
2174 	case IFCOUNTER_OQDROPS:
2175 		return (counter_u64_fetch(stats->tx_drops));
2176 	default:
2177 		return (if_get_counter_default(ifp, cnt));
2178 	}
2179 }
2180 
2181 static int
2182 ena_media_change(if_t ifp)
2183 {
2184 	/* Media Change is not supported by firmware */
2185 	return (0);
2186 }
2187 
2188 static void
2189 ena_media_status(if_t ifp, struct ifmediareq *ifmr)
2190 {
2191 	struct ena_adapter *adapter = if_getsoftc(ifp);
2192 	ena_trace(ENA_DBG, "enter\n");
2193 
2194 	ENA_LOCK_LOCK(adapter);
2195 
2196 	ifmr->ifm_status = IFM_AVALID;
2197 	ifmr->ifm_active = IFM_ETHER;
2198 
2199 	if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
2200 		ENA_LOCK_UNLOCK(adapter);
2201 		ena_trace(ENA_INFO, "Link is down\n");
2202 		return;
2203 	}
2204 
2205 	ifmr->ifm_status |= IFM_ACTIVE;
2206 	ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
2207 
2208 	ENA_LOCK_UNLOCK(adapter);
2209 }
2210 
2211 static void
2212 ena_init(void *arg)
2213 {
2214 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
2215 
2216 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
2217 		ENA_LOCK_LOCK(adapter);
2218 		ena_up(adapter);
2219 		ENA_LOCK_UNLOCK(adapter);
2220 	}
2221 }
2222 
2223 static int
2224 ena_ioctl(if_t ifp, u_long command, caddr_t data)
2225 {
2226 	struct ena_adapter *adapter;
2227 	struct ifreq *ifr;
2228 	int rc;
2229 
2230 	adapter = ifp->if_softc;
2231 	ifr = (struct ifreq *)data;
2232 
2233 	/*
2234 	 * Acquiring lock to prevent from running up and down routines parallel.
2235 	 */
2236 	rc = 0;
2237 	switch (command) {
2238 	case SIOCSIFMTU:
2239 		if (ifp->if_mtu == ifr->ifr_mtu)
2240 			break;
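		/*
		 * The driver applies a new MTU by bringing the interface
		 * down and back up.
		 */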
2241 		ENA_LOCK_LOCK(adapter);
2242 		ena_down(adapter);
2243 
2244 		ena_change_mtu(ifp, ifr->ifr_mtu);
2245 
2246 		rc = ena_up(adapter);
2247 		ENA_LOCK_UNLOCK(adapter);
2248 		break;
2249 
2250 	case SIOCSIFFLAGS:
2251 		if ((ifp->if_flags & IFF_UP) != 0) {
2252 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2253 				if ((ifp->if_flags & (IFF_PROMISC |
2254 				    IFF_ALLMULTI)) != 0) {
2255 					device_printf(adapter->pdev,
2256 					    "ioctl promisc/allmulti\n");
2257 				}
2258 			} else {
2259 				ENA_LOCK_LOCK(adapter);
2260 				rc = ena_up(adapter);
2261 				ENA_LOCK_UNLOCK(adapter);
2262 			}
2263 		} else {
2264 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2265 				ENA_LOCK_LOCK(adapter);
2266 				ena_down(adapter);
2267 				ENA_LOCK_UNLOCK(adapter);
2268 			}
2269 		}
2270 		break;
2271 
2272 	case SIOCADDMULTI:
2273 	case SIOCDELMULTI:
2274 		break;
2275 
2276 	case SIOCSIFMEDIA:
2277 	case SIOCGIFMEDIA:
2278 		rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2279 		break;
2280 
2281 	case SIOCSIFCAP:
2282 		{
2283 			int reinit = 0;
2284 
2285 			if (ifr->ifr_reqcap != ifp->if_capenable) {
2286 				ifp->if_capenable = ifr->ifr_reqcap;
2287 				reinit = 1;
2288 			}
2289 
2290 			if ((reinit != 0) &&
2291 			    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
2292 				ENA_LOCK_LOCK(adapter);
2293 				ena_down(adapter);
2294 				rc = ena_up(adapter);
2295 				ENA_LOCK_UNLOCK(adapter);
2296 			}
2297 		}
2298 
2299 		break;
2300 	default:
2301 		rc = ether_ioctl(ifp, command, data);
2302 		break;
2303 	}
2304 
2305 	return (rc);
2306 }
2307 
2308 static int
2309 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2310 {
2311 	int caps = 0;
2312 
2313 	if ((feat->offload.tx &
2314 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2315 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2316 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2317 		caps |= IFCAP_TXCSUM;
2318 
2319 	if ((feat->offload.tx &
2320 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2321 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2322 		caps |= IFCAP_TXCSUM_IPV6;
2323 
2324 	if ((feat->offload.tx &
2325 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2326 		caps |= IFCAP_TSO4;
2327 
2328 	if ((feat->offload.tx &
2329 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2330 		caps |= IFCAP_TSO6;
2331 
2332 	if ((feat->offload.rx_supported &
2333 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2334 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2335 		caps |= IFCAP_RXCSUM;
2336 
2337 	if ((feat->offload.rx_supported &
2338 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2339 		caps |= IFCAP_RXCSUM_IPV6;
2340 
2341 	caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
2342 
2343 	return (caps);
2344 }
2345 
2346 static void
2347 ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
2348 {
2349 
2350 	host_info->supported_network_features[0] =
2351 	    (uint32_t)if_getcapabilities(ifp);
2352 }
2353 
2354 static void
2355 ena_update_hwassist(struct ena_adapter *adapter)
2356 {
2357 	if_t ifp = adapter->ifp;
2358 	uint32_t feat = adapter->tx_offload_cap;
2359 	int cap = if_getcapenable(ifp);
2360 	int flags = 0;
2361 
2362 	if_clearhwassist(ifp);
2363 
2364 	if ((cap & IFCAP_TXCSUM) != 0) {
2365 		if ((feat &
2366 		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
2367 			flags |= CSUM_IP;
2368 		if ((feat &
2369 		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2370 		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
2371 			flags |= CSUM_IP_UDP | CSUM_IP_TCP;
2372 	}
2373 
2374 	if ((cap & IFCAP_TXCSUM_IPV6) != 0)
2375 		flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
2376 
2377 	if ((cap & IFCAP_TSO4) != 0)
2378 		flags |= CSUM_IP_TSO;
2379 
2380 	if ((cap & IFCAP_TSO6) != 0)
2381 		flags |= CSUM_IP6_TSO;
2382 
2383 	if_sethwassistbits(ifp, flags, 0);
2384 }
2385 
2386 static int
2387 ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
2388     struct ena_com_dev_get_features_ctx *feat)
2389 {
2390 	if_t ifp;
2391 	int caps = 0;
2392 
2393 	ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2394 	if (unlikely(ifp == NULL)) {
2395 		ena_trace(ENA_ALERT, "can not allocate ifnet structure\n");
2396 		return (ENXIO);
2397 	}
2398 	if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
2399 	if_setdev(ifp, pdev);
2400 	if_setsoftc(ifp, adapter);
2401 
2402 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
2403 	    IFF_KNOWSEPOCH);
2404 	if_setinitfn(ifp, ena_init);
2405 	if_settransmitfn(ifp, ena_mq_start);
2406 	if_setqflushfn(ifp, ena_qflush);
2407 	if_setioctlfn(ifp, ena_ioctl);
2408 	if_setgetcounterfn(ifp, ena_get_counter);
2409 
2410 	if_setsendqlen(ifp, adapter->requested_tx_ring_size);
2411 	if_setsendqready(ifp);
2412 	if_setmtu(ifp, ETHERMTU);
2413 	if_setbaudrate(ifp, 0);
2414 	/* Zeroize capabilities... */
2415 	if_setcapabilities(ifp, 0);
2416 	if_setcapenable(ifp, 0);
2417 	/* check hardware support */
2418 	caps = ena_get_dev_offloads(feat);
2419 	/* ... and set them */
2420 	if_setcapabilitiesbit(ifp, caps, 0);
2421 
2422 	/* TSO parameters */
2423 	ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
2424 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2425 	ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
2426 	ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
2427 
2428 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2429 	if_setcapenable(ifp, if_getcapabilities(ifp));
2430 
2431 	/*
2432 	 * Specify the media types supported by this adapter and register
2433 	 * callbacks to update media and link information
2434 	 */
2435 	ifmedia_init(&adapter->media, IFM_IMASK,
2436 	    ena_media_change, ena_media_status);
2437 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2438 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2439 
2440 	ether_ifattach(ifp, adapter->mac_addr);
2441 
2442 	return (0);
2443 }
2444 
2445 void
2446 ena_down(struct ena_adapter *adapter)
2447 {
2448 	int rc;
2449 
2450 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2451 		return;
2452 
2453 	device_printf(adapter->pdev, "device is going DOWN\n");
2454 
2455 	callout_drain(&adapter->timer_service);
2456 
2457 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2460 
2461 	ena_free_io_irq(adapter);
2462 
2463 	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
		rc = ena_com_dev_reset(adapter->ena_dev,
		    adapter->reset_reason);
		if (unlikely(rc != 0))
			device_printf(adapter->pdev,
			    "Device reset failed\n");
2469 	}
2470 
2471 	ena_destroy_all_io_queues(adapter);
2472 
2473 	ena_free_all_tx_bufs(adapter);
2474 	ena_free_all_rx_bufs(adapter);
2475 	ena_free_all_tx_resources(adapter);
2476 	ena_free_all_rx_resources(adapter);
2477 
2478 	counter_u64_add(adapter->dev_stats.interface_down, 1);
2479 }
2480 
2481 static uint32_t
2482 ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
2483     struct ena_com_dev_get_features_ctx *get_feat_ctx)
2484 {
2485 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
2486 
2487 	/* Regular queues capabilities */
2488 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2489 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2490 		    &get_feat_ctx->max_queue_ext.max_queue_ext;
2491 		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
2492 			max_queue_ext->max_rx_cq_num);
2493 
2494 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
2495 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2496 	} else {
2497 		struct ena_admin_queue_feature_desc *max_queues =
2498 		    &get_feat_ctx->max_queues;
2499 		io_tx_sq_num = max_queues->max_sq_num;
2500 		io_tx_cq_num = max_queues->max_cq_num;
2501 		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
2502 	}
2503 
2504 	/* In case of LLQ use the llq fields for the tx SQ/CQ */
2505 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2506 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
2507 
2508 	max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
2509 	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
2510 	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
2511 	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
2513 	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
2514 	    pci_msix_count(pdev) - 1);
2515 
2516 	return (max_num_io_queues);
2517 }
2518 
2519 static int
2520 ena_enable_wc(struct resource *res)
2521 {
2522 #if defined(__i386) || defined(__amd64) || defined(__aarch64__)
2523 	vm_offset_t va;
2524 	vm_size_t len;
2525 	int rc;
2526 
2527 	va = (vm_offset_t)rman_get_virtual(res);
2528 	len = rman_get_size(res);
2529 	/* Enable write combining */
2530 	rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
2531 	if (unlikely(rc != 0)) {
2532 		ena_trace(ENA_ALERT, "pmap_change_attr failed, %d\n", rc);
2533 		return (rc);
2534 	}
2535 
2536 	return (0);
2537 #endif
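	/* Write combining is not supported on this architecture. */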
2538 	return (EOPNOTSUPP);
2539 }
2540 
2541 static int
2542 ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
2543     struct ena_admin_feature_llq_desc *llq,
2544     struct ena_llq_configurations *llq_default_configurations)
2545 {
2546 	struct ena_adapter *adapter = device_get_softc(pdev);
2547 	int rc, rid;
2548 	uint32_t llq_feature_mask;
2549 
2550 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
2551 	if (!(ena_dev->supported_features & llq_feature_mask)) {
2552 		device_printf(pdev,
2553 		    "LLQ is not supported. Fallback to host mode policy.\n");
2554 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2555 		return (0);
2556 	}
2557 
2558 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
2559 	if (unlikely(rc != 0)) {
2560 		device_printf(pdev, "Failed to configure the device mode. "
2561 		    "Fallback to host mode policy.\n");
2562 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2563 		return (0);
2564 	}
2565 
2566 	/* Nothing to config, exit */
2567 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
2568 		return (0);
2569 
2570 	/* Try to allocate resources for LLQ bar */
2571 	rid = PCIR_BAR(ENA_MEM_BAR);
2572 	adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
2573 	    &rid, RF_ACTIVE);
2574 	if (unlikely(adapter->memory == NULL)) {
2575 		device_printf(pdev, "unable to allocate LLQ bar resource. "
2576 		    "Fallback to host mode policy.\n");
2577 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2578 		return (0);
2579 	}
2580 
2581 	/* Enable write combining for better LLQ performance */
2582 	rc = ena_enable_wc(adapter->memory);
2583 	if (unlikely(rc != 0)) {
2584 		device_printf(pdev, "failed to enable write combining.\n");
2585 		return (rc);
2586 	}
2587 
2588 	/*
2589 	 * Save virtual address of the device's memory region
2590 	 * for the ena_com layer.
2591 	 */
2592 	ena_dev->mem_bar = rman_get_virtual(adapter->memory);
2593 
2594 	return (0);
2595 }
2596 
static inline void
set_default_llq_configurations(struct ena_llq_configurations *llq_config)
2599 {
2600 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
2601 	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2602 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
2603 	llq_config->llq_num_decs_before_header =
2604 	    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
2605 	llq_config->llq_ring_entry_size_value = 128;
2606 }
2607 
2608 static int
2609 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
2610 {
2611 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
2612 	struct ena_com_dev *ena_dev = ctx->ena_dev;
2613 	uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
2614 	uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
2615 	uint32_t max_tx_queue_size;
2616 	uint32_t max_rx_queue_size;
2617 
2618 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2619 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2620 		    &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
2621 		max_rx_queue_size = min_t(uint32_t,
2622 		    max_queue_ext->max_rx_cq_depth,
2623 		    max_queue_ext->max_rx_sq_depth);
2624 		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
2625 
2626 		if (ena_dev->tx_mem_queue_type ==
2627 		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
2628 			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2629 			    llq->max_llq_depth);
2630 		else
2631 			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2632 			    max_queue_ext->max_tx_sq_depth);
2633 
2634 		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2635 		    max_queue_ext->max_per_packet_tx_descs);
2636 		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2637 		    max_queue_ext->max_per_packet_rx_descs);
2638 	} else {
2639 		struct ena_admin_queue_feature_desc *max_queues =
2640 		    &ctx->get_feat_ctx->max_queues;
2641 		max_rx_queue_size = min_t(uint32_t,
2642 		    max_queues->max_cq_depth,
2643 		    max_queues->max_sq_depth);
2644 		max_tx_queue_size = max_queues->max_cq_depth;
2645 
2646 		if (ena_dev->tx_mem_queue_type ==
2647 		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
2648 			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2649 			    llq->max_llq_depth);
2650 		else
2651 			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2652 			    max_queues->max_sq_depth);
2653 
2654 		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2655 		    max_queues->max_packet_tx_descs);
2656 		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2657 		    max_queues->max_packet_rx_descs);
2658 	}
2659 
2660 	/* round down to the nearest power of 2 */
2661 	max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
2662 	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
2663 
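	/*
	 * Clamp the default ring sizes to the device limits and round them
	 * down to the nearest power of 2.
	 */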
2664 	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
2665 	    max_tx_queue_size);
2666 	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
2667 	    max_rx_queue_size);
2668 
2669 	tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
2670 	rx_queue_size = 1 << (flsl(rx_queue_size) - 1);
2671 
2672 	ctx->max_tx_queue_size = max_tx_queue_size;
2673 	ctx->max_rx_queue_size = max_rx_queue_size;
2674 	ctx->tx_queue_size = tx_queue_size;
2675 	ctx->rx_queue_size = rx_queue_size;
2676 
2677 	return (0);
2678 }
2679 
2680 static int
2681 ena_rss_init_default(struct ena_adapter *adapter)
2682 {
2683 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2684 	device_t dev = adapter->pdev;
2685 	int qid, rc, i;
2686 
2687 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
2688 	if (unlikely(rc != 0)) {
2689 		device_printf(dev, "Cannot init indirect table\n");
2690 		return (rc);
2691 	}
2692 
2693 	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
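		/* Spread the entries across the IO queues round-robin. */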
2694 		qid = i % adapter->num_io_queues;
2695 		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
2696 		    ENA_IO_RXQ_IDX(qid));
2697 		if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2698 			device_printf(dev, "Cannot fill indirect table\n");
2699 			goto err_rss_destroy;
2700 		}
2701 	}
2702 
2703 	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
2704 	    ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
2705 	if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2706 		device_printf(dev, "Cannot fill hash function\n");
2707 		goto err_rss_destroy;
2708 	}
2709 
2710 	rc = ena_com_set_default_hash_ctrl(ena_dev);
2711 	if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2712 		device_printf(dev, "Cannot fill hash control\n");
2713 		goto err_rss_destroy;
2714 	}
2715 
2716 	return (0);
2717 
2718 err_rss_destroy:
2719 	ena_com_rss_destroy(ena_dev);
2720 	return (rc);
2721 }
2722 
2723 static void
2724 ena_rss_init_default_deferred(void *arg)
2725 {
2726 	struct ena_adapter *adapter;
2727 	devclass_t dc;
2728 	int max;
2729 	int rc;
2730 
2731 	dc = devclass_find("ena");
2732 	if (unlikely(dc == NULL)) {
2733 		ena_trace(ENA_ALERT, "No devclass ena\n");
2734 		return;
2735 	}
2736 
2737 	max = devclass_get_maxunit(dc);
2738 	while (max-- >= 0) {
2739 		adapter = devclass_get_softc(dc, max);
2740 		if (adapter != NULL) {
2741 			rc = ena_rss_init_default(adapter);
2742 			ENA_FLAG_SET_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2743 			if (unlikely(rc != 0)) {
2744 				device_printf(adapter->pdev,
2745 				    "WARNING: RSS was not properly initialized,"
2746 				    " it will affect bandwidth\n");
2747 				ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2748 			}
2749 		}
2750 	}
2751 }
2752 SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL);
2753 
2754 static void
2755 ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
2756 {
2757 	struct ena_admin_host_info *host_info;
2758 	uintptr_t rid;
2759 	int rc;
2760 
2761 	/* Allocate only the host info */
2762 	rc = ena_com_allocate_host_info(ena_dev);
2763 	if (unlikely(rc != 0)) {
2764 		ena_trace(ENA_ALERT, "Cannot allocate host info\n");
2765 		return;
2766 	}
2767 
2768 	host_info = ena_dev->host_attr.host_info;
2769 
2770 	if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
2771 		host_info->bdf = rid;
2772 	host_info->os_type = ENA_ADMIN_OS_FREEBSD;
2773 	host_info->kernel_ver = osreldate;
2774 
2775 	sprintf(host_info->kernel_ver_str, "%d", osreldate);
2776 	host_info->os_dist = 0;
2777 	strncpy(host_info->os_dist_str, osrelease,
2778 	    sizeof(host_info->os_dist_str) - 1);
2779 
2780 	host_info->driver_version =
2781 		(DRV_MODULE_VER_MAJOR) |
2782 		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2783 		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
2784 	host_info->num_cpus = mp_ncpus;
2785 
2786 	rc = ena_com_set_host_attributes(ena_dev);
2787 	if (unlikely(rc != 0)) {
2788 		if (rc == EOPNOTSUPP)
2789 			ena_trace(ENA_WARNING, "Cannot set host attributes\n");
2790 		else
2791 			ena_trace(ENA_ALERT, "Cannot set host attributes\n");
2792 
2793 		goto err;
2794 	}
2795 
2796 	return;
2797 
2798 err:
2799 	ena_com_delete_host_info(ena_dev);
2800 }
2801 
2802 static int
2803 ena_device_init(struct ena_adapter *adapter, device_t pdev,
2804     struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
2805 {
	struct ena_com_dev *ena_dev = adapter->ena_dev;
2807 	bool readless_supported;
2808 	uint32_t aenq_groups;
2809 	int dma_width;
2810 	int rc;
2811 
2812 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
2813 	if (unlikely(rc != 0)) {
2814 		device_printf(pdev, "failed to init mmio read less\n");
2815 		return (rc);
2816 	}
2817 
2818 	/*
2819 	 * The PCIe configuration space revision id indicate if mmio reg
2820 	 * read is disabled
2821 	 */
2822 	readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
2823 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2824 
2825 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2826 	if (unlikely(rc != 0)) {
2827 		device_printf(pdev, "Can not reset device\n");
2828 		goto err_mmio_read_less;
2829 	}
2830 
2831 	rc = ena_com_validate_version(ena_dev);
2832 	if (unlikely(rc != 0)) {
2833 		device_printf(pdev, "device version is too low\n");
2834 		goto err_mmio_read_less;
2835 	}
2836 
2837 	dma_width = ena_com_get_dma_width(ena_dev);
2838 	if (unlikely(dma_width < 0)) {
		device_printf(pdev, "Invalid dma width value %d\n", dma_width);
2840 		rc = dma_width;
2841 		goto err_mmio_read_less;
2842 	}
2843 	adapter->dma_width = dma_width;
2844 
2845 	/* ENA admin level init */
2846 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2847 	if (unlikely(rc != 0)) {
2848 		device_printf(pdev,
2849 		    "Can not initialize ena admin queue with device\n");
2850 		goto err_mmio_read_less;
2851 	}
2852 
2853 	/*
2854 	 * To enable the msix interrupts the driver needs to know the number
2855 	 * of queues. So the driver uses polling mode to retrieve this
2856 	 * information
2857 	 */
2858 	ena_com_set_admin_polling_mode(ena_dev, true);
2859 
2860 	ena_config_host_info(ena_dev, pdev);
2861 
2862 	/* Get Device Attributes */
2863 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2864 	if (unlikely(rc != 0)) {
2865 		device_printf(pdev,
2866 		    "Cannot get attribute for ena device rc: %d\n", rc);
2867 		goto err_admin_init;
2868 	}
2869 
2870 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2871 	    BIT(ENA_ADMIN_FATAL_ERROR) |
2872 	    BIT(ENA_ADMIN_WARNING) |
2873 	    BIT(ENA_ADMIN_NOTIFICATION) |
2874 	    BIT(ENA_ADMIN_KEEP_ALIVE);
2875 
2876 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
2877 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2878 	if (unlikely(rc != 0)) {
2879 		device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
2880 		goto err_admin_init;
2881 	}
2882 
2883 	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2884 
2885 	return (0);
2886 
2887 err_admin_init:
2888 	ena_com_delete_host_info(ena_dev);
2889 	ena_com_admin_destroy(ena_dev);
2890 err_mmio_read_less:
2891 	ena_com_mmio_reg_read_request_destroy(ena_dev);
2892 
2893 	return (rc);
2894 }
2895 
static int
ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
2897 {
2898 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2899 	int rc;
2900 
2901 	rc = ena_enable_msix(adapter);
2902 	if (unlikely(rc != 0)) {
2903 		device_printf(adapter->pdev, "Error with MSI-X enablement\n");
2904 		return (rc);
2905 	}
2906 
2907 	ena_setup_mgmnt_intr(adapter);
2908 
2909 	rc = ena_request_mgmnt_irq(adapter);
2910 	if (unlikely(rc != 0)) {
2911 		device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
2912 		goto err_disable_msix;
2913 	}
2914 
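	/*
	 * With the management interrupt in place, switch the admin queue
	 * from polling to interrupt mode and enable AENQ notifications.
	 */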
2915 	ena_com_set_admin_polling_mode(ena_dev, false);
2916 
2917 	ena_com_admin_aenq_enable(ena_dev);
2918 
2919 	return (0);
2920 
2921 err_disable_msix:
2922 	ena_disable_msix(adapter);
2923 
2924 	return (rc);
2925 }
2926 
2927 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
static void
ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
2930 {
2931 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
2932 	struct ena_admin_aenq_keep_alive_desc *desc;
2933 	sbintime_t stime;
2934 	uint64_t rx_drops;
2935 	uint64_t tx_drops;
2936 
2937 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
2938 
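	/*
	 * The device reports the drop counters as two 32-bit halves; combine
	 * them into 64-bit values before updating the statistics.
	 */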
2939 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2940 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
2941 	counter_u64_zero(adapter->hw_stats.rx_drops);
2942 	counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
2943 	counter_u64_zero(adapter->hw_stats.tx_drops);
2944 	counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
2945 
2946 	stime = getsbinuptime();
2947 	atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
2948 }
2949 
2950 /* Check for keep alive expiration */
static void
check_for_missing_keep_alive(struct ena_adapter *adapter)
2952 {
2953 	sbintime_t timestamp, time;
2954 
2955 	if (adapter->wd_active == 0)
2956 		return;
2957 
2958 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2959 		return;
2960 
2961 	timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
2962 	time = getsbinuptime() - timestamp;
2963 	if (unlikely(time > adapter->keep_alive_timeout)) {
2964 		device_printf(adapter->pdev,
2965 		    "Keep alive watchdog timeout.\n");
2966 		counter_u64_add(adapter->dev_stats.wd_expired, 1);
2967 		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
2968 	}
2969 }
2970 
2971 /* Check if admin queue is enabled */
static void
check_for_admin_com_state(struct ena_adapter *adapter)
2973 {
2974 	if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) ==
2975 	    false)) {
2976 		device_printf(adapter->pdev,
2977 		    "ENA admin queue is not in running state!\n");
2978 		counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
2979 		ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
2980 	}
2981 }
2982 
2983 static int
2984 check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2985     struct ena_ring *rx_ring)
2986 {
2987 	if (likely(rx_ring->first_interrupt))
2988 		return (0);
2989 
2990 	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2991 		return (0);
2992 
2993 	rx_ring->no_interrupt_event_cnt++;
2994 
2995 	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2996 		device_printf(adapter->pdev, "Potential MSIX issue on Rx side "
2997 		    "Queue = %d. Reset the device\n", rx_ring->qid);
2998 		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
2999 		return (EIO);
3000 	}
3001 
3002 	return (0);
3003 }
3004 
3005 static int
3006 check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3007     struct ena_ring *tx_ring)
3008 {
3009 	struct bintime curtime, time;
3010 	struct ena_tx_buffer *tx_buf;
3011 	sbintime_t time_offset;
3012 	uint32_t missed_tx = 0;
3013 	int i, rc = 0;
3014 
3015 	getbinuptime(&curtime);
3016 
3017 	for (i = 0; i < tx_ring->ring_size; i++) {
3018 		tx_buf = &tx_ring->tx_buffer_info[i];
3019 
3020 		if (bintime_isset(&tx_buf->timestamp) == 0)
3021 			continue;
3022 
3023 		time = curtime;
3024 		bintime_sub(&time, &tx_buf->timestamp);
3025 		time_offset = bttosbt(time);
3026 
3027 		if (unlikely(!tx_ring->first_interrupt &&
3028 		    time_offset > 2 * adapter->missing_tx_timeout)) {
3029 			/*
3030 			 * If after graceful period interrupt is still not
3031 			 * received, we schedule a reset.
3032 			 */
3033 			device_printf(adapter->pdev,
3034 			    "Potential MSIX issue on Tx side Queue = %d. "
3035 			    "Reset the device\n", tx_ring->qid);
3036 			ena_trigger_reset(adapter,
3037 			    ENA_REGS_RESET_MISS_INTERRUPT);
3038 			return (EIO);
3039 		}
3040 
3041 		/* Check again if packet is still waiting */
3042 		if (unlikely(time_offset > adapter->missing_tx_timeout)) {
3043 
3044 			if (!tx_buf->print_once)
3045 				ena_trace(ENA_WARNING, "Found a Tx that wasn't "
3046 				    "completed on time, qid %d, index %d.\n",
3047 				    tx_ring->qid, i);
3048 
3049 			tx_buf->print_once = true;
3050 			missed_tx++;
3051 		}
3052 	}
3053 
3054 	if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
		device_printf(adapter->pdev,
		    "The number of lost tx completions is above the threshold "
		    "(%d > %d). Reset the device\n",
		    missed_tx, adapter->missing_tx_threshold);
3059 		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
3060 		rc = EIO;
3061 	}
3062 
3063 	counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);
3064 
3065 	return (rc);
3066 }
3067 
3068 /*
 * Check for TX packets which were not completed on time.
 * The timeout is defined by "missing_tx_timeout".
 * A reset will be performed if the number of incomplete
 * transactions exceeds "missing_tx_threshold".
3073  */
3074 static void
3075 check_for_missing_completions(struct ena_adapter *adapter)
3076 {
3077 	struct ena_ring *tx_ring;
3078 	struct ena_ring *rx_ring;
3079 	int i, budget, rc;
3080 
	/* Make sure another context isn't turning the device on or off */
3082 	rmb();
3083 
3084 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3085 		return;
3086 
3087 	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3088 		return;
3089 
3090 	if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3091 		return;
3092 
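	/*
	 * Check at most "missing_tx_max_queues" queue pairs per call and
	 * remember where to resume, so that all queues are eventually
	 * visited across consecutive timer ticks.
	 */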
3093 	budget = adapter->missing_tx_max_queues;
3094 
3095 	for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
3096 		tx_ring = &adapter->tx_ring[i];
3097 		rx_ring = &adapter->rx_ring[i];
3098 
3099 		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3100 		if (unlikely(rc != 0))
3101 			return;
3102 
3103 		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
3104 		if (unlikely(rc != 0))
3105 			return;
3106 
3107 		budget--;
3108 		if (budget == 0) {
3109 			i++;
3110 			break;
3111 		}
3112 	}
3113 
3114 	adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
3115 }
3116 
3117 /* trigger rx cleanup after 2 consecutive detections */
3118 #define EMPTY_RX_REFILL 2
/*
 * For the rare case where the device runs out of Rx descriptors and the
 * msix handler failed to refill new Rx descriptors (due to a lack of memory
 * for example).
 * This case will lead to a deadlock:
 * the device won't send interrupts since all the new Rx packets will be
 * dropped, and the msix handler won't allocate new Rx descriptors so the
 * device won't be able to deliver new packets to the host.
 *
 * When such a situation is detected - execute the rx cleanup task in another
 * thread.
 */
3129 static void
3130 check_for_empty_rx_ring(struct ena_adapter *adapter)
3131 {
3132 	struct ena_ring *rx_ring;
3133 	int i, refill_required;
3134 
3135 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3136 		return;
3137 
3138 	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3139 		return;
3140 
3141 	for (i = 0; i < adapter->num_io_queues; i++) {
3142 		rx_ring = &adapter->rx_ring[i];
3143 
3144 		refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3145 		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3146 			rx_ring->empty_rx_queue++;
3147 
3148 			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL)	{
3149 				counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3150 				    1);
3151 
3152 				device_printf(adapter->pdev,
3153 				    "trigger refill for ring %d\n", i);
3154 
3155 				taskqueue_enqueue(rx_ring->que->cleanup_tq,
3156 				    &rx_ring->que->cleanup_task);
3157 				rx_ring->empty_rx_queue = 0;
3158 			}
3159 		} else {
3160 			rx_ring->empty_rx_queue = 0;
3161 		}
3162 	}
3163 }
3164 
static void
ena_update_hints(struct ena_adapter *adapter,
    struct ena_admin_ena_hw_hints *hints)
3167 {
3168 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3169 
3170 	if (hints->admin_completion_tx_timeout)
3171 		ena_dev->admin_queue.completion_timeout =
3172 		    hints->admin_completion_tx_timeout * 1000;
3173 
3174 	if (hints->mmio_read_timeout)
3175 		/* convert to usec */
3176 		ena_dev->mmio_read.reg_read_to =
3177 		    hints->mmio_read_timeout * 1000;
3178 
3179 	if (hints->missed_tx_completion_count_threshold_to_reset)
3180 		adapter->missing_tx_threshold =
3181 		    hints->missed_tx_completion_count_threshold_to_reset;
3182 
3183 	if (hints->missing_tx_completion_timeout) {
3184 		if (hints->missing_tx_completion_timeout ==
3185 		     ENA_HW_HINTS_NO_TIMEOUT)
3186 			adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3187 		else
3188 			adapter->missing_tx_timeout =
3189 			    SBT_1MS * hints->missing_tx_completion_timeout;
3190 	}
3191 
3192 	if (hints->driver_watchdog_timeout) {
3193 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3194 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3195 		else
3196 			adapter->keep_alive_timeout =
3197 			    SBT_1MS * hints->driver_watchdog_timeout;
3198 	}
3199 }
3200 
3201 static void
3202 ena_timer_service(void *data)
3203 {
3204 	struct ena_adapter *adapter = (struct ena_adapter *)data;
3205 	struct ena_admin_host_info *host_info =
3206 	    adapter->ena_dev->host_attr.host_info;
3207 
3208 	check_for_missing_keep_alive(adapter);
3209 
3210 	check_for_admin_com_state(adapter);
3211 
3212 	check_for_missing_completions(adapter);
3213 
3214 	check_for_empty_rx_ring(adapter);
3215 
3216 	if (host_info != NULL)
3217 		ena_update_host_info(host_info, adapter->ifp);
3218 
3219 	if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3220 		device_printf(adapter->pdev, "Trigger reset is on\n");
3221 		taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
3222 		return;
3223 	}
3224 
3225 	/*
3226 	 * Schedule another timeout one second from now.
3227 	 */
3228 	callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0);
3229 }
3230 
3231 void
3232 ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3233 {
3234 	if_t ifp = adapter->ifp;
3235 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3236 	bool dev_up;
3237 
3238 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
3239 		return;
3240 
3241 	if_link_state_change(ifp, LINK_STATE_DOWN);
3242 
3243 	callout_drain(&adapter->timer_service);
3244 
3245 	dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
3246 	if (dev_up)
3247 		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3248 
3249 	if (!graceful)
3250 		ena_com_set_admin_running_state(ena_dev, false);
3251 
3252 	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3253 		ena_down(adapter);
3254 
3255 	/*
3256 	 * Stop the device from sending AENQ events (if the device was up, and
3257 	 * the trigger reset was on, ena_down already performs device reset)
3258 	 */
3259 	if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
3260 		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3261 
3262 	ena_free_mgmnt_irq(adapter);
3263 
3264 	ena_disable_msix(adapter);
3265 
3266 	/*
3267 	 * IO rings resources should be freed because `ena_restore_device()`
3268 	 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
3269 	 * vectors. The amount of MSIX vectors after destroy-restore may be
3270 	 * different than before. Therefore, IO rings resources should be
3271 	 * established from scratch each time.
3272 	 */
3273 	ena_free_all_io_rings_resources(adapter);
3274 
3275 	ena_com_abort_admin_commands(ena_dev);
3276 
3277 	ena_com_wait_for_abort_completion(ena_dev);
3278 
3279 	ena_com_admin_destroy(ena_dev);
3280 
3281 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3282 
3283 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3284 
3285 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
3286 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3287 }
3288 
3289 static int
3290 ena_device_validate_params(struct ena_adapter *adapter,
3291     struct ena_com_dev_get_features_ctx *get_feat_ctx)
3292 {
3293 
3294 	if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
3295 	    ETHER_ADDR_LEN) != 0) {
		device_printf(adapter->pdev,
		    "Error, MAC addresses are different\n");
3298 		return (EINVAL);
3299 	}
3300 
3301 	if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
3302 		device_printf(adapter->pdev,
3303 		    "Error, device max mtu is smaller than ifp MTU\n");
3304 		return (EINVAL);
3305 	}
3306 
	return (0);
3308 }
3309 
3310 int
3311 ena_restore_device(struct ena_adapter *adapter)
3312 {
3313 	struct ena_com_dev_get_features_ctx get_feat_ctx;
3314 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3315 	if_t ifp = adapter->ifp;
3316 	device_t dev = adapter->pdev;
3317 	int wd_active;
3318 	int rc;
3319 
3320 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3321 
3322 	rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
3323 	if (rc != 0) {
3324 		device_printf(dev, "Cannot initialize device\n");
3325 		goto err;
3326 	}
3327 	/*
3328 	 * Only enable WD if it was enabled before reset, so it won't override
	 * the value set by the user via sysctl.
3330 	 */
3331 	if (adapter->wd_active != 0)
3332 		adapter->wd_active = wd_active;
3333 
3334 	rc = ena_device_validate_params(adapter, &get_feat_ctx);
3335 	if (rc != 0) {
3336 		device_printf(dev, "Validation of device parameters failed\n");
3337 		goto err_device_destroy;
3338 	}
3339 
3340 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	/* Make sure we don't have a race with the AENQ link state handler */
3342 	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
3343 		if_link_state_change(ifp, LINK_STATE_UP);
3344 
3345 	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3346 	if (rc != 0) {
3347 		device_printf(dev, "Enable MSI-X failed\n");
3348 		goto err_device_destroy;
3349 	}
3350 
3351 	/*
3352 	 * Effective value of used MSIX vectors should be the same as before
	 * `ena_destroy_device()`, if possible, or closest to it if fewer vectors
3354 	 * are available.
3355 	 */
3356 	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
3357 		adapter->num_io_queues =
3358 		    adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3359 
3360 	/* Re-initialize rings basic information */
3361 	ena_init_io_rings(adapter);
3362 
	/* If the interface was up before the reset, bring it up again */
3364 	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3365 		rc = ena_up(adapter);
3366 		if (rc != 0) {
3367 			device_printf(dev, "Failed to create I/O queues\n");
3368 			goto err_disable_msix;
3369 		}
3370 	}
3371 
3372 	/* Indicate that device is running again and ready to work */
3373 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3374 
3375 	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3376 		/*
3377 		 * As the AENQ handlers weren't executed during reset because
3378 		 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
		 * timestamp must be updated again. That will prevent the next
		 * reset caused by a missing keep alive.
3381 		 */
3382 		adapter->keep_alive_timestamp = getsbinuptime();
3383 		callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
3384 		    ena_timer_service, (void *)adapter, 0);
3385 	}
3386 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3387 
3388 	device_printf(dev,
3389 	    "Device reset completed successfully, Driver info: %s\n", ena_version);
3390 
3391 	return (rc);
3392 
3393 err_disable_msix:
3394 	ena_free_mgmnt_irq(adapter);
3395 	ena_disable_msix(adapter);
3396 err_device_destroy:
3397 	ena_com_abort_admin_commands(ena_dev);
3398 	ena_com_wait_for_abort_completion(ena_dev);
3399 	ena_com_admin_destroy(ena_dev);
3400 	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3401 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3402 err:
3403 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3404 	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3405 	device_printf(dev, "Reset attempt failed. Can not reset the device\n");
3406 
3407 	return (rc);
3408 }
3409 
3410 static void
3411 ena_reset_task(void *arg, int pending)
3412 {
3413 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
3414 
3415 	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3416 		device_printf(adapter->pdev,
3417 		    "device reset scheduled but trigger_reset is off\n");
3418 		return;
3419 	}
3420 
3421 	ENA_LOCK_LOCK(adapter);
3422 	ena_destroy_device(adapter, false);
3423 	ena_restore_device(adapter);
3424 	ENA_LOCK_UNLOCK(adapter);
3425 }
3426 
3427 /**
3428  * ena_attach - Device Initialization Routine
3429  * @pdev: device information struct
3430  *
 * Returns 0 on success, otherwise an error code on failure.
3432  *
3433  * ena_attach initializes an adapter identified by a device structure.
 * The OS initialization, configuration of the adapter private structure,
3435  * and a hardware reset occur.
3436  **/
3437 static int
3438 ena_attach(device_t pdev)
3439 {
3440 	struct ena_com_dev_get_features_ctx get_feat_ctx;
3441 	struct ena_llq_configurations llq_config;
3442 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
3443 	static int version_printed;
3444 	struct ena_adapter *adapter;
3445 	struct ena_com_dev *ena_dev = NULL;
3446 	uint32_t max_num_io_queues;
3447 	int rid, rc;
3448 
3449 	adapter = device_get_softc(pdev);
3450 	adapter->pdev = pdev;
3451 
3452 	ENA_LOCK_INIT(adapter);
3453 
3454 	/*
	 * Set up the timer service - the driver is responsible for avoiding
3456 	 * concurrency, as the callout won't be using any locking inside.
3457 	 */
3458 	callout_init(&adapter->timer_service, true);
3459 	adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
3460 	adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
3461 	adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
3462 	adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
3463 
3464 	if (version_printed++ == 0)
3465 		device_printf(pdev, "%s\n", ena_version);
3466 
3467 	/* Allocate memory for ena_dev structure */
3468 	ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
3469 	    M_WAITOK | M_ZERO);
3470 
3471 	adapter->ena_dev = ena_dev;
3472 	ena_dev->dmadev = pdev;
3473 
3474 	rid = PCIR_BAR(ENA_REG_BAR);
3475 	adapter->memory = NULL;
3476 	adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
3477 	    &rid, RF_ACTIVE);
3478 	if (unlikely(adapter->registers == NULL)) {
3479 		device_printf(pdev,
3480 		    "unable to allocate bus resource: registers!\n");
3481 		rc = ENOMEM;
3482 		goto err_dev_free;
3483 	}
3484 
3485 	ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3486 	    M_WAITOK | M_ZERO);
3487 
3488 	/* Store register resources */
3489 	((struct ena_bus*)(ena_dev->bus))->reg_bar_t =
3490 	    rman_get_bustag(adapter->registers);
3491 	((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
3492 	    rman_get_bushandle(adapter->registers);
3493 
3494 	if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) {
3495 		device_printf(pdev, "failed to pmap registers bar\n");
3496 		rc = ENXIO;
3497 		goto err_bus_free;
3498 	}
3499 
3500 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3501 
3502 	/* Initially clear all the flags */
3503 	ENA_FLAG_ZERO(adapter);
3504 
3505 	/* Device initialization */
3506 	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
3507 	if (unlikely(rc != 0)) {
3508 		device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
3509 		rc = ENXIO;
3510 		goto err_bus_free;
3511 	}
3512 
3513 	set_default_llq_configurations(&llq_config);
3514 
3515 	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
3516 	     &llq_config);
3517 	if (unlikely(rc != 0)) {
3518 		device_printf(pdev, "failed to set placement policy\n");
3519 		goto err_com_free;
3520 	}
3521 
3522 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3523 		adapter->disable_meta_caching =
3524 		    !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
3525 		    BIT(ENA_ADMIN_DISABLE_META_CACHING));
3526 
3527 	adapter->keep_alive_timestamp = getsbinuptime();
3528 
3529 	adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3530 
3531 	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3532 	    ETHER_ADDR_LEN);
3533 
3534 	calc_queue_ctx.pdev = pdev;
3535 	calc_queue_ctx.ena_dev = ena_dev;
3536 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
3537 
3538 	/* Calculate initial and maximum IO queue number and size */
3539 	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
3540 	    &get_feat_ctx);
3541 	rc = ena_calc_io_queue_size(&calc_queue_ctx);
3542 	if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
3543 		rc = EFAULT;
3544 		goto err_com_free;
3545 	}
3546 
3547 	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
3548 	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
3549 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
3550 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
3551 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
3552 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
3553 
3554 	adapter->max_num_io_queues = max_num_io_queues;
3555 
3556 	adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;
3557 
3558 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
3559 
3560 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3561 
3562 	/* set up dma tags for rx and tx buffers */
3563 	rc = ena_setup_tx_dma_tag(adapter);
3564 	if (unlikely(rc != 0)) {
3565 		device_printf(pdev, "Failed to create TX DMA tag\n");
3566 		goto err_com_free;
3567 	}
3568 
3569 	rc = ena_setup_rx_dma_tag(adapter);
3570 	if (unlikely(rc != 0)) {
3571 		device_printf(pdev, "Failed to create RX DMA tag\n");
3572 		goto err_tx_tag_free;
3573 	}
3574 
3575 	/*
3576 	 * The number of requested MSI-X vectors is equal to
3577 	 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
3578 	 * number of admin queue interrupts. The former is initially determined
3579 	 * by HW capabilities (see `ena_calc_max_io_queue_num()`) but may not be
3580 	 * achieved if there are not enough system resources. By default, the
3581 	 * number of effectively used IO queues is the same, but it can later be
3582 	 * limited by the user via the sysctl interface.
3583 	 */
3584 	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3585 	if (unlikely(rc != 0)) {
3586 		device_printf(pdev,
3587 		    "Failed to enable and set the admin interrupts\n");
3588 		goto err_io_free;
3589 	}
3590 	/* By default, all of the allocated MSI-X vectors are actively used. */
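	/*
	 * Each IO vector serves one TX/RX queue pair, while ENA_ADMIN_MSIX_VEC
	 * vector(s) remain reserved for the admin/AENQ interrupt.
	 */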
3591 	adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3592 
3593 	/* initialize rings basic information */
3594 	ena_init_io_rings(adapter);
3595 
3596 	/* setup network interface */
3597 	rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
3598 	if (unlikely(rc != 0)) {
3599 		device_printf(pdev, "Error with network interface setup\n");
3600 		goto err_msix_free;
3601 	}
3602 
3603 	/* Initialize reset task queue */
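	/*
	 * The reset task runs ena_reset_task() from a dedicated taskqueue
	 * thread and is enqueued whenever the driver decides the device needs
	 * to be reset (e.g. by the keep-alive watchdog).
	 */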
3604 	TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3605 	adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3606 	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
3607 	taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET,
3608 	    "%s rstq", device_get_nameunit(adapter->pdev));
3609 
3610 	/* Initialize statistics */
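	/*
	 * The stats structures are built exclusively of counter_u64_t fields,
	 * so they can be treated as flat counter arrays: one per-CPU
	 * counter(9) is allocated for every field.
	 */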
3611 	ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3612 	    sizeof(struct ena_stats_dev));
3613 	ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
3614 	    sizeof(struct ena_hw_stats));
3615 	ena_sysctl_add_nodes(adapter);
3616 
3617 #ifdef DEV_NETMAP
3618 	rc = ena_netmap_attach(adapter);
3619 	if (rc != 0) {
3620 		device_printf(pdev, "netmap attach failed: %d\n", rc);
3621 		goto err_detach;
3622 	}
3623 #endif /* DEV_NETMAP */
3624 
3625 	/* Tell the stack that the interface is not active */
3626 	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3627 	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3628 
3629 	return (0);
3630 
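	/* Failure paths below release resources in reverse order of acquisition. */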
3631 #ifdef DEV_NETMAP
3632 err_detach:
3633 	ether_ifdetach(adapter->ifp);
3634 #endif /* DEV_NETMAP */
3635 err_msix_free:
3636 	ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
3637 	ena_free_mgmnt_irq(adapter);
3638 	ena_disable_msix(adapter);
3639 err_io_free:
3640 	ena_free_all_io_rings_resources(adapter);
3641 	ena_free_rx_dma_tag(adapter);
3642 err_tx_tag_free:
3643 	ena_free_tx_dma_tag(adapter);
3644 err_com_free:
3645 	ena_com_admin_destroy(ena_dev);
3646 	ena_com_delete_host_info(ena_dev);
3647 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3648 err_bus_free:
3649 	free(ena_dev->bus, M_DEVBUF);
3650 	ena_free_pci_resources(adapter);
3651 err_dev_free:
3652 	free(ena_dev, M_DEVBUF);
3653 
3654 	return (rc);
3655 }
3656 
3657 /**
3658  * ena_detach - Device Removal Routine
3659  * @pdev: device information struct
3660  *
3661  * ena_detach is called by the device subsystem to alert the driver
3662  * that it should release a PCI device.
3663  **/
3664 static int
3665 ena_detach(device_t pdev)
3666 {
3667 	struct ena_adapter *adapter = device_get_softc(pdev);
3668 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3669 	int rc;
3670 
3671 	/* Make sure VLANS are not using driver */
3672 	if (adapter->ifp->if_vlantrunk != NULL) {
3673 		device_printf(adapter->pdev, "VLAN is in use, detach first\n");
3674 		return (EBUSY);
3675 	}
3676 
3677 	ether_ifdetach(adapter->ifp);
3678 
3679 	/* Stop timer service */
3680 	ENA_LOCK_LOCK(adapter);
3681 	callout_drain(&adapter->timer_service);
3682 	ENA_LOCK_UNLOCK(adapter);
3683 
3684 	/* Release reset task */
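	/*
	 * taskqueue_cancel() returns non-zero while the task is still
	 * running; draining and retrying guarantees the reset task cannot
	 * fire again once the queue is freed (see taskqueue(9)).
	 */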
3685 	while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
3686 		taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
3687 	taskqueue_free(adapter->reset_tq);
3688 
3689 	ENA_LOCK_LOCK(adapter);
3690 	ena_down(adapter);
3691 	ena_destroy_device(adapter, true);
3692 	ENA_LOCK_UNLOCK(adapter);
3693 
3694 #ifdef DEV_NETMAP
3695 	netmap_detach(adapter->ifp);
3696 #endif /* DEV_NETMAP */
3697 
3698 	ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3699 	    sizeof(struct ena_hw_stats));
3700 	ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3701 	    sizeof(struct ena_stats_dev));
3702 
3703 	rc = ena_free_rx_dma_tag(adapter);
3704 	if (unlikely(rc != 0))
3705 		device_printf(adapter->pdev,
3706 		    "Unmapped RX DMA tag associations\n");
3707 
3708 	rc = ena_free_tx_dma_tag(adapter);
3709 	if (unlikely(rc != 0))
3710 		device_printf(adapter->pdev,
3711 		    "Unmapped TX DMA tag associations\n");
3712 
3713 	ena_free_irqs(adapter);
3714 
3715 	ena_free_pci_resources(adapter);
3716 
3717 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
3718 		ena_com_rss_destroy(ena_dev);
3719 
3720 	ena_com_delete_host_info(ena_dev);
3721 
3722 	ENA_LOCK_DESTROY(adapter);
3723 
3724 	if_free(adapter->ifp);
3725 
3726 	if (ena_dev->bus != NULL)
3727 		free(ena_dev->bus, M_DEVBUF);
3728 
3729 	if (ena_dev != NULL)
3730 		free(ena_dev, M_DEVBUF);
3731 
3732 	return (bus_generic_detach(pdev));
3733 }
3734 
3735 /******************************************************************************
3736  ******************************** AENQ Handlers *******************************
3737  *****************************************************************************/
3738 /**
3739  * ena_update_on_link_change:
3740  * Notify the network interface about the change in link status
3741  **/
3742 static void
3743 ena_update_on_link_change(void *adapter_data,
3744     struct ena_admin_aenq_entry *aenq_e)
3745 {
3746 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3747 	struct ena_admin_aenq_link_change_desc *aenq_desc;
3748 	int status;
3749 	if_t ifp;
3750 
3751 	aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3752 	ifp = adapter->ifp;
3753 	status = aenq_desc->flags &
3754 	    ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3755 
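	/*
	 * While a reset is in progress, a link-up event only records the
	 * LINK_UP flag; the stack is notified once the device has been
	 * restored.
	 */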
3756 	if (status != 0) {
3757 		device_printf(adapter->pdev, "link is UP\n");
3758 		ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3759 		if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
3760 			if_link_state_change(ifp, LINK_STATE_UP);
3761 	} else {
3762 		device_printf(adapter->pdev, "link is DOWN\n");
3763 		if_link_state_change(ifp, LINK_STATE_DOWN);
3764 		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3765 	}
3766 }
3767 
3768 static void
3769 ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
3770 {
3771 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3772 	struct ena_admin_ena_hw_hints *hints;
3773 
3774 	ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3775 	    "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group,
3776 	    ENA_ADMIN_NOTIFICATION);
3777 
3778 	switch (aenq_e->aenq_common_desc.syndrom) {
3779 	case ENA_ADMIN_UPDATE_HINTS:
3780 		hints =
3781 		    (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
3782 		ena_update_hints(adapter, hints);
3783 		break;
3784 	default:
3785 		device_printf(adapter->pdev,
3786 		    "Invalid aenq notification link state %d\n",
3787 		    aenq_e->aenq_common_desc.syndrom);
3788 	}
3789 }
3790 
3791 /**
3792  * This handler is called for unknown event groups or unimplemented handlers.
3793  **/
3794 static void
3795 unimplemented_aenq_handler(void *adapter_data,
3796     struct ena_admin_aenq_entry *aenq_e)
3797 {
3798 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3799 
3800 	device_printf(adapter->pdev,
3801 	    "Unknown event was received or event with unimplemented handler\n");
3802 }
3803 
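/*
 * Dispatch table for AENQ (asynchronous event notification queue) events.
 * ena_com calls the handler that matches the event group; groups without an
 * entry are routed to unimplemented_aenq_handler().
 */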
3804 static struct ena_aenq_handlers aenq_handlers = {
3805     .handlers = {
3806 	    [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3807 	    [ENA_ADMIN_NOTIFICATION] = ena_notification,
3808 	    [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3809     },
3810     .unimplemented_handler = unimplemented_aenq_handler
3811 };
3812 
3813 /*********************************************************************
3814  *  FreeBSD Device Interface Entry Points
3815  *********************************************************************/
3816 
3817 static device_method_t ena_methods[] = {
3818     /* Device interface */
3819     DEVMETHOD(device_probe, ena_probe),
3820     DEVMETHOD(device_attach, ena_attach),
3821     DEVMETHOD(device_detach, ena_detach),
3822     DEVMETHOD_END
3823 };
3824 
3825 static driver_t ena_driver = {
3826     "ena", ena_methods, sizeof(struct ena_adapter),
3827 };
3828 
3829 devclass_t ena_devclass;
3830 DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
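/*
 * MODULE_PNP_INFO exports the PCI ID table so devmatch(8) can load the module
 * automatically for matching devices; nitems() - 1 skips the terminating
 * sentinel entry of ena_vendor_info_array.
 */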
3831 MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
3832     nitems(ena_vendor_info_array) - 1);
3833 MODULE_DEPEND(ena, pci, 1, 1, 1);
3834 MODULE_DEPEND(ena, ether, 1, 1, 1);
3835 #ifdef DEV_NETMAP
3836 MODULE_DEPEND(ena, netmap, 1, 1, 1);
3837 #endif /* DEV_NETMAP */
3838 
3839 /*********************************************************************/
3840