xref: /freebsd/sys/dev/ena/ena.c (revision 0aa4a9fc859fd43343e2d7b5094a50d1ca0948eb)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 #include "opt_rss.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/eventhandler.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/module.h>
43 #include <sys/rman.h>
44 #include <sys/smp.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
49 #include <sys/time.h>
50 
51 #include <vm/vm.h>
52 #include <vm/pmap.h>
53 
54 #include <machine/atomic.h>
55 #include <machine/bus.h>
56 #include <machine/in_cksum.h>
57 #include <machine/resource.h>
58 
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
61 
62 #include <net/bpf.h>
63 #include <net/ethernet.h>
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/if_var.h>
70 #include <net/if_vlan_var.h>
71 #include <netinet/in.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/if_ether.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip6.h>
76 #include <netinet/tcp.h>
77 #include <netinet/udp.h>
78 
79 #include "ena.h"
80 #include "ena_datapath.h"
81 #include "ena_rss.h"
82 #include "ena_sysctl.h"
83 
84 #ifdef DEV_NETMAP
85 #include "ena_netmap.h"
86 #endif /* DEV_NETMAP */
87 
88 /*********************************************************
89  *  Function prototypes
90  *********************************************************/
91 static int ena_probe(device_t);
92 static void ena_intr_msix_mgmnt(void *);
93 static void ena_free_pci_resources(struct ena_adapter *);
94 static int ena_change_mtu(if_t, int);
95 static inline void ena_alloc_counters(counter_u64_t *, int);
96 static inline void ena_free_counters(counter_u64_t *, int);
97 static inline void ena_reset_counters(counter_u64_t *, int);
98 static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *,
99     uint16_t);
100 static void ena_init_io_rings_basic(struct ena_adapter *);
101 static void ena_init_io_rings_advanced(struct ena_adapter *);
102 static void ena_init_io_rings(struct ena_adapter *);
103 static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
104 static void ena_free_all_io_rings_resources(struct ena_adapter *);
105 static int ena_setup_tx_dma_tag(struct ena_adapter *);
106 static int ena_free_tx_dma_tag(struct ena_adapter *);
107 static int ena_setup_rx_dma_tag(struct ena_adapter *);
108 static int ena_free_rx_dma_tag(struct ena_adapter *);
109 static void ena_release_all_tx_dmamap(struct ena_ring *);
110 static int ena_setup_tx_resources(struct ena_adapter *, int);
111 static void ena_free_tx_resources(struct ena_adapter *, int);
112 static int ena_setup_all_tx_resources(struct ena_adapter *);
113 static void ena_free_all_tx_resources(struct ena_adapter *);
114 static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
115 static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
116 static int ena_setup_all_rx_resources(struct ena_adapter *);
117 static void ena_free_all_rx_resources(struct ena_adapter *);
118 static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
119     struct ena_rx_buffer *);
120 static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
121     struct ena_rx_buffer *);
122 static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
123 static void ena_refill_all_rx_bufs(struct ena_adapter *);
124 static void ena_free_all_rx_bufs(struct ena_adapter *);
125 static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
126 static void ena_free_all_tx_bufs(struct ena_adapter *);
127 static void ena_destroy_all_tx_queues(struct ena_adapter *);
128 static void ena_destroy_all_rx_queues(struct ena_adapter *);
129 static void ena_destroy_all_io_queues(struct ena_adapter *);
130 static int ena_create_io_queues(struct ena_adapter *);
131 static int ena_handle_msix(void *);
132 static int ena_enable_msix(struct ena_adapter *);
133 static void ena_setup_mgmnt_intr(struct ena_adapter *);
134 static int ena_setup_io_intr(struct ena_adapter *);
135 static int ena_request_mgmnt_irq(struct ena_adapter *);
136 static int ena_request_io_irq(struct ena_adapter *);
137 static void ena_free_mgmnt_irq(struct ena_adapter *);
138 static void ena_free_io_irq(struct ena_adapter *);
139 static void ena_free_irqs(struct ena_adapter *);
140 static void ena_disable_msix(struct ena_adapter *);
141 static void ena_unmask_all_io_irqs(struct ena_adapter *);
142 static int ena_up_complete(struct ena_adapter *);
143 static uint64_t ena_get_counter(if_t, ift_counter);
144 static int ena_media_change(if_t);
145 static void ena_media_status(if_t, struct ifmediareq *);
146 static void ena_init(void *);
147 static int ena_ioctl(if_t, u_long, caddr_t);
148 static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
149 static void ena_update_host_info(struct ena_admin_host_info *, if_t);
150 static void ena_update_hwassist(struct ena_adapter *);
151 static void ena_setup_ifnet(device_t, struct ena_adapter *,
152     struct ena_com_dev_get_features_ctx *);
153 static int ena_enable_wc(device_t, struct resource *);
154 static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
155     struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
156 static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
157 static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
158     struct ena_com_dev_get_features_ctx *);
159 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *, struct ena_adapter *);
160 static void ena_config_host_info(struct ena_com_dev *, device_t);
161 static int ena_attach(device_t);
162 static int ena_detach(device_t);
163 static int ena_device_init(struct ena_adapter *, device_t,
164     struct ena_com_dev_get_features_ctx *, int *);
165 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
166 static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
167 static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *);
168 static int ena_copy_eni_metrics(struct ena_adapter *);
169 static int ena_copy_srd_metrics(struct ena_adapter *);
170 static int ena_copy_customer_metrics(struct ena_adapter *);
171 static void ena_timer_service(void *);
172 static enum ena_regs_reset_reason_types check_cdesc_in_tx_cq(struct ena_adapter *,
173     struct ena_ring *);
174 #ifdef DEV_NETMAP
175 static int ena_reinit_netmap(struct ena_adapter *adapter);
176 #endif
177 
/* Human-readable driver name/version string, logged at attach time. */
static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME
    " v" ENA_DRV_MODULE_VERSION;

/*
 * PCI vendor/device IDs this driver probes for.  The table is scanned
 * linearly by ena_probe(); the all-zero sentinel terminates it.
 */
static ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
	/* Last entry */
	{ 0, 0, 0 }
};

/*
 * Driver-wide shared/exclusive lock.
 * NOTE(review): the exact set of state it protects is not visible in this
 * chunk — confirm against the rest of the driver before relying on it.
 */
struct sx ena_global_lock;

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;
196 
197 void
198 ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
199 {
200 	if (error != 0)
201 		return;
202 	*(bus_addr_t *)arg = segs[0].ds_addr;
203 }
204 
/*
 * ena_dma_alloc - allocate and map a DMA-coherent, zeroed memory region.
 *
 * @dmadev:    device whose softc (struct ena_adapter) provides dma_width
 * @size:      requested size in bytes (rounded up to whole pages)
 * @dma:       out: tag, map, kernel vaddr and bus paddr of the region
 * @mapflags:  flags forwarded to bus_dmamap_load()
 * @alignment: required alignment of the allocation
 * @domain:    NUMA domain for the DMA tag
 *
 * Returns 0 on success; on failure returns the busdma error and leaves
 * *dma zeroed (tag/vaddr/paddr cleared by the unwind path).
 */
int
ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
    int mapflags, bus_size_t alignment, int domain)
{
	struct ena_adapter *adapter = device_get_softc(dmadev);
	device_t pdev = adapter->pdev;
	uint32_t maxsize;
	uint64_t dma_space_addr;
	int error;

	/* Round the request up to a whole number of pages. */
	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

	/*
	 * Constrain the allocation to the device's addressable range; fall
	 * back to the full bus address space if no width was negotiated.
	 */
	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
	if (unlikely(dma_space_addr == 0))
		dma_space_addr = BUS_SPACE_MAXADDR;

	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
	    alignment, 0,      /* alignment, bounds 		*/
	    dma_space_addr,    /* lowaddr of exclusion window	*/
	    BUS_SPACE_MAXADDR, /* highaddr of exclusion window	*/
	    NULL, NULL,	       /* filter, filterarg 		*/
	    maxsize,	       /* maxsize 			*/
	    1,		       /* nsegments 			*/
	    maxsize,	       /* maxsegsize 			*/
	    BUS_DMA_ALLOCNOW,  /* flags 			*/
	    NULL,	       /* lockfunc 			*/
	    NULL,	       /* lockarg 			*/
	    &dma->tag);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
		goto fail_tag;
	}

	error = bus_dma_tag_set_domain(dma->tag, domain);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n",
		    error);
		goto fail_map_create;
	}

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
	if (unlikely(error != 0)) {
		ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
		    (uintmax_t)size, error);
		goto fail_map_create;
	}

	/*
	 * Pre-zero paddr so the (paddr == 0) check below also catches a
	 * load callback that was never invoked with a valid segment.
	 */
	dma->paddr = 0;
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    ena_dmamap_callback, &dma->paddr, mapflags);
	if (unlikely((error != 0) || (dma->paddr == 0))) {
		ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
		goto fail_map_load;
	}

	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

	/* Unwind in reverse order of acquisition. */
fail_map_load:
	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
	bus_dma_tag_destroy(dma->tag);
fail_tag:
	dma->tag = NULL;
	dma->vaddr = NULL;
	dma->paddr = 0;

	return (error);
}
277 
278 static void
279 ena_free_pci_resources(struct ena_adapter *adapter)
280 {
281 	device_t pdev = adapter->pdev;
282 
283 	if (adapter->memory != NULL) {
284 		bus_release_resource(pdev, SYS_RES_MEMORY,
285 		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
286 	}
287 
288 	if (adapter->registers != NULL) {
289 		bus_release_resource(pdev, SYS_RES_MEMORY,
290 		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
291 	}
292 
293 	if (adapter->msix != NULL) {
294 		bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid,
295 		    adapter->msix);
296 	}
297 }
298 
299 static int
300 ena_probe(device_t dev)
301 {
302 	ena_vendor_info_t *ent;
303 	uint16_t pci_vendor_id = 0;
304 	uint16_t pci_device_id = 0;
305 
306 	pci_vendor_id = pci_get_vendor(dev);
307 	pci_device_id = pci_get_device(dev);
308 
309 	ent = ena_vendor_info_array;
310 	while (ent->vendor_id != 0) {
311 		if ((pci_vendor_id == ent->vendor_id) &&
312 		    (pci_device_id == ent->device_id)) {
313 			ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id,
314 			    pci_device_id);
315 
316 			device_set_desc(dev, ENA_DEVICE_DESC);
317 			return (BUS_PROBE_DEFAULT);
318 		}
319 
320 		ent++;
321 	}
322 
323 	return (ENXIO);
324 }
325 
326 static int
327 ena_change_mtu(if_t ifp, int new_mtu)
328 {
329 	struct ena_adapter *adapter = if_getsoftc(ifp);
330 	device_t pdev = adapter->pdev;
331 	int rc;
332 
333 	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
334 		ena_log(pdev, ERR, "Invalid MTU setting. new_mtu: %d max mtu: %d min mtu: %d\n",
335 		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
336 		return (EINVAL);
337 	}
338 
339 	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
340 	if (likely(rc == 0)) {
341 		ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
342 		if_setmtu(ifp, new_mtu);
343 	} else {
344 		ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
345 	}
346 
347 	return (rc);
348 }
349 
350 static inline void
351 ena_alloc_counters(counter_u64_t *begin, int size)
352 {
353 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
354 
355 	for (; begin < end; ++begin)
356 		*begin = counter_u64_alloc(M_WAITOK);
357 }
358 
359 static inline void
360 ena_free_counters(counter_u64_t *begin, int size)
361 {
362 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
363 
364 	for (; begin < end; ++begin)
365 		counter_u64_free(*begin);
366 }
367 
368 static inline void
369 ena_reset_counters(counter_u64_t *begin, int size)
370 {
371 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
372 
373 	for (; begin < end; ++begin)
374 		counter_u64_zero(*begin);
375 }
376 
377 static void
378 ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
379     uint16_t qid)
380 {
381 	ring->qid = qid;
382 	ring->adapter = adapter;
383 	ring->ena_dev = adapter->ena_dev;
384 	atomic_store_8(&ring->first_interrupt, 0);
385 	ring->no_interrupt_event_cnt = 0;
386 }
387 
388 static void
389 ena_init_io_rings_basic(struct ena_adapter *adapter)
390 {
391 	struct ena_com_dev *ena_dev;
392 	struct ena_ring *txr, *rxr;
393 	struct ena_que *que;
394 	int i;
395 
396 	ena_dev = adapter->ena_dev;
397 
398 	for (i = 0; i < adapter->num_io_queues; i++) {
399 		txr = &adapter->tx_ring[i];
400 		rxr = &adapter->rx_ring[i];
401 
402 		/* TX/RX common ring state */
403 		ena_init_io_rings_common(adapter, txr, i);
404 		ena_init_io_rings_common(adapter, rxr, i);
405 
406 		/* TX specific ring state */
407 		txr->tx_max_header_size = ena_dev->tx_max_header_size;
408 		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
409 
410 		que = &adapter->que[i];
411 		que->adapter = adapter;
412 		que->id = i;
413 		que->tx_ring = txr;
414 		que->rx_ring = rxr;
415 
416 		txr->que = que;
417 		rxr->que = que;
418 
419 		rxr->empty_rx_queue = 0;
420 		rxr->rx_mbuf_sz = ena_mbuf_sz;
421 	}
422 }
423 
424 static void
425 ena_init_io_rings_advanced(struct ena_adapter *adapter)
426 {
427 	struct ena_ring *txr, *rxr;
428 	int i;
429 
430 	for (i = 0; i < adapter->num_io_queues; i++) {
431 		txr = &adapter->tx_ring[i];
432 		rxr = &adapter->rx_ring[i];
433 
434 		/* Allocate a buf ring */
435 		txr->buf_ring_size = adapter->buf_ring_size;
436 		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK,
437 		    &txr->ring_mtx);
438 
439 		/* Allocate Tx statistics. */
440 		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
441 		    sizeof(txr->tx_stats));
442 		txr->tx_last_cleanup_ticks = ticks;
443 
444 		/* Allocate Rx statistics. */
445 		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
446 		    sizeof(rxr->rx_stats));
447 
448 		/* Initialize locks */
449 		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
450 		    device_get_nameunit(adapter->pdev), i);
451 		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
452 		    device_get_nameunit(adapter->pdev), i);
453 
454 		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
455 	}
456 }
457 
/*
 * Full IO ring initialization, split into two stages:
 *   1. basic    - copy initial values from adapter/ena_dev into the rings
 *   2. advanced - allocate mutexes, counters and buf_rings
 */
static void
ena_init_io_rings(struct ena_adapter *adapter)
{
	ena_init_io_rings_basic(adapter);
	ena_init_io_rings_advanced(adapter);
}
470 
471 static void
472 ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
473 {
474 	struct ena_ring *txr = &adapter->tx_ring[qid];
475 	struct ena_ring *rxr = &adapter->rx_ring[qid];
476 
477 	ena_free_counters((counter_u64_t *)&txr->tx_stats,
478 	    sizeof(txr->tx_stats));
479 	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
480 	    sizeof(rxr->rx_stats));
481 
482 	ENA_RING_MTX_LOCK(txr);
483 	drbr_free(txr->br, M_DEVBUF);
484 	ENA_RING_MTX_UNLOCK(txr);
485 
486 	mtx_destroy(&txr->ring_mtx);
487 }
488 
489 static void
490 ena_free_all_io_rings_resources(struct ena_adapter *adapter)
491 {
492 	int i;
493 
494 	for (i = 0; i < adapter->num_io_queues; i++)
495 		ena_free_io_ring_resources(adapter, i);
496 }
497 
498 static int
499 ena_setup_tx_dma_tag(struct ena_adapter *adapter)
500 {
501 	int ret;
502 
503 	/* Create DMA tag for Tx buffers */
504 	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
505 	    1, 0,				  /* alignment, bounds 	     */
506 	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
507 	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
508 	    NULL, NULL,				  /* filter, filterarg 	     */
509 	    ENA_TSO_MAXSIZE,			  /* maxsize 		     */
510 	    adapter->max_tx_sgl_size - 1,	  /* nsegments 		     */
511 	    ENA_TSO_MAXSIZE,			  /* maxsegsize 	     */
512 	    0,					  /* flags 		     */
513 	    NULL,				  /* lockfunc 		     */
514 	    NULL,				  /* lockfuncarg 	     */
515 	    &adapter->tx_buf_tag);
516 
517 	return (ret);
518 }
519 
520 static int
521 ena_free_tx_dma_tag(struct ena_adapter *adapter)
522 {
523 	int ret;
524 
525 	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
526 
527 	if (likely(ret == 0))
528 		adapter->tx_buf_tag = NULL;
529 
530 	return (ret);
531 }
532 
533 static int
534 ena_setup_rx_dma_tag(struct ena_adapter *adapter)
535 {
536 	int ret;
537 
538 	/* Create DMA tag for Rx buffers*/
539 	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
540 	    1, 0,				  /* alignment, bounds 	     */
541 	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
542 	    BUS_SPACE_MAXADDR,			  /* highaddr of excl window */
543 	    NULL, NULL,				  /* filter, filterarg 	     */
544 	    ena_mbuf_sz,			  /* maxsize 		     */
545 	    adapter->max_rx_sgl_size,		  /* nsegments 		     */
546 	    ena_mbuf_sz,			  /* maxsegsize 	     */
547 	    0,					  /* flags 		     */
548 	    NULL,				  /* lockfunc 		     */
549 	    NULL,				  /* lockarg 		     */
550 	    &adapter->rx_buf_tag);
551 
552 	return (ret);
553 }
554 
555 static int
556 ena_free_rx_dma_tag(struct ena_adapter *adapter)
557 {
558 	int ret;
559 
560 	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
561 
562 	if (likely(ret == 0))
563 		adapter->rx_buf_tag = NULL;
564 
565 	return (ret);
566 }
567 
568 int
569 validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc)
570 {
571 	struct ena_adapter *adapter = tx_ring->adapter;
572 	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
573 
574 	if (unlikely(tx_req_id_rc != 0)) {
575 		if (tx_req_id_rc == ENA_COM_FAULT) {
576 			reset_reason = ENA_REGS_RESET_TX_DESCRIPTOR_MALFORMED;
577 			ena_log(adapter->pdev, ERR,
578 			    "TX descriptor malformed. req_id %hu qid %hu\n",
579 			    req_id, tx_ring->qid);
580 		} else if (tx_req_id_rc == ENA_COM_INVAL) {
581 			ena_log_nm(adapter->pdev, WARN,
582 			    "Invalid req_id %hu in qid %hu\n",
583 			    req_id, tx_ring->qid);
584 			counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
585 		}
586 
587 		ena_trigger_reset(adapter, reset_reason);
588 		return (EFAULT);
589 	}
590 
591 	return (0);
592 }
593 
594 static void
595 ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
596 {
597 	struct ena_adapter *adapter = tx_ring->adapter;
598 	struct ena_tx_buffer *tx_info;
599 	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
600 	int i;
601 #ifdef DEV_NETMAP
602 	struct ena_netmap_tx_info *nm_info;
603 	int j;
604 #endif /* DEV_NETMAP */
605 
606 	for (i = 0; i < tx_ring->ring_size; ++i) {
607 		tx_info = &tx_ring->tx_buffer_info[i];
608 #ifdef DEV_NETMAP
609 		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
610 			nm_info = &tx_info->nm_info;
611 			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
612 				if (nm_info->map_seg[j] != NULL) {
613 					bus_dmamap_destroy(tx_tag,
614 					    nm_info->map_seg[j]);
615 					nm_info->map_seg[j] = NULL;
616 				}
617 			}
618 		}
619 #endif /* DEV_NETMAP */
620 		if (tx_info->dmamap != NULL) {
621 			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
622 			tx_info->dmamap = NULL;
623 		}
624 	}
625 }
626 
/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Allocates the host-side bookkeeping for one Tx ring: buffer-info array,
 * free req_id stack, push-buffer staging area, per-buffer DMA maps, and
 * the deferred-enqueue taskqueue with one thread pinned per queue.
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	device_t pdev = adapter->pdev;
	char thread_name[MAXCOMLEN + 1];
	struct ena_que *que = &adapter->que[qid];
	struct ena_ring *tx_ring = que->tx_ring;
	cpuset_t *cpu_mask = NULL;
	int size, i, err;
#ifdef DEV_NETMAP
	bus_dmamap_t *map;
	int j;

	ena_netmap_reset_tx_ring(adapter, qid);
#endif /* DEV_NETMAP */

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

	/* M_NOWAIT allocations: each failure unwinds what came before. */
	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->tx_buffer_info == NULL))
		return (ENOMEM);

	size = sizeof(uint16_t) * tx_ring->ring_size;
	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->free_tx_ids == NULL))
		goto err_buf_info_free;

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
		goto err_tx_ids_free;

	/* Req id stack for TX OOO completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset TX statistics. */
	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
	    sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->acum_pkts = 0;

	/* Make sure that drbr is empty */
	ENA_RING_MTX_LOCK(tx_ring);
	drbr_flush(adapter->ifp, tx_ring->br);
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* ... and create the buffer DMA maps */
	for (i = 0; i < tx_ring->ring_size; i++) {
		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
		    &tx_ring->tx_buffer_info[i].dmamap);
		if (unlikely(err != 0)) {
			ena_log(pdev, ERR,
			    "Unable to create Tx DMA map for buffer %d\n", i);
			goto err_map_release;
		}

#ifdef DEV_NETMAP
		/* One extra map per potential netmap segment of this slot. */
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
				    &map[j]);
				if (unlikely(err != 0)) {
					ena_log(pdev, ERR,
					    "Unable to create Tx DMA for buffer %d %d\n",
					    i, j);
					goto err_map_release;
				}
			}
		}
#endif /* DEV_NETMAP */
	}

	/* Allocate taskqueues */
	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
	if (unlikely(tx_ring->enqueue_tq == NULL)) {
		ena_log(pdev, ERR,
		    "Unable to create taskqueue for enqueue task\n");
		/*
		 * All maps were created by now; setting i to ring_size is
		 * harmless here since the unwind helper scans the whole
		 * ring and skips NULL slots anyway.
		 */
		i = tx_ring->ring_size;
		goto err_map_release;
	}

	tx_ring->running = true;

	/* With RSS the thread is pinned to the queue's CPU set. */
#ifdef RSS
	cpu_mask = &que->cpu_mask;
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->cpu);
#else
	snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
	    device_get_nameunit(adapter->pdev), que->id);
#endif
	taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
	    cpu_mask, "%s", thread_name);

	return (0);

	/* Unwind in reverse order of allocation. */
err_map_release:
	ena_release_all_tx_dmamap(tx_ring);
err_tx_ids_free:
	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;
err_buf_info_free:
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	return (ENOMEM);
}
748 
/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources: the enqueue taskqueue, the
 * buf_ring contents, all per-buffer DMA maps (and netmap segment maps),
 * any pending mbufs, and the host-side bookkeeping arrays.
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
#ifdef DEV_NETMAP
	struct ena_netmap_tx_info *nm_info;
	int j;
#endif /* DEV_NETMAP */

	/*
	 * Stop the enqueue task first so nothing touches the ring while it
	 * is torn down; drain in case the task is currently executing.
	 */
	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	taskqueue_free(tx_ring->enqueue_tq);

	ENA_RING_MTX_LOCK(tx_ring);
	/* Flush buffer ring, */
	drbr_flush(adapter->ifp, tx_ring->br);

	/* Free buffer DMA maps, */
	for (int i = 0; i < tx_ring->ring_size; i++) {
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);
		bus_dmamap_destroy(adapter->tx_buf_tag,
		    tx_ring->tx_buffer_info[i].dmamap);

#ifdef DEV_NETMAP
		if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
				/* Only slots holding a loaded netmap buffer
				 * need a sync + unload before destroy. */
				if (nm_info->socket_buf_idx[j] != 0) {
					bus_dmamap_sync(adapter->tx_buf_tag,
					    nm_info->map_seg[j],
					    BUS_DMASYNC_POSTWRITE);
					ena_netmap_unload(adapter,
					    nm_info->map_seg[j]);
				}
				bus_dmamap_destroy(adapter->tx_buf_tag,
				    nm_info->map_seg[j]);
				nm_info->socket_buf_idx[j] = 0;
			}
		}
#endif /* DEV_NETMAP */

		m_freem(tx_ring->tx_buffer_info[i].mbuf);
		tx_ring->tx_buffer_info[i].mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);

	/* And free allocated memory. */
	free(tx_ring->tx_buffer_info, M_DEVBUF);
	tx_ring->tx_buffer_info = NULL;

	free(tx_ring->free_tx_ids, M_DEVBUF);
	tx_ring->free_tx_ids = NULL;

	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
	tx_ring->push_buf_intermediate_buf = NULL;
}
816 
817 /**
818  * ena_setup_all_tx_resources - allocate all queues Tx resources
819  * @adapter: network interface device structure
820  *
821  * Returns 0 on success, otherwise on failure.
822  **/
823 static int
824 ena_setup_all_tx_resources(struct ena_adapter *adapter)
825 {
826 	int i, rc;
827 
828 	for (i = 0; i < adapter->num_io_queues; i++) {
829 		rc = ena_setup_tx_resources(adapter, i);
830 		if (rc != 0) {
831 			ena_log(adapter->pdev, ERR,
832 			    "Allocation for Tx Queue %u failed\n", i);
833 			goto err_setup_tx;
834 		}
835 	}
836 
837 	return (0);
838 
839 err_setup_tx:
840 	/* Rewind the index freeing the rings as we go */
841 	while (i--)
842 		ena_free_tx_resources(adapter, i);
843 	return (rc);
844 }
845 
846 /**
847  * ena_free_all_tx_resources - Free Tx Resources for All Queues
848  * @adapter: network interface device structure
849  *
850  * Free all transmit software resources
851  **/
852 static void
853 ena_free_all_tx_resources(struct ena_adapter *adapter)
854 {
855 	int i;
856 
857 	for (i = 0; i < adapter->num_io_queues; i++)
858 		ena_free_tx_resources(adapter, i);
859 }
860 
861 /**
862  * ena_setup_rx_resources - allocate Rx resources (Descriptors)
863  * @adapter: network interface device structure
864  * @qid: queue index
865  *
866  * Returns 0 on success, otherwise on failure.
867  **/
868 static int
869 ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
870 {
871 	device_t pdev = adapter->pdev;
872 	struct ena_que *que = &adapter->que[qid];
873 	struct ena_ring *rx_ring = que->rx_ring;
874 	int size, err, i;
875 
876 	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
877 
878 #ifdef DEV_NETMAP
879 	ena_netmap_reset_rx_ring(adapter, qid);
880 	rx_ring->initialized = false;
881 #endif /* DEV_NETMAP */
882 
883 	/*
884 	 * Alloc extra element so in rx path
885 	 * we can always prefetch rx_info + 1
886 	 */
887 	size += sizeof(struct ena_rx_buffer);
888 
889 	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
890 
891 	size = sizeof(uint16_t) * rx_ring->ring_size;
892 	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
893 
894 	for (i = 0; i < rx_ring->ring_size; i++)
895 		rx_ring->free_rx_ids[i] = i;
896 
897 	/* Reset RX statistics. */
898 	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
899 	    sizeof(rx_ring->rx_stats));
900 
901 	rx_ring->next_to_clean = 0;
902 	rx_ring->next_to_use = 0;
903 
904 	/* ... and create the buffer DMA maps */
905 	for (i = 0; i < rx_ring->ring_size; i++) {
906 		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
907 		    &(rx_ring->rx_buffer_info[i].map));
908 		if (err != 0) {
909 			ena_log(pdev, ERR,
910 			    "Unable to create Rx DMA map for buffer %d\n", i);
911 			goto err_buf_info_unmap;
912 		}
913 	}
914 
915 	/* Create LRO for the ring */
916 	if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) {
917 		int err = tcp_lro_init(&rx_ring->lro);
918 		if (err != 0) {
919 			ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
920 			    qid);
921 		} else {
922 			ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
923 			    qid);
924 			rx_ring->lro.ifp = adapter->ifp;
925 		}
926 	}
927 
928 	return (0);
929 
930 err_buf_info_unmap:
931 	while (i--) {
932 		bus_dmamap_destroy(adapter->rx_buf_tag,
933 		    rx_ring->rx_buffer_info[i].map);
934 	}
935 
936 	free(rx_ring->free_rx_ids, M_DEVBUF);
937 	rx_ring->free_rx_ids = NULL;
938 	free(rx_ring->rx_buffer_info, M_DEVBUF);
939 	rx_ring->rx_buffer_info = NULL;
940 	return (ENOMEM);
941 }
942 
943 /**
944  * ena_free_rx_resources - Free Rx Resources
945  * @adapter: network interface device structure
946  * @qid: queue index
947  *
948  * Free all receive software resources
949  **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	/* Free buffer DMA maps, */
	for (int i = 0; i < rx_ring->ring_size; i++) {
		/*
		 * Sync for the CPU before touching the buffer, release the
		 * attached mbuf (m_freem(NULL) is a no-op), then unload the
		 * mapping and destroy the map itself.
		 */
		bus_dmamap_sync(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
		m_freem(rx_ring->rx_buffer_info[i].mbuf);
		rx_ring->rx_buffer_info[i].mbuf = NULL;
		bus_dmamap_unload(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
		bus_dmamap_destroy(adapter->rx_buf_tag,
		    rx_ring->rx_buffer_info[i].map);
	}

	/* free LRO resources, */
	tcp_lro_free(&rx_ring->lro);

	/* free allocated memory; pointers are NULLed so a repeated teardown
	 * or a failed re-setup cannot double-free them. */
	free(rx_ring->rx_buffer_info, M_DEVBUF);
	rx_ring->rx_buffer_info = NULL;

	free(rx_ring->free_rx_ids, M_DEVBUF);
	rx_ring->free_rx_ids = NULL;
}
977 
978 /**
979  * ena_setup_all_rx_resources - allocate all queues Rx resources
980  * @adapter: network interface device structure
981  *
982  * Returns 0 on success, otherwise on failure.
983  **/
984 static int
985 ena_setup_all_rx_resources(struct ena_adapter *adapter)
986 {
987 	int i, rc = 0;
988 
989 	for (i = 0; i < adapter->num_io_queues; i++) {
990 		rc = ena_setup_rx_resources(adapter, i);
991 		if (rc != 0) {
992 			ena_log(adapter->pdev, ERR,
993 			    "Allocation for Rx Queue %u failed\n", i);
994 			goto err_setup_rx;
995 		}
996 	}
997 	return (0);
998 
999 err_setup_rx:
1000 	/* rewind the index freeing the rings as we go */
1001 	while (i--)
1002 		ena_free_rx_resources(adapter, i);
1003 	return (rc);
1004 }
1005 
1006 /**
1007  * ena_free_all_rx_resources - Free Rx resources for all queues
1008  * @adapter: network interface device structure
1009  *
1010  * Free all receive software resources
1011  **/
1012 static void
1013 ena_free_all_rx_resources(struct ena_adapter *adapter)
1014 {
1015 	int i;
1016 
1017 	for (i = 0; i < adapter->num_io_queues; i++)
1018 		ena_free_rx_resources(adapter, i);
1019 }
1020 
/*
 * Allocate an mbuf for one Rx descriptor and DMA-map it, filling in
 * rx_info->ena_buf with the resulting physical address and length.
 * Returns 0 on success (or if a buffer is already attached), ENOMEM when
 * no mbuf could be allocated, EFAULT when the DMA mapping failed.
 */
static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	device_t pdev = adapter->pdev;
	struct ena_com_buf *ena_buf;
	bus_dma_segment_t segs[1];
	int nsegs, error;
	int mlen;

	/* if previous allocated frag is not used */
	if (unlikely(rx_info->mbuf != NULL))
		return (0);

	/* Get mbuf using UMA allocator */
	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    rx_ring->rx_mbuf_sz);

	if (unlikely(rx_info->mbuf == NULL)) {
		/* Jumbo cluster failed - fall back to a standard cluster. */
		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(rx_info->mbuf == NULL)) {
			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
			return (ENOMEM);
		}
		mlen = MCLBYTES;
	} else {
		mlen = rx_ring->rx_mbuf_sz;
	}
	/* Set mbuf length*/
	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

	/* Map packets for DMA */
	ena_log(pdev, DBG,
	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	/* The Rx buffer must map to exactly one contiguous segment. */
	if (unlikely((error != 0) || (nsegs != 1))) {
		ena_log(pdev, WARN,
		    "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
		goto exit;
	}

	/* Hand the buffer to the device before it is posted. */
	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = segs[0].ds_addr;
	ena_buf->len = mlen;

	ena_log(pdev, DBG,
	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

	return (0);

exit:
	/* Mapping failed - drop the mbuf so the slot stays consistent. */
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
	return (EFAULT);
}
1083 
/*
 * Unmap and free the mbuf attached to a single Rx buffer slot.
 * A NULL mbuf only triggers a warning; the slot is left untouched.
 */
static void
ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
	if (rx_info->mbuf == NULL) {
		ena_log(adapter->pdev, WARN,
		    "Trying to free unallocated buffer\n");
		return;
	}

	/* Sync for CPU access before unloading and freeing. */
	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
	m_freem(rx_info->mbuf);
	rx_info->mbuf = NULL;
}
1100 
1101 /**
1102  * ena_refill_rx_bufs - Refills ring with descriptors
1103  * @rx_ring: the ring which we want to feed with free descriptors
1104  * @num: number of descriptors to refill
1105  * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1106  **/
int
ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
{
	struct ena_adapter *adapter = rx_ring->adapter;
	device_t pdev = adapter->pdev;
	uint16_t next_to_use, req_id;
	uint32_t i;
	int rc;

	ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid);

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n",
		    next_to_use);

		/* Pick the next free request id and its buffer slot. */
		req_id = rx_ring->free_rx_ids[next_to_use];
		rx_info = &rx_ring->rx_buffer_info[req_id];
#ifdef DEV_NETMAP
		/* In netmap mode buffers come from netmap slots, not mbufs. */
		if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
			rc = ena_netmap_alloc_rx_slot(adapter, rx_ring,
			    rx_info);
		else
#endif /* DEV_NETMAP */
			rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
		if (unlikely(rc != 0)) {
			ena_log_io(pdev, WARN,
			    "failed to alloc buffer for rx queue %d\n",
			    rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
		    &rx_info->ena_buf, req_id);
		if (unlikely(rc != 0)) {
			ena_log_io(pdev, WARN,
			    "failed to add buffer for rx queue %d\n",
			    rx_ring->qid);
			break;
		}
		/* Advance with wrap-around at ring_size. */
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
		    rx_ring->ring_size);
	}

	/* Partial refill is not fatal, but record it. */
	if (unlikely(i < num)) {
		counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
		ena_log_io(pdev, WARN,
		    "refilled rx qid %d with only %d mbufs (from %d)\n",
		    rx_ring->qid, i, num);
	}

	/* Notify the device only if at least one descriptor was posted. */
	if (likely(i != 0))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;
	/* Returns the number of descriptors actually refilled. */
	return (i);
}
1166 
1167 #ifdef DEV_NETMAP
1168 static int
1169 ena_reinit_netmap(struct ena_adapter *adapter)
1170 {
1171 	int rc;
1172 
1173 	netmap_detach(adapter->ifp);
1174 	rc = ena_netmap_attach(adapter);
1175 	if (rc != 0)
1176 		ena_log(adapter->pdev, ERR, "netmap attach failed: %d\n", rc);
1177 
1178 	return rc;
1179 }
1180 
1181 #endif /* DEV_NETMAP */
/*
 * Change the Tx drbr (buf_ring) size. Brings the interface down, rebuilds
 * the advanced ring resources with the new size and, if the device was up,
 * brings it back up. On failure the old size is restored and a device
 * reset is triggered. Returns 0 on success or an errno.
 */
int
ena_update_buf_ring_size(struct ena_adapter *adapter,
    uint32_t new_buf_ring_size)
{
	uint32_t old_buf_ring_size;
	int rc = 0;
	bool dev_was_up;

	old_buf_ring_size = adapter->buf_ring_size;
	adapter->buf_ring_size = new_buf_ring_size;

	/* Remember the state so we only restart what was running. */
	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	ena_down(adapter);

	/* Reconfigure buf ring for all Tx rings. */
	ena_free_all_io_rings_resources(adapter);
	ena_init_io_rings_advanced(adapter);
#ifdef DEV_NETMAP
	rc = ena_reinit_netmap(adapter);
	if (rc != 0)
		return rc;

#endif /* DEV_NETMAP */
	if (dev_was_up) {
		/*
		 * If ena_up() fails, it's not because of recent buf_ring size
		 * changes. Because of that, we just want to revert old drbr
		 * value and trigger the reset because something else had to
		 * go wrong.
		 */
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
			    new_buf_ring_size, old_buf_ring_size);

			/* Revert old size and trigger the reset */
			adapter->buf_ring_size = old_buf_ring_size;
			ena_free_all_io_rings_resources(adapter);
			ena_init_io_rings_advanced(adapter);
#ifdef DEV_NETMAP
			rc = ena_reinit_netmap(adapter);
			if (rc != 0)
				return rc;

#endif /* DEV_NETMAP */
			/* Let the reset path know the device should come up. */
			ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
			    adapter);
			ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
		}
	}

	return (rc);
}
1236 
/*
 * Change the requested Tx/Rx ring sizes. Brings the interface down,
 * reinitializes the basic ring parameters and, if the device was up,
 * brings it back up. On failure the old sizes are restored; if even that
 * fails a device reset is triggered. Returns 0 on success or an errno.
 */
int
ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
    uint32_t new_rx_size)
{
	uint32_t old_tx_size, old_rx_size;
	int rc = 0;
	bool dev_was_up;

	old_tx_size = adapter->requested_tx_ring_size;
	old_rx_size = adapter->requested_rx_ring_size;
	adapter->requested_tx_ring_size = new_tx_size;
	adapter->requested_rx_ring_size = new_rx_size;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	ena_down(adapter);

	/* Configure queues with new size. */
	ena_init_io_rings_basic(adapter);
#ifdef DEV_NETMAP
	rc = ena_reinit_netmap(adapter);
	if (rc != 0)
		return rc;

#endif /* DEV_NETMAP */
	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
			    new_tx_size, new_rx_size, old_tx_size, old_rx_size);

			/* Revert old size. */
			adapter->requested_tx_ring_size = old_tx_size;
			adapter->requested_rx_ring_size = old_rx_size;
			ena_init_io_rings_basic(adapter);
#ifdef DEV_NETMAP
			rc = ena_reinit_netmap(adapter);
			if (rc != 0)
				return rc;

#endif /* DEV_NETMAP */
			/* And try again. */
			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				ena_log(adapter->pdev, ERR,
				    "Failed to revert old queue sizes. Triggering device reset.\n");
				/*
				 * If we've failed again, something had to go
				 * wrong. After reset, the device should try to
				 * go up
				 */
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}

	return (rc);
}
1298 
/*
 * Rebuild all I/O rings for a new queue count. Caller is responsible for
 * having brought the device down first.
 */
static void
ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
{
	ena_free_all_io_rings_resources(adapter);
	/* Force indirection table to be reinitialized */
	ena_com_rss_destroy(adapter->ena_dev);

	adapter->num_io_queues = num;
	ena_init_io_rings(adapter);
}
1309 
/*
 * Change the base CPU used for IRQ affinity. Brings the interface down,
 * applies the new value and, if the device was up, brings it back up.
 * On failure the old value is restored; if that also fails a device reset
 * is triggered. Returns 0 on success or an errno.
 */
int
ena_update_base_cpu(struct ena_adapter *adapter, int new_num)
{
	int old_num;
	int rc = 0;
	bool dev_was_up;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	old_num = adapter->irq_cpu_base;

	ena_down(adapter);

	adapter->irq_cpu_base = new_num;

	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device %d IRQ base CPU. "
			    "Reverting to previous value: %d\n",
			    new_num, old_num);

			/* Revert and retry with the previous base CPU. */
			adapter->irq_cpu_base = old_num;

			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				ena_log(adapter->pdev, ERR,
				    "Failed to revert to previous setup."
				    "Triggering device reset.\n");
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}
	return (rc);
}
1348 
/*
 * Change the CPU stride used for IRQ affinity. Same down/up/revert/reset
 * pattern as ena_update_base_cpu(). Returns 0 on success or an errno.
 */
int
ena_update_cpu_stride(struct ena_adapter *adapter, uint32_t new_num)
{
	uint32_t old_num;
	int rc = 0;
	bool dev_was_up;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	old_num = adapter->irq_cpu_stride;

	ena_down(adapter);

	adapter->irq_cpu_stride = new_num;

	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device %d IRQ CPU stride. "
			    "Reverting to previous value: %d\n",
			    new_num, old_num);

			/* Revert and retry with the previous stride. */
			adapter->irq_cpu_stride = old_num;

			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				ena_log(adapter->pdev, ERR,
				    "Failed to revert to previous setup."
				    "Triggering device reset.\n");
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}
	return (rc);
}
1387 
1388 /* Caller should sanitize new_num */
/*
 * Change the number of I/O queues. Brings the interface down, rebuilds the
 * rings (and RSS indirection) for the new count and, if the device was up,
 * brings it back up. On failure the old count is restored; if that also
 * fails a device reset is triggered. Returns 0 on success or an errno.
 */
int
ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
{
	uint32_t old_num;
	int rc = 0;
	bool dev_was_up;

	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	old_num = adapter->num_io_queues;
	ena_down(adapter);

	ena_update_io_rings(adapter, new_num);
#ifdef DEV_NETMAP
	rc = ena_reinit_netmap(adapter);
	if (rc != 0)
		return rc;

#endif /* DEV_NETMAP */
	if (dev_was_up) {
		rc = ena_up(adapter);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure device with %u IO queues. "
			    "Reverting to previous value: %u\n",
			    new_num, old_num);

			/* Rebuild the rings with the previous queue count. */
			ena_update_io_rings(adapter, old_num);
#ifdef DEV_NETMAP
			rc = ena_reinit_netmap(adapter);
			if (rc != 0)
				return rc;

#endif /* DEV_NETMAP */
			rc = ena_up(adapter);
			if (unlikely(rc != 0)) {
				ena_log(adapter->pdev, ERR,
				    "Failed to revert to previous setup IO "
				    "queues. Triggering device reset.\n");
				ENA_FLAG_SET_ATOMIC(
				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
				ena_trigger_reset(adapter,
				    ENA_REGS_RESET_OS_TRIGGER);
			}
		}
	}

	return (rc);
}
1437 
/*
 * Free every Rx buffer of one queue: regular mbufs and, when the interface
 * is alive in netmap mode, any netmap slots still attached.
 */
static void
ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	unsigned int i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->mbuf != NULL)
			ena_free_rx_mbuf(adapter, rx_ring, rx_info);
#ifdef DEV_NETMAP
		/* Only release netmap slots while the ifp is still usable. */
		if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
		    (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) {
			if (rx_info->netmap_buf_idx != 0)
				ena_netmap_free_rx_slot(adapter, rx_ring,
				    rx_info);
		}
#endif /* DEV_NETMAP */
	}
}
1459 
1460 /**
1461  * ena_refill_all_rx_bufs - allocate all queues Rx buffers
1462  * @adapter: network interface device structure
1463  *
1464  */
1465 static void
1466 ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1467 {
1468 	struct ena_ring *rx_ring;
1469 	int i, rc, bufs_num;
1470 
1471 	for (i = 0; i < adapter->num_io_queues; i++) {
1472 		rx_ring = &adapter->rx_ring[i];
1473 		bufs_num = rx_ring->ring_size - 1;
1474 		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1475 		if (unlikely(rc != bufs_num))
1476 			ena_log_io(adapter->pdev, WARN,
1477 			    "refilling Queue %d failed. "
1478 			    "Allocated %d buffers from: %d\n",
1479 			    i, rc, bufs_num);
1480 #ifdef DEV_NETMAP
1481 		rx_ring->initialized = true;
1482 #endif /* DEV_NETMAP */
1483 	}
1484 }
1485 
1486 static void
1487 ena_free_all_rx_bufs(struct ena_adapter *adapter)
1488 {
1489 	int i;
1490 
1491 	for (i = 0; i < adapter->num_io_queues; i++)
1492 		ena_free_rx_bufs(adapter, i);
1493 }
1494 
1495 /**
1496  * ena_free_tx_bufs - Free Tx Buffers per Queue
1497  * @adapter: network interface device structure
1498  * @qid: queue index
1499  **/
static void
ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
	/* Log the first uncompleted mbuf loudly, the rest at debug level. */
	bool print_once = true;
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	/* Hold the ring lock so the datapath cannot touch these slots. */
	ENA_RING_MTX_LOCK(tx_ring);
	for (int i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (tx_info->mbuf == NULL)
			continue;

		if (print_once) {
			ena_log(adapter->pdev, WARN,
			    "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
			    i);
			print_once = false;
		} else {
			ena_log(adapter->pdev, DBG,
			    "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
			    i);
		}

		/* Finish the DMA transaction before freeing the mbuf. */
		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);

		m_free(tx_info->mbuf);
		tx_info->mbuf = NULL;
	}
	ENA_RING_MTX_UNLOCK(tx_ring);
}
1533 
1534 static void
1535 ena_free_all_tx_bufs(struct ena_adapter *adapter)
1536 {
1537 	for (int i = 0; i < adapter->num_io_queues; i++)
1538 		ena_free_tx_bufs(adapter, i);
1539 }
1540 
1541 static void
1542 ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1543 {
1544 	uint16_t ena_qid;
1545 	int i;
1546 
1547 	for (i = 0; i < adapter->num_io_queues; i++) {
1548 		ena_qid = ENA_IO_TXQ_IDX(i);
1549 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1550 	}
1551 }
1552 
1553 static void
1554 ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1555 {
1556 	uint16_t ena_qid;
1557 	int i;
1558 
1559 	for (i = 0; i < adapter->num_io_queues; i++) {
1560 		ena_qid = ENA_IO_RXQ_IDX(i);
1561 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1562 	}
1563 }
1564 
/*
 * Stop the per-queue cleanup taskqueues and destroy all Tx/Rx I/O queues
 * on the device.
 */
static void
ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	struct ena_que *queue;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		queue = &adapter->que[i];
		/*
		 * Loop cancel+drain until the task is neither pending nor
		 * running, then it is safe to free the taskqueue.
		 */
		while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL))
			taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task);
		taskqueue_free(queue->cleanup_tq);
	}

	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
1581 
/*
 * Create all Tx and Rx I/O queues on the device and start one cleanup
 * taskqueue per queue pair. On any failure, every queue created so far is
 * destroyed and ENXIO is returned; returns 0 on success.
 */
static int
ena_create_io_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *ring;
	struct ena_que *queue;
	uint16_t ena_qid;
	uint32_t msix_vector;
	cpuset_t *cpu_mask = NULL;
	int rc, i;

	/* Create TX queues */
	for (i = 0; i < adapter->num_io_queues; i++) {
		msix_vector = ENA_IO_IRQ_IDX(i);
		ena_qid = ENA_IO_TXQ_IDX(i);
		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
		ctx.queue_size = adapter->requested_tx_ring_size;
		ctx.msix_vector = msix_vector;
		ctx.qid = ena_qid;
		ctx.numa_node = adapter->que[i].domain;

		rc = ena_com_create_io_queue(ena_dev, &ctx);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Failed to create io TX queue #%d rc: %d\n", i, rc);
			goto err_tx;
		}
		ring = &adapter->tx_ring[i];
		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
		    &ring->ena_com_io_sq, &ring->ena_com_io_cq);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Failed to get TX queue handlers. TX queue num"
			    " %d rc: %d\n",
			    i, rc);
			/* This queue was created - destroy it before unwind. */
			ena_com_destroy_io_queue(ena_dev, ena_qid);
			goto err_tx;
		}

		if (ctx.numa_node >= 0) {
			ena_com_update_numa_node(ring->ena_com_io_cq,
			    ctx.numa_node);
		}
	}

	/* Create RX queues */
	for (i = 0; i < adapter->num_io_queues; i++) {
		msix_vector = ENA_IO_IRQ_IDX(i);
		ena_qid = ENA_IO_RXQ_IDX(i);
		ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
		ctx.queue_size = adapter->requested_rx_ring_size;
		ctx.msix_vector = msix_vector;
		ctx.qid = ena_qid;
		ctx.numa_node = adapter->que[i].domain;

		rc = ena_com_create_io_queue(ena_dev, &ctx);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to create io RX queue[%d] rc: %d\n", i, rc);
			goto err_rx;
		}

		ring = &adapter->rx_ring[i];
		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
		    &ring->ena_com_io_sq, &ring->ena_com_io_cq);
		if (unlikely(rc != 0)) {
			ena_log(adapter->pdev, ERR,
			    "Failed to get RX queue handlers. RX queue num"
			    " %d rc: %d\n",
			    i, rc);
			ena_com_destroy_io_queue(ena_dev, ena_qid);
			goto err_rx;
		}

		if (ctx.numa_node >= 0) {
			ena_com_update_numa_node(ring->ena_com_io_cq,
			    ctx.numa_node);
		}
	}

	/* Spawn one cleanup taskqueue thread per queue pair. */
	for (i = 0; i < adapter->num_io_queues; i++) {
		queue = &adapter->que[i];

		NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
		queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
		    M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);

#ifdef RSS
		cpu_mask = &queue->cpu_mask;
#endif
		taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET,
		    cpu_mask, "%s queue %d cleanup",
		    device_get_nameunit(adapter->pdev), i);
	}

	return (0);

err_rx:
	/* Destroy the Rx queues created so far, then all Tx queues. */
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
	i = adapter->num_io_queues;
err_tx:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return (ENXIO);
}
1692 
1693 /*********************************************************************
1694  *
1695  *  MSIX & Interrupt Service routine
1696  *
1697  **********************************************************************/
1698 
1699 /**
1700  * ena_handle_msix - MSIX Interrupt Handler for admin/async queue
1701  * @arg: interrupt number
1702  **/
static void
ena_intr_msix_mgmnt(void *arg)
{
	struct ena_adapter *adapter = (struct ena_adapter *)arg;

	/* Service admin queue completions first. */
	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
	/* Only process async events while the device is running. */
	if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
		ena_com_aenq_intr_handler(adapter->ena_dev, arg);
}
1712 
1713 /**
1714  * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
1715  * @arg: queue
1716  **/
1717 static int
1718 ena_handle_msix(void *arg)
1719 {
1720 	struct ena_que *queue = arg;
1721 	struct ena_adapter *adapter = queue->adapter;
1722 	if_t ifp = adapter->ifp;
1723 
1724 	if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1725 		return (FILTER_STRAY);
1726 
1727 	taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
1728 
1729 	return (FILTER_HANDLED);
1730 }
1731 
/*
 * Allocate and enable MSI-X vectors: one for admin/AENQ plus one per
 * possible I/O queue. Tolerates getting fewer vectors than requested as
 * long as more than the admin vector was granted. Returns 0 on success,
 * EINVAL if already enabled, ENOSPC when allocation falls short.
 */
static int
ena_enable_msix(struct ena_adapter *adapter)
{
	device_t dev = adapter->pdev;
	int msix_vecs, msix_req;
	int i, rc = 0;

	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
		ena_log(dev, ERR, "Error, MSI-X is already enabled\n");
		return (EINVAL);
	}

	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);

	adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);

	for (i = 0; i < msix_vecs; i++) {
		adapter->msix_entries[i].entry = i;
		/* Vectors must start from 1 */
		adapter->msix_entries[i].vector = i + 1;
	}

	/* pci_alloc_msix() may lower msix_vecs to what is available. */
	msix_req = msix_vecs;
	rc = pci_alloc_msix(dev, &msix_vecs);
	if (unlikely(rc != 0)) {
		ena_log(dev, ERR, "Failed to enable MSIX, vectors %d rc %d\n",
		    msix_vecs, rc);

		rc = ENOSPC;
		goto err_msix_free;
	}

	if (msix_vecs != msix_req) {
		/* Only the admin vector left - no room for any I/O queue. */
		if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
			ena_log(dev, ERR,
			    "Not enough number of MSI-x allocated: %d\n",
			    msix_vecs);
			pci_release_msi(dev);
			rc = ENOSPC;
			goto err_msix_free;
		}
		ena_log(dev, ERR,
		    "Enable only %d MSI-x (out of %d), reduce "
		    "the number of queues\n",
		    msix_vecs, msix_req);
	}

	adapter->msix_vecs = msix_vecs;
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);

	return (0);

err_msix_free:
	free(adapter->msix_entries, M_DEVBUF);
	adapter->msix_entries = NULL;

	return (rc);
}
1794 
/* Populate the irq table entry for the management (admin/AENQ) vector. */
static void
ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE,
	    "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev));
	/*
	 * Handler is NULL on purpose, it will be set
	 * when mgmnt interrupt is acquired
	 */
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
	    adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
}
1809 
/*
 * Populate the irq table entries for the I/O vectors and pick a CPU for
 * each queue (RSS bucket mapping, or base+stride when configured).
 * Returns EINVAL when MSI-X entries were never allocated.
 */
static int
ena_setup_io_intr(struct ena_adapter *adapter)
{
#ifdef RSS
	int num_buckets = rss_getnumbuckets();
	/* Persistent across attaches so successive devices spread out. */
	static int last_bind = 0;
	int cur_bind;
	int idx;
#endif
	int irq_idx;

	if (adapter->msix_entries == NULL)
		return (EINVAL);

#ifdef RSS
	if (adapter->first_bind < 0) {
		adapter->first_bind = last_bind;
		last_bind = (last_bind + adapter->num_io_queues) % num_buckets;
	}
	cur_bind = adapter->first_bind;
#endif

	for (int i = 0; i < adapter->num_io_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
		    "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
		adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
		adapter->irq_tbl[irq_idx].data = &adapter->que[i];
		adapter->irq_tbl[irq_idx].vector =
		    adapter->msix_entries[irq_idx].vector;
		ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n",
		    adapter->msix_entries[irq_idx].vector);

		/* Explicit base+stride affinity, wrapping at mp_ncpus. */
		if (adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
			adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
			    (unsigned)(adapter->irq_cpu_base +
			    i * adapter->irq_cpu_stride) % (unsigned)mp_ncpus;
			CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
		}

#ifdef RSS
		/* RSS build: bind to the CPU of the next RSS bucket. */
		adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
		    rss_getcpu(cur_bind);
		cur_bind = (cur_bind + 1) % num_buckets;
		CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);

		/* Record the NUMA domain containing the chosen CPU. */
		for (idx = 0; idx < MAXMEMDOM; ++idx) {
			if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx]))
				break;
		}
		adapter->que[i].domain = idx;
#else
		adapter->que[i].domain = -1;
#endif
	}

	return (0);
}
1869 
/*
 * Allocate the bus resource for the management vector and attach
 * ena_intr_msix_mgmnt() to it. On setup failure the resource is released
 * again. Returns 0 on success or an errno.
 */
static int
ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	struct ena_irq *irq;
	unsigned long flags;
	int rc, rcc;

	flags = RF_ACTIVE | RF_SHAREABLE;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
	    &irq->vector, flags);

	if (unlikely(irq->res == NULL)) {
		ena_log(pdev, ERR, "could not allocate irq vector: %d\n",
		    irq->vector);
		return (ENXIO);
	}

	rc = bus_setup_intr(adapter->pdev, irq->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data,
	    &irq->cookie);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "failed to register interrupt handler for irq %ju: %d\n",
		    rman_get_start(irq->res), rc);
		goto err_res_free;
	}
	irq->requested = true;

	return (rc);

err_res_free:
	ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector);
	rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector,
	    irq->res);
	if (unlikely(rcc != 0))
		ena_log(pdev, ERR,
		    "dev has no parent while releasing res for irq: %d\n",
		    irq->vector);
	irq->res = NULL;

	return (rc);
}
1915 
/*
 * Allocate bus resources and set up handlers for every I/O vector, binding
 * each to its CPU when RSS or an explicit CPU base is configured. On any
 * failure, everything acquired so far (including partial state of the
 * failing entry) is unwound. Returns 0 on success or an errno.
 */
static int
ena_request_io_irq(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	struct ena_irq *irq;
	unsigned long flags = 0;
	int rc = 0, i, rcc;

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
		ena_log(pdev, ERR,
		    "failed to request I/O IRQ: MSI-X is not enabled\n");
		return (EINVAL);
	} else {
		flags = RF_ACTIVE | RF_SHAREABLE;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];

		/* Skip vectors that are already fully set up. */
		if (unlikely(irq->requested))
			continue;

		irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
		    &irq->vector, flags);
		if (unlikely(irq->res == NULL)) {
			rc = ENOMEM;
			ena_log(pdev, ERR,
			    "could not allocate irq vector: %d\n", irq->vector);
			goto err;
		}

		rc = bus_setup_intr(adapter->pdev, irq->res,
		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data,
		    &irq->cookie);
		if (unlikely(rc != 0)) {
			ena_log(pdev, ERR,
			    "failed to register interrupt handler for irq %ju: %d\n",
			    rman_get_start(irq->res), rc);
			goto err;
		}
		irq->requested = true;

		/* Pin the interrupt to its chosen CPU when affinity is on. */
		if (adapter->rss_enabled || adapter->irq_cpu_base > ENA_BASE_CPU_UNSPECIFIED) {
			rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu);
			if (unlikely(rc != 0)) {
				ena_log(pdev, ERR,
				    "failed to bind interrupt handler for irq %ju to cpu %d: %d\n",
				    rman_get_start(irq->res), irq->cpu, rc);
				goto err;
			}

			ena_log(pdev, INFO, "queue %d - cpu %d\n",
			    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
		}
	}
	return (rc);

err:

	/* Unwind from the failing index back to the first I/O vector. */
	for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
		irq = &adapter->irq_tbl[i];
		rcc = 0;

		/* Once we entered err: section and irq->requested is true we
		   free both intr and resources */
		if (irq->requested) {
			rcc = bus_teardown_intr(adapter->pdev, irq->res,
			    irq->cookie);
			if (unlikely(rcc != 0))
				ena_log(pdev, ERR,
				    "could not release irq: %d, error: %d\n",
				    irq->vector, rcc);
		}

		/* If we entered err: section without irq->requested set we know
		   it was bus_alloc_resource_any() that needs cleanup, provided
		   res is not NULL. In case res is NULL no work in needed in
		   this iteration */
		rcc = 0;
		if (irq->res != NULL) {
			rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
			    irq->vector, irq->res);
		}
		if (unlikely(rcc != 0))
			ena_log(pdev, ERR,
			    "dev has no parent while releasing res for irq: %d\n",
			    irq->vector);
		irq->requested = false;
		irq->res = NULL;
	}

	return (rc);
}
2009 
/*
 * Tear down the management interrupt handler and release its bus
 * resource. Safe to call when neither was ever acquired.
 */
static void
ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	device_t pdev = adapter->pdev;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	if (irq->requested) {
		ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
		rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
		if (unlikely(rc != 0))
			ena_log(pdev, ERR, "failed to tear down irq: %d\n",
			    irq->vector);
		irq->requested = 0;
	}

	if (irq->res != NULL) {
		ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector);
		rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
		    irq->vector, irq->res);
		irq->res = NULL;
		if (unlikely(rc != 0))
			ena_log(pdev, ERR,
			    "dev has no parent while releasing res for irq: %d\n",
			    irq->vector);
	}
}
2038 
2039 static void
2040 ena_free_io_irq(struct ena_adapter *adapter)
2041 {
2042 	device_t pdev = adapter->pdev;
2043 	struct ena_irq *irq;
2044 	int rc;
2045 
2046 	for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
2047 		irq = &adapter->irq_tbl[i];
2048 		if (irq->requested) {
2049 			ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
2050 			rc = bus_teardown_intr(adapter->pdev, irq->res,
2051 			    irq->cookie);
2052 			if (unlikely(rc != 0)) {
2053 				ena_log(pdev, ERR,
2054 				    "failed to tear down irq: %d\n",
2055 				    irq->vector);
2056 			}
2057 			irq->requested = 0;
2058 		}
2059 
2060 		if (irq->res != NULL) {
2061 			ena_log(pdev, DBG, "release resource irq: %d\n",
2062 			    irq->vector);
2063 			rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
2064 			    irq->vector, irq->res);
2065 			irq->res = NULL;
2066 			if (unlikely(rc != 0)) {
2067 				ena_log(pdev, ERR,
2068 				    "dev has no parent while releasing res for irq: %d\n",
2069 				    irq->vector);
2070 			}
2071 		}
2072 	}
2073 }
2074 
/*
 * Release all interrupts owned by the driver: per-queue IO IRQs first,
 * then the management IRQ, and finally hand the MSI-X vectors back to
 * the PCI bus.
 */
static void
ena_free_irqs(struct ena_adapter *adapter)
{
	ena_free_io_irq(adapter);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
}
2082 
/*
 * Return the MSI-X vectors to the PCI bus (when they were enabled) and
 * free the MSI-X entry table.  free(NULL) is a no-op, so a repeated call
 * is harmless.
 */
static void
ena_disable_msix(struct ena_adapter *adapter)
{
	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
		pci_release_msi(adapter->pdev);
	}

	adapter->msix_vecs = 0;
	free(adapter->msix_entries, M_DEVBUF);
	adapter->msix_entries = NULL;
}
2095 
2096 static void
2097 ena_unmask_all_io_irqs(struct ena_adapter *adapter)
2098 {
2099 	struct ena_com_io_cq *io_cq;
2100 	struct ena_eth_io_intr_reg intr_reg;
2101 	struct ena_ring *tx_ring;
2102 	uint16_t ena_qid;
2103 	int i;
2104 
2105 	/* Unmask interrupts for all queues */
2106 	for (i = 0; i < adapter->num_io_queues; i++) {
2107 		ena_qid = ENA_IO_TXQ_IDX(i);
2108 		io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
2109 		ena_com_update_intr_reg(&intr_reg, 0, 0, true, false);
2110 		tx_ring = &adapter->tx_ring[i];
2111 		counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
2112 		ena_com_unmask_intr(io_cq, &intr_reg);
2113 	}
2114 }
2115 
/*
 * Final stage of bringing the interface up: configure RSS (when active),
 * re-apply the current MTU to the freshly created queues, refill all RX
 * rings and reset the HW statistics.  Returns 0 on success or an error
 * code from the failing step.
 */
static int
ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
		rc = ena_rss_configure(adapter);
		if (rc != 0) {
			ena_log(adapter->pdev, ERR,
			    "Failed to configure RSS\n");
			return (rc);
		}
	}

	/* Push the currently configured MTU down to the device. */
	rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp));
	if (unlikely(rc != 0))
		return (rc);

	ena_refill_all_rx_bufs(adapter);
	ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(adapter->hw_stats));

	return (0);
}
2140 
2141 static void
2142 set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size)
2143 {
2144 	int i;
2145 
2146 	for (i = 0; i < adapter->num_io_queues; i++) {
2147 		adapter->tx_ring[i].ring_size = new_tx_size;
2148 		adapter->rx_ring[i].ring_size = new_rx_size;
2149 	}
2150 }
2151 
2152 static int
2153 create_queues_with_size_backoff(struct ena_adapter *adapter)
2154 {
2155 	device_t pdev = adapter->pdev;
2156 	int rc;
2157 	uint32_t cur_rx_ring_size, cur_tx_ring_size;
2158 	uint32_t new_rx_ring_size, new_tx_ring_size;
2159 
2160 	/*
2161 	 * Current queue sizes might be set to smaller than the requested
2162 	 * ones due to past queue allocation failures.
2163 	 */
2164 	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2165 	    adapter->requested_rx_ring_size);
2166 
2167 	while (1) {
2168 		/* Allocate transmit descriptors */
2169 		rc = ena_setup_all_tx_resources(adapter);
2170 		if (unlikely(rc != 0)) {
2171 			ena_log(pdev, ERR, "err_setup_tx\n");
2172 			goto err_setup_tx;
2173 		}
2174 
2175 		/* Allocate receive descriptors */
2176 		rc = ena_setup_all_rx_resources(adapter);
2177 		if (unlikely(rc != 0)) {
2178 			ena_log(pdev, ERR, "err_setup_rx\n");
2179 			goto err_setup_rx;
2180 		}
2181 
2182 		/* Create IO queues for Rx & Tx */
2183 		rc = ena_create_io_queues(adapter);
2184 		if (unlikely(rc != 0)) {
2185 			ena_log(pdev, ERR, "create IO queues failed\n");
2186 			goto err_io_que;
2187 		}
2188 
2189 		return (0);
2190 
2191 err_io_que:
2192 		ena_free_all_rx_resources(adapter);
2193 err_setup_rx:
2194 		ena_free_all_tx_resources(adapter);
2195 err_setup_tx:
2196 		/*
2197 		 * Lower the ring size if ENOMEM. Otherwise, return the
2198 		 * error straightaway.
2199 		 */
2200 		if (unlikely(rc != ENOMEM)) {
2201 			ena_log(pdev, ERR,
2202 			    "Queue creation failed with error code: %d\n", rc);
2203 			return (rc);
2204 		}
2205 
2206 		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2207 		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2208 
2209 		ena_log(pdev, ERR,
2210 		    "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2211 		    cur_tx_ring_size, cur_rx_ring_size);
2212 
2213 		new_tx_ring_size = cur_tx_ring_size;
2214 		new_rx_ring_size = cur_rx_ring_size;
2215 
2216 		/*
2217 		 * Decrease the size of a larger queue, or decrease both if they
2218 		 * are the same size.
2219 		 */
2220 		if (cur_rx_ring_size <= cur_tx_ring_size)
2221 			new_tx_ring_size = cur_tx_ring_size / 2;
2222 		if (cur_rx_ring_size >= cur_tx_ring_size)
2223 			new_rx_ring_size = cur_rx_ring_size / 2;
2224 
2225 		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2226 		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
2227 			ena_log(pdev, ERR,
2228 			    "Queue creation failed with the smallest possible queue size"
2229 			    "of %d for both queues. Not retrying with smaller queues\n",
2230 			    ENA_MIN_RING_SIZE);
2231 			return (rc);
2232 		}
2233 
2234 		ena_log(pdev, INFO,
2235 		    "Retrying queue creation with sizes TX=%d, RX=%d\n",
2236 		    new_tx_ring_size, new_rx_ring_size);
2237 
2238 		set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
2239 	}
2240 }
2241 
/*
 * Bring the interface up: set up and request the IO queue interrupts,
 * create the IO queues (with ring-size backoff on ENOMEM), finish the
 * RSS/MTU/RX-buffer configuration, mark the device as running and unmask
 * all IO IRQs.  Must be called with the ENA lock held.  Returns 0 on
 * success (or when the device is already up) or an error code; on failure
 * every resource acquired here is rolled back.
 */
int
ena_up(struct ena_adapter *adapter)
{
	int rc = 0;

	ENA_LOCK_ASSERT();

	if (unlikely(device_is_attached(adapter->pdev) == 0)) {
		ena_log(adapter->pdev, ERR, "device is not attached!\n");
		return (ENXIO);
	}

	/* Idempotent: nothing to do when the device is already up. */
	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return (0);

	ena_log(adapter->pdev, INFO, "device is going UP\n");

	/* setup interrupts for IO queues */
	rc = ena_setup_io_intr(adapter);
	if (unlikely(rc != 0)) {
		ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n");
		goto error;
	}
	rc = ena_request_io_irq(adapter);
	if (unlikely(rc != 0)) {
		ena_log(adapter->pdev, ERR, "err_req_irq\n");
		goto error;
	}

	ena_log(adapter->pdev, INFO,
	    "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, LLQ is %s\n",
	    adapter->num_io_queues,
	    adapter->requested_rx_ring_size,
	    adapter->requested_tx_ring_size,
	    (adapter->ena_dev->tx_mem_queue_type ==
		ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED");

	rc = create_queues_with_size_backoff(adapter);
	if (unlikely(rc != 0)) {
		ena_log(adapter->pdev, ERR,
		    "error creating queues with size backoff\n");
		goto err_create_queues_with_backoff;
	}

	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
		if_link_state_change(adapter->ifp, LINK_STATE_UP);

	rc = ena_up_complete(adapter);
	if (unlikely(rc != 0))
		goto err_up_complete;

	counter_u64_add(adapter->dev_stats.interface_up, 1);

	ena_update_hwassist(adapter);

	/* Mark the interface as running before enabling interrupts. */
	if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);

	ena_unmask_all_io_irqs(adapter);

	return (0);

err_up_complete:
	ena_destroy_all_io_queues(adapter);
	ena_free_all_rx_resources(adapter);
	ena_free_all_tx_resources(adapter);
err_create_queues_with_backoff:
	ena_free_io_irq(adapter);
error:
	return (rc);
}
2314 
2315 static uint64_t
2316 ena_get_counter(if_t ifp, ift_counter cnt)
2317 {
2318 	struct ena_adapter *adapter;
2319 	struct ena_hw_stats *stats;
2320 
2321 	adapter = if_getsoftc(ifp);
2322 	stats = &adapter->hw_stats;
2323 
2324 	switch (cnt) {
2325 	case IFCOUNTER_IPACKETS:
2326 		return (counter_u64_fetch(stats->rx_packets));
2327 	case IFCOUNTER_OPACKETS:
2328 		return (counter_u64_fetch(stats->tx_packets));
2329 	case IFCOUNTER_IBYTES:
2330 		return (counter_u64_fetch(stats->rx_bytes));
2331 	case IFCOUNTER_OBYTES:
2332 		return (counter_u64_fetch(stats->tx_bytes));
2333 	case IFCOUNTER_IQDROPS:
2334 		return (counter_u64_fetch(stats->rx_drops));
2335 	case IFCOUNTER_OQDROPS:
2336 		return (counter_u64_fetch(stats->tx_drops));
2337 	default:
2338 		return (if_get_counter_default(ifp, cnt));
2339 	}
2340 }
2341 
2342 static int
2343 ena_media_change(if_t ifp)
2344 {
2345 	/* Media Change is not supported by firmware */
2346 	return (0);
2347 }
2348 
/*
 * ifmedia status callback.  Reports link validity and, when the link is
 * up, an active full-duplex Ethernet medium of unknown speed (the device
 * does not expose a link speed here).  Takes the ENA lock to read the
 * link flag consistently.
 */
static void
ena_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct ena_adapter *adapter = if_getsoftc(ifp);
	ena_log(adapter->pdev, DBG, "Media status update\n");

	ENA_LOCK_LOCK();

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
		ENA_LOCK_UNLOCK();
		ena_log(adapter->pdev, INFO, "Link is down\n");
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;

	ENA_LOCK_UNLOCK();
}
2371 
2372 static void
2373 ena_init(void *arg)
2374 {
2375 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
2376 
2377 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
2378 		ENA_LOCK_LOCK();
2379 		ena_up(adapter);
2380 		ENA_LOCK_UNLOCK();
2381 	}
2382 }
2383 
/*
 * ifnet ioctl handler.  MTU and capability changes require a full
 * down/up cycle of the device; interface flag changes map to ena_up()/
 * ena_down().  The ENA lock serializes up/down transitions against each
 * other.  Returns 0 or an errno.
 */
static int
ena_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct ena_adapter *adapter;
	struct ifreq *ifr;
	int rc;

	adapter = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	/*
	 * Acquiring lock to prevent from running up and down routines parallel.
	 */
	rc = 0;
	switch (command) {
	case SIOCSIFMTU:
		if (if_getmtu(ifp) == ifr->ifr_mtu)
			break;
		ENA_LOCK_LOCK();
		ena_down(adapter);

		/*
		 * NOTE(review): the return value of ena_change_mtu() is not
		 * checked here; ena_up() proceeds regardless — verify this
		 * is intentional.
		 */
		ena_change_mtu(ifp, ifr->ifr_mtu);

		rc = ena_up(adapter);
		ENA_LOCK_UNLOCK();
		break;

	case SIOCSIFFLAGS:
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/* Promisc/allmulti are only logged, not applied. */
				if ((if_getflags(ifp) & (IFF_PROMISC |
				    IFF_ALLMULTI)) != 0) {
					ena_log(adapter->pdev, INFO,
					    "ioctl promisc/allmulti\n");
				}
			} else {
				ENA_LOCK_LOCK();
				rc = ena_up(adapter);
				ENA_LOCK_UNLOCK();
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				ENA_LOCK_LOCK();
				ena_down(adapter);
				ENA_LOCK_UNLOCK();
			}
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;

	case SIOCSIFCAP:
		{
			int reinit = 0;

			if (ifr->ifr_reqcap != if_getcapenable(ifp)) {
				if_setcapenable(ifp, ifr->ifr_reqcap);
				reinit = 1;
			}

			/* Capability changes take effect via a down/up cycle. */
			if ((reinit != 0) &&
			    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
				ENA_LOCK_LOCK();
				ena_down(adapter);
				rc = ena_up(adapter);
				ENA_LOCK_UNLOCK();
			}
		}

		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return (rc);
}
2468 
2469 static int
2470 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2471 {
2472 	int caps = 0;
2473 
2474 	if ((feat->offload.tx &
2475 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2476 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2477 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2478 		caps |= IFCAP_TXCSUM;
2479 
2480 	if ((feat->offload.tx &
2481 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2482 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2483 		caps |= IFCAP_TXCSUM_IPV6;
2484 
2485 	if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2486 		caps |= IFCAP_TSO4;
2487 
2488 	if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2489 		caps |= IFCAP_TSO6;
2490 
2491 	if ((feat->offload.rx_supported &
2492 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2493 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2494 		caps |= IFCAP_RXCSUM;
2495 
2496 	if ((feat->offload.rx_supported &
2497 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2498 		caps |= IFCAP_RXCSUM_IPV6;
2499 
2500 	caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
2501 
2502 	return (caps);
2503 }
2504 
/*
 * Refresh the host-info structure reported to the device with the
 * interface's current capability bits.
 */
static void
ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
{
	host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp);
}
2510 
/*
 * Rebuild the interface's hardware-assist (CSUM_*) flags from the enabled
 * capabilities and the TX offload features the device advertised at init
 * time (adapter->tx_offload_cap).  Clears the previous flags first.
 */
static void
ena_update_hwassist(struct ena_adapter *adapter)
{
	if_t ifp = adapter->ifp;
	uint32_t feat = adapter->tx_offload_cap;
	int cap = if_getcapenable(ifp);
	int flags = 0;

	if_clearhwassist(ifp);

	if ((cap & IFCAP_TXCSUM) != 0) {
		/* IPv4 header checksum only when the device supports it. */
		if ((feat &
		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
			flags |= CSUM_IP;
		if ((feat &
		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
			flags |= CSUM_IP_UDP | CSUM_IP_TCP;
	}

	if ((cap & IFCAP_TXCSUM_IPV6) != 0)
		flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;

	if ((cap & IFCAP_TSO4) != 0)
		flags |= CSUM_IP_TSO;

	if ((cap & IFCAP_TSO6) != 0)
		flags |= CSUM_IP6_TSO;

	if_sethwassistbits(ifp, flags, 0);
}
2542 
/*
 * Allocate and configure the ifnet for this adapter: callbacks, queue
 * length, capabilities derived from the device's offload features, TSO
 * limits, media handlers, and finally the Ethernet attach with the MAC
 * address read from the device.
 */
static void
ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
    struct ena_com_dev_get_features_ctx *feat)
{
	if_t ifp;
	int caps = 0;

	ifp = adapter->ifp = if_gethandle(IFT_ETHER);
	if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
	if_setdev(ifp, pdev);
	if_setsoftc(ifp, adapter);

	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, ena_init);
	if_settransmitfn(ifp, ena_mq_start);
	if_setqflushfn(ifp, ena_qflush);
	if_setioctlfn(ifp, ena_ioctl);
	if_setgetcounterfn(ifp, ena_get_counter);

	if_setsendqlen(ifp, adapter->requested_tx_ring_size);
	if_setsendqready(ifp);
	if_setmtu(ifp, ETHERMTU);
	if_setbaudrate(ifp, 0);
	/* Zeroize capabilities... */
	if_setcapabilities(ifp, 0);
	if_setcapenable(ifp, 0);
	/* check hardware support */
	caps = ena_get_dev_offloads(feat);
	/* ... and set them */
	if_setcapabilitiesbit(ifp, caps, 0);

	/* TSO parameters */
	if_sethwtsomax(ifp, ENA_TSO_MAXSIZE -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
	if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1);
	if_sethwtsomaxsegsize(ifp, ENA_TSO_MAXSIZE);

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	/* Enable everything the hardware supports by default. */
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change,
	    ena_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, adapter->mac_addr);
}
2594 
/*
 * Bring the interface down: clear the running state, free the IO IRQs,
 * optionally reset the device (when a reset was triggered), destroy the
 * IO queues and free all buffers and descriptor rings.  Must be called
 * with the ENA lock held; a no-op when the device is already down.
 */
void
ena_down(struct ena_adapter *adapter)
{
	int rc;

	ENA_LOCK_ASSERT();

	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		return;

	ena_log(adapter->pdev, INFO, "device is going DOWN\n");

	/* Stop new work before tearing anything down. */
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	ena_free_io_irq(adapter);

	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (unlikely(rc != 0))
			ena_log(adapter->pdev, ERR, "Device reset failed\n");
	}

	ena_destroy_all_io_queues(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_tx_resources(adapter);
	ena_free_all_rx_resources(adapter);

	counter_u64_add(adapter->dev_stats.interface_down, 1);
}
2627 
/*
 * Compute the maximum number of usable IO queue pairs as the minimum of:
 * device SQ/CQ limits (extended or legacy feature format), the CPU count,
 * ENA_MAX_NUM_IO_QUEUES, the available MSI-X vectors (minus one reserved
 * for management) and, with RSS enabled, the number of RSS buckets.
 */
static uint32_t
ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
    struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

	/* Regular queues capabilities */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
		    &get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
		    max_queue_ext->max_rx_cq_num);

		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
		    &get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq fields for the tx SQ/CQ */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
	    pci_msix_count(pdev) - 1);
#ifdef RSS
	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
	    rss_getnumbuckets());
#endif

	return (max_num_io_queues);
}
2669 
/*
 * Remap the given memory resource as write-combining, which batches the
 * CPU stores used for LLQ doorbell/descriptor pushes.  Implemented only
 * on i386/amd64/aarch64; on those architectures the #if block returns
 * before the EOPNOTSUPP fallback is reached.  Returns 0 on success, an
 * error from pmap_change_attr(), or EOPNOTSUPP elsewhere.
 */
static int
ena_enable_wc(device_t pdev, struct resource *res)
{
#if defined(__i386) || defined(__amd64) || defined(__aarch64__)
	vm_offset_t va;
	vm_size_t len;
	int rc;

	va = (vm_offset_t)rman_get_virtual(res);
	len = rman_get_size(res);
	/* Enable write combining */
	rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc);
		return (rc);
	}

	return (0);
#endif
	return (EOPNOTSUPP);
}
2691 
/*
 * Try to enable LLQ (low-latency queue) TX placement.  Falls back to host
 * memory placement when LLQ is unsupported, the memory BAR is not mapped,
 * or device-mode configuration fails; fallback is never treated as an
 * error, so this always returns 0.
 */
static int
ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
    struct ena_admin_feature_llq_desc *llq,
    struct ena_llq_configurations *llq_default_configurations)
{
	int rc;
	uint32_t llq_feature_mask;

	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
	if (!(ena_dev->supported_features & llq_feature_mask)) {
		ena_log(pdev, WARN,
		    "LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return (0);
	}

	/* LLQ needs the mem BAR mapped by ena_map_llq_mem_bar(). */
	if (ena_dev->mem_bar == NULL) {
		ena_log(pdev, WARN,
		    "LLQ is advertised as supported but device doesn't expose mem bar.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return (0);
	}

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc != 0)) {
		ena_log(pdev, WARN,
		    "Failed to configure the device mode. "
		    "Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	}

	return (0);
}
2725 
/*
 * Allocate and map the PCI memory BAR used for LLQ, enable write combining
 * on it and publish its virtual address to the ena_com layer.  Failure to
 * allocate the BAR merely disables LLQ (returns 0); only a write-combining
 * failure is reported as an error.
 */
static int
ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev)
{
	struct ena_adapter *adapter = device_get_softc(pdev);
	int rc, rid;

	/* Try to allocate resources for LLQ bar */
	rid = PCIR_BAR(ENA_MEM_BAR);
	adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (unlikely(adapter->memory == NULL)) {
		ena_log(pdev, WARN,
		    "Unable to allocate LLQ bar resource. LLQ mode won't be used.\n");
		return (0);
	}

	/* Enable write combining for better LLQ performance */
	rc = ena_enable_wc(adapter->pdev, adapter->memory);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "failed to enable write combining.\n");
		/*
		 * NOTE(review): adapter->memory is not released on this
		 * path; presumably the caller's error handling frees it —
		 * verify against the attach/detach paths.
		 */
		return (rc);
	}

	/*
	 * Save virtual address of the device's memory region
	 * for the ena_com layer.
	 */
	ena_dev->mem_bar = rman_get_virtual(adapter->memory);

	return (0);
}
2757 
2758 static inline void
2759 ena_set_llq_configurations(struct ena_llq_configurations *llq_config,
2760     struct ena_admin_feature_llq_desc *llq, struct ena_adapter *adapter)
2761 {
2762 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
2763 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
2764 	llq_config->llq_num_decs_before_header =
2765 	    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
2766 	if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0) {
2767 		if ((ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_LARGE) ||
2768 		    (ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT &&
2769 		    llq->entry_size_recommended == ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
2770 			llq_config->llq_ring_entry_size =
2771 			    ENA_ADMIN_LIST_ENTRY_SIZE_256B;
2772 			llq_config->llq_ring_entry_size_value = 256;
2773 			adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
2774 		}
2775 	} else {
2776 		llq_config->llq_ring_entry_size =
2777 		    ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2778 		llq_config->llq_ring_entry_size_value = 128;
2779 		adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2780 	}
2781 }
2782 
/*
 * Determine the TX/RX queue sizes and per-packet SGL limits.  The maxima
 * come from the device's queue feature descriptors (extended or legacy
 * format), further capped by the LLQ depth when LLQ placement is active
 * and adjusted for large (256B) LLQ headers.  All sizes are rounded down
 * to powers of two and clamped to [ENA_MIN_RING_SIZE, max].  Results are
 * stored in ctx; always returns 0.
 */
static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, struct ena_adapter *adapter)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
	uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
		    &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = min_t(uint32_t,
		    max_queue_ext->max_rx_cq_depth,
		    max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		/* TX SQ depth limit depends on the placement policy. */
		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    max_queue_ext->max_tx_sq_depth);

		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queue_ext->max_per_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queue_ext->max_per_packet_rx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
		    &ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth,
		    max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
			    max_queues->max_sq_depth);

		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queues->max_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
		    max_queues->max_packet_rx_descs);
	}

	if (adapter->llq_policy == ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			if (llq->max_wide_llq_depth != max_tx_queue_size) {
				if (llq->max_wide_llq_depth == 0) {
					/*
					 * If the device reports no large-LLQ
					 * max depth, halve the queue size so
					 * the memory used by the queues stays
					 * unchanged (entries are twice as big).
					 */
					max_tx_queue_size /= 2;
				} else {
					max_tx_queue_size = llq->max_wide_llq_depth;
				}
				ena_log(ctx->pdev, INFO,
				    "Using large LLQ headers and decreasing maximum Tx queue size to %d\n",
				    max_tx_queue_size);
			} else {
				ena_log(ctx->pdev, INFO, "Using large LLQ headers\n");
			}
		} else {
			ena_log(ctx->pdev, WARN,
			    "Using large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	/* round down to the nearest power of 2 */
	max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);

	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
	    max_tx_queue_size);
	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
	    max_rx_queue_size);

	tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
	rx_queue_size = 1 << (flsl(rx_queue_size) - 1);

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;
	ctx->tx_queue_size = tx_queue_size;
	ctx->rx_queue_size = rx_queue_size;

	return (0);
}
2877 
/*
 * Allocate the host-info structure and report host details (PCI BDF, OS
 * type/version, driver version, CPU count, supported driver features) to
 * the device.  Failures are logged and the structure is freed; this
 * feature is best-effort, so no error is returned.
 */
static void
ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
{
	struct ena_admin_host_info *host_info;
	uintptr_t rid;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(dev, ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
		host_info->bdf = rid;
	host_info->os_type = ENA_ADMIN_OS_FREEBSD;
	host_info->kernel_ver = osreldate;

	sprintf(host_info->kernel_ver_str, "%d", osreldate);
	host_info->os_dist = 0;
	/*
	 * NOTE(review): strncpy with size-1 relies on host_info being
	 * zero-initialized for NUL termination — presumably
	 * ena_com_allocate_host_info() zeroes it; verify.
	 */
	strncpy(host_info->os_dist_str, osrelease,
	    sizeof(host_info->os_dist_str) - 1);

	host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) |
	    (ENA_DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
	    (ENA_DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = mp_ncpus;
	host_info->driver_supported_features =
	    ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
	    ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (unlikely(rc != 0)) {
		/* EOPNOTSUPP means an older device — warn, don't error. */
		if (rc == EOPNOTSUPP)
			ena_log(dev, WARN, "Cannot set host attributes\n");
		else
			ena_log(dev, ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
2927 
/*
 * Perform the core device initialization sequence: MMIO read mechanism,
 * device reset, version check, DMA width discovery, admin queue setup
 * (in polling mode, since MSI-X is not configured yet), host info, device
 * feature query, AENQ group configuration and LLQ placement policy.
 * On success fills get_feat_ctx and *wd_active (whether the keep-alive
 * watchdog AENQ group is active) and returns 0; on failure unwinds the
 * partially initialized state via the goto chain and returns an error.
 */
static int
ena_device_init(struct ena_adapter *adapter, device_t pdev,
    struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
{
	struct ena_llq_configurations llq_config;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool readless_supported;
	uint32_t aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "failed to init mmio read less\n");
		return (rc);
	}

	/*
	 * The PCIe configuration space revision id indicate if mmio reg
	 * read is disabled
	 */
	readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "device version is too low\n");
		goto err_mmio_read_less;
	}

	dma_width = ena_com_get_dma_width(ena_dev);
	if (unlikely(dma_width < 0)) {
		ena_log(pdev, ERR, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}
	adapter->dma_width = dma_width;

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/*
	 * To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev, pdev);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Cannot get attribute for ena device rc: %d\n", rc);
		goto err_admin_init;
	}

	/* Subscribe only to AENQ groups both sides support. */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
	    BIT(ENA_ADMIN_FATAL_ERROR) |
	    BIT(ENA_ADMIN_WARNING) |
	    BIT(ENA_ADMIN_NOTIFICATION) |
	    BIT(ENA_ADMIN_KEEP_ALIVE) |
	    BIT(ENA_ADMIN_CONF_NOTIFICATIONS) |
	    BIT(ENA_ADMIN_DEVICE_REQUEST_RESET);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;
	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc);
		goto err_admin_init;
	}

	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	ena_set_llq_configurations(&llq_config, &get_feat_ctx->llq, adapter);

	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
	    &llq_config);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to set placement policy\n");
		goto err_admin_init;
	}

	return (0);

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return (rc);
}
3033 
3034 static int
3035 ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3036 {
3037 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3038 	int rc;
3039 
3040 	rc = ena_enable_msix(adapter);
3041 	if (unlikely(rc != 0)) {
3042 		ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n");
3043 		return (rc);
3044 	}
3045 
3046 	ena_setup_mgmnt_intr(adapter);
3047 
3048 	rc = ena_request_mgmnt_irq(adapter);
3049 	if (unlikely(rc != 0)) {
3050 		ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n");
3051 		goto err_disable_msix;
3052 	}
3053 
3054 	ena_com_set_admin_polling_mode(ena_dev, false);
3055 
3056 	ena_com_admin_aenq_enable(ena_dev);
3057 
3058 	return (0);
3059 
3060 err_disable_msix:
3061 	ena_disable_msix(adapter);
3062 
3063 	return (rc);
3064 }
3065 
3066 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
3067 static void
3068 ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
3069 {
3070 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3071 	struct ena_admin_aenq_keep_alive_desc *desc;
3072 	sbintime_t stime;
3073 	uint64_t rx_drops;
3074 	uint64_t tx_drops;
3075 
3076 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3077 
3078 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3079 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
3080 	counter_u64_zero(adapter->hw_stats.rx_drops);
3081 	counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
3082 	counter_u64_zero(adapter->hw_stats.tx_drops);
3083 	counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
3084 
3085 	stime = getsbinuptime();
3086 	atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
3087 }
3088 
3089 /* Check for keep alive expiration */
3090 static void
3091 check_for_missing_keep_alive(struct ena_adapter *adapter)
3092 {
3093 	sbintime_t timestamp, time;
3094 	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3095 
3096 	if (adapter->wd_active == 0)
3097 		return;
3098 
3099 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3100 		return;
3101 
3102 	timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
3103 	time = getsbinuptime() - timestamp;
3104 	if (unlikely(time > adapter->keep_alive_timeout)) {
3105 		ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n");
3106 		if (ena_com_aenq_has_keep_alive(adapter->ena_dev))
3107 			reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT;
3108 
3109 		ena_trigger_reset(adapter, reset_reason);
3110 	}
3111 }
3112 
3113 /* Check if admin queue is enabled */
3114 static void
3115 check_for_admin_com_state(struct ena_adapter *adapter)
3116 {
3117 	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_ADMIN_TO;
3118 	if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) {
3119 		ena_log(adapter->pdev, ERR,
3120 		    "ENA admin queue is not in running state!\n");
3121 		counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
3122 		if (ena_com_get_missing_admin_interrupt(adapter->ena_dev))
3123 			reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT;
3124 
3125 		ena_trigger_reset(adapter, reset_reason);
3126 	}
3127 }
3128 
3129 static int
3130 check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3131     struct ena_ring *rx_ring)
3132 {
3133 	if (likely(atomic_load_8(&rx_ring->first_interrupt)))
3134 		return (0);
3135 
3136 	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3137 		return (0);
3138 
3139 	rx_ring->no_interrupt_event_cnt++;
3140 
3141 	if (rx_ring->no_interrupt_event_cnt ==
3142 	    ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3143 		ena_log(adapter->pdev, ERR,
3144 		    "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3145 		    rx_ring->qid);
3146 		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3147 		return (EIO);
3148 	}
3149 
3150 	return (0);
3151 }
3152 
3153 static enum ena_regs_reset_reason_types
3154 check_cdesc_in_tx_cq(struct ena_adapter *adapter,
3155     struct ena_ring *tx_ring)
3156 {
3157 	device_t pdev = adapter->pdev;
3158 	int rc;
3159 	u16 req_id;
3160 
3161 	rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id);
3162 	/* TX CQ is empty */
3163 	if (rc == ENA_COM_TRY_AGAIN) {
3164 		ena_log(pdev, ERR,
3165 		    "No completion descriptors found in CQ %d\n",
3166 		    tx_ring->qid);
3167 		return ENA_REGS_RESET_MISS_TX_CMPL;
3168 	}
3169 
3170 	/* TX CQ has cdescs */
3171 	ena_log(pdev, ERR,
3172 	    "Completion descriptors found in CQ %d",
3173 	    tx_ring->qid);
3174 
3175 	return ENA_REGS_RESET_MISS_INTERRUPT;
3176 }
3177 
/*
 * Scan one Tx ring for packets whose completion did not arrive within
 * adapter->missing_tx_timeout. Triggers a device reset when either no MSI-X
 * interrupt was ever observed on the ring (after a doubled grace period) or
 * the count of overdue packets exceeds adapter->missing_tx_threshold.
 *
 * Returns 0 when the ring looks healthy, EIO when a reset was requested.
 */
static int
check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
    struct ena_ring *tx_ring)
{
	uint32_t missed_tx = 0, new_missed_tx = 0;
	device_t pdev = adapter->pdev;
	struct bintime curtime, time;
	struct ena_tx_buffer *tx_buf;
	int time_since_last_cleanup;
	int missing_tx_comp_to;
	sbintime_t time_offset;
	int i, rc = 0;
	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
	bool cleanup_scheduled, cleanup_running;

	getbinuptime(&curtime);

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];

		/* Slot is not in flight - skip it. */
		if (bintime_isset(&tx_buf->timestamp) == 0)
			continue;

		/* Age of this in-flight packet as an sbintime_t. */
		time = curtime;
		bintime_sub(&time, &tx_buf->timestamp);
		time_offset = bttosbt(time);

		if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) &&
		    time_offset > 2 * adapter->missing_tx_timeout)) {
			/*
			 * If after graceful period interrupt is still not
			 * received, we schedule a reset.
			 */
			ena_log(pdev, ERR,
			    "Potential MSIX issue on Tx side Queue = %d. "
			    "Reset the device\n",
			    tx_ring->qid);
			ena_trigger_reset(adapter,
			    ENA_REGS_RESET_MISS_INTERRUPT);
			return (EIO);
		}

		/* Check again if packet is still waiting */
		if (unlikely(time_offset > adapter->missing_tx_timeout)) {

			/*
			 * print_once is cleared below, so each buffer is
			 * logged and added to new_missed_tx at most once.
			 */
			if (tx_buf->print_once) {
				time_since_last_cleanup = TICKS_2_MSEC(ticks -
				    tx_ring->tx_last_cleanup_ticks);
				missing_tx_comp_to = sbttoms(
				    adapter->missing_tx_timeout);
				ena_log(pdev, WARN,
				    "Found a Tx that wasn't completed on time, qid %d, index %d. "
				    "%d msecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n",
				    tx_ring->qid, i, time_since_last_cleanup,
				    missing_tx_comp_to);
				/* Add new TX completions which are missed */
				new_missed_tx++;
			}

			tx_buf->print_once = false;
			missed_tx++;
		}
	}
	/* Checking if this TX ring missing TX completions have passed the threshold */
	if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
		ena_log(pdev, ERR,
		    "The number of lost tx completion is above the threshold "
		    "(%d > %d). Reset the device\n",
		    missed_tx, adapter->missing_tx_threshold);
		/* Set the reset flag to prevent ena_cleanup() from running */
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
		/* Need to make sure that ENA_FLAG_TRIGGER_RESET is visible to ena_cleanup() and
		 * that cleanup_running is visible to check_missing_comp_in_tx_queue() to
		 * prevent the case of accessing CQ concurrently with check_cdesc_in_tx_cq()
		 */
		mb();
		cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending));
		cleanup_running = !!(atomic_load_8((&tx_ring->cleanup_running)));
		/*
		 * Only peek at the CQ to refine the reset reason when no
		 * cleanup task can touch it concurrently.
		 */
		if (!(cleanup_scheduled || cleanup_running))
			reset_reason = check_cdesc_in_tx_cq(adapter, tx_ring);

		adapter->reset_reason = reset_reason;
		rc = EIO;
	}
	/* Add the newly discovered missing TX completions */
	counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx);

	return (rc);
}
3267 
3268 /*
3269  * Check for TX which were not completed on time.
3270  * Timeout is defined by "missing_tx_timeout".
3271  * Reset will be performed if number of incompleted
3272  * transactions exceeds "missing_tx_threshold".
3273  */
3274 static void
3275 check_for_missing_completions(struct ena_adapter *adapter)
3276 {
3277 	struct ena_ring *tx_ring;
3278 	struct ena_ring *rx_ring;
3279 	int i, budget, rc;
3280 
3281 	/* Make sure the driver doesn't turn the device in other process */
3282 	rmb();
3283 
3284 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3285 		return;
3286 
3287 	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3288 		return;
3289 
3290 	if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3291 		return;
3292 
3293 	budget = adapter->missing_tx_max_queues;
3294 
3295 	for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
3296 		tx_ring = &adapter->tx_ring[i];
3297 		rx_ring = &adapter->rx_ring[i];
3298 
3299 		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3300 		if (unlikely(rc != 0))
3301 			return;
3302 
3303 		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
3304 		if (unlikely(rc != 0))
3305 			return;
3306 
3307 		budget--;
3308 		if (budget == 0) {
3309 			i++;
3310 			break;
3311 		}
3312 	}
3313 
3314 	adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
3315 }
3316 
3317 /* trigger rx cleanup after 2 consecutive detections */
3318 #define EMPTY_RX_REFILL 2
3319 /* For the rare case where the device runs out of Rx descriptors and the
3320  * msix handler failed to refill new Rx descriptors (due to a lack of memory
3321  * for example).
3322  * This case will lead to a deadlock:
3323  * The device won't send interrupts since all the new Rx packets will be dropped
3324  * The msix handler won't allocate new Rx descriptors so the device won't be
3325  * able to send new packets.
3326  *
3327  * When such a situation is detected - execute rx cleanup task in another thread
3328  */
3329 static void
3330 check_for_empty_rx_ring(struct ena_adapter *adapter)
3331 {
3332 	struct ena_ring *rx_ring;
3333 	int i, refill_required;
3334 
3335 	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3336 		return;
3337 
3338 	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3339 		return;
3340 
3341 	for (i = 0; i < adapter->num_io_queues; i++) {
3342 		rx_ring = &adapter->rx_ring[i];
3343 
3344 		refill_required = ena_com_free_q_entries(
3345 		    rx_ring->ena_com_io_sq);
3346 		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3347 			rx_ring->empty_rx_queue++;
3348 
3349 			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3350 				counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3351 				    1);
3352 
3353 				ena_log(adapter->pdev, WARN,
3354 				    "Rx ring %d is stalled. Triggering the refill function\n",
3355 				    i);
3356 
3357 				taskqueue_enqueue(rx_ring->que->cleanup_tq,
3358 				    &rx_ring->que->cleanup_task);
3359 				rx_ring->empty_rx_queue = 0;
3360 			}
3361 		} else {
3362 			rx_ring->empty_rx_queue = 0;
3363 		}
3364 	}
3365 }
3366 
3367 static void
3368 ena_update_hints(struct ena_adapter *adapter,
3369     struct ena_admin_ena_hw_hints *hints)
3370 {
3371 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3372 
3373 	if (hints->admin_completion_tx_timeout)
3374 		ena_dev->admin_queue.completion_timeout =
3375 		    hints->admin_completion_tx_timeout * 1000;
3376 
3377 	if (hints->mmio_read_timeout)
3378 		/* convert to usec */
3379 		ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000;
3380 
3381 	if (hints->missed_tx_completion_count_threshold_to_reset)
3382 		adapter->missing_tx_threshold =
3383 		    hints->missed_tx_completion_count_threshold_to_reset;
3384 
3385 	if (hints->missing_tx_completion_timeout) {
3386 		if (hints->missing_tx_completion_timeout ==
3387 		    ENA_HW_HINTS_NO_TIMEOUT)
3388 			adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3389 		else
3390 			adapter->missing_tx_timeout = SBT_1MS *
3391 			    hints->missing_tx_completion_timeout;
3392 	}
3393 
3394 	if (hints->driver_watchdog_timeout) {
3395 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3396 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3397 		else
3398 			adapter->keep_alive_timeout = SBT_1MS *
3399 			    hints->driver_watchdog_timeout;
3400 	}
3401 }
3402 
3403 /**
3404  * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
3405  * @adapter: ENA device adapter
3406  *
3407  * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics
3408  * and other error codes on failure.
3409  *
3410  * This function can possibly cause a race with other calls to the admin queue.
3411  * Because of that, the caller should either lock this function or make sure
3412  * that there is no race in the current context.
3413  */
3414 static int
3415 ena_copy_eni_metrics(struct ena_adapter *adapter)
3416 {
3417 	static bool print_once = true;
3418 	int rc;
3419 
3420 	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
3421 
3422 	if (rc != 0) {
3423 		if (rc == ENA_COM_UNSUPPORTED) {
3424 			if (print_once) {
3425 				ena_log(adapter->pdev, WARN,
3426 				    "Retrieving ENI metrics is not supported.\n");
3427 				print_once = false;
3428 			} else {
3429 				ena_log(adapter->pdev, DBG,
3430 				    "Retrieving ENI metrics is not supported.\n");
3431 			}
3432 		} else {
3433 			ena_log(adapter->pdev, ERR,
3434 			    "Failed to get ENI metrics: %d\n", rc);
3435 		}
3436 	}
3437 
3438 	return (rc);
3439 }
3440 
3441 static int
3442 ena_copy_srd_metrics(struct ena_adapter *adapter)
3443 {
3444 	return ena_com_get_ena_srd_info(adapter->ena_dev, &adapter->ena_srd_info);
3445 }
3446 
3447 static int
3448 ena_copy_customer_metrics(struct ena_adapter *adapter)
3449 {
3450 	struct ena_com_dev *dev;
3451 	u32 supported_metrics_count;
3452 	int rc, len;
3453 
3454 	dev = adapter->ena_dev;
3455 
3456 	supported_metrics_count = ena_com_get_customer_metric_count(dev);
3457 	len = supported_metrics_count * sizeof(u64);
3458 
3459 	/* Fill the data buffer */
3460 	rc = ena_com_get_customer_metrics(adapter->ena_dev,
3461 	    (char *)(adapter->customer_metrics_array), len);
3462 
3463 	return (rc);
3464 }
3465 
/*
 * Once-per-second health-check callout. Runs the keep-alive, admin queue,
 * missing-completion and empty-Rx-ring checks, periodically kicks the
 * metrics task and refreshes host info. When the trigger-reset flag is set
 * it queues the reset task and does NOT reschedule itself - the timer is
 * restarted by ena_restore_device() once the reset completes.
 */
static void
ena_timer_service(void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	struct ena_admin_host_info *host_info =
	    adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_completions(adapter);

	check_for_empty_rx_ring(adapter);

	/*
	 * User controller update of the ENA metrics.
	 * If the delay was set to 0, then the stats shouldn't be updated at
	 * all.
	 * Otherwise, wait 'metrics_sample_interval' seconds, before
	 * updating stats.
	 * As timer service is executed every second, it's enough to increment
	 * appropriate counter each time the timer service is executed.
	 */
	if ((adapter->metrics_sample_interval != 0) &&
	    (++adapter->metrics_sample_interval_cnt >=
	    adapter->metrics_sample_interval)) {
		taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task);
		adapter->metrics_sample_interval_cnt = 0;
	}


	if (host_info != NULL)
		ena_update_host_info(host_info, adapter->ifp);

	if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
		/*
		 * Timeout when validating version indicates that the device
		 * became unresponsive. If that happens skip the reset and
		 * reschedule timer service, so the reset can be retried later.
		 */
		if (ena_com_validate_version(adapter->ena_dev) ==
		    ENA_COM_TIMER_EXPIRED) {
			ena_log(adapter->pdev, WARN,
			    "FW unresponsive, skipping reset\n");
			ENA_TIMER_RESET(adapter);
			return;
		}
		ena_log(adapter->pdev, WARN, "Trigger reset is on\n");
		taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
		return;
	}

	/*
	 * Schedule another timeout one second from now.
	 */
	ENA_TIMER_RESET(adapter);
}
3524 
/*
 * Tear the device down: drain the timer, bring the interface down, reset
 * the HW (unless ena_down() already performed the reset), release the
 * management IRQ and MSI-X vectors, free IO ring resources and destroy the
 * admin queue and the mmio read mechanism.
 *
 * `graceful` is false on the error/reset path: the link is forced down and
 * the admin running state is cleared before shutting down.
 */
void
ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
	if_t ifp = adapter->ifp;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool dev_up;

	/* Nothing to do if the device was never (or is no longer) running. */
	if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
		return;

	if (!graceful)
		if_link_state_change(ifp, LINK_STATE_DOWN);

	/* Stop the periodic health checks before dismantling anything. */
	ENA_TIMER_DRAIN(adapter);

	/* Remember the up/down state so ena_restore_device() can re-apply it. */
	dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
	if (dev_up)
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);

	if (!graceful)
		ena_com_set_admin_running_state(ena_dev, false);

	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
		ena_down(adapter);

	/*
	 * Stop the device from sending AENQ events (if the device was up, and
	 * the trigger reset was on, ena_down already performs device reset)
	 */
	if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	/*
	 * IO rings resources should be freed because `ena_restore_device()`
	 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
	 * vectors. The amount of MSIX vectors after destroy-restore may be
	 * different than before. Therefore, IO rings resources should be
	 * established from scratch each time.
	 */
	ena_free_all_io_rings_resources(adapter);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
}
3583 
3584 static int
3585 ena_device_validate_params(struct ena_adapter *adapter,
3586     struct ena_com_dev_get_features_ctx *get_feat_ctx)
3587 {
3588 	if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
3589 	    ETHER_ADDR_LEN) != 0) {
3590 		ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n");
3591 		return (EINVAL);
3592 	}
3593 
3594 	if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
3595 		ena_log(adapter->pdev, ERR,
3596 		    "Error, device max mtu is smaller than ifp MTU\n");
3597 		return (EINVAL);
3598 	}
3599 
3600 	return 0;
3601 }
3602 
/*
 * Counterpart of ena_destroy_device(): re-initialize the HW, validate that
 * it is still the same device (MAC / MTU), re-enable MSI-X and the admin
 * interrupt, re-create the IO rings and bring the interface back up if it
 * was up before the reset. Returns 0 on success, errno on failure.
 */
int
ena_restore_device(struct ena_adapter *adapter)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	if_t ifp = adapter->ifp;
	device_t dev = adapter->pdev;
	int wd_active;
	int rc;

	ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);

	rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
	if (rc != 0) {
		ena_log(dev, ERR, "Cannot initialize device\n");
		goto err;
	}
	/*
	 * Only enable WD if it was enabled before reset, so it won't override
	 * value set by the user by the sysctl.
	 */
	if (adapter->wd_active != 0)
		adapter->wd_active = wd_active;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc != 0) {
		ena_log(dev, ERR, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	/* Make sure we don't have a race with AENQ Links state handler */
	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
		if_link_state_change(ifp, LINK_STATE_UP);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc != 0) {
		ena_log(dev, ERR, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}

	/*
	 * Effective value of used MSIX vectors should be the same as before
	 * `ena_destroy_device()`, if possible, or closest to it if less vectors
	 * are available.
	 */
	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
		adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;

	/* Re-initialize rings basic information */
	ena_init_io_rings(adapter);

	/* If the interface was up before the reset bring it up */
	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
		rc = ena_up(adapter);
		if (rc != 0) {
			ena_log(dev, ERR, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	/* Indicate that device is running again and ready to work */
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);

	/*
	 * As the AENQ handlers weren't executed during reset because
	 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
	 * timestamp must be updated again That will prevent next reset
	 * caused by missing keep alive.
	 */
	adapter->keep_alive_timestamp = getsbinuptime();
	/* Restart the timer service stopped by ena_destroy_device(). */
	ENA_TIMER_RESET(adapter);

	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);

	return (rc);

	/* Error unwind - release resources in reverse order of acquisition. */
err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
	ena_log(dev, ERR, "Reset attempt failed. Can not reset the device\n");

	return (rc);
}
3696 
3697 static void
3698 ena_metrics_task(void *arg, int pending)
3699 {
3700 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
3701 
3702 	ENA_LOCK_LOCK();
3703 
3704 	if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_CUSTOMER_METRICS))
3705 		(void)ena_copy_customer_metrics(adapter);
3706 	else if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS))
3707 		(void)ena_copy_eni_metrics(adapter);
3708 
3709 	if (ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO))
3710 		(void)ena_copy_srd_metrics(adapter);
3711 
3712 	ENA_LOCK_UNLOCK();
3713 }
3714 
3715 static void
3716 ena_reset_task(void *arg, int pending)
3717 {
3718 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
3719 
3720 	ENA_LOCK_LOCK();
3721 	if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3722 		ena_increment_reset_counter(adapter);
3723 		ena_destroy_device(adapter, false);
3724 		ena_restore_device(adapter);
3725 
3726 		ena_log(adapter->pdev, INFO,
3727 		    "Device reset completed successfully, Driver info: %s\n",
3728 		    ena_version);
3729 	}
3730 	ENA_LOCK_UNLOCK();
3731 }
3732 
3733 static void
3734 ena_free_stats(struct ena_adapter *adapter)
3735 {
3736 	ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3737 	    sizeof(struct ena_hw_stats));
3738 	ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3739 	    sizeof(struct ena_stats_dev));
3740 
3741 }
3742 /**
3743  * ena_attach - Device Initialization Routine
3744  * @pdev: device information struct
3745  *
3746  * Returns 0 on success, otherwise on failure.
3747  *
3748  * ena_attach initializes an adapter identified by a device structure.
3749  * The OS initialization, configuring of the adapter private structure,
3750  * and a hardware reset occur.
3751  **/
static int
ena_attach(device_t pdev)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	static int version_printed;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev = NULL;
	uint32_t max_num_io_queues;
	int msix_rid;
	int rid, rc;

	adapter = device_get_softc(pdev);
	adapter->pdev = pdev;
	/* -1 marks "no CPU bound yet" for interrupt affinity bookkeeping. */
	adapter->first_bind = -1;

	/*
	 * Set up the timer service - driver is responsible for avoiding
	 * concurrency, as the callout won't be using any locking inside.
	 */
	ENA_TIMER_INIT(adapter);
	adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO;
	adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO;
	adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES;
	adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD;

	adapter->irq_cpu_base = ENA_BASE_CPU_UNSPECIFIED;
	adapter->irq_cpu_stride = 0;

#ifdef RSS
	adapter->rss_enabled = 1;
#endif

	/* Print the driver version only for the first attached instance. */
	if (version_printed++ == 0)
		ena_log(pdev, INFO, "%s\n", ena_version);

	/* Allocate memory for ena_dev structure */
	ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	adapter->ena_dev = ena_dev;
	ena_dev->dmadev = pdev;

	/* Map the registers BAR. */
	rid = PCIR_BAR(ENA_REG_BAR);
	adapter->memory = NULL;
	adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (unlikely(adapter->registers == NULL)) {
		ena_log(pdev, ERR,
		    "unable to allocate bus resource: registers!\n");
		rc = ENOMEM;
		goto err_dev_free;
	}

	/* MSIx vector table may reside on BAR0 with registers or on BAR1. */
	msix_rid = pci_msix_table_bar(pdev);
	if (msix_rid != rid) {
		adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
		    &msix_rid, RF_ACTIVE);
		if (unlikely(adapter->msix == NULL)) {
			ena_log(pdev, ERR,
			    "unable to allocate bus resource: msix!\n");
			rc = ENOMEM;
			goto err_pci_free;
		}
		adapter->msix_rid = msix_rid;
	}

	ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/* Store register resources */
	((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag(
	    adapter->registers);
	((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle(
	    adapter->registers);

	if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) {
		ena_log(pdev, ERR, "failed to pmap registers bar\n");
		rc = ENXIO;
		goto err_bus_free;
	}

	rc = ena_map_llq_mem_bar(pdev, ena_dev);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to map ENA mem bar");
		goto err_bus_free;
	}

	ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;

	/* Initially clear all the flags */
	ENA_FLAG_ZERO(adapter);

	/* Device initialization */
	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "ENA device init failed! (err: %d)\n", rc);
		rc = ENXIO;
		goto err_bus_free;
	}

	/* LLQ placement: honor the device's meta-caching capability flag. */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		adapter->disable_meta_caching = !!(
		    get_feat_ctx.llq.accel_mode.u.get.supported_flags &
		    BIT(ENA_ADMIN_DISABLE_META_CACHING));

	/* Seed the keep-alive watchdog so it doesn't fire immediately. */
	adapter->keep_alive_timestamp = getsbinuptime();

	adapter->tx_offload_cap = get_feat_ctx.offload.tx;

	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
	    ETHER_ADDR_LEN);

	calc_queue_ctx.pdev = pdev;
	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;

	/* Calculate initial and maximum IO queue number and size */
	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
	    &get_feat_ctx);
	rc = ena_calc_io_queue_size(&calc_queue_ctx, adapter);
	if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
		rc = EFAULT;
		goto err_com_free;
	}

	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;

	adapter->max_num_io_queues = max_num_io_queues;

	adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;

	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	/* set up dma tags for rx and tx buffers */
	rc = ena_setup_tx_dma_tag(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to create TX DMA tag\n");
		goto err_com_free;
	}

	rc = ena_setup_rx_dma_tag(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR, "Failed to create RX DMA tag\n");
		goto err_tx_tag_free;
	}

	/*
	 * The amount of requested MSIX vectors is equal to
	 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
	 * number of admin queue interrupts. The former is initially determined
	 * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be
	 * achieved if there are not enough system resources. By default, the
	 * number of effectively used IO queues is the same but later on it can
	 * be limited by the user using sysctl interface.
	 */
	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (unlikely(rc != 0)) {
		ena_log(pdev, ERR,
		    "Failed to enable and set the admin interrupts\n");
		goto err_io_free;
	}
	/* By default all of allocated MSIX vectors are actively used */
	adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;

	/* initialize rings basic information */
	ena_init_io_rings(adapter);

	rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
	if (rc) {
		ena_log(pdev, ERR, "Failed to allocate customer metrics buffer.\n");
		goto err_msix_free;
	}

	rc = ena_sysctl_allocate_customer_metrics_buffer(adapter);
	if (unlikely(rc)){
		ena_log(pdev, ERR, "Failed to allocate sysctl customer metrics buffer.\n");
		goto err_metrics_buffer_destroy;
	}

	/* Initialize statistics */
	ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
	    sizeof(struct ena_stats_dev));
	ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
	    sizeof(struct ena_hw_stats));
	ena_sysctl_add_nodes(adapter);

	/* setup network interface */
	ena_setup_ifnet(pdev, adapter, &get_feat_ctx);

	/* Initialize reset task queue */
	TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
	adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
	taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq",
	    device_get_nameunit(adapter->pdev));

	/* Initialize metrics task queue */
	TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter);
	adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue",
	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq);
	taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq",
	    device_get_nameunit(adapter->pdev));

#ifdef DEV_NETMAP
	rc = ena_netmap_attach(adapter);
	if (rc != 0) {
		ena_log(pdev, ERR, "netmap attach failed: %d\n", rc);
		goto err_detach;
	}
#endif /* DEV_NETMAP */

	/* Tell the stack that the interface is not active */
	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);

	/* Run the timer service */
	ENA_TIMER_RESET(adapter);

	return (0);

	/* Error unwind - labels release resources in reverse order of
	 * acquisition; each entry point frees everything acquired after it. */
#ifdef DEV_NETMAP
err_detach:
	ether_ifdetach(adapter->ifp);
	ifmedia_removeall(&adapter->media);
	free(adapter->customer_metrics_array, M_DEVBUF);
#endif /* DEV_NETMAP */
err_metrics_buffer_destroy:
	ena_com_delete_customer_metrics_buffer(ena_dev);
err_msix_free:
	ena_free_stats(adapter);
	ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_io_free:
	ena_free_all_io_rings_resources(adapter);
	ena_free_rx_dma_tag(adapter);
err_tx_tag_free:
	ena_free_tx_dma_tag(adapter);
err_com_free:
	ena_com_admin_destroy(ena_dev);
	ena_com_delete_host_info(ena_dev);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err_bus_free:
	free(ena_dev->bus, M_DEVBUF);
err_pci_free:
	ena_free_pci_resources(adapter);
err_dev_free:
	free(ena_dev, M_DEVBUF);

	return (rc);
}
4012 
4013 /**
4014  * ena_detach - Device Removal Routine
4015  * @pdev: device information struct
4016  *
4017  * ena_detach is called by the device subsystem to alert the driver
4018  * that it should release a PCI device.
4019  **/
static int
ena_detach(device_t pdev)
{
	struct ena_adapter *adapter = device_get_softc(pdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* Make sure VLANS are not using driver */
	if (if_vlantrunkinuse(adapter->ifp)) {
		ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n");
		return (EBUSY);
	}

	/*
	 * Remove the interface from the stack first so that no new requests
	 * can arrive while the rest of the resources are torn down.
	 */
	ether_ifdetach(adapter->ifp);

	ifmedia_removeall(&adapter->media);

	/* Stop timer service */
	ENA_LOCK_LOCK();
	ENA_TIMER_DRAIN(adapter);
	ENA_LOCK_UNLOCK();

	/*
	 * Release metrics task.  taskqueue_cancel() returns non-zero while
	 * the task is still executing, so drain it and retry until the
	 * cancellation sticks before freeing the queue.
	 */
	while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL))
		taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task);
	taskqueue_free(adapter->metrics_tq);

	/* Release reset task (same cancel-then-drain pattern as above). */
	while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
		taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
	taskqueue_free(adapter->reset_tq);

	/* Bring the device down and destroy HW state under the driver lock. */
	ENA_LOCK_LOCK();
	ena_down(adapter);
	ena_destroy_device(adapter, true);
	ENA_LOCK_UNLOCK();

	/* Restore unregistered sysctl queue nodes. */
	ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues,
	    adapter->max_num_io_queues);

#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */

	ena_free_stats(adapter);

	/* DMA tag teardown failures are logged but do not abort the detach. */
	rc = ena_free_rx_dma_tag(adapter);
	if (unlikely(rc != 0))
		ena_log(adapter->pdev, WARN,
		    "Unmapped RX DMA tag associations\n");

	rc = ena_free_tx_dma_tag(adapter);
	if (unlikely(rc != 0))
		ena_log(adapter->pdev, WARN,
		    "Unmapped TX DMA tag associations\n");

	ena_free_irqs(adapter);

	ena_free_pci_resources(adapter);

	if (adapter->rss_indir != NULL)
		free(adapter->rss_indir, M_DEVBUF);

	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
		ena_com_rss_destroy(ena_dev);

	ena_com_delete_host_info(ena_dev);

	free(adapter->customer_metrics_array, M_DEVBUF);

	ena_com_delete_customer_metrics_buffer(ena_dev);

	if_free(adapter->ifp);

	/* ena_dev->bus and ena_dev itself were allocated during attach. */
	free(ena_dev->bus, M_DEVBUF);

	free(ena_dev, M_DEVBUF);

	return (bus_generic_detach(pdev));
}
4101 
4102 /******************************************************************************
4103  ******************************** AENQ Handlers *******************************
4104  *****************************************************************************/
4105 /**
4106  * ena_update_on_link_change:
4107  * Notify the network interface about the change in link status
4108  **/
4109 static void
4110 ena_update_on_link_change(void *adapter_data,
4111     struct ena_admin_aenq_entry *aenq_e)
4112 {
4113 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4114 	struct ena_admin_aenq_link_change_desc *aenq_desc;
4115 	int status;
4116 	if_t ifp;
4117 
4118 	aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
4119 	ifp = adapter->ifp;
4120 	status = aenq_desc->flags &
4121 	    ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
4122 
4123 	if (status != 0) {
4124 		ena_log(adapter->pdev, INFO, "link is UP\n");
4125 		ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
4126 		if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
4127 			if_link_state_change(ifp, LINK_STATE_UP);
4128 	} else {
4129 		ena_log(adapter->pdev, INFO, "link is DOWN\n");
4130 		if_link_state_change(ifp, LINK_STATE_DOWN);
4131 		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
4132 	}
4133 }
4134 
4135 static void
4136 ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
4137 {
4138 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4139 	struct ena_admin_ena_hw_hints *hints;
4140 
4141 	ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4142 	    adapter->ena_dev, "Invalid group(%x) expected %x\n",
4143 	    aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);
4144 
4145 	switch (aenq_e->aenq_common_desc.syndrome) {
4146 	case ENA_ADMIN_UPDATE_HINTS:
4147 		hints =
4148 		    (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
4149 		ena_update_hints(adapter, hints);
4150 		break;
4151 	default:
4152 		ena_log(adapter->pdev, ERR,
4153 		    "Invalid aenq notification link state %d\n",
4154 		    aenq_e->aenq_common_desc.syndrome);
4155 	}
4156 }
4157 
/* Set up the driver-global lock early in boot (SI_SUB_LOCK, via SYSINIT). */
static void
ena_lock_init(void *arg)
{
	ENA_LOCK_INIT();
}
SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);
4164 
/* Tear down the driver-global lock at shutdown (counterpart of ena_lock_init). */
static void
ena_lock_uninit(void *arg)
{
	ENA_LOCK_DESTROY();
}
SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);
4171 
4172 /**
4173  * This handler will called for unknown event group or unimplemented handlers
4174  **/
4175 static void
4176 unimplemented_aenq_handler(void *adapter_data,
4177     struct ena_admin_aenq_entry *aenq_e)
4178 {
4179 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4180 
4181 	ena_log(adapter->pdev, ERR,
4182 	    "Unknown event was received or event with unimplemented handler\n");
4183 }
4184 
4185 static void ena_conf_notification(void *adapter_data,
4186     struct ena_admin_aenq_entry *aenq_e)
4187 {
4188 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4189 	struct ena_admin_aenq_conf_notifications_desc *desc;
4190 	u64 bitmap, bit;
4191 
4192 	desc = (struct ena_admin_aenq_conf_notifications_desc *)aenq_e;
4193 	bitmap = desc->notifications_bitmap;
4194 
4195 	if (bitmap == 0) {
4196 		ena_log(adapter->pdev, INFO,
4197 		    "Empty configuration notification bitmap\n");
4198 		return;
4199 	}
4200 
4201 	for (bit = ffsll(bitmap); bit != 0; bit = ffsll(bitmap)) {
4202 		bit--;
4203 		ena_log(adapter->pdev, INFO,
4204 		    "Sub-optimal configuration notification code: %" PRIu64 " Refer to AWS ENA documentation for additional details and mitigation options.\n",
4205 		    bit + 1);
4206 		// Clear the processed bit
4207 		bitmap &= ~(1UL << bit);
4208 	}
4209 }
4210 
4211 static void ena_admin_device_request_reset(void *adapter_data,
4212     struct ena_admin_aenq_entry *aenq_e)
4213 {
4214 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4215 	ena_log(adapter->pdev, WARN,
4216 	    "The device has detected an unhealthy state, reset is requested\n");
4217 	ena_trigger_reset(adapter, ENA_REGS_RESET_DEVICE_REQUEST);
4218 }
4219 
/*
 * Dispatch table for asynchronous event notifications (AENQ) delivered by
 * the device.  Events without a dedicated entry fall through to
 * unimplemented_aenq_handler().
 */
static struct ena_aenq_handlers aenq_handlers = {
    .handlers = {
	    [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
	    [ENA_ADMIN_NOTIFICATION] = ena_notification,
	    [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	    [ENA_ADMIN_CONF_NOTIFICATIONS] = ena_conf_notification,
	    [ENA_ADMIN_DEVICE_REQUEST_RESET] = ena_admin_device_request_reset,
    },
    .unimplemented_handler = unimplemented_aenq_handler
};
4230 
4231 /*********************************************************************
4232  *  FreeBSD Device Interface Entry Points
4233  *********************************************************************/
4234 
4235 static device_method_t ena_methods[] = { /* Device interface */
4236 	DEVMETHOD(device_probe, ena_probe),
4237 	DEVMETHOD(device_attach, ena_attach),
4238 	DEVMETHOD(device_detach, ena_detach), DEVMETHOD_END
4239 };
4240 
/* Driver declaration: name, method table, and per-device softc size. */
static driver_t ena_driver = {
	"ena",
	ena_methods,
	sizeof(struct ena_adapter),
};
4246 
/* Register the driver on the PCI bus. */
DRIVER_MODULE(ena, pci, ena_driver, 0, 0);
/* Export PNP match data so the module can be autoloaded for known devices. */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
    nitems(ena_vendor_info_array) - 1);
MODULE_DEPEND(ena, pci, 1, 1, 1);
MODULE_DEPEND(ena, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ena, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
4255 
4256 /*********************************************************************/
4257