xref: /freebsd/sys/dev/ena/ena.c (revision 8a573700af75fc60b59bd29b0a014c1b1e7afcc6)
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/kthread.h>
39 #include <sys/malloc.h>
40 #include <sys/mbuf.h>
41 #include <sys/module.h>
42 #include <sys/rman.h>
43 #include <sys/smp.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 #include <sys/taskqueue.h>
48 #include <sys/time.h>
49 #include <sys/eventhandler.h>
50 
51 #include <machine/bus.h>
52 #include <machine/resource.h>
53 #include <machine/in_cksum.h>
54 
55 #include <net/bpf.h>
56 #include <net/ethernet.h>
57 #include <net/if.h>
58 #include <net/if_var.h>
59 #include <net/if_arp.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/rss_config.h>
63 #include <net/if_types.h>
64 #include <net/if_vlan_var.h>
65 
66 #include <netinet/in_rss.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/in.h>
69 #include <netinet/if_ether.h>
70 #include <netinet/ip.h>
71 #include <netinet/ip6.h>
72 #include <netinet/tcp.h>
73 #include <netinet/udp.h>
74 
75 #include <dev/pci/pcivar.h>
76 #include <dev/pci/pcireg.h>
77 
78 #include "ena.h"
79 #include "ena_sysctl.h"
80 
81 /*********************************************************
82  *  Function prototypes
83  *********************************************************/
84 static int	ena_probe(device_t);
85 static void	ena_intr_msix_mgmnt(void *);
86 static int	ena_allocate_pci_resources(struct ena_adapter*);
87 static void	ena_free_pci_resources(struct ena_adapter *);
88 static int	ena_change_mtu(if_t, int);
89 static inline void ena_alloc_counters(counter_u64_t *, int);
90 static inline void ena_free_counters(counter_u64_t *, int);
91 static inline void ena_reset_counters(counter_u64_t *, int);
92 static void	ena_init_io_rings_common(struct ena_adapter *,
93     struct ena_ring *, uint16_t);
94 static int	ena_init_io_rings(struct ena_adapter *);
95 static void	ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
96 static void	ena_free_all_io_rings_resources(struct ena_adapter *);
97 static int	ena_setup_tx_dma_tag(struct ena_adapter *);
98 static int	ena_free_tx_dma_tag(struct ena_adapter *);
99 static int	ena_setup_rx_dma_tag(struct ena_adapter *);
100 static int	ena_free_rx_dma_tag(struct ena_adapter *);
101 static int	ena_setup_tx_resources(struct ena_adapter *, int);
102 static void	ena_free_tx_resources(struct ena_adapter *, int);
103 static int	ena_setup_all_tx_resources(struct ena_adapter *);
104 static void	ena_free_all_tx_resources(struct ena_adapter *);
105 static int	ena_setup_rx_resources(struct ena_adapter *, unsigned int);
106 static void	ena_free_rx_resources(struct ena_adapter *, unsigned int);
107 static int	ena_setup_all_rx_resources(struct ena_adapter *);
108 static void	ena_free_all_rx_resources(struct ena_adapter *);
109 static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
110     struct ena_rx_buffer *);
111 static void	ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
112     struct ena_rx_buffer *);
113 static int	ena_refill_rx_bufs(struct ena_ring *, uint32_t);
114 static void	ena_free_rx_bufs(struct ena_adapter *, unsigned int);
115 static void	ena_refill_all_rx_bufs(struct ena_adapter *);
116 static void	ena_free_all_rx_bufs(struct ena_adapter *);
117 static void	ena_free_tx_bufs(struct ena_adapter *, unsigned int);
118 static void	ena_free_all_tx_bufs(struct ena_adapter *);
119 static void	ena_destroy_all_tx_queues(struct ena_adapter *);
120 static void	ena_destroy_all_rx_queues(struct ena_adapter *);
121 static void	ena_destroy_all_io_queues(struct ena_adapter *);
122 static int	ena_create_io_queues(struct ena_adapter *);
123 static int	ena_tx_cleanup(struct ena_ring *);
124 static int	ena_rx_cleanup(struct ena_ring *);
125 static int	validate_tx_req_id(struct ena_ring *, uint16_t);
126 static void	ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *,
127     struct mbuf *);
128 static struct mbuf* ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *,
129     struct ena_com_rx_ctx *, uint16_t *);
130 static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *,
131     struct mbuf *);
132 static void	ena_handle_msix(void *);
133 static int	ena_enable_msix(struct ena_adapter *);
134 static void	ena_setup_mgmnt_intr(struct ena_adapter *);
135 static void	ena_setup_io_intr(struct ena_adapter *);
136 static int	ena_request_mgmnt_irq(struct ena_adapter *);
137 static int	ena_request_io_irq(struct ena_adapter *);
138 static void	ena_free_mgmnt_irq(struct ena_adapter *);
139 static void	ena_free_io_irq(struct ena_adapter *);
140 static void	ena_free_irqs(struct ena_adapter*);
141 static void	ena_disable_msix(struct ena_adapter *);
142 static void	ena_unmask_all_io_irqs(struct ena_adapter *);
143 static int	ena_rss_configure(struct ena_adapter *);
144 static void	ena_update_hw_stats(void *, int);
145 static int	ena_up_complete(struct ena_adapter *);
146 static int	ena_up(struct ena_adapter *);
147 static void	ena_down(struct ena_adapter *);
148 static uint64_t	ena_get_counter(if_t, ift_counter);
149 static int	ena_media_change(if_t);
150 static void	ena_media_status(if_t, struct ifmediareq *);
151 static void	ena_init(void *);
152 static int	ena_ioctl(if_t, u_long, caddr_t);
153 static int	ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
154 static void	ena_update_host_info(struct ena_admin_host_info *, if_t);
155 static void	ena_update_hwassist(struct ena_adapter *);
156 static int	ena_setup_ifnet(device_t, struct ena_adapter *,
157     struct ena_com_dev_get_features_ctx *);
158 static void	ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *);
159 static int	ena_xmit_mbuf(struct ena_ring *, struct mbuf **);
160 static void	ena_start_xmit(struct ena_ring *);
161 static int	ena_mq_start(if_t, struct mbuf *);
162 static void	ena_deferred_mq_start(void *, int);
163 static void	ena_qflush(if_t);
164 static int	ena_calc_io_queue_num(struct ena_adapter *,
165     struct ena_com_dev_get_features_ctx *);
166 static int	ena_calc_queue_size(struct ena_adapter *, uint16_t *,
167     uint16_t *, struct ena_com_dev_get_features_ctx *);
168 static int	ena_rss_init_default(struct ena_adapter *);
169 static void	ena_rss_init_default_deferred(void *);
170 static void	ena_config_host_info(struct ena_com_dev *);
171 static int	ena_attach(device_t);
172 static int	ena_detach(device_t);
173 static int	ena_device_init(struct ena_adapter *, device_t,
174     struct ena_com_dev_get_features_ctx *, int *);
175 static int	ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *,
176     int);
177 static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
178 static void	unimplemented_aenq_handler(void *,
179     struct ena_admin_aenq_entry *);
180 static void	ena_timer_service(void *);
181 
182 static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;
183 
184 static SYSCTL_NODE(_hw, OID_AUTO, ena, CTLFLAG_RD, 0, "ENA driver parameters");
185 
186 /*
187  * Tunable number of buffers in the buf-ring (drbr)
188  */
189 static int ena_buf_ring_size = 4096;
190 SYSCTL_INT(_hw_ena, OID_AUTO, buf_ring_size, CTLFLAG_RWTUN,
191     &ena_buf_ring_size, 0, "Size of the bufring");
192 
193 
194 static ena_vendor_info_t ena_vendor_info_array[] = {
195     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
196     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0},
197     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
198     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0},
199     /* Last entry */
200     { 0, 0, 0 }
201 };
202 
203 /*
204  * Contains pointers to event handlers, e.g. link state change.
205  */
206 static struct ena_aenq_handlers aenq_handlers;
207 
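/**
 * ena_dmamap_callback - bus_dmamap_load() callback
 *
 * Stores the bus address of the first (and only) DMA segment in the
 * bus_addr_t pointed to by @arg; on error the destination is left
 * untouched.
 **/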
208 void
209 ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
210 {
211 	if (error)
212 		return;
213 	*(bus_addr_t *) arg = segs[0].ds_addr;
214 	return;
215 }
216 
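/**
 * ena_dma_alloc - allocate a DMA-coherent memory region
 * @dmadev: device on whose behalf the allocation is made
 * @size: requested size; the DMA tag is sized up to whole pages
 * @dma: handle filled with the resulting tag, map, vaddr and paddr
 * @mapflags: flags passed through to bus_dmamap_load()
 *
 * Returns 0 on success, or an errno value on failure.
 **/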
217 int
218 ena_dma_alloc(device_t dmadev, bus_size_t size,
219     ena_mem_handle_t *dma , int mapflags)
220 {
221 	struct ena_adapter* adapter = device_get_softc(dmadev);
222 	uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE;
223 	uint64_t dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
224 	int error;
225 
226 	if (dma_space_addr == 0)
227 		dma_space_addr = BUS_SPACE_MAXADDR;
228 	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
229 	    8, 0,	      /* alignment, bounds 		*/
230 	    dma_space_addr,   /* lowaddr of exclusion window	*/
231 	    BUS_SPACE_MAXADDR,/* highaddr of exclusion window	*/
232 	    NULL, NULL,	      /* filter, filterarg 		*/
233 	    maxsize,	      /* maxsize 			*/
234 	    1,		      /* nsegments 			*/
235 	    maxsize,	      /* maxsegsize 			*/
236 	    BUS_DMA_ALLOCNOW, /* flags 				*/
237 	    NULL,	      /* lockfunc 			*/
238 	    NULL,	      /* lockarg 			*/
239 	    &dma->tag);
240 	if (error) {
241 		device_printf(dmadev,
242 		"%s: bus_dma_tag_create failed: %d\n",
243 		__func__, error);
244 		goto fail_tag;
245 	}
246 
247 	error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
248 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
249 	if (error) {
250 		device_printf(dmadev,
251 		"%s: bus_dmamem_alloc(%ju) failed: %d\n",
252 		__func__, (uintmax_t)size, error);
253 		goto fail_map_create;
254 	}
255 
256 	dma->paddr = 0;
257 	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
258 	    size, ena_dmamap_callback, &dma->paddr, mapflags);
259 	if (error || dma->paddr == 0) {
260 		device_printf(dmadev,
261 		"%s: bus_dmamap_load failed: %d\n",
262 		__func__, error);
263 		goto fail_map_load;
264 	}
265 
266 	return (0);
267 
268 fail_map_load:
269 	bus_dmamap_unload(dma->tag, dma->map);
270 fail_map_create:
271 	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
272 	bus_dma_tag_destroy(dma->tag);
273 fail_tag:
274 	dma->tag = NULL;
275 
276 	return (error);
277 }
278 
279 static int
280 ena_allocate_pci_resources(struct ena_adapter* adapter)
281 {
282 	device_t pdev = adapter->pdev;
283 	int rid;
284 
285 	rid = PCIR_BAR(ENA_REG_BAR);
286 	adapter->memory = NULL;
287 	adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
288 	    &rid, RF_ACTIVE);
289 	if (adapter->registers == NULL) {
290 		device_printf(pdev, "Unable to allocate bus resource: "
291 		    "registers\n");
292 		return (ENXIO);
293 	}
294 
295 	return (0);
296 }
297 
298 static void
299 ena_free_pci_resources(struct ena_adapter *adapter)
300 {
301 	device_t pdev = adapter->pdev;
302 
303 	if (adapter->memory != NULL) {
304 		bus_release_resource(pdev, SYS_RES_MEMORY,
305 		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
306 	}
307 
308 	if (adapter->registers != NULL) {
309 		bus_release_resource(pdev, SYS_RES_MEMORY,
310 		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
311 	}
312 
313 	return;
314 }
315 
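/**
 * ena_probe - match the PCI device against supported ENA IDs
 * @dev: device being probed
 *
 * Walks ena_vendor_info_array and claims the device when both the
 * vendor and device IDs match one of the supported ENA functions.
 **/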
316 static int
317 ena_probe(device_t dev)
318 {
319 	ena_vendor_info_t *ent;
320 	char		adapter_name[60];
321 	uint16_t	pci_vendor_id = 0;
322 	uint16_t	pci_device_id = 0;
323 
324 	pci_vendor_id = pci_get_vendor(dev);
325 	pci_device_id = pci_get_device(dev);
326 
327 	ent = ena_vendor_info_array;
328 	while (ent->vendor_id != 0) {
329 		if ((pci_vendor_id == ent->vendor_id) &&
330 		    (pci_device_id == ent->device_id)) {
331 			ena_trace(ENA_DBG, "vendor=%x device=%x ",
332 			    pci_vendor_id, pci_device_id);
333 
334 			sprintf(adapter_name, DEVICE_DESC);
335 			device_set_desc_copy(dev, adapter_name);
336 			return (BUS_PROBE_DEFAULT);
337 		}
338 
339 		ent++;
340 
341 	}
342 
343 	return (ENXIO);
344 }
345 
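/**
 * ena_change_mtu - validate and apply a new MTU
 * @ifp: network interface
 * @new_mtu: requested MTU
 *
 * Rejects values outside [ENA_MIN_FRAME_LEN, dev_attr.max_mtu] or values
 * for which the resulting frame would exceed ENA_MAX_FRAME_LEN, restoring
 * the previous MTU on failure.
 **/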
346 static int
347 ena_change_mtu(if_t ifp, int new_mtu)
348 {
349 	struct ena_adapter *adapter = if_getsoftc(ifp);
350 	struct ena_com_dev_get_features_ctx get_feat_ctx;
351 	int rc, old_mtu, max_frame;
352 
353 	rc = ena_com_get_dev_attr_feat(adapter->ena_dev, &get_feat_ctx);
354 	if (rc) {
355 		device_printf(adapter->pdev,
356 		    "Cannot get attribute for ena device\n");
357 		return (ENXIO);
358 	}
359 
360 	/* Save the old MTU in case of failure */
361 	old_mtu = if_getmtu(ifp);
362 
363 	/* Change MTU and calculate max frame */
364 	if_setmtu(ifp, new_mtu);
365 	max_frame = ETHER_MAX_FRAME(ifp, ETHERTYPE_VLAN, 1);
366 
367 	if ((new_mtu < ENA_MIN_FRAME_LEN) ||
368 	    (new_mtu > get_feat_ctx.dev_attr.max_mtu) ||
369 	    (max_frame > ENA_MAX_FRAME_LEN)) {
370 		device_printf(adapter->pdev, "Invalid MTU setting. "
371 		    "new_mtu: %d\n", new_mtu);
372 		goto error;
373 	}
374 
375 	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
376 	if (rc != 0)
377 		goto error;
378 
379 	return (0);
380 error:
381 	if_setmtu(ifp, old_mtu);
382 	return (EINVAL);
383 }
384 
385 static inline void
386 ena_alloc_counters(counter_u64_t *begin, int size)
387 {
388 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
389 
390 	for (; begin < end; ++begin)
391 		*begin = counter_u64_alloc(M_WAITOK);
392 }
393 
394 static inline void
395 ena_free_counters(counter_u64_t *begin, int size)
396 {
397 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
398 
399 	for (; begin < end; ++begin)
400 		counter_u64_free(*begin);
401 }
402 
403 static inline void
404 ena_reset_counters(counter_u64_t *begin, int size)
405 {
406 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
407 
408 	for (; begin < end; ++begin)
409 		counter_u64_zero(*begin);
410 }
411 
412 static void
413 ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
414     uint16_t qid)
415 {
416 
417 	ring->qid = qid;
418 	ring->adapter = adapter;
419 	ring->ena_dev = adapter->ena_dev;
420 }
421 
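/**
 * ena_init_io_rings - initialize software state of all Tx/Rx rings
 * @adapter: network interface device structure
 *
 * Sets up ring sizes, interrupt moderation intervals, buf rings,
 * statistics counters and per-ring locks, then links each Tx/Rx pair
 * to its queue structure.
 **/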
422 static int
423 ena_init_io_rings(struct ena_adapter *adapter)
424 {
425 	struct ena_com_dev *ena_dev;
426 	struct ena_ring *txr, *rxr;
427 	struct ena_que *que;
428 	int i;
429 	int rc;
430 
431 	ena_dev = adapter->ena_dev;
432 
433 	for (i = 0; i < adapter->num_queues; i++) {
434 		txr = &adapter->tx_ring[i];
435 		rxr = &adapter->rx_ring[i];
436 
437 		/* TX/RX common ring state */
438 		ena_init_io_rings_common(adapter, txr, i);
439 		ena_init_io_rings_common(adapter, rxr, i);
440 
441 		/* TX specific ring state */
442 		txr->ring_size = adapter->tx_ring_size;
443 		txr->tx_max_header_size = ena_dev->tx_max_header_size;
444 		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
445 		txr->smoothed_interval =
446 		    ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
447 
448 		/* Allocate a buf ring */
449 		txr->br = buf_ring_alloc(ena_buf_ring_size, M_DEVBUF,
450 		    M_WAITOK, &txr->ring_mtx);
451 		if (txr->br == NULL) {
452 			device_printf(adapter->pdev,
453 			    "Error while setting up bufring\n");
454 			rc = ENOMEM;
455 			goto err_bufr_free;
456 		}
457 
458 		/* Alloc TX statistics. */
459 		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
460 		    sizeof(txr->tx_stats));
461 
462 		/* RX specific ring state */
463 		rxr->ring_size = adapter->rx_ring_size;
464 		rxr->rx_small_copy_len = adapter->small_copy_len;
465 		rxr->smoothed_interval =
466 		    ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
467 
468 		/* Alloc RX statistics. */
469 		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
470 		    sizeof(rxr->rx_stats));
471 
472 		/* Initialize locks */
473 		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
474 		    device_get_nameunit(adapter->pdev), i);
475 		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
476 		    device_get_nameunit(adapter->pdev), i);
477 
478 		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
479 
480 		que = &adapter->que[i];
481 		que->adapter = adapter;
482 		que->id = i;
483 		que->tx_ring = txr;
484 		que->rx_ring = rxr;
485 
486 		txr->que = que;
487 		rxr->que = que;
488 	}
489 
490 	return 0;
491 
492 err_bufr_free:
493 	while (i--)
494 		ena_free_io_ring_resources(adapter, i);
495 
496 	return (rc);
497 }
498 
499 static void
500 ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
501 {
502 	struct ena_ring *txr = &adapter->tx_ring[qid];
503 	struct ena_ring *rxr = &adapter->rx_ring[qid];
504 
505 	ena_free_counters((counter_u64_t *)&txr->tx_stats,
506 	    sizeof(txr->tx_stats));
507 	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
508 	    sizeof(rxr->rx_stats));
509 
510 	mtx_destroy(&txr->ring_mtx);
511 
512 	drbr_free(txr->br, M_DEVBUF);
513 
514 }
515 
516 static void
517 ena_free_all_io_rings_resources(struct ena_adapter *adapter)
518 {
519 	int i;
520 
521 	for (i = 0; i < adapter->num_queues; i++)
522 		ena_free_io_ring_resources(adapter, i);
523 
524 }
525 
526 static int
527 ena_setup_tx_dma_tag(struct ena_adapter *adapter)
528 {
529 	int ret;
530 
531 	/* Create DMA tag for Tx buffers */
532 	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
533 	    1, 0,				  /* alignment, bounds 	     */
534 	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
535 	    BUS_SPACE_MAXADDR, 			  /* highaddr of excl window */
536 	    NULL, NULL,				  /* filter, filterarg 	     */
537 	    ENA_TSO_MAXSIZE,			  /* maxsize 		     */
538 	    adapter->max_tx_sgl_size - 1,	  /* nsegments 		     */
539 	    ENA_TSO_MAXSIZE,			  /* maxsegsize 	     */
540 	    0,					  /* flags 		     */
541 	    NULL,				  /* lockfunc 		     */
542 	    NULL,				  /* lockfuncarg 	     */
543 	    &adapter->tx_buf_tag);
544 
545 	if (ret != 0)
546 		device_printf(adapter->pdev, "Unable to create Tx DMA tag\n");
547 
548 	return (ret);
549 }
550 
551 static int
552 ena_free_tx_dma_tag(struct ena_adapter *adapter)
553 {
554 	int ret;
555 
556 	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
557 
558 	if (ret == 0)
559 		adapter->tx_buf_tag = NULL;
560 
561 	return (ret);
562 }
563 
564 static int
565 ena_setup_rx_dma_tag(struct ena_adapter *adapter)
566 {
567 	int ret;
568 
569 	/* Create DMA tag for Rx buffers*/
570 	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
571 	    1, 0,				  /* alignment, bounds 	     */
572 	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
573 	    BUS_SPACE_MAXADDR, 			  /* highaddr of excl window */
574 	    NULL, NULL,				  /* filter, filterarg 	     */
575 	    MJUM16BYTES,			  /* maxsize 		     */
576 	    1,					  /* nsegments 		     */
577 	    MJUM16BYTES,			  /* maxsegsize 	     */
578 	    0,					  /* flags 		     */
579 	    NULL,				  /* lockfunc 		     */
580 	    NULL,				  /* lockarg 		     */
581 	    &adapter->rx_buf_tag);
582 
583 	if (ret != 0)
584 		device_printf(adapter->pdev, "Unable to create Rx DMA tag\n");
585 
586 	return (ret);
587 }
588 
589 static int
590 ena_free_rx_dma_tag(struct ena_adapter *adapter)
591 {
592 	int ret;
593 
594 	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
595 
596 	if (ret == 0)
597 		adapter->rx_buf_tag = NULL;
598 
599 	return (ret);
600 }
601 
602 
603 /**
604  * ena_setup_tx_resources - allocate Tx resources (Descriptors)
605  * @adapter: network interface device structure
606  * @qid: queue index
607  *
608  * Returns 0 on success, or an errno value on failure.
609  **/
610 static int
611 ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
612 {
613 	struct ena_que *que = &adapter->que[qid];
614 	struct ena_ring *tx_ring = que->tx_ring;
615 	int size, i, err;
616 #ifdef	RSS
617 	cpuset_t cpu_mask;
618 #endif
619 
620 	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
621 
622 	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
623 	if (!tx_ring->tx_buffer_info)
624 		goto err_tx_buffer_info;
625 
626 	size = sizeof(uint16_t) * tx_ring->ring_size;
627 	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
628 	if (!tx_ring->free_tx_ids)
629 		goto err_tx_reqs;
630 
631 	/* Req id stack for TX OOO completions */
632 	for (i = 0; i < tx_ring->ring_size; i++)
633 		tx_ring->free_tx_ids[i] = i;
634 
635 	/* Reset TX statistics. */
636 	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
637 	    sizeof(tx_ring->tx_stats));
638 
639 	tx_ring->next_to_use = 0;
640 	tx_ring->next_to_clean = 0;
641 
642 	/* Make sure that drbr is empty */
643 	ENA_RING_MTX_LOCK(tx_ring);
644 	drbr_flush(adapter->ifp, tx_ring->br);
645 	ENA_RING_MTX_UNLOCK(tx_ring);
646 
647 	/* ... and create the buffer DMA maps */
648 	for (i = 0; i < tx_ring->ring_size; i++) {
649 		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
650 		    &tx_ring->tx_buffer_info[i].map);
651 		if (err != 0) {
652 			device_printf(adapter->pdev,
653 			    "Unable to create Tx DMA map for buffer %d\n", i);
654 			goto err_tx_map;
655 		}
656 	}
657 
658 	/* Allocate taskqueues */
659 	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
660 	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
661 	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
662 	if (tx_ring->enqueue_tq == NULL) {
663 		device_printf(adapter->pdev,
664 		    "Unable to create taskqueue for enqueue task\n");
665 		i = tx_ring->ring_size;
666 		goto err_tx_map;
667 	}
668 
669 	/* RSS set cpu for thread */
670 #ifdef RSS
671 	CPU_SETOF(que->cpu, &cpu_mask);
672 	taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
673 	    &cpu_mask, "%s tx_ring enq (bucket %d)",
674 	    device_get_nameunit(adapter->pdev), que->cpu);
675 #else /* RSS */
676 	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
677 	    "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);
678 #endif /* RSS */
679 
680 	return (0);
681 
682 err_tx_map:
683 	while (i--) {
684 		bus_dmamap_destroy(adapter->tx_buf_tag,
685 		    tx_ring->tx_buffer_info[i].map);
686 	}
687 	ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->free_tx_ids);
688 err_tx_reqs:
689 	ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->tx_buffer_info);
690 err_tx_buffer_info:
691 	return (ENOMEM);
692 }
693 
694 /**
695  * ena_free_tx_resources - Free Tx Resources per Queue
696  * @adapter: network interface device structure
697  * @qid: queue index
698  *
699  * Free all transmit software resources
700  **/
701 static void
702 ena_free_tx_resources(struct ena_adapter *adapter, int qid)
703 {
704 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
705 
706 	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
707 	    NULL))
708 		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
709 
710 	taskqueue_free(tx_ring->enqueue_tq);
711 
712 	ENA_RING_MTX_LOCK(tx_ring);
713 	/* Flush buffer ring, */
714 	drbr_flush(adapter->ifp, tx_ring->br);
715 
716 	/* Free buffer DMA maps, */
717 	for (int i = 0; i < tx_ring->ring_size; i++) {
718 		m_freem(tx_ring->tx_buffer_info[i].mbuf);
719 		tx_ring->tx_buffer_info[i].mbuf = NULL;
720 		bus_dmamap_unload(adapter->tx_buf_tag,
721 		    tx_ring->tx_buffer_info[i].map);
722 		bus_dmamap_destroy(adapter->tx_buf_tag,
723 		    tx_ring->tx_buffer_info[i].map);
724 	}
725 	ENA_RING_MTX_UNLOCK(tx_ring);
726 
727 	/* And free allocated memory. */
728 	ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->tx_buffer_info);
729 	tx_ring->tx_buffer_info = NULL;
730 
731 	ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->free_tx_ids);
732 	tx_ring->free_tx_ids = NULL;
733 }
734 
735 /**
736  * ena_setup_all_tx_resources - allocate all queues Tx resources
737  * @adapter: network interface device structure
738  *
739  * Returns 0 on success, or an errno value on failure.
740  **/
741 static int
742 ena_setup_all_tx_resources(struct ena_adapter *adapter)
743 {
744 	int i, rc;
745 
746 	for (i = 0; i < adapter->num_queues; i++) {
747 		rc = ena_setup_tx_resources(adapter, i);
748 		if (!rc)
749 			continue;
750 
751 		device_printf(adapter->pdev,
752 		    "Allocation for Tx Queue %u failed\n", i);
753 		goto err_setup_tx;
754 	}
755 
756 	return (0);
757 
758 err_setup_tx:
759 	/* Rewind the index freeing the rings as we go */
760 	while (i--)
761 		ena_free_tx_resources(adapter, i);
762 	return (rc);
763 }
764 
765 /**
766  * ena_free_all_tx_resources - Free Tx Resources for All Queues
767  * @adapter: network interface device structure
768  *
769  * Free all transmit software resources
770  **/
771 static void
772 ena_free_all_tx_resources(struct ena_adapter *adapter)
773 {
774 	int i;
775 
776 	for (i = 0; i < adapter->num_queues; i++)
777 		ena_free_tx_resources(adapter, i);
778 
779 	return;
780 }
781 
782 /**
783  * ena_setup_rx_resources - allocate Rx resources (Descriptors)
784  * @adapter: network interface device structure
785  * @qid: queue index
786  *
787  * Returns 0 on success, or an errno value on failure.
788  **/
789 static int
790 ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
791 {
792 	struct ena_que *que = &adapter->que[qid];
793 	struct ena_ring *rx_ring = que->rx_ring;
794 	int size, err, i;
795 #ifdef	RSS
796 	cpuset_t cpu_mask;
797 #endif
798 
799 	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
800 
801 	/*
802 	 * Allocate an extra element so that in the rx path
803 	 * we can always prefetch rx_info + 1
804 	 */
805 	size += sizeof(struct ena_rx_buffer);
806 
807 	rx_ring->rx_buffer_info = ENA_MEM_ALLOC(adapter->ena_dev->dmadev, size);
808 	if (!rx_ring->rx_buffer_info)
809 		return (ENOMEM);
810 
811 	/* Reset RX statistics. */
812 	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
813 	    sizeof(rx_ring->rx_stats));
814 
815 	rx_ring->next_to_clean = 0;
816 	rx_ring->next_to_use = 0;
817 
818 	/* ... and create the buffer DMA maps */
819 	for (i = 0; i < rx_ring->ring_size; i++) {
820 		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
821 		    &(rx_ring->rx_buffer_info[i].map));
822 		if (err != 0) {
823 			device_printf(adapter->pdev,
824 			    "Unable to create Rx DMA map for buffer %d\n", i);
825 			goto err_rx_dma;
826 		}
827 	}
828 
829 	/* Create LRO for the ring */
830 	if (adapter->ifp->if_capenable & IFCAP_LRO) {
831 		int err = tcp_lro_init(&rx_ring->lro);
832 		if (err) {
833 			device_printf(adapter->pdev,
834 			    "LRO[%d] Initialization failed!\n", qid);
835 		} else {
836 			ena_trace(ENA_INFO,
837 			    "RX Soft LRO[%d] Initialized\n", qid);
838 			rx_ring->lro.ifp = adapter->ifp;
839 		}
840 	}
841 
842 	return (0);
843 
844 err_rx_dma:
845 	while (i--) {
846 		bus_dmamap_destroy(adapter->rx_buf_tag,
847 		    rx_ring->rx_buffer_info[i].map);
848 	}
849 
850 	ENA_MEM_FREE(adapter->ena_dev->dmadev, rx_ring->rx_buffer_info);
851 	rx_ring->rx_buffer_info = NULL;
852 	ena_trace(ENA_ALERT, "RX resource allocation fail");
853 	return (ENOMEM);
854 }
855 
856 /**
857  * ena_free_rx_resources - Free Rx Resources
858  * @adapter: network interface device structure
859  * @qid: queue index
860  *
861  * Free all receive software resources
862  **/
863 static void
864 ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
865 {
866 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
867 
868 	ena_trace(ENA_INFO, "%s qid %d\n", __func__, qid);
869 
870 	/* Free buffer DMA maps, */
871 	for (int i = 0; i < rx_ring->ring_size; i++) {
872 		m_freem(rx_ring->rx_buffer_info[i].mbuf);
873 		rx_ring->rx_buffer_info[i].mbuf = NULL;
874 		bus_dmamap_unload(adapter->rx_buf_tag,
875 		    rx_ring->rx_buffer_info[i].map);
876 		bus_dmamap_destroy(adapter->rx_buf_tag,
877 		    rx_ring->rx_buffer_info[i].map);
878 	}
879 
880 	/* free LRO resources, */
881 	tcp_lro_free(&rx_ring->lro);
882 
883 	/* free allocated memory */
884 	ENA_MEM_FREE(adapter->ena_dev->dmadev, rx_ring->rx_buffer_info);
885 	rx_ring->rx_buffer_info = NULL;
886 
887 	return;
888 }
889 
890 /**
891  * ena_setup_all_rx_resources - allocate all queues Rx resources
892  * @adapter: network interface device structure
893  *
894  * Returns 0 on success, or an errno value on failure.
895  **/
896 static int
897 ena_setup_all_rx_resources(struct ena_adapter *adapter)
898 {
899 	int i, rc = 0;
900 
901 	for (i = 0; i < adapter->num_queues; i++) {
902 		rc = ena_setup_rx_resources(adapter, i);
903 		if (!rc)
904 			continue;
905 
906 		device_printf(adapter->pdev,
907 		    "Allocation for Rx Queue %u failed\n", i);
908 		goto err_setup_rx;
909 	}
910 	return (0);
911 
912 err_setup_rx:
913 	/* rewind the index freeing the rings as we go */
914 	while (i--)
915 		ena_free_rx_resources(adapter, i);
916 	return (rc);
917 }
918 
919 /**
920  * ena_free_all_rx_resources - Free Rx resources for all queues
921  * @adapter: network interface device structure
922  *
923  * Free all receive software resources
924  **/
925 static void
926 ena_free_all_rx_resources(struct ena_adapter *adapter)
927 {
928 	int i;
929 
930 	for (i = 0; i < adapter->num_queues; i++)
931 		ena_free_rx_resources(adapter, i);
932 
933 	return;
934 }
935 
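/**
 * ena_alloc_rx_mbuf - allocate and DMA-map a single Rx mbuf
 * @adapter: network interface device structure
 * @rx_ring: ring the buffer is allocated for
 * @rx_info: buffer descriptor that receives the mbuf and its mapping
 *
 * Allocates a 16KB jumbo cluster mbuf, maps it into a single DMA
 * segment and records the bus address in the associated ena_com_buf.
 **/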
936 static inline int
937 ena_alloc_rx_mbuf(struct ena_adapter *adapter,
938     struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
939 {
940 	struct ena_com_buf *ena_buf;
941 	bus_dma_segment_t segs[1];
942 	int nsegs, error;
943 
944 	/* if the previously allocated mbuf is not used yet, keep it */
945 	if (rx_info->mbuf != NULL)
946 		return (0);
947 
948 	/* Get mbuf using UMA allocator */
949 	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES);
950 
951 	if (!rx_info->mbuf) {
952 		counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
953 		return (ENOMEM);
954 	}
955 	/* Set mbuf length */
956 	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = MJUM16BYTES;
957 
958 	/* Map packets for DMA */
959 	ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
960 	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d",
961 	    adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
962 	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
963 	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
964 	if (error || (nsegs != 1)) {
965 		device_printf(adapter->pdev, "failed to map mbuf, error: %d, "
966 		    "nsegs: %d\n", error, nsegs);
967 		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
968 		goto exit;
969 
970 	}
971 
972 	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
973 
974 	ena_buf = &rx_info->ena_buf;
975 	ena_buf->paddr = segs[0].ds_addr;
976 	ena_buf->len = MJUM16BYTES;
977 
978 	ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
979 	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
980 	    rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);
981 
982 	return (0);
983 
984 exit:
985 	m_freem(rx_info->mbuf);
986 	rx_info->mbuf = NULL;
987 	return (EFAULT);
988 }
989 
990 static void
991 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
992     struct ena_rx_buffer *rx_info)
993 {
994 
995 	if (!rx_info->mbuf)
996 		return;
997 
998 	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
999 	m_freem(rx_info->mbuf);
1000 	rx_info->mbuf = NULL;
1001 
1002 	return;
1003 }
1004 
1005 
1006 /**
1007  * ena_refill_rx_bufs - Refills ring with descriptors
1008  * @rx_ring: the ring which we want to feed with free descriptors
1009  * @num: number of descriptors to refill
1010  * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1011  **/
1012 static int
1013 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1014 {
1015 	struct ena_adapter *adapter = rx_ring->adapter;
1016 	uint16_t next_to_use;
1017 	uint32_t i;
1018 	int rc;
1019 
1020 	ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d",
1021 	    rx_ring->qid);
1022 
1023 	next_to_use = rx_ring->next_to_use;
1024 
1025 	for (i = 0; i < num; i++) {
1026 		ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC,
1027 		    "RX buffer - next to use: %d", next_to_use);
1028 
1029 		struct ena_rx_buffer *rx_info =
1030 		    &rx_ring->rx_buffer_info[next_to_use];
1031 
1032 		rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1033 		if (rc != 0) {
1034 			device_printf(adapter->pdev,
1035 			    "failed to alloc buffer for rx queue\n");
1036 			break;
1037 		}
1038 		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1039 		    &rx_info->ena_buf, next_to_use);
1040 		if (unlikely(rc)) {
1041 			device_printf(adapter->pdev,
1042 			    "failed to add buffer for rx queue %d\n",
1043 			    rx_ring->qid);
1044 			break;
1045 		}
1046 		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1047 		    rx_ring->ring_size);
1048 	}
1049 
1050 	if (i < num) {
1051 		counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1052 		device_printf(adapter->pdev,
1053 		    "refilled rx queue %d with only %d mbufs\n",
1054 		    rx_ring->qid, i);
1055 	}
1056 
1057 	if (i != 0) {
1058 		wmb();
1059 		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1060 	}
1061 	rx_ring->next_to_use = next_to_use;
1062 	return (i);
1063 }
1064 
1065 static void
1066 ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
1067 {
1068 	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1069 	unsigned int i;
1070 
1071 	for (i = 0; i < rx_ring->ring_size; i++) {
1072 		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1073 
1074 		if (rx_info->mbuf)
1075 			ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1076 	}
1077 
1078 	return;
1079 }
1080 
1081 /**
1082  * ena_refill_all_rx_bufs - allocate all queues Rx buffers
1083  * @adapter: network interface device structure
1084  *
1085  */
1086 static void
1087 ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1088 {
1089 	struct ena_ring *rx_ring;
1090 	int i, rc, bufs_num;
1091 
1092 	for (i = 0; i < adapter->num_queues; i++) {
1093 		rx_ring = &adapter->rx_ring[i];
1094 		bufs_num = rx_ring->ring_size - 1;
1095 		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1096 
1097 		if (unlikely(rc != bufs_num))
1098 			device_printf(adapter->pdev,
1099 			    "refilling queue %d failed: allocated %d buffers"
1100 			    " out of %d\n", i, rc, bufs_num);
1101 	}
1102 }
1103 
1104 static void
1105 ena_free_all_rx_bufs(struct ena_adapter *adapter)
1106 {
1107 	int i;
1108 
1109 	for (i = 0; i < adapter->num_queues; i++)
1110 		ena_free_rx_bufs(adapter, i);
1111 	return;
1112 }
1113 
1114 /**
1115  * ena_free_tx_bufs - Free Tx Buffers per Queue
1116  * @adapter: network interface device structure
1117  * @qid: queue index
1118  **/
1119 static void
1120 ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
1121 {
1122 	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1123 
1124 	ENA_RING_MTX_LOCK(tx_ring);
1125 	for (int i = 0; i < tx_ring->ring_size; i++) {
1126 		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1127 
1128 		if (tx_info->mbuf == NULL)
1129 			continue;
1130 
1131 		ena_trace(ENA_DBG | ENA_TXPTH | ENA_RSC,
1132 		    "free uncompleted Tx mbufs qid[%d] idx: 0x%x", qid, i);
1133 
1134 		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
1135 		m_free(tx_info->mbuf);
1136 		tx_info->mbuf = NULL;
1137 	}
1138 	ENA_RING_MTX_UNLOCK(tx_ring);
1139 
1140 	return;
1141 }
1142 
1143 static void
1144 ena_free_all_tx_bufs(struct ena_adapter *adapter)
1145 {
1146 
1147 	for (int i = 0; i < adapter->num_queues; i++)
1148 		ena_free_tx_bufs(adapter, i);
1149 
1150 	return;
1151 }
1152 
1153 static void
1154 ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1155 {
1156 	uint16_t ena_qid;
1157 	int i;
1158 
1159 	for (i = 0; i < adapter->num_queues; i++) {
1160 		ena_qid = ENA_IO_TXQ_IDX(i);
1161 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1162 	}
1163 }
1164 
1165 static void
1166 ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1167 {
1168 	uint16_t ena_qid;
1169 	int i;
1170 
1171 	for (i = 0; i < adapter->num_queues; i++) {
1172 		ena_qid = ENA_IO_RXQ_IDX(i);
1173 		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1174 	}
1175 }
1176 
1177 static void
1178 ena_destroy_all_io_queues(struct ena_adapter *adapter)
1179 {
1180 	ena_destroy_all_tx_queues(adapter);
1181 	ena_destroy_all_rx_queues(adapter);
1182 }
1183 
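/**
 * validate_tx_req_id - sanity check a Tx completion request id
 * @tx_ring: ring the completion arrived on
 * @req_id: request id reported by the device
 *
 * Returns 0 when req_id is within the ring and refers to an in-flight
 * mbuf; otherwise bumps the bad_req_id counter and returns EFAULT.
 **/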
1184 static int
1185 validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
1186 {
1187 	struct ena_tx_buffer *tx_info = NULL;
1188 
1189 	if (likely(req_id < tx_ring->ring_size)) {
1190 		tx_info = &tx_ring->tx_buffer_info[req_id];
1191 		if (tx_info->mbuf)
1192 			return 0;
1193 	}
1194 
1195 	counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
1196 
1197 	return (EFAULT);
1198 }
1199 
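/**
 * ena_create_io_queues - create the device-side Tx and Rx IO queues
 * @adapter: network interface device structure
 *
 * Creates one Tx and one Rx IO queue per configured queue pair via
 * ena_com and caches the SQ/CQ handlers in the rings, unwinding all
 * previously created queues on failure.
 **/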
1200 static int
1201 ena_create_io_queues(struct ena_adapter *adapter)
1202 {
1203 	struct ena_com_dev *ena_dev = adapter->ena_dev;
1204 	struct ena_com_create_io_ctx ctx;
1205 	struct ena_ring *ring;
1206 	uint16_t ena_qid;
1207 	uint32_t msix_vector;
1208 	int rc, i;
1209 
1210 	/* Create TX queues */
1211 	for (i = 0; i < adapter->num_queues; i++) {
1212 		msix_vector = ENA_IO_IRQ_IDX(i);
1213 		ena_qid = ENA_IO_TXQ_IDX(i);
1214 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1215 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1216 		ctx.queue_size = adapter->tx_ring_size;
1217 		ctx.msix_vector = msix_vector;
1218 		ctx.qid = ena_qid;
1219 		rc = ena_com_create_io_queue(ena_dev, &ctx);
1220 		if (rc) {
1221 			device_printf(adapter->pdev,
1222 			    "Failed to create io TX queue #%d rc: %d\n", i, rc);
1223 			goto err_tx;
1224 		}
1225 		ring = &adapter->tx_ring[i];
1226 		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1227 		    &ring->ena_com_io_sq,
1228 		    &ring->ena_com_io_cq);
1229 		if (rc) {
1230 			device_printf(adapter->pdev,
1231 			    "Failed to get TX queue handlers. TX queue num"
1232 			    " %d rc: %d\n", i, rc);
1233 			ena_com_destroy_io_queue(ena_dev, ena_qid);
1234 			goto err_tx;
1235 		}
1236 	}
1237 
1238 	/* Create RX queues */
1239 	for (i = 0; i < adapter->num_queues; i++) {
1240 		msix_vector = ENA_IO_IRQ_IDX(i);
1241 		ena_qid = ENA_IO_RXQ_IDX(i);
1242 		ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1243 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1244 		ctx.queue_size = adapter->rx_ring_size;
1245 		ctx.msix_vector = msix_vector;
1246 		ctx.qid = ena_qid;
1247 		rc = ena_com_create_io_queue(ena_dev, &ctx);
1248 		if (rc) {
1249 			device_printf(adapter->pdev,
1250 			    "Failed to create io RX queue[%d] rc: %d\n", i, rc);
1251 			goto err_rx;
1252 		}
1253 
1254 		ring = &adapter->rx_ring[i];
1255 		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1256 		    &ring->ena_com_io_sq,
1257 		    &ring->ena_com_io_cq);
1258 		if (rc) {
1259 			device_printf(adapter->pdev,
1260 			    "Failed to get RX queue handlers. RX queue num"
1261 			    " %d rc: %d\n", i, rc);
1262 			ena_com_destroy_io_queue(ena_dev, ena_qid);
1263 			goto err_rx;
1264 		}
1265 	}
1266 
1267 	return (0);
1268 
1269 err_rx:
1270 	while (i--)
1271 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1272 	i = adapter->num_queues;
1273 err_tx:
1274 	while (i--)
1275 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1276 
1277 	return (ENXIO);
1278 }
1279 
1280 /**
1281  * ena_tx_cleanup - clear sent packets and corresponding descriptors
1282  * @tx_ring: ring for which we want to clean packets
1283  *
1284  * Once packets are sent, we ask the device in a loop for the descriptors
1285  * it no longer needs. For each completed descriptor we look up the related
1286  * mbuf chain (indexed in an array), free it and update the ring state.
1287  * The loop is bounded by TX_BUDGET and commits the ring state back to the
1288  * device every TX_COMMIT descriptors; any remainder is committed once more
1289  * after the loop finishes.
1290  **/
1291 static int
1292 ena_tx_cleanup(struct ena_ring *tx_ring)
1293 {
1294 	struct ena_adapter *adapter;
1295 	struct ena_com_io_cq* io_cq;
1296 	uint16_t next_to_clean;
1297 	uint16_t req_id;
1298 	uint16_t ena_qid;
1299 	unsigned int total_done = 0;
1300 	int rc;
1301 	int commit = TX_COMMIT;
1302 	int budget = TX_BUDGET;
1303 	int work_done;
1304 
1305 	adapter = tx_ring->que->adapter;
1306 	ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
1307 	io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1308 	next_to_clean = tx_ring->next_to_clean;
1309 
1310 	do {
1311 		struct ena_tx_buffer *tx_info;
1312 		struct mbuf *mbuf;
1313 
1314 		rc = ena_com_tx_comp_req_id_get(io_cq, &req_id);
1315 		if (rc != 0)
1316 			break;
1317 
1318 		rc = validate_tx_req_id(tx_ring, req_id);
1319 		if (rc)
1320 			break;
1321 
1322 		tx_info = &tx_ring->tx_buffer_info[req_id];
1323 
1324 		mbuf = tx_info->mbuf;
1325 
1326 		tx_info->mbuf = NULL;
1327 		bintime_clear(&tx_info->timestamp);
1328 
1329 		if (tx_info->num_of_bufs != 0) {
1330 			/* Map is no longer required */
1331 			bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
1332 		}
1333 
1334 		m_freem(mbuf);
1335 
1336 		total_done += tx_info->tx_descs;
1337 
1338 		tx_ring->free_tx_ids[next_to_clean] = req_id;
1339 		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1340 		    tx_ring->ring_size);
1341 
1342 		if (--commit == 0) {
1343 			commit = TX_COMMIT;
1344 			/* update ring state every TX_COMMIT descriptor */
1345 			tx_ring->next_to_clean = next_to_clean;
1346 			ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], total_done);
1347 			ena_com_update_dev_comp_head(io_cq);
1348 			total_done = 0;
1349 		}
1350 	} while (--budget);
1351 
1352 	work_done = TX_BUDGET - budget;
1353 
1354 	/* If there is still something to commit update ring state */
1355 	if (commit != TX_COMMIT) {
1356 		tx_ring->next_to_clean = next_to_clean;
1357 		ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid], total_done);
1358 		ena_com_update_dev_comp_head(io_cq);
1359 	}
1360 
1361 	taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
1362 
1363 	return (work_done);
1364 }
1365 
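/**
 * ena_rx_hash_mbuf - set flowid and RSS hash type on a received mbuf
 * @rx_ring: ring the packet arrived on
 * @ena_rx_ctx: Rx metadata reported by the device
 * @mbuf: mbuf to tag
 *
 * When RSS is supported, the device-computed hash and the L3/L4 protocol
 * info are translated into the matching M_HASHTYPE_* value; otherwise the
 * queue id is used as the flowid with M_HASHTYPE_NONE.
 **/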
1366 static void
1367 ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
1368     struct mbuf *mbuf)
1369 {
1370 	struct ena_adapter *adapter = rx_ring->adapter;
1371 
1372 	if (adapter->rss_support) {
1373 		mbuf->m_pkthdr.flowid = ena_rx_ctx->hash;
1374 
1375 		if (ena_rx_ctx->frag &&
1376 		    ena_rx_ctx->l3_proto != ENA_ETH_IO_L4_PROTO_UNKNOWN) {
1377 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1378 			return;
1379 		}
1380 
1381 		switch (ena_rx_ctx->l3_proto) {
1382 		case ENA_ETH_IO_L3_PROTO_IPV4:
1383 			switch (ena_rx_ctx->l4_proto) {
1384 			case ENA_ETH_IO_L4_PROTO_TCP:
1385 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1386 				break;
1387 			case ENA_ETH_IO_L4_PROTO_UDP:
1388 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1389 				break;
1390 			default:
1391 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1392 			}
1393 			break;
1394 		case ENA_ETH_IO_L3_PROTO_IPV6:
1395 			switch (ena_rx_ctx->l4_proto) {
1396 			case ENA_ETH_IO_L4_PROTO_TCP:
1397 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1398 				break;
1399 			case ENA_ETH_IO_L4_PROTO_UDP:
1400 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1401 				break;
1402 			default:
1403 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1404 			}
1405 			break;
1406 		case ENA_ETH_IO_L3_PROTO_UNKNOWN:
1407 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1408 			break;
1409 		default:
1410 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1411 		}
1412 	} else {
1413 		mbuf->m_pkthdr.flowid = rx_ring->qid;
1414 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1415 	}
1416 }
1417 
1418 /**
1419  * ena_rx_mbuf - assemble mbuf from descriptors
1420  * @rx_ring: ring for which we want to clean packets
1421  * @ena_bufs: buffer info
1422  * @ena_rx_ctx: metadata for the received packet
1423  * @next_to_clean: ring pointer
1424  *
1425  **/
1426 static struct mbuf*
1427 ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
1428     struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean)
1429 {
1430 	struct mbuf *mbuf;
1431 	struct ena_rx_buffer *rx_info;
1432 	struct ena_adapter *adapter;
1433 	unsigned int len, buf = 0;
1434 	unsigned int descs = ena_rx_ctx->descs;
1435 
1436 	adapter = rx_ring->adapter;
1437 	rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
1438 
1439 	ENA_ASSERT(rx_info->mbuf, "Invalid alloc frag buffer\n");
1440 
1441 	len = ena_bufs[0].len;
1442 	ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx",
1443 	    rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr);
1444 
1445 	mbuf = rx_info->mbuf;
1446 	mbuf->m_flags |= M_PKTHDR;
1447 	mbuf->m_pkthdr.len = len;
1448 	mbuf->m_len = len;
1449 	mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp;
1450 
1451 	/* Fill mbuf with hash key and its interpretation for optimization */
1452 	ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);
1453 
1454 	ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d",
1455 	    mbuf, mbuf->m_flags, mbuf->m_pkthdr.len);
1456 
1457 	/* DMA address is not needed anymore, unmap it */
1458 	bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
1459 
1460 	rx_info->mbuf = NULL;
1461 	*next_to_clean = ENA_RX_RING_IDX_NEXT(*next_to_clean,
1462 	    rx_ring->ring_size);
1463 
1464 	/*
1465 	 * While more than one descriptor remains for the received packet,
1466 	 * append the other mbufs to the main one
1467 	 */
1468 	while (--descs) {
1469 		rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
1470 		len = ena_bufs[++buf].len;
1471 
1472 		if (!m_append(mbuf, len, rx_info->mbuf->m_data)) {
1473 			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1474 			ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p",
1475 			    mbuf);
1476 		}
1477 		/* Free already appended mbuf, it won't be useful anymore */
1478 		bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
1479 		m_freem(rx_info->mbuf);
1480 		rx_info->mbuf = NULL;
1481 
1482 		*next_to_clean = ENA_RX_RING_IDX_NEXT(*next_to_clean,
1483 		    rx_ring->ring_size);
1484 	}
1485 
1486 	return (mbuf);
1487 }
1488 
1489 /**
1490  * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum
1491  **/
1492 static inline void
1493 ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
1494     struct mbuf *mbuf)
1495 {
1496 
1497 	/* if IP and error */
1498 	if ((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
1499 	    (ena_rx_ctx->l3_csum_err)) {
1500 		/* ipv4 checksum error */
1501 		mbuf->m_pkthdr.csum_flags = 0;
1502 		counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
1503 		return;
1504 	}
1505 
1506 	/* if TCP/UDP */
1507 	if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1508 	    (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) {
1509 		if (ena_rx_ctx->l4_csum_err) {
1510 			/* TCP/UDP checksum error */
1511 			mbuf->m_pkthdr.csum_flags = 0;
1512 			counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
1513 		} else {
1514 			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1515 			mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1516 		}
1517 	}
1518 
1519 	return;
1520 }
1521 
1522 /**
1523  * ena_rx_cleanup - handle rx irq
1524  * @rx_ring: ring for which the irq is being handled
1525  **/
1526 static int
1527 ena_rx_cleanup(struct ena_ring *rx_ring)
1528 {
1529 	struct ena_adapter *adapter;
1530 	struct mbuf *mbuf;
1531 	struct ena_com_rx_ctx ena_rx_ctx;
1532 	struct ena_com_io_cq* io_cq;
1533 	struct ena_com_io_sq* io_sq;
1534 	/* struct ena_eth_io_intr_reg intr_reg; */
1535 	if_t ifp;
1536 	uint16_t ena_qid;
1537 	uint16_t next_to_clean;
1538 	uint32_t refill_required;
1539 	uint32_t refill_threshold;
1540 	uint32_t do_if_input = 0;
1541 	unsigned int qid;
1542 	int rc;
1543 	int budget = RX_BUDGET;
1544 
1545 	adapter = rx_ring->que->adapter;
1546 	ifp = adapter->ifp;
1547 	qid = rx_ring->que->id;
1548 	ena_qid = ENA_IO_RXQ_IDX(qid);
1549 	io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1550 	io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
1551 	next_to_clean = rx_ring->next_to_clean;
1552 
1553 	do {
1554 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1555 		ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size;
1556 		ena_rx_ctx.descs = 0;
1557 		rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx);
1558 
1559 		if (unlikely(rc))
1560 			goto error;
1561 
1562 		if (unlikely(ena_rx_ctx.descs == 0))
1563 			break;
1564 
1565 		/* Receive mbuf from the ring */
1566 		mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs,
1567 		    &ena_rx_ctx, &next_to_clean);
1568 
1569 		/* Exit if we failed to retrieve a buffer */
1570 		if (unlikely(!mbuf)) {
1571 			next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
1572 			    ena_rx_ctx.descs, rx_ring->ring_size);
1573 			break;
1574 		}
1575 		ena_trace(ENA_DBG | ENA_RXPTH, "Rx: %d bytes",
1576 		    mbuf->m_pkthdr.len);
1577 
1578 		if ((ifp->if_capenable & IFCAP_RXCSUM) ||
1579 		    (ifp->if_capenable & IFCAP_RXCSUM_IPV6)) {
1580 			ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf);
1581 		}
1582 
1583 		counter_u64_add(rx_ring->rx_stats.bytes, mbuf->m_pkthdr.len);
1584 		/*
1585 		 * LRO is only for IP/TCP packets and TCP checksum of the packet
1586 		 * should be computed by hardware.
1587 		 */
1588 		do_if_input = 1;
1589 		if ((ifp->if_capenable & IFCAP_LRO) &&
1590 		    (mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) &&
1591 		    ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP) {
1592 			/*
1593 			 * Send to the stack if:
1594 			 *  - LRO not enabled, or
1595 			 *  - no LRO resources, or
1596 			 *  - lro enqueue fails
1597 			 */
1598 			if (rx_ring->lro.lro_cnt != 0 &&
1599 			    tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
1600 					do_if_input = 0;
1601 		}
1602 		if (do_if_input) {
1603 			ena_trace(ENA_DBG | ENA_RXPTH, "calling if_input() with mbuf %p",
1604 			    mbuf);
1605 			(*ifp->if_input)(ifp, mbuf);
1606 		}
1607 
1608 		counter_u64_add(rx_ring->rx_stats.cnt, 1);
1609 	} while (--budget);
1610 
1611 	rx_ring->next_to_clean = next_to_clean;
1612 
1613 	refill_required = ena_com_sq_empty_space(io_sq);
1614 	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DEVIDER;
1615 
1616 	if (refill_required > refill_threshold) {
1617 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1618 		ena_refill_rx_bufs(rx_ring, refill_required);
1619 	}
1620 
1621 	tcp_lro_flush_all(&rx_ring->lro);
1622 
1623 	return (RX_BUDGET - budget);
1624 
1625 error:
1626 	counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1);
1627 	return (RX_BUDGET - budget);
1628 }
1629 
1630 /*********************************************************************
1631  *
1632  *  MSIX & Interrupt Service routine
1633  *
1634  **********************************************************************/
1635 
1636 /**
1637  * ena_intr_msix_mgmnt - MSIX Interrupt Handler for admin/async queue
1638  * @arg: pointer to the ena_adapter structure
1639  **/
1640 static void
1641 ena_intr_msix_mgmnt(void *arg)
1642 {
1643 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
1644 
1645 	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1646 	if (likely(adapter->running))
1647 		ena_com_aenq_intr_handler(adapter->ena_dev, arg);
1648 }
1649 
1650 /**
1651  * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
1652  * @arg: pointer to the queue (struct ena_que) being serviced
1653  **/
1654 static void
1655 ena_handle_msix(void *arg)
1656 {
1657 	struct ena_que	*que = arg;
1658 	struct ena_adapter *adapter = que->adapter;
1659 	if_t ifp = adapter->ifp;
1660 	struct ena_ring *tx_ring;
1661 	struct ena_ring *rx_ring;
1662 	struct ena_com_io_cq* io_cq;
1663 	struct ena_eth_io_intr_reg intr_reg;
1664 	int qid, ena_qid;
1665 	int txc, rxc, i;
1666 
1667 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1668 		return;
1669 
1670 	ena_trace(ENA_DBG, "MSI-X TX/RX routine");
1671 
1672 	tx_ring = que->tx_ring;
1673 	rx_ring = que->rx_ring;
1674 	qid = que->id;
1675 	ena_qid = ENA_IO_TXQ_IDX(qid);
1676 	io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1677 
1678 	for (i = 0; i < CLEAN_BUDGET; ++i) {
1679 		rxc = ena_rx_cleanup(rx_ring);
1680 
1681 		/* Protection from calling ena_tx_cleanup from ena_start_xmit */
1682 		ENA_RING_MTX_LOCK(tx_ring);
1683 		txc = ena_tx_cleanup(tx_ring);
1684 		ENA_RING_MTX_UNLOCK(tx_ring);
1685 
1686 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1687 			return;
1688 
1689 		if (txc != TX_BUDGET && rxc != RX_BUDGET)
1690 			break;
1691 	}
1692 
1693 	/* Signal that work is done and unmask interrupt */
1694 	ena_com_update_intr_reg(&intr_reg,
1695 	    RX_IRQ_INTERVAL,
1696 	    TX_IRQ_INTERVAL,
1697 	    true);
1698 	ena_com_unmask_intr(io_cq, &intr_reg);
1699 }
1700 
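/**
 * ena_enable_msix - allocate MSI-X vectors for all queues
 * @adapter: network interface device structure
 *
 * Builds the msix_entries table (vector numbers start from 1) and
 * reserves ENA_MAX_MSIX_VEC(num_queues) vectors via pci_alloc_msix(),
 * freeing the table again if the reservation fails.
 **/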
1701 static int
1702 ena_enable_msix(struct ena_adapter *adapter)
1703 {
1704 	device_t dev = adapter->pdev;
1705 	int i, msix_vecs, rc = 0;
1706 
1707 	/* Reserve the max msix vectors we might need */
1708 	msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues);
1709 
1710 	adapter->msix_entries = ENA_MEM_ALLOC(adapter->ena_dev->dmadev,
1711 	    msix_vecs * sizeof(struct msix_entry));
1712 	if (!adapter->msix_entries) {
1713 		device_printf(dev,
1714 		    "Failed to allocate msix_entries, vectors %d\n", msix_vecs);
1715 		rc = ENOMEM;
1716 		goto error;
1717 	}
1718 	device_printf(dev, "Allocated msix_entries, vectors (cnt: %d)\n",
1719 	    msix_vecs);
1720 
1721 	for (i = 0; i < msix_vecs; i++) {
1722 		adapter->msix_entries[i].entry = i;
1723 		/* Vectors must start from 1 */
1724 		adapter->msix_entries[i].vector = i + 1;
1725 	}
1726 
1727 	rc = pci_alloc_msix(dev, &msix_vecs);
1728 	if (rc != 0) {
1729 		device_printf(dev,
1730 		    "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
1731 		ENA_MEM_FREE(adapter->ena_dev->dmadev, adapter->msix_entries);
1732 		adapter->msix_entries = NULL;
1733 		rc = ENOSPC;
1734 		goto error;
1735 	}
1736 
1737 	adapter->msix_vecs = msix_vecs;
1738 	adapter->msix_enabled = true;
1739 
1740 error:
1741 	return (rc);
1742 }
1743 
1744 static void
1745 ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1746 {
1747 
1748 	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1749 	    ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1750 	    device_get_nameunit(adapter->pdev));
1751 	/*
1752 	 * Handler is NULL on purpose, it will be set
1753 	 * when mgmnt interrupt is acquired
1754 	 */
1755 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
1756 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1757 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1758 	    adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
1759 
1760 	return;
1761 }
1762 
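/**
 * ena_setup_io_intr - prepare the per-queue IO interrupt table
 * @adapter: network interface device structure
 *
 * Fills in name, handler, data and vector for each IO IRQ and binds
 * every queue to a CPU, using RSS buckets when available or a simple
 * round-robin over the available CPUs otherwise.
 **/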
1763 static void
1764 ena_setup_io_intr(struct ena_adapter *adapter)
1765 {
1766 	static int last_bind_cpu = -1;
1767 	int irq_idx;
1768 	ena_trace(ENA_DBG, "enter");
1769 
1770 	for (int i = 0; i < adapter->num_queues; i++) {
1771 		irq_idx = ENA_IO_IRQ_IDX(i);
1772 
1773 		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1774 		    "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
1775 		adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
1776 		adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1777 		adapter->irq_tbl[irq_idx].vector =
1778 		    adapter->msix_entries[irq_idx].vector;
1779 		ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n",
1780 		    adapter->msix_entries[irq_idx].vector);
1781 #ifdef	RSS
1782 		adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1783 		    rss_getcpu(i % rss_getnumbuckets());
1784 #else
1785 		/*
1786 		 * We still want to bind rings to the corresponding cpu
1787 		 * using something similar to the RSS round-robin technique.
1788 		 */
1789 		if (last_bind_cpu < 0)
1790 			last_bind_cpu = CPU_FIRST();
1791 		adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1792 		    last_bind_cpu;
1793 		last_bind_cpu = CPU_NEXT(last_bind_cpu);
1794 #endif
1795 	}
1796 
1797 	return;
1798 }
1799 
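/**
 * ena_request_mgmnt_irq - allocate and set up the management interrupt
 * @adapter: network interface device structure
 *
 * Allocates the admin/AENQ IRQ resource, activates it and installs
 * ena_intr_msix_mgmnt() as its handler, releasing the resource if any
 * step fails.
 **/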
1800 static int
1801 ena_request_mgmnt_irq(struct ena_adapter *adapter)
1802 {
1803 	struct ena_irq *irq;
1804 	unsigned long flags;
1805 	int rc, rcc;
1806 
1807 	flags = RF_ACTIVE | RF_SHAREABLE;
1808 
1809 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1810 	irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1811 	    &irq->vector, flags);
1812 
1813 	if (irq->res == NULL) {
1814 		device_printf(adapter->pdev, "could not allocate "
1815 		    "irq vector: %d\n", irq->vector);
1816 		rc = ENXIO;
1817 		goto exit_res;
1818 	}
1819 
1820 	if ((rc = bus_activate_resource(adapter->pdev, SYS_RES_IRQ, irq->vector,
1821 	    irq->res)) != 0) {
1822 		device_printf(adapter->pdev, "could not activate "
1823 		    "irq vector: %d\n", irq->vector);
1824 		goto exit_intr;
1825 	}
1826 
1827 	if ((rc = bus_setup_intr(adapter->pdev, irq->res,
1828 	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1829 	    ena_intr_msix_mgmnt, irq->data, &irq->cookie)) != 0) {
1830 		device_printf(adapter->pdev, "failed to register "
1831 		    "interrupt handler for irq %ju: %d\n",
1832 		    rman_get_start(irq->res), rc);
1833 		goto exit_intr;
1834 	}
1835 	irq->requested = true;
1836 
1837 	return (rc);
1838 
1839 exit_intr:
1840 	device_printf(adapter->pdev, "exit_intr: releasing resource"
1841 	    " for irq %d\n", irq->vector);
1842 	rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1843 	    irq->vector, irq->res);
1844 	if (rcc)
1845 		device_printf(adapter->pdev, "dev has no parent while "
1846 		    "releasing res for irq: %d\n", irq->vector);
1847 	irq->res = NULL;
1848 
1849 exit_res:
1850 	return (rc);
1851 }
1852 
1853 static int
1854 ena_request_io_irq(struct ena_adapter *adapter)
1855 {
1856 	struct ena_irq *irq;
1857 	unsigned long flags = 0;
1858 	int rc = 0, i, rcc;
1859 
1860 	if (!adapter->msix_enabled) {
1861 		device_printf(adapter->pdev,
1862 		    "failed to request I/O IRQs: MSI-X is not enabled\n");
1863 		return (EINVAL);
1864 	}
1865 	flags = RF_ACTIVE | RF_SHAREABLE;
1866 
1867 	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1868 		irq = &adapter->irq_tbl[i];
1869 
1870 		if (irq->requested)
1871 			continue;
1872 
1873 		irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1874 		    &irq->vector, flags);
1875 		if (irq->res == NULL) {
1876 			device_printf(adapter->pdev, "could not allocate "
1877 			    "irq vector: %d\n", irq->vector);
1878 			goto err;
1879 		}
1880 
1881 		if ((rc = bus_setup_intr(adapter->pdev, irq->res,
1882 			    INTR_TYPE_NET | INTR_MPSAFE, NULL, irq->handler,
1883 			    irq->data, &irq->cookie)) != 0) {
1884 			device_printf(adapter->pdev, "failed to register "
1885 			    "interrupt handler for irq %ju: %d\n",
1886 			    rman_get_start(irq->res), rc);
1887 			goto err;
1888 		}
1889 		irq->requested = true;
1890 
1891 #ifdef	RSS
1892 		device_printf(adapter->pdev, "queue %d - RSS bucket %d\n",
1893 		    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
1894 #else
1895 		device_printf(adapter->pdev, "queue %d - cpu %d\n",
1896 		    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
1897 #endif
1898 	}
1899 
1900 	return (rc);
1901 
1902 err:
1903 
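	/* Roll back in reverse order, starting from the IRQ that failed. */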
1904 	for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
1905 		irq = &adapter->irq_tbl[i];
1906 		rcc = 0;
1907 
1908 		/* Once we enter the err: section with irq->requested set, we
1909 		   free both the interrupt handler and its resources */
1910 		if (irq->requested)
1911 			rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1912 		if (rcc)
1913 			device_printf(adapter->pdev, "could not release"
1914 			    " irq: %d, error: %d\n", irq->vector, rcc);
1915 
1916 		/* If we entered the err: section without irq->requested set,
1917 		   we know it was bus_alloc_resource_any() that needs cleanup,
1918 		   provided res is not NULL. If res is NULL, no work is needed
1919 		   in this iteration */
1920 		rcc = 0;
1921 		if (irq->res != NULL) {
1922 			rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1923 			    irq->vector, irq->res);
1924 		}
1925 		if (rcc)
1926 			device_printf(adapter->pdev, "dev has no parent while "
1927 			    "releasing res for irq: %d\n", irq->vector);
1928 		irq->requested = false;
1929 		irq->res = NULL;
1930 	}
1931 
1932 	return (rc);
1933 }
1934 
1935 static void
1936 ena_free_mgmnt_irq(struct ena_adapter *adapter)
1937 {
1938 	struct ena_irq *irq;
1939 	int rc;
1940 
1941 	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1942 	if (irq->requested) {
1943 		ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
1944 		    irq->vector);
1945 		rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1946 		if (rc)
1947 			device_printf(adapter->pdev, "failed to tear "
1948 			    "down irq: %d\n", irq->vector);
1949 		irq->requested = 0;
1950 	}
1951 
1952 	if (irq->res != NULL) {
1953 		ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n",
1954 		    irq->vector);
1955 		rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1956 		    irq->vector, irq->res);
1957 		irq->res = NULL;
1958 		if (rc)
1959 			device_printf(adapter->pdev, "dev has no parent while "
1960 			    "releasing res for irq: %d\n", irq->vector);
1961 	}
1962 
1963 	return;
1964 }
1965 
1966 static void
1967 ena_free_io_irq(struct ena_adapter *adapter)
1968 {
1969 	struct ena_irq *irq;
1970 	int rc;
1971 
1972 	for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1973 		irq = &adapter->irq_tbl[i];
1974 		if (irq->requested) {
1975 			ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n",
1976 			    irq->vector);
1977 			rc = bus_teardown_intr(adapter->pdev, irq->res,
1978 			    irq->cookie);
1979 			if (rc) {
1980 				device_printf(adapter->pdev, "failed to tear "
1981 				    "down irq: %d\n", irq->vector);
1982 			}
1983 			irq->requested = 0;
1984 		}
1985 
1986 		if (irq->res != NULL) {
1987 			ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n",
1988 			    irq->vector);
1989 			rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1990 			    irq->vector, irq->res);
1991 			irq->res = NULL;
1992 			if (rc) {
1993 				device_printf(adapter->pdev, "dev has no parent"
1994 				    " while releasing res for irq: %d\n",
1995 				    irq->vector);
1996 			}
1997 		}
1998 	}
1999 
2000 	return;
2001 }
2002 
2003 static void
2004 ena_free_irqs(struct ena_adapter* adapter)
2005 {
2006 
2007 	ena_free_io_irq(adapter);
2008 	ena_free_mgmnt_irq(adapter);
2009 	ena_disable_msix(adapter);
2010 }
2011 
2012 static void
2013 ena_disable_msix(struct ena_adapter *adapter)
2014 {
2015 
2016 	pci_release_msi(adapter->pdev);
2017 
2018 	adapter->msix_vecs = 0;
2019 	ENA_MEM_FREE(adapter->ena_dev->dmadev, adapter->msix_entries);
2020 	adapter->msix_entries = NULL;
2021 }
2022 
2023 static void
2024 ena_unmask_all_io_irqs(struct ena_adapter *adapter)
2025 {
2026 	struct ena_com_io_cq* io_cq;
2027 	struct ena_eth_io_intr_reg intr_reg;
2028 	uint16_t ena_qid;
2029 	int i;
2030 
2031 	/* Unmask interrupts for all queues */
2032 	for (i = 0; i < adapter->num_queues; i++) {
2033 		ena_qid = ENA_IO_TXQ_IDX(i);
2034 		io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
2035 		ena_com_update_intr_reg(&intr_reg, 0, 0, true);
2036 		ena_com_unmask_intr(io_cq, &intr_reg);
2037 	}
2038 }
2039 
2040 /* Configure the Rx flow steering (RSS) */
2041 static int ena_rss_configure(struct ena_adapter *adapter)
2042 {
2043 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2044 	int rc;
2045 
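	/*
	 * ena_com returns EPERM when the device does not support a given
	 * feature; that is not fatal here, so it is deliberately ignored.
	 */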
2046 	/* Set indirect table */
2047 	rc = ena_com_indirect_table_set(ena_dev);
2048 	if (unlikely(rc && (rc != EPERM)))
2049 		return (rc);
2050 
2051 	/* Configure hash function (if supported) */
2052 	rc = ena_com_set_hash_function(ena_dev);
2053 	if (unlikely(rc && (rc != EPERM)))
2054 		return (rc);
2055 
2056 	/* Configure hash inputs (if supported) */
2057 	rc = ena_com_set_hash_ctrl(ena_dev);
2058 	if (unlikely(rc && (rc != EPERM)))
2059 		return (rc);
2060 
2061 	return (0);
2062 }
2063 
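/*
 * Taskqueue handler that refreshes the HW statistics once per second
 * for as long as the interface is up.
 */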
2064 static void
2065 ena_update_hw_stats(void *arg, int pending)
2066 {
2067 	struct ena_adapter *adapter = arg;
2068 	int rc;
2069 
2070 	for (;;) {
2071 		if (!adapter->up)
2072 			return;
2073 
2074 		rc = ena_update_stats_counters(adapter);
2075 		if (rc)
2076 			ena_trace(ENA_WARNING,
2077 			    "Error updating stats counters, rc = %d", rc);
2078 
2079 		pause("ena update hw stats", hz);
2080 	}
2081 }
2082 
2083 static int
2084 ena_up_complete(struct ena_adapter *adapter)
2085 {
2086 	int rc;
2087 
2088 	if (adapter->rss_support) {
2089 		rc = ena_rss_configure(adapter);
2090 		if (rc)
2091 			return (rc);
2092 	}
2093 
2094 	ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
2095 	ena_refill_all_rx_bufs(adapter);
2096 
2097 	return (0);
2098 }
2099 
2100 static int
2101 ena_up(struct ena_adapter *adapter)
2102 {
2103 	int rc = 0;
2104 
2105 	if (!device_is_attached(adapter->pdev)) {
2106 		device_printf(adapter->pdev, "device is not attached!\n");
2107 		return (ENXIO);
2108 	}
2109 
2110 	if (!adapter->running) {
2111 		device_printf(adapter->pdev, "device is not running!\n");
2112 		return (ENXIO);
2113 	}
2114 
2115 	if (!adapter->up) {
2116 		device_printf(adapter->pdev, "device is going UP\n");
2117 
2118 		/* setup interrupts for IO queues */
2119 		ena_setup_io_intr(adapter);
2120 		rc = ena_request_io_irq(adapter);
2121 		if (rc) {
2122 			ena_trace(ENA_ALERT, "err_req_irq");
2123 			goto err_req_irq;
2124 		}
2125 
2126 		/* allocate transmit descriptors */
2127 		rc = ena_setup_all_tx_resources(adapter);
2128 		if (rc) {
2129 			ena_trace(ENA_ALERT, "err_setup_tx");
2130 			goto err_setup_tx;
2131 		}
2132 
2133 		/* allocate receive descriptors */
2134 		rc = ena_setup_all_rx_resources(adapter);
2135 		if (rc) {
2136 			ena_trace(ENA_ALERT, "err_setup_rx");
2137 			goto err_setup_rx;
2138 		}
2139 
2140 		/* create IO queues for Rx & Tx */
2141 		rc = ena_create_io_queues(adapter);
2142 		if (rc) {
2143 			ena_trace(ENA_ALERT,
2144 			    "create IO queues failed");
2145 			goto err_io_que;
2146 		}
2147 
2148 		if (adapter->link_status)
2149 			if_link_state_change(adapter->ifp, LINK_STATE_UP);
2150 
2151 		rc = ena_up_complete(adapter);
2152 		if (rc)
2153 			goto err_up_complete;
2154 
2155 		counter_u64_add(adapter->dev_stats.interface_up, 1);
2156 
2157 		ena_update_hwassist(adapter);
2158 
2159 		if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING,
2160 		    IFF_DRV_OACTIVE);
2161 
2162 		callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
2163 		    ena_timer_service, (void *)adapter, 0);
2164 
2165 		taskqueue_enqueue(adapter->stats_tq, &adapter->stats_task);
2166 
2167 		adapter->up = true;
2168 
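		/*
		 * Unmask the IO IRQs last so that no interrupt arrives
		 * before the data path is fully set up.
		 */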
2169 		ena_unmask_all_io_irqs(adapter);
2170 	}
2171 
2172 	return (0);
2173 
2174 err_up_complete:
2175 	ena_destroy_all_io_queues(adapter);
2176 err_io_que:
2177 	ena_free_all_rx_resources(adapter);
2178 err_setup_rx:
2179 	ena_free_all_tx_resources(adapter);
2180 err_setup_tx:
2181 	ena_free_io_irq(adapter);
2182 err_req_irq:
2183 	return (rc);
2184 }
2185 
2186 int
2187 ena_update_stats_counters(struct ena_adapter *adapter)
2188 {
2189 	struct ena_admin_basic_stats ena_stats;
2190 	struct ena_hw_stats *stats = &adapter->hw_stats;
2191 	int rc = 0;
2192 
2193 	if (!adapter->up)
2194 		return (rc);
2195 
2196 	rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats);
2197 	if (rc)
2198 		return (rc);
2199 
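	/* The device reports each 64-bit counter as two 32-bit halves. */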
2200 	stats->tx_bytes = ((uint64_t)ena_stats.tx_bytes_high << 32) |
2201 		ena_stats.tx_bytes_low;
2202 	stats->rx_bytes = ((uint64_t)ena_stats.rx_bytes_high << 32) |
2203 		ena_stats.rx_bytes_low;
2204 
2205 	stats->rx_packets = ((uint64_t)ena_stats.rx_pkts_high << 32) |
2206 		ena_stats.rx_pkts_low;
2207 	stats->tx_packets = ((uint64_t)ena_stats.tx_pkts_high << 32) |
2208 		ena_stats.tx_pkts_low;
2209 
2210 	stats->rx_drops = ((uint64_t)ena_stats.rx_drops_high << 32) |
2211 		ena_stats.rx_drops_low;
2212 
2213 	return (0);
2214 }
2215 
2216 static uint64_t
2217 ena_get_counter(if_t ifp, ift_counter cnt)
2218 {
2219 	struct ena_adapter *adapter;
2220 	struct ena_hw_stats *stats;
2221 
2222 	adapter = if_getsoftc(ifp);
2223 	stats = &adapter->hw_stats;
2224 
2225 	switch (cnt) {
2226 	case IFCOUNTER_IPACKETS:
2227 		return (stats->rx_packets);
2228 	case IFCOUNTER_OPACKETS:
2229 		return (stats->tx_packets);
2230 	case IFCOUNTER_IBYTES:
2231 		return (stats->rx_bytes);
2232 	case IFCOUNTER_OBYTES:
2233 		return (stats->tx_bytes);
2234 	case IFCOUNTER_IQDROPS:
2235 		return (stats->rx_drops);
2236 	default:
2237 		return (if_get_counter_default(ifp, cnt));
2238 	}
2239 }
2240 
2241 static int
2242 ena_media_change(if_t ifp)
2243 {
2244 	/* Media Change is not supported by firmware */
2245 	return (0);
2246 }
2247 
2248 static void
2249 ena_media_status(if_t ifp, struct ifmediareq *ifmr)
2250 {
2251 	struct ena_adapter *adapter = if_getsoftc(ifp);
2252 	ena_trace(ENA_DBG, "enter");
2253 
2254 	ENA_DEV_LOCK;
2255 
2256 	ifmr->ifm_status = IFM_AVALID;
2257 	ifmr->ifm_active = IFM_ETHER;
2258 
2259 	if (!adapter->link_status) {
2260 		ENA_DEV_UNLOCK;
2261 		ena_trace(ENA_WARNING, "link_status = false");
2262 		return;
2263 	}
2264 
2265 	ifmr->ifm_status |= IFM_ACTIVE;
2266 	ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2267 
2268 	ENA_DEV_UNLOCK;
2269 
2270 	return;
2271 }
2272 
2273 static void
2274 ena_init(void *arg)
2275 {
2276 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
2277 
2278 	if (adapter->up == false) {
2279 		sx_xlock(&adapter->ioctl_sx);
2280 		ena_up(adapter);
2281 		sx_unlock(&adapter->ioctl_sx);
2282 	}
2283 
2284 	return;
2285 }
2286 
2287 static int
2288 ena_ioctl(if_t ifp, u_long command, caddr_t data)
2289 {
2290 	struct ena_adapter *adapter;
2291 	struct ifreq *ifr;
2292 	int rc;
2293 
2294 	adapter = ifp->if_softc;
2295 	ifr = (struct ifreq *)data;
2296 
2297 	/*
2298 	 * Acquire the lock to prevent the up and down routines from running in parallel.
2299 	 */
2300 	rc = 0;
2301 	switch (command) {
2302 	case SIOCSIFMTU:
2303 		sx_xlock(&adapter->ioctl_sx);
2304 		ena_down(adapter);
2305 
2306 		ena_change_mtu(ifp, ifr->ifr_mtu);
2307 
2308 		rc = ena_up(adapter);
2309 		sx_unlock(&adapter->ioctl_sx);
2310 		break;
2311 
2312 	case SIOCSIFFLAGS:
2313 		if (ifp->if_flags & IFF_UP) {
2314 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2315 				if (ifp->if_flags & (IFF_PROMISC |
2316 				    IFF_ALLMULTI)) {
2317 					device_printf(adapter->pdev,
2318 					    "ioctl promisc/allmulti\n");
2319 				}
2320 			} else {
2321 				sx_xlock(&adapter->ioctl_sx);
2322 				rc = ena_up(adapter);
2323 				sx_unlock(&adapter->ioctl_sx);
2324 			}
2325 		} else {
2326 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2327 				sx_xlock(&adapter->ioctl_sx);
2328 				ena_down(adapter);
2329 				sx_unlock(&adapter->ioctl_sx);
2330 			}
2331 		}
2332 		break;
2333 
2334 	case SIOCADDMULTI:
2335 	case SIOCDELMULTI:
2336 		break;
2337 
2338 	case SIOCSIFMEDIA:
2339 	case SIOCGIFMEDIA:
2340 		rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2341 		break;
2342 
2343 	case SIOCSIFCAP:
2344 		{
2345 			int reinit = 0;
2346 
2347 			if (ifr->ifr_reqcap != ifp->if_capenable) {
2348 				ifp->if_capenable = ifr->ifr_reqcap;
2349 				reinit = 1;
2350 			}
2351 
2352 			if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2353 				sx_xlock(&adapter->ioctl_sx);
2354 				ena_down(adapter);
2355 				rc = ena_up(adapter);
2356 				sx_unlock(&adapter->ioctl_sx);
2357 			}
2358 		}
2359 
2360 		break;
2361 	default:
2362 		rc = ether_ioctl(ifp, command, data);
2363 		break;
2364 	}
2365 
2366 	return (rc);
2367 }
2368 
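/* Translate the device's Tx/Rx offload feature bits into ifnet capabilities. */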
2369 static int
2370 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2371 {
2372 	int caps = 0;
2373 
2374 	if (feat->offload.tx &
2375 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2376 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2377 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK))
2378 		caps |= IFCAP_TXCSUM;
2379 
2380 	if (feat->offload.tx &
2381 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2382 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK))
2383 		caps |= IFCAP_TXCSUM_IPV6;
2384 
2385 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
2386 		caps |= IFCAP_TSO4;
2387 
2388 	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
2389 		caps |= IFCAP_TSO6;
2390 
2391 	if (feat->offload.rx_supported &
2392 	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2393 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK))
2394 		caps |= IFCAP_RXCSUM;
2395 
2396 	if (feat->offload.rx_supported &
2397 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
2398 		caps |= IFCAP_RXCSUM_IPV6;
2399 
2400 	caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
2401 
2402 	return (caps);
2403 }
2404 
2405 static void
2406 ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
2407 {
2408 
2409 	host_info->supported_network_features[0] =
2410 	    (uint32_t)if_getcapabilities(ifp);
2411 }
2412 
2413 static void
2414 ena_update_hwassist(struct ena_adapter *adapter)
2415 {
2416 	if_t ifp = adapter->ifp;
2417 	uint32_t feat = adapter->tx_offload_cap;
2418 	int cap = if_getcapenable(ifp);
2419 	int flags = 0;
2420 
2421 	if_clearhwassist(ifp);
2422 
2423 	if (cap & IFCAP_TXCSUM) {
2424 		if (feat & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
2425 			flags |= CSUM_IP;
2426 		if (feat &
2427 		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2428 		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK))
2429 			flags |= CSUM_IP_UDP | CSUM_IP_TCP;
2430 	}
2431 
2432 	if (cap & IFCAP_TXCSUM_IPV6)
2433 		flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
2434 
2435 	if (cap & IFCAP_TSO4)
2436 		flags |= CSUM_IP_TSO;
2437 
2438 	if (cap & IFCAP_TSO6)
2439 		flags |= CSUM_IP6_TSO;
2440 
2441 	if_sethwassistbits(ifp, flags, 0);
2442 }
2443 
2444 static int
2445 ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
2446     struct ena_com_dev_get_features_ctx *feat)
2447 {
2448 	if_t ifp;
2449 	int caps = 0;
2450 
2451 	ena_trace(ENA_DBG, "enter");
2452 
2453 	ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2454 	if (ifp == NULL) {
2455 		device_printf(pdev, "can not allocate ifnet structure\n");
2456 		return (ENXIO);
2457 	}
2458 	if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
2459 	if_setdev(ifp, pdev);
2460 	if_setsoftc(ifp, adapter);
2461 
2462 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2463 	if_setinitfn(ifp, ena_init);
2464 	if_settransmitfn(ifp, ena_mq_start);
2465 	if_setqflushfn(ifp, ena_qflush);
2466 	if_setioctlfn(ifp, ena_ioctl);
2467 	if_setgetcounterfn(ifp, ena_get_counter);
2468 
2469 	if_setsendqlen(ifp, adapter->tx_ring_size);
2470 	if_setsendqready(ifp);
2471 	if_setmtu(ifp, ETHERMTU);
2472 	if_setbaudrate(ifp, 0);
2473 	/* Zeroize capabilities... */
2474 	if_setcapabilities(ifp, 0);
2475 	if_setcapenable(ifp, 0);
2476 	/* check hardware support */
2477 	caps = ena_get_dev_offloads(feat);
2478 	/* ... and set them */
2479 	if_setcapabilitiesbit(ifp, caps, 0);
2480 
2481 	/* TSO parameters */
2482 	ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
2483 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2484 	ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
2485 	ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
2486 
2487 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2488 	if_setcapenable(ifp, if_getcapabilities(ifp));
2489 
2490 	/*
2491 	 * Specify the media types supported by this adapter and register
2492 	 * callbacks to update media and link information
2493 	 */
2494 	ifmedia_init(&adapter->media, IFM_IMASK,
2495 	    ena_media_change, ena_media_status);
2496 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2497 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2498 
2499 	ether_ifattach(ifp, adapter->mac_addr);
2500 
2501 	return (0);
2502 }
2503 
2504 static void
2505 ena_down(struct ena_adapter *adapter)
2506 {
2507 
2508 	if (adapter->up) {
2509 		device_printf(adapter->pdev, "device is going DOWN\n");
2510 
2511 		callout_drain(&adapter->timer_service);
2512 
2513 		adapter->up = false;
2514 		if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE,
2515 		    IFF_DRV_RUNNING);
2516 
2517 		/* Drain task responsible for updating hw stats */
2518 		while (taskqueue_cancel(adapter->stats_tq, &adapter->stats_task, NULL))
2519 			taskqueue_drain(adapter->stats_tq, &adapter->stats_task);
2520 
2521 		ena_free_io_irq(adapter);
2522 
2523 		ena_destroy_all_io_queues(adapter);
2524 
2525 		ena_free_all_tx_bufs(adapter);
2526 		ena_free_all_rx_bufs(adapter);
2527 		ena_free_all_tx_resources(adapter);
2528 		ena_free_all_rx_resources(adapter);
2529 
2530 		counter_u64_add(adapter->dev_stats.interface_down, 1);
2531 	}
2532 
2533 	return;
2534 }
2535 
2536 static void
2537 ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf)
2538 {
2539 	struct ena_com_tx_meta *ena_meta;
2540 	struct ether_vlan_header *eh;
2541 	u32 mss;
2542 	bool offload;
2543 	uint16_t etype;
2544 	int ehdrlen;
2545 	struct ip *ip;
2546 	int iphlen;
2547 	struct tcphdr *th;
2548 
2549 	offload = false;
2550 	ena_meta = &ena_tx_ctx->ena_meta;
2551 	mss = mbuf->m_pkthdr.tso_segsz;
2552 
2553 	if (mss != 0)
2554 		offload = true;
2555 
2556 	if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0)
2557 		offload = true;
2558 
2559 	if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
2560 		offload = true;
2561 
2562 	if (offload == false) {
2563 		ena_tx_ctx->meta_valid = 0;
2564 		return;
2565 	}
2566 
2567 	/* Determine where frame payload starts. */
2568 	eh = mtod(mbuf, struct ether_vlan_header *);
2569 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2570 		etype = ntohs(eh->evl_proto);
2571 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2572 	} else {
2573 		etype = ntohs(eh->evl_encap_proto);
2574 		ehdrlen = ETHER_HDR_LEN;
2575 	}
2576 
2577 	ip = (struct ip *)(mbuf->m_data + ehdrlen);
2578 	iphlen = ip->ip_hl << 2;
2579 	th = (struct tcphdr *)((caddr_t)ip + iphlen);
2580 
2581 	if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2582 		ena_tx_ctx->l3_csum_enable = 1;
2583 	}
2584 	if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2585 		ena_tx_ctx->tso_enable = 1;
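		/* th_off is the TCP header length in 32-bit words. */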
2586 		ena_meta->l4_hdr_len = (th->th_off);
2587 	}
2588 
2589 	switch (etype) {
2590 	case ETHERTYPE_IP:
2591 		ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2592 		if (ip->ip_off == 0)
2593 			ena_tx_ctx->df = 1;
2594 		break;
2595 	case ETHERTYPE_IPV6:
2596 		ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2597 		break;
2598 	default:
2599 		break;
2600 	}
2601 
2602 	if (ip->ip_p == IPPROTO_TCP) {
2603 		ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2604 		if (mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
2605 		    ena_tx_ctx->l4_csum_enable = 1;
2606 		else
2607 		    ena_tx_ctx->l4_csum_enable = 0;
2608 	} else if (ip->ip_p == IPPROTO_UDP) {
2609 		ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2610 		if (mbuf->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
2611 		    ena_tx_ctx->l4_csum_enable = 1;
2612 		else
2613 		    ena_tx_ctx->l4_csum_enable = 0;
2614 	} else {
2615 		ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
2616 		ena_tx_ctx->l4_csum_enable = 0;
2617 	}
2618 
2619 	ena_meta->mss = mss;
2620 	ena_meta->l3_hdr_len = iphlen;
2621 	ena_meta->l3_hdr_offset = ehdrlen;
2622 	ena_tx_ctx->meta_valid = 1;
2623 }
2624 
2625 static int
2626 ena_check_and_defragment_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
2627 {
2628 	struct ena_adapter *adapter;
2629 	struct mbuf *defrag_mbuf;
2630 	int num_frags;
2631 
2632 	adapter = tx_ring->adapter;
2633 	num_frags = ena_mbuf_count(*mbuf);
2634 
2635 	/* One segment must be reserved for the configuration descriptor. */
2636 	if (num_frags < adapter->max_tx_sgl_size)
2637 		return (0);
2638 	counter_u64_add(tx_ring->tx_stats.defragment, 1);
2639 
2640 	defrag_mbuf = m_defrag(*mbuf, M_NOWAIT);
2641 	if (defrag_mbuf == NULL) {
2642 		counter_u64_add(tx_ring->tx_stats.defragment_err, 1);
2643 		return (ENOMEM);
2644 	}
2645 
2646 	/* If the mbuf was defragmented successfully, the original mbuf is released. */
2647 	*mbuf = defrag_mbuf;
2648 
2649 	return (0);
2650 }
2651 
2652 static int
2653 ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
2654 {
2655 	struct ena_adapter *adapter;
2656 	struct ena_tx_buffer *tx_info;
2657 	struct ena_com_tx_ctx ena_tx_ctx;
2658 	struct ena_com_dev *ena_dev;
2659 	struct ena_com_buf *ena_buf;
2660 	struct ena_com_io_sq* io_sq;
2661 	bus_dma_segment_t segs[ENA_BUS_DMA_SEGS];
2662 	void *push_hdr;
2663 	uint16_t next_to_use;
2664 	uint16_t req_id;
2665 	uint16_t push_len;
2666 	uint16_t ena_qid;
2667 	uint32_t len, nsegs, header_len;
2668 	int i, rc;
2669 	int nb_hw_desc;
2670 
2671 	ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
2672 	adapter = tx_ring->que->adapter;
2673 	ena_dev = adapter->ena_dev;
2674 	io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
2675 
2676 	ENA_ASSERT(*mbuf, "mbuf is NULL\n");
2677 
2678 	rc = ena_check_and_defragment_mbuf(tx_ring, mbuf);
2679 	if (rc) {
2680 		ena_trace(ENA_WARNING,
2681 		    "Failed to defragment mbuf! err: %d", rc);
2682 		return (rc);
2683 	}
2684 
2685 	next_to_use = tx_ring->next_to_use;
2686 	req_id = tx_ring->free_tx_ids[next_to_use];
2687 	tx_info = &tx_ring->tx_buffer_info[req_id];
2688 
2689 	tx_info->mbuf = *mbuf;
2690 	tx_info->num_of_bufs = 0;
2691 
2692 	ena_buf = tx_info->bufs;
2693 	len = (*mbuf)->m_len;
2694 
2695 	ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes", (*mbuf)->m_pkthdr.len);
2696 
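	/*
	 * The Tx placement policy is host memory (no LLQ), so no header
	 * is pushed to the device and push_hdr stays NULL.
	 */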
2697 	push_len = 0;
2698 	header_len = min_t(uint32_t, len, tx_ring->tx_max_header_size);
2699 	push_hdr = NULL;
2700 
2701 	rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map,
2702 	    *mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
2703 
2704 	if (rc || (nsegs == 0)) {
2705 		ena_trace(ENA_WARNING,
2706 		    "dmamap load failed! err: %d nsegs: %d", rc, nsegs);
2707 		counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
2708 		tx_info->mbuf = NULL;
2709 		if (rc == ENOMEM)
2710 			return (ENA_COM_NO_MEM);
2711 		else
2712 			return (ENA_COM_INVAL);
2713 	}
2714 
2715 	for (i = 0; i < nsegs; i++) {
2716 		ena_buf->len = segs[i].ds_len;
2717 		ena_buf->paddr = segs[i].ds_addr;
2718 		ena_buf++;
2719 	}
2720 	tx_info->num_of_bufs = nsegs;
2721 
2722 	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2723 	ena_tx_ctx.ena_bufs = tx_info->bufs;
2724 	ena_tx_ctx.push_header = push_hdr;
2725 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2726 	ena_tx_ctx.req_id = req_id;
2727 	ena_tx_ctx.header_len = header_len;
2728 
2729 	/* Set flags and meta data */
2730 	ena_tx_csum(&ena_tx_ctx, *mbuf);
2731 	/* Prepare the packet's descriptors and send them to device */
2732 	rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
2733 	if (rc != 0) {
2734 		ena_trace(ENA_WARNING, "failed to prepare tx bufs\n");
2735 		counter_enter();
2736 		counter_u64_add_protected(tx_ring->tx_stats.queue_stop, 1);
2737 		counter_u64_add_protected(tx_ring->tx_stats.prepare_ctx_err, 1);
2738 		counter_exit();
2739 		goto dma_error;
2740 	}
2741 
2742 	counter_enter();
2743 	counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
2744 	counter_u64_add_protected(tx_ring->tx_stats.bytes, (*mbuf)->m_pkthdr.len);
2745 	counter_exit();
2746 
2747 	tx_info->tx_descs = nb_hw_desc;
2748 	getbinuptime(&tx_info->timestamp);
2749 	tx_info->print_once = true;
2750 
2751 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2752 	    tx_ring->ring_size);
2753 
2754 	bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map, BUS_DMASYNC_PREWRITE);
2755 
2756 	return (0);
2757 
2758 dma_error:
2759 	tx_info->mbuf = NULL;
2760 	bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
2761 
2762 	return (rc);
2763 }
2764 
2765 static void
2766 ena_start_xmit(struct ena_ring *tx_ring)
2767 {
2768 	struct mbuf *mbuf;
2769 	struct ena_adapter *adapter = tx_ring->adapter;
2770 	struct ena_com_io_sq* io_sq;
2771 	int ena_qid;
2772 	int acum_pkts = 0;
2773 	int ret = 0;
2774 
2775 	if ((adapter->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2776 		return;
2777 
2778 	if (!adapter->link_status)
2779 		return;
2780 
2781 	ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
2782 	io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
2783 
2784 	while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
2785 		ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and"
2786 		    " header csum flags %#jx",
2787 		    mbuf, mbuf->m_flags, mbuf->m_pkthdr.csum_flags);
2788 
2789 		if (ena_com_sq_empty_space(io_sq) < ENA_TX_CLEANUP_TRESHOLD)
2790 			ena_tx_cleanup(tx_ring);
2791 
2792 		if ((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0) {
2793 			if (ret == ENA_COM_NO_MEM) {
2794 				drbr_putback(adapter->ifp, tx_ring->br, mbuf);
2795 			} else if (ret == ENA_COM_NO_SPACE) {
2796 				drbr_putback(adapter->ifp, tx_ring->br, mbuf);
2797 			} else {
2798 				m_freem(mbuf);
2799 				drbr_advance(adapter->ifp, tx_ring->br);
2800 			}
2801 
2802 			break;
2803 		}
2804 
2805 		drbr_advance(adapter->ifp, tx_ring->br);
2806 
2807 		if ((adapter->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2808 			return;
2809 
2810 		acum_pkts++;
2811 
2812 		BPF_MTAP(adapter->ifp, mbuf);
2813 
2814 		if (acum_pkts == DB_THRESHOLD) {
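		/*
		 * Ring the doorbell every DB_THRESHOLD packets to amortize
		 * the cost of the MMIO write.
		 */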
2815 			acum_pkts = 0;
2816 			wmb();
2817 			/* Trigger the dma engine */
2818 			ena_com_write_sq_doorbell(io_sq);
2819 			counter_u64_add(tx_ring->tx_stats.doorbells, 1);
2820 		}
2821 
2822 	}
2823 
2824 	if (acum_pkts) {
2825 		wmb();
2826 		/* Trigger the dma engine */
2827 		ena_com_write_sq_doorbell(io_sq);
2828 		counter_u64_add(tx_ring->tx_stats.doorbells, 1);
2829 	}
2830 
2831 	if (ena_com_sq_empty_space(io_sq) < ENA_TX_CLEANUP_TRESHOLD)
2832 		ena_tx_cleanup(tx_ring);
2833 }
2834 
2835 static void
2836 ena_deferred_mq_start(void *arg, int pending)
2837 {
2838 	struct ena_ring *tx_ring = (struct ena_ring *)arg;
2839 	struct ifnet *ifp = tx_ring->adapter->ifp;
2840 
2841 	while (drbr_empty(ifp, tx_ring->br) == FALSE &&
2842 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2843 		ENA_RING_MTX_LOCK(tx_ring);
2844 		ena_start_xmit(tx_ring);
2845 		ENA_RING_MTX_UNLOCK(tx_ring);
2846 	}
2847 }
2848 
2849 static int
2850 ena_mq_start(if_t ifp, struct mbuf *m)
2851 {
2852 	struct ena_adapter *adapter = ifp->if_softc;
2853 	struct ena_ring *tx_ring;
2854 	int ret, is_drbr_empty;
2855 	uint32_t i;
2856 
2857 	if ((adapter->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2858 		return (ENODEV);
2859 
2860 	/*
2861 	 * Choose which Tx queue to use. If everything is set up correctly,
2862 	 * the chosen RSS bucket should match the one the current CPU is in,
2863 	 * which should improve performance.
2864 	 */
2866 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2867 #ifdef	RSS
2868 		if (rss_hash2bucket(m->m_pkthdr.flowid,
2869 		    M_HASHTYPE_GET(m), &i) == 0) {
2870 			i = i % adapter->num_queues;
2871 
2872 		} else
2873 #endif
2874 		{
2875 			i = m->m_pkthdr.flowid % adapter->num_queues;
2876 		}
2877 	} else {
2878 		i = curcpu % adapter->num_queues;
2879 	}
2880 	tx_ring = &adapter->tx_ring[i];
2881 
2882 	/* Check if drbr is empty before putting packet */
2883 	is_drbr_empty = drbr_empty(ifp, tx_ring->br);
2884 	ret = drbr_enqueue(ifp, tx_ring->br, m);
2885 	if (ret) {
2886 		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2887 		return (ret);
2888 	}
2889 
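	/*
	 * If the ring was empty and its lock is free, transmit inline;
	 * otherwise defer the work to the ring's taskqueue.
	 */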
2890 	if (is_drbr_empty && ENA_RING_MTX_TRYLOCK(tx_ring)) {
2891 		ena_start_xmit(tx_ring);
2892 		ENA_RING_MTX_UNLOCK(tx_ring);
2893 	} else {
2894 		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2895 	}
2896 
2897 	return (0);
2898 }
2899 
2900 static void
2901 ena_qflush(if_t ifp)
2902 {
2903 	struct ena_adapter *adapter = ifp->if_softc;
2904 	struct ena_ring *tx_ring = adapter->tx_ring;
2905 	int i;
2906 
2907 	for (i = 0; i < adapter->num_queues; ++i, ++tx_ring)
2908 		if (drbr_empty(ifp, tx_ring->br) == FALSE) {
2909 			ENA_RING_MTX_LOCK(tx_ring);
2910 			drbr_flush(ifp, tx_ring->br);
2911 			ENA_RING_MTX_UNLOCK(tx_ring);
2912 		}
2913 
2914 	if_qflush(ifp);
2915 
2916 	return;
2917 }
2918 
2919 static int ena_calc_io_queue_num(struct ena_adapter *adapter,
2920     struct ena_com_dev_get_features_ctx *get_feat_ctx)
2921 {
2922 	int io_sq_num, io_cq_num, io_queue_num;
2923 
2924 	io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2925 	io_cq_num = get_feat_ctx->max_queues.max_cq_num;
2926 
2927 	io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
2928 	io_queue_num = min_t(int, io_queue_num, io_sq_num);
2929 	io_queue_num = min_t(int, io_queue_num, io_cq_num);
2930 	/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
2931 	io_queue_num = min_t(int, io_queue_num,
2932 	    pci_msix_count(adapter->pdev) - 1);
2933 #ifdef	RSS
2934 	io_queue_num = min_t(int, io_queue_num, rss_getnumbuckets());
2935 #endif
2936 
2937 	return (io_queue_num);
2938 }
2939 
2940 static int ena_calc_queue_size(struct ena_adapter *adapter,
2941     uint16_t *max_tx_sgl_size,  uint16_t *max_rx_sgl_size,
2942     struct ena_com_dev_get_features_ctx *feat)
2943 {
2944 	uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
2945 	uint32_t v;
2946 	uint32_t q;
2947 
2948 	queue_size = min_t(uint32_t, queue_size,
2949 	    feat->max_queues.max_cq_depth);
2950 	queue_size = min_t(uint32_t, queue_size,
2951 	    feat->max_queues.max_sq_depth);
2952 
2953 	/* round down to the nearest power of 2 */
2954 	v = queue_size;
2955 	while (v != 0) {
2956 		if (powerof2(queue_size))
2957 			break;
2958 		v /= 2;
2959 		q = rounddown2(queue_size, v);
2960 		if (q != 0) {
2961 			queue_size = q;
2962 			break;
2963 		}
2964 	}
2965 
2966 	if (unlikely(!queue_size)) {
2967 		device_printf(adapter->pdev, "Invalid queue size\n");
2968 		return (ENA_COM_FAULT);
2969 	}
2970 
2971 	*max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2972 	    feat->max_queues.max_packet_tx_descs);
2973 	*max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2974 	    feat->max_queues.max_packet_rx_descs);
2975 
2976 	return (queue_size);
2977 }
2978 
2979 static int ena_rss_init_default(struct ena_adapter *adapter)
2980 {
2981 	struct ena_com_dev *ena_dev = adapter->ena_dev;
2982 	device_t dev = adapter->pdev;
2983 	int qid, rc, i;
2984 
2985 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
2986 	if (unlikely(rc)) {
2987 		device_printf(dev, "Cannot init RSS\n");
2988 		goto err_rss_init;
2989 	}
2990 
2991 	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
2992 #ifdef	RSS
2993 		qid = rss_get_indirection_to_bucket(i);
2994 		qid = qid % adapter->num_queues;
2995 #else
2996 		qid = i % adapter->num_queues;
2997 #endif
2998 		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
2999 						       ENA_IO_RXQ_IDX(qid));
3000 		if (unlikely(rc && (rc != EPERM))) {
3001 			device_printf(dev, "Cannot fill indirect table\n");
3002 			goto err_fill_indir;
3003 		}
3004 	}
3005 
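	/*
	 * CRC32 does not use an RSS key, hence the NULL key argument;
	 * 0xFFFFFFFF is the initial CRC value.
	 */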
3006 	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3007 					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
3008 	if (unlikely(rc && (rc != EPERM))) {
3009 		device_printf(dev, "Cannot fill hash function\n");
3010 		goto err_fill_indir;
3011 	}
3012 
3013 	rc = ena_com_set_default_hash_ctrl(ena_dev);
3014 	if (unlikely(rc && (rc != EPERM))) {
3015 		device_printf(dev, "Cannot fill hash control\n");
3016 		goto err_fill_indir;
3017 	}
3018 
3019 	return (0);
3020 
3021 err_fill_indir:
3022 	ena_com_rss_destroy(ena_dev);
3023 err_rss_init:
3024 	return (rc);
3025 }
3026 
3027 static void
3028 ena_rss_init_default_deferred(void *arg)
3029 {
3030 	struct ena_adapter *adapter;
3031 	devclass_t dc;
3032 	int max;
3033 	int rc;
3034 
3035 	dc = devclass_find("ena");
3036 	if (dc == NULL) {
3037 		ena_trace(ENA_DBG, "No devclass ena\n");
3038 		return;
3039 	}
3040 
3041 	max = devclass_get_maxunit(dc);
3042 	while (max-- > 0) {
3043 		adapter = devclass_get_softc(dc, max);
3044 		if (adapter != NULL) {
3045 			rc = ena_rss_init_default(adapter);
3046 			adapter->rss_support = true;
3047 			if (rc) {
3048 				device_printf(adapter->pdev,
3049 				    "WARNING: RSS was not properly initialized,"
3050 				    " it will affect bandwidth\n");
3051 				adapter->rss_support = false;
3052 			}
3053 		}
3054 	}
3055 }
3056 SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL);
3057 
3058 static void ena_config_host_info(struct ena_com_dev *ena_dev)
3059 {
3060 	struct ena_admin_host_info *host_info;
3061 	int rc;
3062 
3063 	/* Allocate only the host info */
3064 	rc = ena_com_allocate_host_info(ena_dev);
3065 	if (rc) {
3066 		ena_trace(ENA_ALERT, "Cannot allocate host info\n");
3067 		return;
3068 	}
3069 
3070 	host_info = ena_dev->host_attr.host_info;
3071 
3072 	host_info->os_type = ENA_ADMIN_OS_FREEBSD;
3073 	host_info->kernel_ver = osreldate;
3074 
3075 	snprintf(host_info->kernel_ver_str, sizeof(host_info->kernel_ver_str),
	    "%d", osreldate);
3076 	host_info->os_dist = 0;
3077 	strncpy(host_info->os_dist_str, osrelease,
3078 	    sizeof(host_info->os_dist_str) - 1);
3079 
3080 	host_info->driver_version =
3081 		(DRV_MODULE_VER_MAJOR) |
3082 		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3083 		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
3084 
3085 	rc = ena_com_set_host_attributes(ena_dev);
3086 	if (rc) {
3087 		if (rc == EPERM)
3088 			ena_trace(ENA_WARNING, "Cannot set host attributes\n");
3089 		else
3090 			ena_trace(ENA_ALERT, "Cannot set host attributes\n");
3091 
3092 		goto err;
3093 	}
3094 
3095 	return;
3096 
3097 err:
3098 	ena_com_delete_host_info(ena_dev);
3099 }
3100 
3101 static int
3102 ena_device_init(struct ena_adapter *adapter, device_t pdev,
3103 	struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
3104 {
3105 	struct ena_com_dev* ena_dev = adapter->ena_dev;
3106 	bool readless_supported;
3107 	uint32_t aenq_groups;
3108 	int dma_width;
3109 	int rc;
3110 
3111 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
3112 	if (rc) {
3113 		device_printf(pdev, "failed to init mmio read less\n");
3114 		return (rc);
3115 	}
3116 
3117 	/*
3118 	 * The PCIe configuration space revision ID indicates whether MMIO
3119 	 * register read (readless) mode is disabled
3120 	 */
3121 	readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
3122 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3123 
3124 	rc = ena_com_dev_reset(ena_dev);
3125 	if (rc) {
3126 		device_printf(pdev, "Can not reset device\n");
3127 		goto err_mmio_read_less;
3128 	}
3129 
3130 	rc = ena_com_validate_version(ena_dev);
3131 	if (rc) {
3132 		device_printf(pdev, "device version is too low\n");
3133 		goto err_mmio_read_less;
3134 	}
3135 
3136 	dma_width = ena_com_get_dma_width(ena_dev);
3137 	if (dma_width < 0) {
3138 		device_printf(pdev, "Invalid dma width value %d\n", dma_width);
3139 		rc = dma_width;
3140 		goto err_mmio_read_less;
3141 	}
3142 	adapter->dma_width = dma_width;
3143 
3144 	/* ENA admin level init */
3145 	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
3146 	if (rc) {
3147 		device_printf(pdev,
3148 		    "Can not initialize ena admin queue with device\n");
3149 		goto err_mmio_read_less;
3150 	}
3151 
3152 	/*
3153 	 * To enable MSI-X interrupts, the driver first needs to know the
3154 	 * number of queues, so it uses admin polling mode to retrieve
3155 	 * that information
3156 	 */
3157 	ena_com_set_admin_polling_mode(ena_dev, true);
3158 
3159 	ena_config_host_info(ena_dev);
3160 
3161 	/* Get Device Attributes */
3162 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3163 	if (rc) {
3164 		device_printf(pdev,
3165 		    "Cannot get attribute for ena device rc: %d\n", rc);
3166 		goto err_admin_init;
3167 	}
3168 
3169 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3170 	    BIT(ENA_ADMIN_FATAL_ERROR) |
3171 	    BIT(ENA_ADMIN_WARNING) |
3172 	    BIT(ENA_ADMIN_NOTIFICATION) |
3173 	    BIT(ENA_ADMIN_KEEP_ALIVE);
3174 
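	/* Keep only the AENQ groups that the device also supports. */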
3175 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
3176 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3177 	if (rc) {
3178 		device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
3179 		goto err_admin_init;
3180 	}
3181 
3182 	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3183 
3184 	return (0);
3185 
3186 err_admin_init:
3187 	ena_com_delete_host_info(ena_dev);
3188 	ena_com_admin_destroy(ena_dev);
3189 err_mmio_read_less:
3190 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3191 
3192 	return (rc);
3193 }
3194 
3195 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
3196     int io_vectors)
3197 {
3198 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3199 	int rc;
3200 
3201 	rc = ena_enable_msix(adapter);
3202 	if (rc) {
3203 		device_printf(adapter->pdev, "Error with MSI-X enablement\n");
3204 		return (rc);
3205 	}
3206 
3207 	ena_setup_mgmnt_intr(adapter);
3208 
3209 	rc = ena_request_mgmnt_irq(adapter);
3210 	if (rc) {
3211 		device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
3212 		goto err_disable_msix;
3213 	}
3214 
3215 	ena_com_set_admin_polling_mode(ena_dev, false);
3216 
3217 	ena_com_admin_aenq_enable(ena_dev);
3218 
3219 	return (0);
3220 
3221 err_disable_msix:
3222 	ena_disable_msix(adapter);
3223 
3224 	return (rc);
3225 }
3226 
3227 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
3228 static void ena_keep_alive_wd(void *adapter_data,
3229     struct ena_admin_aenq_entry *aenq_e)
3230 {
3231 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3232 	sbintime_t stime;
3233 
3234 	stime = getsbinuptime();
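	/*
	 * The timestamp is read locklessly by the watchdog; the release
	 * store pairs with the acquire load in
	 * check_for_missing_keep_alive().
	 */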
3235 	atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
3236 }
3237 
3238 /* Check for keep alive expiration */
3239 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3240 {
3241 	sbintime_t timestamp, time;
3242 
3243 	if (adapter->wd_active == 0)
3244 		return;
3245 
3246 	if (adapter->keep_alive_timeout == 0)
3247 		return;
3248 
3249 	timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
3250 	time = getsbinuptime() - timestamp;
3251 	if (unlikely(time > adapter->keep_alive_timeout)) {
3252 		device_printf(adapter->pdev,
3253 		    "Keep alive watchdog timeout.\n");
3254 		counter_u64_add(adapter->dev_stats.wd_expired, 1);
3255 		adapter->trigger_reset = true;
3256 	}
3257 }
3258 
3259 /* Check if admin queue is enabled */
3260 static void check_for_admin_com_state(struct ena_adapter *adapter)
3261 {
3262 	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3263 		device_printf(adapter->pdev,
3264 		    "ENA admin queue is not in running state!\n");
3265 		counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
3266 		adapter->trigger_reset = true;
3267 	}
3268 }
3269 
3270 /*
3271  * Check for Tx completions that did not arrive on time.
3272  * The timeout is defined by "missing_tx_timeout".
3273  * A device reset will be performed if the number of incomplete
3274  * transactions exceeds "missing_tx_threshold".
3275  */
3276 static void check_for_missing_tx_completions(struct ena_adapter *adapter)
3277 {
3278 	struct ena_ring *tx_ring;
3279 	struct ena_tx_buffer *tx_info;
3280 	struct bintime curtime, time;
3281 	int i, j, budget, missed_tx;
3282 
3283 	/* Make sure state changes made by other contexts are visible here. */
3284 	rmb();
3285 
3286 	if (!adapter->up)
3287 		return;
3288 
3289 	if (adapter->trigger_reset)
3290 		return;
3291 
3292 	if (adapter->missing_tx_timeout == 0)
3293 		return;
3294 
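	/*
	 * Check at most missing_tx_max_queues rings per run, resuming from
	 * where the previous run stopped (next_monitored_tx_qid).
	 */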
3295 	budget = adapter->missing_tx_max_queues;
3296 	getbinuptime(&curtime);
3297 
3298 	for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) {
3299 		tx_ring = &adapter->tx_ring[i];
3300 
3301 		missed_tx = 0;
3302 
3303 		for (j = 0; j < tx_ring->ring_size; j++) {
3304 			tx_info = &tx_ring->tx_buffer_info[j];
3305 
3306 			if (!bintime_isset(&tx_info->timestamp))
3307 				continue;
3308 
3309 			time = curtime;
3310 			bintime_sub(&time, &tx_info->timestamp);
3311 
3312 			/* Check again if packet is still waiting */
3313 			if (bintime_isset(&tx_info->timestamp) && unlikely(
3314 			    bttosbt(time) > adapter->missing_tx_timeout)) {
3315 				if (tx_info->print_once)
3316 					device_printf(adapter->pdev,
3317 					    "Found a Tx that wasn't completed "
3318 					    "on time, qid %d, index %d.\n",
3319 					    tx_ring->qid, j);
3320 
3321 				tx_info->print_once = false;
3322 				missed_tx++;
3323 
3324 				if (unlikely(missed_tx >
3325 				    adapter->missing_tx_threshold)) {
3326 					device_printf(adapter->pdev,
3327 					    "The number of lost tx completion "
3328 					    "is above the threshold (%d > %d). "
3329 					    "Reset the device\n", missed_tx,
3330 					    adapter->missing_tx_threshold);
3331 					adapter->trigger_reset = true;
3332 					return;
3333 				}
3334 			}
3335 		}
3336 
3337 		budget--;
3338 		if (!budget) {
3339 			i++;
3340 			break;
3341 		}
3342 	}
3343 
3344 	adapter->next_monitored_tx_qid = i % adapter->num_queues;
3345 }
3346 
3348 static void
3349 ena_timer_service(void *data)
3350 {
3351 	struct ena_adapter *adapter = (struct ena_adapter *)data;
3352 	struct ena_admin_host_info *host_info =
3353 	    adapter->ena_dev->host_attr.host_info;
3354 
3355 	check_for_missing_keep_alive(adapter);
3356 
3357 	check_for_admin_com_state(adapter);
3358 
3359 	check_for_missing_tx_completions(adapter);
3360 
3361 	if (host_info)
3362 		ena_update_host_info(host_info, adapter->ifp);
3363 
3364 	if (unlikely(adapter->trigger_reset)) {
3365 		device_printf(adapter->pdev, "Trigger reset is on\n");
3366 		taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
3367 		return;
3368 	}
3369 
3370 	/*
3371 	 * Schedule another timeout one second from now.
3372 	 */
3373 	callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0);
3374 }
3375 
3376 static void
3377 ena_reset_task(void *arg, int pending)
3378 {
3379 	struct ena_com_dev_get_features_ctx get_feat_ctx;
3380 	struct ena_adapter *adapter = (struct ena_adapter *)arg;
3381 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3382 	bool dev_up;
3383 	int rc;
3384 
3385 	if (unlikely(!adapter->trigger_reset)) {
3386 		device_printf(adapter->pdev,
3387 		    "device reset scheduled but trigger_reset is off\n");
3388 		return;
3389 	}
3390 
3391 	sx_xlock(&adapter->ioctl_sx);
3392 
3393 	callout_drain(&adapter->timer_service);
3394 
3395 	dev_up = adapter->up;
3396 
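	/* Destroy phase: quiesce and tear everything down before re-init. */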
3397 	ena_com_set_admin_running_state(ena_dev, false);
3398 	ena_free_mgmnt_irq(adapter);
3399 	ena_down(adapter);
3400 	ena_com_dev_reset(ena_dev);
3401 	ena_disable_msix(adapter);
3402 	ena_com_abort_admin_commands(ena_dev);
3403 	ena_com_wait_for_abort_completion(ena_dev);
3404 	ena_com_admin_destroy(ena_dev);
3405 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3406 
3407 	adapter->trigger_reset = false;
3408 
3409 	/* Finished destroy part. Restart the device */
3410 	rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx,
3411 	    &adapter->wd_active);
3412 	if (rc) {
3413 		device_printf(adapter->pdev,
3414 		    "ENA device init failed! (err: %d)\n", rc);
3415 		goto err_dev_free;
3416 	}
3417 
3418 	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
3419 	    adapter->num_queues);
3420 	if (rc) {
3421 		device_printf(adapter->pdev, "Enable MSI-X failed\n");
3422 		goto err_com_free;
3423 	}
3424 
3425 	/* If the interface was up before the reset, bring it up */
3426 	if (dev_up) {
3427 		rc = ena_up(adapter);
3428 		if (rc) {
3429 			device_printf(adapter->pdev,
3430 			    "Failed to create I/O queues\n");
3431 			goto err_msix_free;
3432 		}
3433 	}
3434 
3435 	callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
3436 	    ena_timer_service, (void *)adapter, 0);
3437 
3438 	sx_unlock(&adapter->ioctl_sx);
3439 
3440 	return;
3441 
3442 err_msix_free:
3443 	ena_com_dev_reset(ena_dev);
3444 	ena_free_mgmnt_irq(adapter);
3445 	ena_disable_msix(adapter);
3446 err_com_free:
3447 	ena_com_admin_destroy(ena_dev);
3448 err_dev_free:
3449 	device_printf(adapter->pdev, "ENA reset failed!\n");
3450 	adapter->running = false;
3451 	sx_unlock(&adapter->ioctl_sx);
3452 }
3453 
3454 /**
3455  * ena_attach - Device Initialization Routine
3456  * @pdev: device information struct
3457  *
3458  * Returns 0 on success, an error code otherwise.
3459  *
3460  * ena_attach initializes an adapter identified by a device structure.
3461  * The OS initialization, configuring of the adapter private structure,
3462  * and a hardware reset occur.
3463  **/
3464 static int
3465 ena_attach(device_t pdev)
3466 {
3467 	struct ena_com_dev_get_features_ctx get_feat_ctx;
3468 	static int version_printed;
3469 	struct ena_adapter *adapter;
3470 	struct ena_com_dev *ena_dev = NULL;
3471 	uint16_t tx_sgl_size = 0;
3472 	uint16_t rx_sgl_size = 0;
3473 	int io_queue_num;
3474 	int queue_size;
3475 	int rc;
3476 	struct sysctl_ctx_list *ctx;
3477 	struct sysctl_oid_list *children;
3478 
3479 	adapter = device_get_softc(pdev);
3480 	adapter->pdev = pdev;
3481 	ctx = device_get_sysctl_ctx(pdev);
3482 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(pdev));
3483 
3484 	mtx_init(&adapter->global_mtx, "ENA global mtx", NULL, MTX_DEF);
3485 	sx_init(&adapter->ioctl_sx, "ENA ioctl sx");
3486 
3487 	/* Sysctl calls for Watchdog service */
3488 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "wd_active",
3489 	    CTLFLAG_RWTUN, &adapter->wd_active, 0,
3490 	    "Watchdog is active");
3491 
3492 	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "keep_alive_timeout",
3493 	    CTLFLAG_RWTUN, &adapter->keep_alive_timeout,
3494 	    "Timeout for Keep Alive messages");
3495 
3496 	SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "missing_tx_timeout",
3497 	    CTLFLAG_RWTUN, &adapter->missing_tx_timeout,
3498 	    "Timeout for TX completion");
3499 
3500 	SYSCTL_ADD_U32(ctx, children, OID_AUTO, "missing_tx_max_queues",
3501 	    CTLFLAG_RWTUN, &adapter->missing_tx_max_queues, 0,
3502 	    "Number of TX queues to check per run");
3503 
3504 	SYSCTL_ADD_U32(ctx, children, OID_AUTO, "missing_tx_threshold",
3505 	    CTLFLAG_RWTUN, &adapter->missing_tx_threshold, 0,
3506 	    "Max number of timed-out packets");
3507 
3508 	/* Set up the timer service */
3509 	callout_init_mtx(&adapter->timer_service, &adapter->global_mtx, 0);
3510 	adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
3511 	adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
3512 	adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
3513 	adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
3514 
3515 	if (version_printed++ == 0)
3516 		device_printf(pdev, "%s\n", ena_version);
3517 
3518 	rc = ena_allocate_pci_resources(adapter);
3519 	if (rc) {
3520 		device_printf(pdev, "PCI resource allocation failed!\n");
3521 		ena_free_pci_resources(adapter);
3522 		goto err_pci_res;
3523 	}
3524 
3525 	/* Allocate memory for ena_dev structure */
3526 	ena_dev = ENA_MEM_ALLOC(pdev, sizeof(struct ena_com_dev));
3527 	if (!ena_dev) {
3528 		device_printf(pdev, "allocating ena_dev failed\n");
3529 		rc = ENOMEM;
3530 		goto err_select_region;
3531 	}
3532 
3533 	adapter->ena_dev = ena_dev;
3534 	ena_dev->dmadev = pdev;
3535 	ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3536 	    M_WAITOK | M_ZERO);
3537 
3538 	/* Store register resources */
3539 	((struct ena_bus*)(ena_dev->bus))->reg_bar_t =
3540 	    rman_get_bustag(adapter->registers);
3541 	((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
3542 	    rman_get_bushandle(adapter->registers);
3543 
3544 	if (((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0) {
3545 		device_printf(pdev, "failed to map registers bar\n");
3546 		rc = ENXIO;
3547 		goto err_dev_free;
3548 	}
3549 
3550 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3551 
3552 	/* Device initialization */
3553 	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
3554 	if (rc) {
3555 		device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
3556 		rc = ENXIO;
3557 		goto err_bus_free;
3558 	}
3559 
3560 	adapter->keep_alive_timestamp = getsbinuptime();
3561 
3562 	adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3563 
3564 	/* Set for sure that interface is not up */
3565 	adapter->up = false;
3566 
3567 	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3568 	    ETHER_ADDR_LEN);
3569 
3570 	adapter->small_copy_len =
3571 	    ENA_DEFAULT_SMALL_PACKET_LEN;
3572 
3573 	/* calculate IO queue number to create */
3574 	io_queue_num = ena_calc_io_queue_num(adapter, &get_feat_ctx);
3575 
3576 	ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n",
3577 	    io_queue_num);
3578 	adapter->num_queues = io_queue_num;
3579 
3580 	/* calculate ring sizes */
3581 	queue_size = ena_calc_queue_size(adapter, &tx_sgl_size,
3582 	    &rx_sgl_size, &get_feat_ctx);
3583 	if ((queue_size <= 0) || (io_queue_num <= 0)) {
3584 		rc = ENA_COM_FAULT;
3585 		goto err_com_free;
3586 	}
3587 
3588 	adapter->tx_ring_size = queue_size;
3589 	adapter->rx_ring_size = queue_size;
3590 
3591 	adapter->max_tx_sgl_size = tx_sgl_size;
3592 	adapter->max_rx_sgl_size = rx_sgl_size;
3593 
3594 	/* set up dma tags for rx and tx buffers */
3595 	rc = ena_setup_tx_dma_tag(adapter);
3596 	if (rc)
3597 		goto dma_tx_err;
3598 
3599 	rc = ena_setup_rx_dma_tag(adapter);
3600 	if (rc)
3601 		goto dma_rx_err;
3602 
3603 	/* initialize rings basic information */
3604 	device_printf(pdev, "initialize %d io queues\n", io_queue_num);
3605 	rc = ena_init_io_rings(adapter);
3606 	if (rc) {
3607 		device_printf(pdev, "Error with initialization of IO rings\n");
3608 		goto err_io_init;
3609 	}
3610 
3611 	/* setup network interface */
3612 	rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
3613 	if (rc) {
3614 		device_printf(pdev, "Error with network interface setup\n");
3615 		goto err_com_free;
3616 	}
3617 
3618 	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
3619 	if (rc) {
3620 		device_printf(pdev,
3621 		    "Failed to enable and set the admin interrupts\n");
3622 		goto err_ifp_free;
3623 	}
3624 
3625 	/* Initialize reset task queue */
3626 	TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3627 	adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3628 	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
3629 	if (adapter->reset_tq == NULL) {
3630 		device_printf(adapter->pdev,
3631 		    "Unable to create reset task queue\n");
		rc = ENOMEM;
3632 		goto err_reset_tq;
3633 	}
3634 	taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET,
3635 	    "%s rstq", device_get_nameunit(adapter->pdev));
3636 
3637 	/* Initialize task queue responsible for updating hw stats */
3638 	TASK_INIT(&adapter->stats_task, 0, ena_update_hw_stats, adapter);
3639 	adapter->stats_tq = taskqueue_create_fast("ena_stats_update",
3640 	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->stats_tq);
3641 	if (adapter->stats_tq == NULL) {
3642 		device_printf(adapter->pdev,
3643 		    "Unable to create taskqueue for updating hw stats\n");
		rc = ENOMEM;
3644 		goto err_stats_tq;
3645 	}
3646 	taskqueue_start_threads(&adapter->stats_tq, 1, PI_REALTIME,
3647 	    "%s stats tq", device_get_nameunit(adapter->pdev));
3648 
3649 	/* Initialize statistics */
3650 	ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3651 	    sizeof(struct ena_stats_dev));
3652 	ena_update_stats_counters(adapter);
3653 	ena_sysctl_add_nodes(adapter);
3654 
3655 	/* Tell the stack that the interface is not active */
3656 	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3657 
3658 	adapter->running = true;
3659 	return (0);
3660 
3661 err_stats_tq:
3662 	taskqueue_free(adapter->reset_tq);
3663 err_reset_tq:
3664 	ena_free_mgmnt_irq(adapter);
3665 	ena_disable_msix(adapter);
3666 err_ifp_free:
3667 	if_detach(adapter->ifp);
3668 	if_free(adapter->ifp);
3669 err_com_free:
3670 	ena_free_all_io_rings_resources(adapter);
3671 err_io_init:
3672 	ena_free_rx_dma_tag(adapter);
3673 dma_rx_err:
3674 	ena_free_tx_dma_tag(adapter);
3675 dma_tx_err:
3676 	ena_com_admin_destroy(ena_dev);
3677 	ena_com_delete_host_info(ena_dev);
3678 err_bus_free:
3679 	free(ena_dev->bus, M_DEVBUF);
3680 err_dev_free:
3681 	free(ena_dev, M_DEVBUF);
3682 err_select_region:
3683 	ena_free_pci_resources(adapter);
3684 err_pci_res:
3685 	return (rc);
3686 }
3687 
3688 /**
3689  * ena_detach - Device Removal Routine
3690  * @pdev: device information struct
3691  *
3692  * ena_detach is called by the device subsystem to alert the driver
3693  * that it should release a PCI device.
3694  **/
3695 static int
3696 ena_detach(device_t pdev)
3697 {
3698 	struct ena_adapter *adapter = device_get_softc(pdev);
3699 	struct ena_com_dev *ena_dev = adapter->ena_dev;
3700 	int rc;
3701 
3702 	/* Make sure VLANS are not using driver */
3703 	if (adapter->ifp->if_vlantrunk != NULL) {
3704 		device_printf(adapter->pdev, "VLAN is in use, detach first\n");
3705 		return (EBUSY);
3706 	}
3707 
3708 	/* Free reset task and callout */
3709 	callout_drain(&adapter->timer_service);
3710 	while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
3711 		taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
3712 	taskqueue_free(adapter->reset_tq);
3713 
3714 	sx_xlock(&adapter->ioctl_sx);
3715 	ena_down(adapter);
3716 	sx_unlock(&adapter->ioctl_sx);
3717 
3718 	taskqueue_free(adapter->stats_tq);
3719 
3720 	if (adapter->ifp != NULL) {
3721 		ether_ifdetach(adapter->ifp);
3722 		if_free(adapter->ifp);
3723 	}
3724 
3725 	ena_free_all_io_rings_resources(adapter);
3726 
3727 	ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3728 	    sizeof(struct ena_stats_dev));
3729 
3730 	if (adapter->rss_support)
3731 		ena_com_rss_destroy(ena_dev);
3732 
	rc = ena_free_rx_dma_tag(adapter);
	if (rc != 0)
		device_printf(adapter->pdev,
		    "Failed to free RX DMA tag; mappings may still exist\n");

	rc = ena_free_tx_dma_tag(adapter);
	if (rc != 0)
		device_printf(adapter->pdev,
		    "Failed to free TX DMA tag; mappings may still exist\n");
3742 
3743 	/* Reset the device only if the device is running. */
3744 	if (adapter->running)
3745 		ena_com_dev_reset(ena_dev);
3746 
3747 	ena_com_delete_host_info(ena_dev);
3748 
3749 	ena_com_admin_destroy(ena_dev);
3750 
3751 	ena_free_irqs(adapter);
3752 
3753 	ena_com_mmio_reg_read_request_destroy(ena_dev);
3754 
3755 	ena_free_pci_resources(adapter);
3756 
3757 	mtx_destroy(&adapter->global_mtx);
3758 	sx_destroy(&adapter->ioctl_sx);
3759 
3760 	if (ena_dev->bus != NULL)
3761 		free(ena_dev->bus, M_DEVBUF);
3762 
3763 	if (ena_dev != NULL)
3764 		free(ena_dev, M_DEVBUF);
3765 
3766 	return (bus_generic_detach(pdev));
3767 }
3768 
3769 /******************************************************************************
3770  ******************************** AENQ Handlers *******************************
3771  *****************************************************************************/
3772 /**
3773  * ena_update_on_link_change:
3774  * Notify the network interface about the change in link status
3775  **/
3776 static void
3777 ena_update_on_link_change(void *adapter_data,
3778     struct ena_admin_aenq_entry *aenq_e)
3779 {
3780 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3781 	struct ena_admin_aenq_link_change_desc *aenq_desc;
3782 	int status;
3783 	if_t ifp;
3784 
3785 	aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3786 	ifp = adapter->ifp;
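	/* The LINK_STATUS bit of the descriptor flags carries the new state. */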
3787 	status = aenq_desc->flags &
3788 	    ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3789 
3790 	if (status != 0) {
3791 		device_printf(adapter->pdev, "link is UP\n");
3792 		if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		device_printf(adapter->pdev, "link is DOWN\n");
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
3800 
3801 	adapter->link_status = status;
3804 }
3805 
3806 /**
 * This handler will be called for unknown event groups or unimplemented
 * handlers.
3808  **/
3809 static void
3810 unimplemented_aenq_handler(void *data,
3811     struct ena_admin_aenq_entry *aenq_e)
3812 {
3813 	return;
3814 }
3815 
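/*
 * Dispatch table for asynchronous event notifications: the ena_com layer
 * demultiplexes AENQ entries by group ID through .handlers[], and any group
 * without a dedicated entry falls back to unimplemented_aenq_handler().
 */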
3816 static struct ena_aenq_handlers aenq_handlers = {
3817     .handlers = {
3818 	    [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3819 	    [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3820     },
3821     .unimplemented_handler = unimplemented_aenq_handler
3822 };
3823 
3824 /*********************************************************************
3825  *  FreeBSD Device Interface Entry Points
3826  *********************************************************************/
3827 
3828 static device_method_t ena_methods[] = {
3829     /* Device interface */
3830     DEVMETHOD(device_probe, ena_probe),
3831     DEVMETHOD(device_attach, ena_attach),
3832     DEVMETHOD(device_detach, ena_detach),
3833     DEVMETHOD_END
3834 };
3835 
3836 static driver_t ena_driver = {
3837     "ena", ena_methods, sizeof(struct ena_adapter),
3838 };
3839 
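/*
 * Module glue: DRIVER_MODULE(9) registers the driver on the pci(4) bus, and
 * the MODULE_DEPEND() declarations make the loader pull in the PCI and
 * Ethernet support this driver relies on.
 */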
3840 devclass_t ena_devclass;
3841 DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
3842 MODULE_DEPEND(ena, pci, 1, 1, 1);
3843 MODULE_DEPEND(ena, ether, 1, 1, 1);
3844 
3845 /*********************************************************************/
3846