1 /*-
2  * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  *  1. Redistributions of source code must retain the above copyright notice,
9  *     this list of conditions and the following disclaimer.
10  *
11  *  2. Neither the name of Matthew Macy nor the names of its
12  *     contributors may be used to endorse or promote products derived from
13  *     this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_acpi.h"
34 #include "opt_sched.h"
35 
36 #include <sys/param.h>
37 #include <sys/types.h>
38 #include <sys/bus.h>
39 #include <sys/eventhandler.h>
40 #include <sys/jail.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/md5.h>
44 #include <sys/mutex.h>
45 #include <sys/module.h>
46 #include <sys/kobj.h>
47 #include <sys/rman.h>
48 #include <sys/proc.h>
49 #include <sys/sbuf.h>
50 #include <sys/smp.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/taskqueue.h>
56 #include <sys/limits.h>
57 
58 #include <net/if.h>
59 #include <net/if_var.h>
60 #include <net/if_types.h>
61 #include <net/if_media.h>
62 #include <net/bpf.h>
63 #include <net/ethernet.h>
64 #include <net/mp_ring.h>
65 #include <net/vnet.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet/tcp_lro.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip6.h>
74 #include <netinet/tcp.h>
75 #include <netinet/ip_var.h>
76 #include <netinet/netdump/netdump.h>
77 #include <netinet6/ip6_var.h>
78 
79 #include <machine/bus.h>
80 #include <machine/in_cksum.h>
81 
82 #include <vm/vm.h>
83 #include <vm/pmap.h>
84 
85 #include <dev/led/led.h>
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/pci_private.h>
89 
90 #include <net/iflib.h>
91 #include <net/iflib_private.h>
92 
93 #include "ifdi_if.h"
94 
95 #if defined(__i386__) || defined(__amd64__)
96 #include <sys/memdesc.h>
97 #include <machine/bus.h>
98 #include <machine/md_var.h>
99 #include <machine/specialreg.h>
100 #include <x86/include/busdma_impl.h>
101 #include <x86/iommu/busdma_dmar.h>
102 #endif
103 
104 #include <sys/bitstring.h>
105 /*
106  * enable accounting of every mbuf as it comes in to and goes out of
107  * iflib's software descriptor references
108  */
109 #define MEMORY_LOGGING 0
110 /*
111  * Enable mbuf vectors for compressing long mbuf chains
112  */
113 
114 /*
115  * NB:
116  * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
117  *   we prefetch needs to be determined by the time spent in m_free vis-à-vis
118  *   the cost of a prefetch. This will of course vary based on the workload:
119  *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
120  *        is quite expensive, thus suggesting very little prefetch.
121  *      - small packet forwarding which is just returning a single mbuf to
122  *        UMA will typically be very fast vis-à-vis the cost of a memory
123  *        access.
124  */
125 
126 
127 /*
128  * File organization:
129  *  - private structures
130  *  - iflib private utility functions
131  *  - ifnet functions
132  *  - vlan registry and other exported functions
133  *  - iflib public core functions
134  */
137 MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
138 
139 struct iflib_txq;
140 typedef struct iflib_txq *iflib_txq_t;
141 struct iflib_rxq;
142 typedef struct iflib_rxq *iflib_rxq_t;
143 struct iflib_fl;
144 typedef struct iflib_fl *iflib_fl_t;
145 
146 struct iflib_ctx;
147 
148 static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
149 static void iflib_timer(void *arg);
150 
151 typedef struct iflib_filter_info {
152 	driver_filter_t *ifi_filter;
153 	void *ifi_filter_arg;
154 	struct grouptask *ifi_task;
155 	void *ifi_ctx;
156 } *iflib_filter_info_t;
157 
158 struct iflib_ctx {
159 	KOBJ_FIELDS;
160 	/*
161 	 * Pointer to hardware driver's softc
162 	 */
163 	void *ifc_softc;
164 	device_t ifc_dev;
165 	if_t ifc_ifp;
166 
167 	cpuset_t ifc_cpus;
168 	if_shared_ctx_t ifc_sctx;
169 	struct if_softc_ctx ifc_softc_ctx;
170 
171 	struct sx ifc_ctx_sx;
172 	struct mtx ifc_state_mtx;
173 
174 	uint16_t ifc_nhwtxqs;
175 
176 	iflib_txq_t ifc_txqs;
177 	iflib_rxq_t ifc_rxqs;
178 	uint32_t ifc_if_flags;
179 	uint32_t ifc_flags;
180 	uint32_t ifc_max_fl_buf_size;
181 	int ifc_in_detach;
182 
183 	int ifc_link_state;
184 	int ifc_link_irq;
185 	int ifc_watchdog_events;
186 	struct cdev *ifc_led_dev;
187 	struct resource *ifc_msix_mem;
188 
189 	struct if_irq ifc_legacy_irq;
190 	struct grouptask ifc_admin_task;
191 	struct grouptask ifc_vflr_task;
192 	struct iflib_filter_info ifc_filter_info;
193 	struct ifmedia	ifc_media;
194 
195 	struct sysctl_oid *ifc_sysctl_node;
196 	uint16_t ifc_sysctl_ntxqs;
197 	uint16_t ifc_sysctl_nrxqs;
198 	uint16_t ifc_sysctl_qs_eq_override;
199 	uint16_t ifc_sysctl_rx_budget;
200 	uint16_t ifc_sysctl_tx_abdicate;
201 
202 	qidx_t ifc_sysctl_ntxds[8];
203 	qidx_t ifc_sysctl_nrxds[8];
204 	struct if_txrx ifc_txrx;
205 #define isc_txd_encap  ifc_txrx.ift_txd_encap
206 #define isc_txd_flush  ifc_txrx.ift_txd_flush
207 #define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
208 #define isc_rxd_available ifc_txrx.ift_rxd_available
209 #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
210 #define isc_rxd_refill ifc_txrx.ift_rxd_refill
211 #define isc_rxd_flush ifc_txrx.ift_rxd_flush
214 #define isc_legacy_intr ifc_txrx.ift_legacy_intr
215 	eventhandler_tag ifc_vlan_attach_event;
216 	eventhandler_tag ifc_vlan_detach_event;
217 	uint8_t ifc_mac[ETHER_ADDR_LEN];
218 	char ifc_mtx_name[16];
219 };
220 
221 
222 void *
223 iflib_get_softc(if_ctx_t ctx)
224 {
225 
226 	return (ctx->ifc_softc);
227 }
228 
229 device_t
230 iflib_get_dev(if_ctx_t ctx)
231 {
232 
233 	return (ctx->ifc_dev);
234 }
235 
236 if_t
237 iflib_get_ifp(if_ctx_t ctx)
238 {
239 
240 	return (ctx->ifc_ifp);
241 }
242 
243 struct ifmedia *
244 iflib_get_media(if_ctx_t ctx)
245 {
246 
247 	return (&ctx->ifc_media);
248 }
249 
250 uint32_t
251 iflib_get_flags(if_ctx_t ctx)
252 {
253 	return (ctx->ifc_flags);
254 }
255 
256 void
257 iflib_set_detach(if_ctx_t ctx)
258 {
259 	ctx->ifc_in_detach = 1;
260 }
261 
262 void
263 iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
264 {
265 
266 	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
267 }
268 
269 if_softc_ctx_t
270 iflib_get_softc_ctx(if_ctx_t ctx)
271 {
272 
273 	return (&ctx->ifc_softc_ctx);
274 }
275 
276 if_shared_ctx_t
277 iflib_get_sctx(if_ctx_t ctx)
278 {
279 
280 	return (ctx->ifc_sctx);
281 }
282 
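/*
 * IP_ALIGNED() is true when an mbuf's data pointer sits two bytes past
 * a four-byte boundary, so that the IP header that follows the 14-byte
 * Ethernet header ends up 32-bit aligned.
 */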
283 #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
284 #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
285 #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~(CACHE_LINE_SIZE-1)))
286 
287 #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
288 #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
289 
290 #define RX_SW_DESC_MAP_CREATED	(1 << 0)
291 #define TX_SW_DESC_MAP_CREATED	(1 << 1)
292 #define RX_SW_DESC_INUSE        (1 << 3)
293 #define TX_SW_DESC_MAPPED       (1 << 4)
294 
295 #define	M_TOOBIG		M_PROTO1
296 
297 typedef struct iflib_sw_rx_desc_array {
298 	bus_dmamap_t	*ifsd_map;         /* bus_dma maps for packet */
299 	struct mbuf	**ifsd_m;           /* pkthdr mbufs */
300 	caddr_t		*ifsd_cl;          /* direct cluster pointer for rx */
301 	uint8_t		*ifsd_flags;
302 } iflib_rxsd_array_t;
303 
304 typedef struct iflib_sw_tx_desc_array {
305 	bus_dmamap_t    *ifsd_map;         /* bus_dma maps for packet */
306 	struct mbuf    **ifsd_m;           /* pkthdr mbufs */
307 	uint8_t		*ifsd_flags;
308 } if_txsd_vec_t;
309 
310 
311 /* magic number that should be high enough for any hardware */
312 #define IFLIB_MAX_TX_SEGS		128
313 /* bnxt supports 64 with hardware LRO enabled */
314 #define IFLIB_MAX_RX_SEGS		64
315 #define IFLIB_RX_COPY_THRESH		128
316 #define IFLIB_MAX_RX_REFRESH		32
317 /* The minimum descriptors per second before we start coalescing */
318 #define IFLIB_MIN_DESC_SEC		16384
319 #define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
320 #define IFLIB_QUEUE_IDLE		0
321 #define IFLIB_QUEUE_HUNG		1
322 #define IFLIB_QUEUE_WORKING		2
323 /* maximum number of txqs that can share an rx interrupt */
324 #define IFLIB_MAX_TX_SHARED_INTR	4
325 
326 /* this should really scale with ring size - this is a fairly arbitrary value */
327 #define TX_BATCH_SIZE			32
328 
329 #define IFLIB_RESTART_BUDGET		8
330 
331 
332 #define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
333 				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
334 				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
335 struct iflib_txq {
336 	qidx_t		ift_in_use;
337 	qidx_t		ift_cidx;
338 	qidx_t		ift_cidx_processed;
339 	qidx_t		ift_pidx;
340 	uint8_t		ift_gen;
341 	uint8_t		ift_br_offset;
342 	uint16_t	ift_npending;
343 	uint16_t	ift_db_pending;
344 	uint16_t	ift_rs_pending;
345 	/* implicit pad */
346 	uint8_t		ift_txd_size[8];
347 	uint64_t	ift_processed;
348 	uint64_t	ift_cleaned;
349 	uint64_t	ift_cleaned_prev;
350 #if MEMORY_LOGGING
351 	uint64_t	ift_enqueued;
352 	uint64_t	ift_dequeued;
353 #endif
354 	uint64_t	ift_no_tx_dma_setup;
355 	uint64_t	ift_no_desc_avail;
356 	uint64_t	ift_mbuf_defrag_failed;
357 	uint64_t	ift_mbuf_defrag;
358 	uint64_t	ift_map_failed;
359 	uint64_t	ift_txd_encap_efbig;
360 	uint64_t	ift_pullups;
361 	uint64_t	ift_last_timer_tick;
362 
363 	struct mtx	ift_mtx;
364 	struct mtx	ift_db_mtx;
365 
366 	/* constant values */
367 	if_ctx_t	ift_ctx;
368 	struct ifmp_ring        *ift_br;
369 	struct grouptask	ift_task;
370 	qidx_t		ift_size;
371 	uint16_t	ift_id;
372 	struct callout	ift_timer;
373 
374 	if_txsd_vec_t	ift_sds;
375 	uint8_t		ift_qstatus;
376 	uint8_t		ift_closed;
377 	uint8_t		ift_update_freq;
378 	struct iflib_filter_info ift_filter_info;
379 	bus_dma_tag_t		ift_desc_tag;
380 	bus_dma_tag_t		ift_tso_desc_tag;
381 	iflib_dma_info_t	ift_ifdi;
382 #define MTX_NAME_LEN 16
383 	char                    ift_mtx_name[MTX_NAME_LEN];
384 	char                    ift_db_mtx_name[MTX_NAME_LEN];
385 	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
386 #ifdef IFLIB_DIAGNOSTICS
387 	uint64_t ift_cpu_exec_count[256];
388 #endif
389 } __aligned(CACHE_LINE_SIZE);
390 
391 struct iflib_fl {
392 	qidx_t		ifl_cidx;
393 	qidx_t		ifl_pidx;
394 	qidx_t		ifl_credits;
395 	uint8_t		ifl_gen;
396 	uint8_t		ifl_rxd_size;
397 #if MEMORY_LOGGING
398 	uint64_t	ifl_m_enqueued;
399 	uint64_t	ifl_m_dequeued;
400 	uint64_t	ifl_cl_enqueued;
401 	uint64_t	ifl_cl_dequeued;
402 #endif
403 	/* implicit pad */
404 
405 	bitstr_t 	*ifl_rx_bitmap;
406 	qidx_t		ifl_fragidx;
407 	/* constant */
408 	qidx_t		ifl_size;
409 	uint16_t	ifl_buf_size;
410 	uint16_t	ifl_cltype;
411 	uma_zone_t	ifl_zone;
412 	iflib_rxsd_array_t	ifl_sds;
413 	iflib_rxq_t	ifl_rxq;
414 	uint8_t		ifl_id;
415 	bus_dma_tag_t           ifl_desc_tag;
416 	iflib_dma_info_t	ifl_ifdi;
417 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
418 	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
419 	qidx_t	ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
420 }  __aligned(CACHE_LINE_SIZE);
421 
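/*
 * get_inuse() returns the number of descriptors outstanding in a ring
 * of the given size.  pidx == cidx alone is ambiguous (empty vs. full),
 * so the generation bit disambiguates: gen == 0 means the ring is
 * empty, gen == 1 means the producer has wrapped around onto the
 * consumer and the ring is full.  For example, with size = 1024,
 * cidx = 1000 and pidx = 8 the producer has wrapped, giving
 * used = 1024 - 1000 + 8 = 32.
 */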
422 static inline qidx_t
423 get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
424 {
425 	qidx_t used;
426 
427 	if (pidx > cidx)
428 		used = pidx - cidx;
429 	else if (pidx < cidx)
430 		used = size - cidx + pidx;
431 	else if (gen == 0 && pidx == cidx)
432 		used = 0;
433 	else if (gen == 1 && pidx == cidx)
434 		used = size;
435 	else
436 		panic("bad state");
437 
438 	return (used);
439 }
440 
441 #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
442 
443 #define IDXDIFF(head, tail, wrap) \
444 	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
445 
446 struct iflib_rxq {
447 	/* If there is a separate completion queue -
448 	 * these are the cq cidx and pidx. Otherwise
449 	 * these are unused.
450 	 */
451 	qidx_t		ifr_size;
452 	qidx_t		ifr_cq_cidx;
453 	qidx_t		ifr_cq_pidx;
454 	uint8_t		ifr_cq_gen;
455 	uint8_t		ifr_fl_offset;
456 
457 	if_ctx_t	ifr_ctx;
458 	iflib_fl_t	ifr_fl;
459 	uint64_t	ifr_rx_irq;
460 	uint16_t	ifr_id;
461 	uint8_t		ifr_lro_enabled;
462 	uint8_t		ifr_nfl;
463 	uint8_t		ifr_ntxqirq;
464 	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
465 	struct lro_ctrl			ifr_lc;
466 	struct grouptask        ifr_task;
467 	struct iflib_filter_info ifr_filter_info;
468 	iflib_dma_info_t		ifr_ifdi;
469 
470 	/* dynamically allocate if any drivers need a value substantially larger than this */
471 	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
472 #ifdef IFLIB_DIAGNOSTICS
473 	uint64_t ifr_cpu_exec_count[256];
474 #endif
475 }  __aligned(CACHE_LINE_SIZE);
476 
477 typedef struct if_rxsd {
478 	caddr_t *ifsd_cl;
479 	struct mbuf **ifsd_m;
480 	iflib_fl_t ifsd_fl;
481 	qidx_t ifsd_cidx;
482 } *if_rxsd_t;
483 
484 /* multiple of word size */
485 #ifdef __LP64__
486 #define PKT_INFO_SIZE	6
487 #define RXD_INFO_SIZE	5
488 #define PKT_TYPE uint64_t
489 #else
490 #define PKT_INFO_SIZE	11
491 #define RXD_INFO_SIZE	8
492 #define PKT_TYPE uint32_t
493 #endif
494 #define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
495 #define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
496 
497 typedef struct if_pkt_info_pad {
498 	PKT_TYPE pkt_val[PKT_INFO_SIZE];
499 } *if_pkt_info_pad_t;
500 typedef struct if_rxd_info_pad {
501 	PKT_TYPE rxd_val[RXD_INFO_SIZE];
502 } *if_rxd_info_pad_t;
503 
504 CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
505 CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
506 
507 
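/*
 * The *_pad overlays let the zeroing helpers below clear an entire
 * if_pkt_info/if_rxd_info with a few word-sized stores instead of a
 * bzero() call; the CTASSERTs above guarantee that each overlay
 * exactly covers the real structure on both 32-bit and 64-bit
 * platforms.
 */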
508 static inline void
509 pkt_info_zero(if_pkt_info_t pi)
510 {
511 	if_pkt_info_pad_t pi_pad;
512 
513 	pi_pad = (if_pkt_info_pad_t)pi;
514 	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
515 	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
516 #ifndef __LP64__
517 	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
518 	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
519 #endif
520 }
521 
522 static device_method_t iflib_pseudo_methods[] = {
523 	DEVMETHOD(device_attach, noop_attach),
524 	DEVMETHOD(device_detach, iflib_pseudo_detach),
525 	DEVMETHOD_END
526 };
527 
528 driver_t iflib_pseudodriver = {
529 	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
530 };
531 
532 static inline void
533 rxd_info_zero(if_rxd_info_t ri)
534 {
535 	if_rxd_info_pad_t ri_pad;
536 	int i;
537 
538 	ri_pad = (if_rxd_info_pad_t)ri;
539 	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
540 		ri_pad->rxd_val[i] = 0;
541 		ri_pad->rxd_val[i+1] = 0;
542 		ri_pad->rxd_val[i+2] = 0;
543 		ri_pad->rxd_val[i+3] = 0;
544 	}
545 #ifdef __LP64__
546 	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
547 #endif
548 }
549 
550 /*
551  * Only allow a single packet to take up at most 1/nth of the tx ring
552  */
553 #define MAX_SINGLE_PACKET_FRACTION 12
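/*
 * E.g. with a 1024-descriptor tx ring a single packet may consume at
 * most 1024 / 12 = 85 descriptors.
 */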
554 #define IF_BAD_DMA (bus_addr_t)-1
555 
556 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
557 
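/*
 * Locking model: ifc_ctx_sx is a sleepable sx lock serializing the
 * slow configuration paths (init, stop, ioctl), while ifc_state_mtx is
 * a plain mutex protecting cheap, frequently updated state such as the
 * ifc_flags word and link state.
 */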
558 #define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
559 #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
560 #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
561 #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
562 
563 
564 #define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
565 #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
566 #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
567 #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
568 
569 
570 
571 #define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
572 #define CALLOUT_UNLOCK(txq) 	mtx_unlock(&txq->ift_mtx)
573 
574 
575 /* Our boot-time initialization hook */
576 static int	iflib_module_event_handler(module_t, int, void *);
577 
578 static moduledata_t iflib_moduledata = {
579 	"iflib",
580 	iflib_module_event_handler,
581 	NULL
582 };
583 
584 DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
585 MODULE_VERSION(iflib, 1);
586 
587 MODULE_DEPEND(iflib, pci, 1, 1, 1);
588 MODULE_DEPEND(iflib, ether, 1, 1, 1);
589 
590 TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
591 TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
592 
593 #ifndef IFLIB_DEBUG_COUNTERS
594 #ifdef INVARIANTS
595 #define IFLIB_DEBUG_COUNTERS 1
596 #else
597 #define IFLIB_DEBUG_COUNTERS 0
598 #endif /* !INVARIANTS */
599 #endif
600 
601 static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
602                    "iflib driver parameters");
603 
604 /*
605  * XXX need to ensure that this can't accidentally cause the head to be moved backwards
606  */
607 static int iflib_min_tx_latency = 0;
608 SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
609 		   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
610 static int iflib_no_tx_batch = 0;
611 SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
612 		   &iflib_no_tx_batch, 0, "always process tx packets immediately instead of batching them, at the possible expense of throughput");
613 
614 
615 #if IFLIB_DEBUG_COUNTERS
616 
617 static int iflib_tx_seen;
618 static int iflib_tx_sent;
619 static int iflib_tx_encap;
620 static int iflib_rx_allocs;
621 static int iflib_fl_refills;
622 static int iflib_fl_refills_large;
623 static int iflib_tx_frees;
624 
625 SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
626 		   &iflib_tx_seen, 0, "# tx mbufs seen");
627 SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
628 		   &iflib_tx_sent, 0, "# tx mbufs sent");
629 SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
630 		   &iflib_tx_encap, 0, "# tx mbufs encapped");
631 SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
632 		   &iflib_tx_frees, 0, "# tx frees");
633 SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
634 		   &iflib_rx_allocs, 0, "# rx allocations");
635 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
636 		   &iflib_fl_refills, 0, "# refills");
637 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
638 		   &iflib_fl_refills_large, 0, "# large refills");
639 
640 
641 static int iflib_txq_drain_flushing;
642 static int iflib_txq_drain_oactive;
643 static int iflib_txq_drain_notready;
644 static int iflib_txq_drain_encapfail;
645 
646 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
647 		   &iflib_txq_drain_flushing, 0, "# drain flushes");
648 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
649 		   &iflib_txq_drain_oactive, 0, "# drain oactives");
650 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
651 		   &iflib_txq_drain_notready, 0, "# drain notready");
652 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
653 		   &iflib_txq_drain_encapfail, 0, "# drain encap fails");
654 
655 
656 static int iflib_encap_load_mbuf_fail;
657 static int iflib_encap_pad_mbuf_fail;
658 static int iflib_encap_txq_avail_fail;
659 static int iflib_encap_txd_encap_fail;
660 
661 SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
662 		   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
663 SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
664 		   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
665 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
666 		   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
667 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
668 		   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
669 
670 static int iflib_task_fn_rxs;
671 static int iflib_rx_intr_enables;
672 static int iflib_fast_intrs;
673 static int iflib_intr_link;
674 static int iflib_intr_msix;
675 static int iflib_rx_unavail;
676 static int iflib_rx_ctx_inactive;
677 static int iflib_rx_zero_len;
678 static int iflib_rx_if_input;
679 static int iflib_rx_mbuf_null;
680 static int iflib_rxd_flush;
681 
682 static int iflib_verbose_debug;
683 
684 SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
685 		   &iflib_intr_link, 0, "# intr link calls");
686 SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
687 		   &iflib_intr_msix, 0, "# intr msix calls");
688 SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
689 		   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
690 SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
691 		   &iflib_rx_intr_enables, 0, "# rx intr enables");
692 SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
693 		   &iflib_fast_intrs, 0, "# fast_intr calls");
694 SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
695 		   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
696 SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
697 		   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
698 SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
699 		   &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
700 SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
701 		   &iflib_rx_if_input, 0, "# times rxeof called if_input");
702 SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
703 		   &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
704 SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
705 		   &iflib_rxd_flush, 0, "# times rxd_flush called");
706 SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
707 		   &iflib_verbose_debug, 0, "enable verbose debugging");
708 
709 #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
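/*
 * DBG_COUNTER_INC(tx_seen), for example, expands to
 * atomic_add_int(&iflib_tx_seen, 1) here, and to nothing at all when
 * IFLIB_DEBUG_COUNTERS is disabled, so the counters cost nothing in
 * production builds.
 */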
710 static void
711 iflib_debug_reset(void)
712 {
713 	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
714 		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
715 		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
716 		iflib_txq_drain_notready = iflib_txq_drain_encapfail =
717 		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
718 		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
719 		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
720 		iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
721 		iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
722 		iflib_rx_mbuf_null = iflib_rxd_flush = 0;
723 }
724 
725 #else
726 #define DBG_COUNTER_INC(name)
727 static void iflib_debug_reset(void) {}
728 #endif
729 
730 #define IFLIB_DEBUG 0
731 
732 static void iflib_tx_structures_free(if_ctx_t ctx);
733 static void iflib_rx_structures_free(if_ctx_t ctx);
734 static int iflib_queues_alloc(if_ctx_t ctx);
735 static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
736 static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
737 static int iflib_qset_structures_setup(if_ctx_t ctx);
738 static int iflib_msix_init(if_ctx_t ctx);
739 static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
740 static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
741 static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
742 static int iflib_register(if_ctx_t);
743 static void iflib_init_locked(if_ctx_t ctx);
744 static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
745 static void iflib_add_device_sysctl_post(if_ctx_t ctx);
746 static void iflib_ifmp_purge(iflib_txq_t txq);
747 static void _iflib_pre_assert(if_softc_ctx_t scctx);
748 static void iflib_if_init_locked(if_ctx_t ctx);
749 #ifndef __NO_STRICT_ALIGNMENT
750 static struct mbuf * iflib_fixup_rx(struct mbuf *m);
751 #endif
752 
753 NETDUMP_DEFINE(iflib);
754 
755 #ifdef DEV_NETMAP
756 #include <sys/selinfo.h>
757 #include <net/netmap.h>
758 #include <dev/netmap/netmap_kern.h>
759 
760 MODULE_DEPEND(iflib, netmap, 1, 1, 1);
761 
762 static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);
763 
764 /*
765  * device-specific sysctl variables:
766  *
767  * iflib_crcstrip: 0: keep CRC in rx frames, 1: strip it (default).
768  *	During regular operations the CRC is stripped, but on some
769  *	hardware reception of frames not multiple of 64 is slower,
770  *	so using crcstrip=0 helps in benchmarks.
771  *
772  * iflib_rx_miss, iflib_rx_miss_bufs:
773  *	count packets that might be missed due to lost interrupts.
774  */
775 SYSCTL_DECL(_dev_netmap);
776 /*
777  * The xl driver by default strips CRCs and we do not override it.
778  */
779 
780 int iflib_crcstrip = 1;
781 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
782     CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");
783 
784 int iflib_rx_miss, iflib_rx_miss_bufs;
785 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
786     CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
787 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
788     CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");
789 
790 /*
791  * Register/unregister. We are already under netmap lock.
792  * Only called on the first register or the last unregister.
793  */
794 static int
795 iflib_netmap_register(struct netmap_adapter *na, int onoff)
796 {
797 	struct ifnet *ifp = na->ifp;
798 	if_ctx_t ctx = ifp->if_softc;
799 	int status;
800 
801 	CTX_LOCK(ctx);
802 	IFDI_INTR_DISABLE(ctx);
803 
804 	/* Tell the stack that the interface is no longer active */
805 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
806 
807 	if (!CTX_IS_VF(ctx))
808 		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
809 
810 	/* enable or disable flags and callbacks in na and ifp */
811 	if (onoff) {
812 		nm_set_native_flags(na);
813 	} else {
814 		nm_clear_native_flags(na);
815 	}
816 	iflib_stop(ctx);
817 	iflib_init_locked(ctx);
818 	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
819 	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
820 	if (status)
821 		nm_clear_native_flags(na);
822 	CTX_UNLOCK(ctx);
823 	return (status);
824 }
825 
826 static int
827 netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
828 {
829 	struct netmap_adapter *na = kring->na;
830 	u_int const lim = kring->nkr_num_slots - 1;
831 	u_int head = kring->rhead;
832 	struct netmap_ring *ring = kring->ring;
833 	bus_dmamap_t *map;
834 	struct if_rxd_update iru;
835 	if_ctx_t ctx = rxq->ifr_ctx;
836 	iflib_fl_t fl = &rxq->ifr_fl[0];
837 	uint32_t refill_pidx, nic_i;
838 
839 	if (nm_i == head && __predict_true(!init))
840 		return (0);
841 	iru_init(&iru, rxq, 0 /* flid */);
842 	map = fl->ifl_sds.ifsd_map;
843 	refill_pidx = netmap_idx_k2n(kring, nm_i);
844 	/*
845 	 * IMPORTANT: we must leave one free slot in the ring,
846 	 * so move head back by one unit
847 	 */
848 	head = nm_prev(head, lim);
849 	nic_i = UINT_MAX;
850 	while (nm_i != head) {
851 		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
852 			struct netmap_slot *slot = &ring->slot[nm_i];
853 			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
854 			uint32_t nic_i_dma = refill_pidx;
855 			nic_i = netmap_idx_k2n(kring, nm_i);
856 
857 			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
858 
859 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
860 			        return netmap_ring_reinit(kring);
861 
862 			fl->ifl_vm_addrs[tmp_pidx] = addr;
863 			if (__predict_false(init) && map) {
864 				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
865 			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
866 				/* buffer has changed, reload map */
867 				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
868 			}
869 			slot->flags &= ~NS_BUF_CHANGED;
870 
871 			nm_i = nm_next(nm_i, lim);
872 			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
873 			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
874 				continue;
875 
876 			iru.iru_pidx = refill_pidx;
877 			iru.iru_count = tmp_pidx+1;
878 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
879 
880 			refill_pidx = nic_i;
881 			if (map == NULL)
882 				continue;
883 
884 			for (int n = 0; n < iru.iru_count; n++) {
885 				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
886 						BUS_DMASYNC_PREREAD);
887 				/* XXX - change this to not use the netmap func*/
888 				nic_i_dma = nm_next(nic_i_dma, lim);
889 			}
890 		}
891 	}
892 	kring->nr_hwcur = head;
893 
894 	if (map)
895 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
896 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
897 	if (__predict_true(nic_i != UINT_MAX))
898 		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
899 	return (0);
900 }
901 
902 /*
903  * Reconcile kernel and user view of the transmit ring.
904  *
905  * All information is in the kring.
906  * Userspace wants to send packets up to the one before kring->rhead,
907  * kernel knows kring->nr_hwcur is the first unsent packet.
908  *
909  * Here we push packets out (as many as possible), and possibly
910  * reclaim buffers from previously completed transmission.
911  *
912  * The caller (netmap) guarantees that there is only one instance
913  * running at any time. Any interference with other driver
914  * methods should be handled by the individual drivers.
915  */
916 static int
917 iflib_netmap_txsync(struct netmap_kring *kring, int flags)
918 {
919 	struct netmap_adapter *na = kring->na;
920 	struct ifnet *ifp = na->ifp;
921 	struct netmap_ring *ring = kring->ring;
922 	u_int nm_i;	/* index into the netmap kring */
923 	u_int nic_i;	/* index into the NIC ring */
924 	u_int n;
925 	u_int const lim = kring->nkr_num_slots - 1;
926 	u_int const head = kring->rhead;
927 	struct if_pkt_info pi;
928 
929 	/*
930 	 * interrupts on every tx packet are expensive so request
931 	 * them every half ring, or where NS_REPORT is set
932 	 */
933 	u_int report_frequency = kring->nkr_num_slots >> 1;
934 	/* device-specific */
935 	if_ctx_t ctx = ifp->if_softc;
936 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
937 
938 	if (txq->ift_sds.ifsd_map)
939 		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
940 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
941 
942 
943 	/*
944 	 * First part: process new packets to send.
945 	 * nm_i is the current index in the netmap kring,
946 	 * nic_i is the corresponding index in the NIC ring.
947 	 *
948 	 * If we have packets to send (nm_i != head)
949 	 * iterate over the netmap ring, fetch length and update
950 	 * the corresponding slot in the NIC ring. Some drivers also
951 	 * need to update the buffer's physical address in the NIC slot
952 	 * even NS_BUF_CHANGED is not set (PNMB computes the addresses).
953 	 *
954 	 * The netmap_reload_map() calls is especially expensive,
955 	 * even when (as in this case) the tag is 0, so do only
956 	 * when the buffer has actually changed.
957 	 *
958 	 * If possible do not set the report/intr bit on all slots,
959 	 * but only a few times per ring or when NS_REPORT is set.
960 	 *
961 	 * Finally, on 10G and faster drivers, it might be useful
962 	 * to prefetch the next slot and txr entry.
963 	 */
964 
965 	nm_i = kring->nr_hwcur;
966 	if (nm_i != head) {	/* we have new packets to send */
967 		pkt_info_zero(&pi);
968 		pi.ipi_segs = txq->ift_segs;
969 		pi.ipi_qsidx = kring->ring_id;
970 		nic_i = netmap_idx_k2n(kring, nm_i);
971 
972 		__builtin_prefetch(&ring->slot[nm_i]);
973 		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
974 		if (txq->ift_sds.ifsd_map)
975 			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
976 
977 		for (n = 0; nm_i != head; n++) {
978 			struct netmap_slot *slot = &ring->slot[nm_i];
979 			u_int len = slot->len;
980 			uint64_t paddr;
981 			void *addr = PNMB(na, slot, &paddr);
982 			int flags = (slot->flags & NS_REPORT ||
983 				nic_i == 0 || nic_i == report_frequency) ?
984 				IPI_TX_INTR : 0;
985 
986 			/* device-specific */
987 			pi.ipi_len = len;
988 			pi.ipi_segs[0].ds_addr = paddr;
989 			pi.ipi_segs[0].ds_len = len;
990 			pi.ipi_nsegs = 1;
991 			pi.ipi_ndescs = 0;
992 			pi.ipi_pidx = nic_i;
993 			pi.ipi_flags = flags;
994 
995 			/* Fill the slot in the NIC ring. */
996 			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
997 
998 			/* prefetch for next round */
999 			__builtin_prefetch(&ring->slot[nm_i + 1]);
1000 			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
1001 			if (txq->ift_sds.ifsd_map) {
1002 				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
1003 
1004 				NM_CHECK_ADDR_LEN(na, addr, len);
1005 
1006 				if (slot->flags & NS_BUF_CHANGED) {
1007 					/* buffer has changed, reload map */
1008 					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
1009 				}
1010 				/* make sure changes to the buffer are synced */
1011 				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
1012 						BUS_DMASYNC_PREWRITE);
1013 			}
1014 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
1015 			nm_i = nm_next(nm_i, lim);
1016 			nic_i = nm_next(nic_i, lim);
1017 		}
1018 		kring->nr_hwcur = nm_i;
1019 
1020 		/* synchronize the NIC ring */
1021 		if (txq->ift_sds.ifsd_map)
1022 			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
1023 						BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1024 
1025 		/* (re)start the tx unit up to slot nic_i (excluded) */
1026 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
1027 	}
1028 
1029 	/*
1030 	 * Second part: reclaim buffers for completed transmissions.
1031 	 *
1032 	 * If there are unclaimed buffers, attempt to reclaim them.
1033 	 * If none are reclaimed, and TX IRQs are not in use, do an initial
1034 	 * minimal delay, then trigger the tx handler which will spin in the
1035 	 * group task queue.
1036 	 */
1037 	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1038 		if (iflib_tx_credits_update(ctx, txq)) {
1039 			/* some tx completed, increment avail */
1040 			nic_i = txq->ift_cidx_processed;
1041 			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
1042 		}
1043 	}
1044 	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
1045 		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1046 			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
1047 			    iflib_timer, txq, txq->ift_timer.c_cpu);
1048 		}
1049 	return (0);
1050 }
1051 
1052 /*
1053  * Reconcile kernel and user view of the receive ring.
1054  * Same as for the txsync, this routine must be efficient.
1055  * The caller guarantees a single invocation, but races against
1056  * the rest of the driver should be handled here.
1057  *
1058  * On call, kring->rhead is the first packet that userspace wants
1059  * to keep, and kring->rcur is the wakeup point.
1060  * The kernel has previously reported packets up to kring->rtail.
1061  *
1062  * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
1063  * of whether or not we received an interrupt.
1064  */
1065 static int
1066 iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
1067 {
1068 	struct netmap_adapter *na = kring->na;
1069 	struct netmap_ring *ring = kring->ring;
1070 	uint32_t nm_i;	/* index into the netmap ring */
1071 	uint32_t nic_i;	/* index into the NIC ring */
1072 	u_int i, n;
1073 	u_int const lim = kring->nkr_num_slots - 1;
1074 	u_int const head = kring->rhead;
1075 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1076 	struct if_rxd_info ri;
1077 
1078 	struct ifnet *ifp = na->ifp;
1079 	if_ctx_t ctx = ifp->if_softc;
1080 	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
1081 	iflib_fl_t fl = rxq->ifr_fl;
1082 	if (head > lim)
1083 		return netmap_ring_reinit(kring);
1084 
1085 	/* XXX check sync modes */
1086 	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
1087 		if (fl->ifl_sds.ifsd_map == NULL)
1088 			continue;
1089 		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
1090 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1091 	}
1092 	/*
1093 	 * First part: import newly received packets.
1094 	 *
1095 	 * nm_i is the index of the next free slot in the netmap ring,
1096 	 * nic_i is the index of the next received packet in the NIC ring,
1097 	 * and they may differ in case if_init() has been called while
1098 	 * in netmap mode. For the receive ring we have
1099 	 *
1100 	 *	nic_i = rxr->next_check;
1101 	 *	nm_i = kring->nr_hwtail (previous)
1102 	 * and
1103 	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1104 	 *
1105 	 * rxr->next_check is set to 0 on a ring reinit
1106 	 */
1107 	if (netmap_no_pendintr || force_update) {
1108 		int crclen = iflib_crcstrip ? 0 : 4;
1109 		int error, avail;
1110 
1111 		for (i = 0; i < rxq->ifr_nfl; i++) {
1112 			fl = &rxq->ifr_fl[i];
1113 			nic_i = fl->ifl_cidx;
1114 			nm_i = netmap_idx_n2k(kring, nic_i);
1115 			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
1116 			for (n = 0; avail > 0; n++, avail--) {
1117 				rxd_info_zero(&ri);
1118 				ri.iri_frags = rxq->ifr_frags;
1119 				ri.iri_qsidx = kring->ring_id;
1120 				ri.iri_ifp = ctx->ifc_ifp;
1121 				ri.iri_cidx = nic_i;
1122 
1123 				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1124 				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
1125 				ring->slot[nm_i].flags = 0;
1126 				if (fl->ifl_sds.ifsd_map)
1127 					bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
1128 							fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
1129 				nm_i = nm_next(nm_i, lim);
1130 				nic_i = nm_next(nic_i, lim);
1131 			}
1132 			if (n) { /* update the state variables */
1133 				if (netmap_no_pendintr && !force_update) {
1134 					/* diagnostics */
1135 					iflib_rx_miss++;
1136 					iflib_rx_miss_bufs += n;
1137 				}
1138 				fl->ifl_cidx = nic_i;
1139 				kring->nr_hwtail = nm_i;
1140 			}
1141 			kring->nr_kflags &= ~NKR_PENDINTR;
1142 		}
1143 	}
1144 	/*
1145 	 * Second part: skip past packets that userspace has released.
1146 	 * (kring->nr_hwcur to head excluded),
1147 	 * and make the buffers available for reception.
1148 	 * As usual nm_i is the index in the netmap ring,
1149 	 * nic_i is the index in the NIC ring, and
1150 	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1151 	 */
1152 	/* XXX not sure how this will work with multiple free lists */
1153 	nm_i = kring->nr_hwcur;
1154 
1155 	return (netmap_fl_refill(rxq, kring, nm_i, false));
1156 }
1157 
1158 static void
1159 iflib_netmap_intr(struct netmap_adapter *na, int onoff)
1160 {
1161 	struct ifnet *ifp = na->ifp;
1162 	if_ctx_t ctx = ifp->if_softc;
1163 
1164 	CTX_LOCK(ctx);
1165 	if (onoff) {
1166 		IFDI_INTR_ENABLE(ctx);
1167 	} else {
1168 		IFDI_INTR_DISABLE(ctx);
1169 	}
1170 	CTX_UNLOCK(ctx);
1171 }
1172 
1173 
1174 static int
1175 iflib_netmap_attach(if_ctx_t ctx)
1176 {
1177 	struct netmap_adapter na;
1178 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1179 
1180 	bzero(&na, sizeof(na));
1181 
1182 	na.ifp = ctx->ifc_ifp;
1183 	na.na_flags = NAF_BDG_MAYSLEEP;
1184 	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
1185 	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
1186 
1187 	na.num_tx_desc = scctx->isc_ntxd[0];
1188 	na.num_rx_desc = scctx->isc_nrxd[0];
1189 	na.nm_txsync = iflib_netmap_txsync;
1190 	na.nm_rxsync = iflib_netmap_rxsync;
1191 	na.nm_register = iflib_netmap_register;
1192 	na.nm_intr = iflib_netmap_intr;
1193 	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
1194 	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
1195 	return (netmap_attach(&na));
1196 }
1197 
1198 static void
1199 iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
1200 {
1201 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1202 	struct netmap_slot *slot;
1203 
1204 	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1205 	if (slot == NULL)
1206 		return;
1207 	if (txq->ift_sds.ifsd_map == NULL)
1208 		return;
1209 
1210 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
1211 
1212 		/*
1213 		 * In netmap mode, set the map for the packet buffer.
1214 		 * NOTE: Some drivers (not this one) also need to set
1215 		 * the physical buffer address in the NIC ring.
1216 		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
1217 		 * netmap slot index, si
1218 		 */
1219 		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
1220 		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
1221 	}
1222 }
1223 
1224 static void
1225 iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
1226 {
1227 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1228 	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
1229 	struct netmap_slot *slot;
1230 	uint32_t nm_i;
1231 
1232 	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1233 	if (slot == NULL)
1234 		return;
1235 	nm_i = netmap_idx_n2k(kring, 0);
1236 	netmap_fl_refill(rxq, kring, nm_i, true);
1237 }
1238 
1239 static void
1240 iflib_netmap_timer_adjust(if_ctx_t ctx, uint16_t txqid, uint32_t *reset_on)
1241 {
1242 	struct netmap_kring *kring;
1243 
1244 	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];
1245 
1246 	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
1247 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
1248 			netmap_tx_irq(ctx->ifc_ifp, txqid);
1249 		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
1250 			if (hz < 2000)
1251 				*reset_on = 1;
1252 			else
1253 				*reset_on = hz / 1000;
1254 		}
1255 	}
1256 }
1257 
1258 #define iflib_netmap_detach(ifp) netmap_detach(ifp)
1259 
1260 #else
1261 #define iflib_netmap_txq_init(ctx, txq)
1262 #define iflib_netmap_rxq_init(ctx, rxq)
1263 #define iflib_netmap_detach(ifp)
1264 
1265 #define iflib_netmap_attach(ctx) (0)
1266 #define netmap_rx_irq(ifp, qid, budget) (0)
1267 #define netmap_tx_irq(ifp, qid) do {} while (0)
1268 #define iflib_netmap_timer_adjust(ctx, txqid, reset_on)
1269 
1270 #endif
1271 
1272 #if defined(__i386__) || defined(__amd64__)
1273 static __inline void
1274 prefetch(void *x)
1275 {
1276 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1277 }
1278 static __inline void
1279 prefetch2cachelines(void *x)
1280 {
1281 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1282 #if (CACHE_LINE_SIZE < 128)
1283 	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
1284 #endif
1285 }
1286 #else
1287 #define prefetch(x)
1288 #define prefetch2cachelines(x)
1289 #endif
1290 
1291 static void
1292 iflib_gen_mac(if_ctx_t ctx)
1293 {
1294 	struct thread *td;
1295 	MD5_CTX mdctx;
1296 	char uuid[HOSTUUIDLEN+1];
1297 	char buf[HOSTUUIDLEN+16];
1298 	uint8_t *mac;
1299 	unsigned char digest[16];
1300 
1301 	td = curthread;
1302 	mac = ctx->ifc_mac;
1303 	uuid[HOSTUUIDLEN] = 0;
1304 	bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
1305 	snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
1306 	/*
1307 	 * Generate a pseudo-random, deterministic MAC
1308 	 * address based on the UUID and unit number.
1309 	 * The FreeBSD Foundation OUI of 58-9C-FC is used.
1310 	 */
1311 	MD5Init(&mdctx);
1312 	MD5Update(&mdctx, buf, strlen(buf));
1313 	MD5Final(digest, &mdctx);
1314 
1315 	mac[0] = 0x58;
1316 	mac[1] = 0x9C;
1317 	mac[2] = 0xFC;
1318 	mac[3] = digest[0];
1319 	mac[4] = digest[1];
1320 	mac[5] = digest[2];
1321 }
1322 
1323 static void
1324 iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
1325 {
1326 	iflib_fl_t fl;
1327 
1328 	fl = &rxq->ifr_fl[flid];
1329 	iru->iru_paddrs = fl->ifl_bus_addrs;
1330 	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
1331 	iru->iru_idxs = fl->ifl_rxd_idxs;
1332 	iru->iru_qsidx = rxq->ifr_id;
1333 	iru->iru_buf_size = fl->ifl_buf_size;
1334 	iru->iru_flidx = fl->ifl_id;
1335 }
1336 
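/*
 * bus_dmamap_load() callback: busdma reports the resulting segment
 * list here rather than returning it.  The tag created below allows
 * exactly one segment, so segs[0] describes the whole mapping; on
 * error the callback leaves *arg untouched and the caller sees the
 * IF_BAD_DMA sentinel it preloaded.
 */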
1337 static void
1338 _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
1339 {
1340 	if (err)
1341 		return;
1342 	*(bus_addr_t *) arg = segs[0].ds_addr;
1343 }
1344 
1345 int
1346 iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
1347 {
1348 	int err;
1349 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1350 	device_t dev = ctx->ifc_dev;
1351 
1352 	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
1353 
1354 	err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1355 				sctx->isc_q_align, 0,	/* alignment, bounds */
1356 				BUS_SPACE_MAXADDR,	/* lowaddr */
1357 				BUS_SPACE_MAXADDR,	/* highaddr */
1358 				NULL, NULL,		/* filter, filterarg */
1359 				size,			/* maxsize */
1360 				1,			/* nsegments */
1361 				size,			/* maxsegsize */
1362 				BUS_DMA_ALLOCNOW,	/* flags */
1363 				NULL,			/* lockfunc */
1364 				NULL,			/* lockarg */
1365 				&dma->idi_tag);
1366 	if (err) {
1367 		device_printf(dev,
1368 		    "%s: bus_dma_tag_create failed: %d\n",
1369 		    __func__, err);
1370 		goto fail_0;
1371 	}
1372 
1373 	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
1374 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
1375 	if (err) {
1376 		device_printf(dev,
1377 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
1378 		    __func__, (uintmax_t)size, err);
1379 		goto fail_1;
1380 	}
1381 
1382 	dma->idi_paddr = IF_BAD_DMA;
1383 	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
1384 	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
1385 	if (err || dma->idi_paddr == IF_BAD_DMA) {
1386 		device_printf(dev,
1387 		    "%s: bus_dmamap_load failed: %d\n",
1388 		    __func__, err);
1389 		goto fail_2;
1390 	}
1391 
1392 	dma->idi_size = size;
1393 	return (0);
1394 
1395 fail_2:
1396 	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1397 fail_1:
1398 	bus_dma_tag_destroy(dma->idi_tag);
1399 fail_0:
1400 	dma->idi_tag = NULL;
1401 
1402 	return (err);
1403 }
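/*
 * Typical driver-side usage of iflib_dma_alloc() (a sketch, not taken
 * from any particular driver):
 *
 *	struct iflib_dma_info di;
 *
 *	if (iflib_dma_alloc(ctx, size, &di, BUS_DMA_NOWAIT) == 0) {
 *		... program the hardware with di.idi_paddr,
 *		    access the area through di.idi_vaddr ...
 *		iflib_dma_free(&di);
 *	}
 */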
1404 
1405 int
1406 iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
1407 {
1408 	int i, err = 0;
1409 	iflib_dma_info_t *dmaiter;
1410 
1411 	dmaiter = dmalist;
1412 	for (i = 0; i < count; i++, dmaiter++) {
1413 		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
1414 			break;
1415 	}
1416 	if (err)
1417 		iflib_dma_free_multi(dmalist, i);
1418 	return (err);
1419 }
1420 
1421 void
1422 iflib_dma_free(iflib_dma_info_t dma)
1423 {
1424 	if (dma->idi_tag == NULL)
1425 		return;
1426 	if (dma->idi_paddr != IF_BAD_DMA) {
1427 		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
1428 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1429 		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
1430 		dma->idi_paddr = IF_BAD_DMA;
1431 	}
1432 	if (dma->idi_vaddr != NULL) {
1433 		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1434 		dma->idi_vaddr = NULL;
1435 	}
1436 	bus_dma_tag_destroy(dma->idi_tag);
1437 	dma->idi_tag = NULL;
1438 }
1439 
1440 void
1441 iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
1442 {
1443 	int i;
1444 	iflib_dma_info_t *dmaiter = dmalist;
1445 
1446 	for (i = 0; i < count; i++, dmaiter++)
1447 		iflib_dma_free(*dmaiter);
1448 }
1449 
1450 #ifdef EARLY_AP_STARTUP
1451 static const int iflib_started = 1;
1452 #else
1453 /*
1454  * We used to abuse the smp_started flag to decide if the queues have been
1455  * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
1456  * That gave bad races, since the SYSINIT() runs strictly after smp_started
1457  * is set.  Run a SYSINIT() strictly after that to just set a usable
1458  * completion flag.
1459  */
1460 
1461 static int iflib_started;
1462 
1463 static void
1464 iflib_record_started(void *arg)
1465 {
1466 	iflib_started = 1;
1467 }
1468 
1469 SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
1470 	iflib_record_started, NULL);
1471 #endif
1472 
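/*
 * Interrupt filters.  These run in primary interrupt context where
 * sleeping is forbidden, so they only run the driver's optional
 * filter, hand the real work to a grouptask, and report the interrupt
 * fully serviced by returning FILTER_HANDLED.
 */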
1473 static int
1474 iflib_fast_intr(void *arg)
1475 {
1476 	iflib_filter_info_t info = arg;
1477 	struct grouptask *gtask = info->ifi_task;
1478 	if (!iflib_started)
1479 		return (FILTER_HANDLED);
1480 
1481 	DBG_COUNTER_INC(fast_intrs);
1482 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
1483 		return (FILTER_HANDLED);
1484 
1485 	GROUPTASK_ENQUEUE(gtask);
1486 	return (FILTER_HANDLED);
1487 }
1488 
1489 static int
1490 iflib_fast_intr_rxtx(void *arg)
1491 {
1492 	iflib_filter_info_t info = arg;
1493 	struct grouptask *gtask = info->ifi_task;
1494 	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
1495 	if_ctx_t ctx = rxq->ifr_ctx;
1496 	int i, cidx;
1497 
1498 	if (!iflib_started)
1499 		return (FILTER_HANDLED);
1500 
1501 	DBG_COUNTER_INC(fast_intrs);
1502 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
1503 		return (FILTER_HANDLED);
1504 
1505 	MPASS(rxq->ifr_ntxqirq);
1506 	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
1507 		qidx_t txqid = rxq->ifr_txqid[i];
1508 
1509 		ctx = rxq->ifr_ctx;
1510 
1511 		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
1512 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
1513 			continue;
1514 		}
1515 		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
1516 	}
1517 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
1518 		cidx = rxq->ifr_cq_cidx;
1519 	else
1520 		cidx = rxq->ifr_fl[0].ifl_cidx;
1521 	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
1522 		GROUPTASK_ENQUEUE(gtask);
1523 	else
1524 		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
1525 	return (FILTER_HANDLED);
1526 }
1527 
1528 
1529 static int
1530 iflib_fast_intr_ctx(void *arg)
1531 {
1532 	iflib_filter_info_t info = arg;
1533 	struct grouptask *gtask = info->ifi_task;
1534 
1535 	if (!iflib_started)
1536 		return (FILTER_HANDLED);
1537 
1538 	DBG_COUNTER_INC(fast_intrs);
1539 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
1540 		return (FILTER_HANDLED);
1541 
1542 	GROUPTASK_ENQUEUE(gtask);
1543 	return (FILTER_HANDLED);
1544 }
1545 
1546 static int
1547 _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
1548 		 driver_filter_t filter, driver_intr_t handler, void *arg,
1549 		 const char *name)
1550 {
1551 	int rc, flags;
1552 	struct resource *res;
1553 	void *tag = NULL;
1554 	device_t dev = ctx->ifc_dev;
1555 
1556 	flags = RF_ACTIVE;
1557 	if (ctx->ifc_flags & IFC_LEGACY)
1558 		flags |= RF_SHAREABLE;
1559 	MPASS(rid < 512);
1560 	irq->ii_rid = rid;
1561 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
1562 	if (res == NULL) {
1563 		device_printf(dev,
1564 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
1565 		return (ENOMEM);
1566 	}
1567 	irq->ii_res = res;
1568 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
1569 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
1570 						filter, handler, arg, &tag);
1571 	if (rc != 0) {
1572 		device_printf(dev,
1573 		    "failed to setup interrupt for rid %d, name %s: %d\n",
1574 		    rid, name ? name : "unknown", rc);
1575 		return (rc);
1576 	} else if (name)
1577 		bus_describe_intr(dev, res, tag, "%s", name);
1578 
1579 	irq->ii_tag = tag;
1580 	return (0);
1581 }
1582 
1583 
1584 /*********************************************************************
1585  *
1586  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1587  *  the information needed to transmit a packet on the wire. This is
1588  *  called only once at attach, setup is done every reset.
1589  *
1590  **********************************************************************/
1591 
1592 static int
1593 iflib_txsd_alloc(iflib_txq_t txq)
1594 {
1595 	if_ctx_t ctx = txq->ift_ctx;
1596 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1597 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1598 	device_t dev = ctx->ifc_dev;
1599 	bus_size_t tsomaxsize;
1600 	int err, nsegments, ntsosegments;
1601 
1602 	nsegments = scctx->isc_tx_nsegments;
1603 	ntsosegments = scctx->isc_tx_tso_segments_max;
1604 	tsomaxsize = scctx->isc_tx_tso_size_max;
1605 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
1606 		tsomaxsize += sizeof(struct ether_vlan_header);
1607 	MPASS(scctx->isc_ntxd[0] > 0);
1608 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
1609 	MPASS(nsegments > 0);
1610 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
1611 		MPASS(ntsosegments > 0);
1612 		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
1613 	}
1614 
1615 	/*
1616 	 * Setup DMA descriptor areas.
1617 	 */
1618 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1619 			       1, 0,			/* alignment, bounds */
1620 			       BUS_SPACE_MAXADDR,	/* lowaddr */
1621 			       BUS_SPACE_MAXADDR,	/* highaddr */
1622 			       NULL, NULL,		/* filter, filterarg */
1623 			       sctx->isc_tx_maxsize,		/* maxsize */
1624 			       nsegments,	/* nsegments */
1625 			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
1626 			       0,			/* flags */
1627 			       NULL,			/* lockfunc */
1628 			       NULL,			/* lockfuncarg */
1629 			       &txq->ift_desc_tag))) {
1630 		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
1631 		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
1632 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
1633 		goto fail;
1634 	}
1635 	if ((if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) &&
1636 	    (err = bus_dma_tag_create(bus_get_dma_tag(dev),
1637 			       1, 0,			/* alignment, bounds */
1638 			       BUS_SPACE_MAXADDR,	/* lowaddr */
1639 			       BUS_SPACE_MAXADDR,	/* highaddr */
1640 			       NULL, NULL,		/* filter, filterarg */
1641 			       tsomaxsize,		/* maxsize */
1642 			       ntsosegments,	/* nsegments */
1643 			       sctx->isc_tso_maxsegsize,/* maxsegsize */
1644 			       0,			/* flags */
1645 			       NULL,			/* lockfunc */
1646 			       NULL,			/* lockfuncarg */
1647 			       &txq->ift_tso_desc_tag))) {
1648 		device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);
1649 
1650 		goto fail;
1651 	}
1652 	if (!(txq->ift_sds.ifsd_flags =
1653 	    (uint8_t *) malloc(sizeof(uint8_t) *
1654 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1655 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
1656 		err = ENOMEM;
1657 		goto fail;
1658 	}
1659 	if (!(txq->ift_sds.ifsd_m =
1660 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1661 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1662 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
1663 		err = ENOMEM;
1664 		goto fail;
1665 	}
1666 
1667         /* Create the descriptor buffer dma maps */
1668 #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
1669 	if ((ctx->ifc_flags & IFC_DMAR) == 0)
1670 		return (0);
1671 
1672 	if (!(txq->ift_sds.ifsd_map =
1673 	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1674 		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
1675 		err = ENOMEM;
1676 		goto fail;
1677 	}
1678 
1679 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1680 		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
1681 		if (err != 0) {
1682 			device_printf(dev, "Unable to create TX DMA map\n");
1683 			goto fail;
1684 		}
1685 	}
1686 #endif
1687 	return (0);
1688 fail:
1689 	/* We free all, it handles case where we are in the middle */
1690 	iflib_tx_structures_free(ctx);
1691 	return (err);
1692 }
1693 
1694 static void
1695 iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
1696 {
1697 	bus_dmamap_t map;
1698 
1699 	map = NULL;
1700 	if (txq->ift_sds.ifsd_map != NULL)
1701 		map = txq->ift_sds.ifsd_map[i];
1702 	if (map != NULL) {
1703 		bus_dmamap_unload(txq->ift_desc_tag, map);
1704 		bus_dmamap_destroy(txq->ift_desc_tag, map);
1705 		txq->ift_sds.ifsd_map[i] = NULL;
1706 	}
1707 }
1708 
1709 static void
1710 iflib_txq_destroy(iflib_txq_t txq)
1711 {
1712 	if_ctx_t ctx = txq->ift_ctx;
1713 
1714 	for (int i = 0; i < txq->ift_size; i++)
1715 		iflib_txsd_destroy(ctx, txq, i);
1716 	if (txq->ift_sds.ifsd_map != NULL) {
1717 		free(txq->ift_sds.ifsd_map, M_IFLIB);
1718 		txq->ift_sds.ifsd_map = NULL;
1719 	}
1720 	if (txq->ift_sds.ifsd_m != NULL) {
1721 		free(txq->ift_sds.ifsd_m, M_IFLIB);
1722 		txq->ift_sds.ifsd_m = NULL;
1723 	}
1724 	if (txq->ift_sds.ifsd_flags != NULL) {
1725 		free(txq->ift_sds.ifsd_flags, M_IFLIB);
1726 		txq->ift_sds.ifsd_flags = NULL;
1727 	}
1728 	if (txq->ift_desc_tag != NULL) {
1729 		bus_dma_tag_destroy(txq->ift_desc_tag);
1730 		txq->ift_desc_tag = NULL;
1731 	}
1732 	if (txq->ift_tso_desc_tag != NULL) {
1733 		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
1734 		txq->ift_tso_desc_tag = NULL;
1735 	}
1736 }
1737 
1738 static void
1739 iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
1740 {
1741 	struct mbuf **mp;
1742 
1743 	mp = &txq->ift_sds.ifsd_m[i];
1744 	if (*mp == NULL)
1745 		return;
1746 
1747 	if (txq->ift_sds.ifsd_map != NULL) {
1748 		bus_dmamap_sync(txq->ift_desc_tag,
1749 				txq->ift_sds.ifsd_map[i],
1750 				BUS_DMASYNC_POSTWRITE);
1751 		bus_dmamap_unload(txq->ift_desc_tag,
1752 				  txq->ift_sds.ifsd_map[i]);
1753 	}
1754 	m_free(*mp);
1755 	DBG_COUNTER_INC(tx_frees);
1756 	*mp = NULL;
1757 }
1758 
1759 static int
1760 iflib_txq_setup(iflib_txq_t txq)
1761 {
1762 	if_ctx_t ctx = txq->ift_ctx;
1763 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1764 	iflib_dma_info_t di;
1765 	int i;
1766 
1767 	/* Set number of descriptors available */
1768 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
1769 	/* XXX make configurable */
1770 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
1771 
1772 	/* Reset indices */
1773 	txq->ift_cidx_processed = 0;
1774 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
1775 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
1776 
1777 	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
1778 		bzero((void *)di->idi_vaddr, di->idi_size);
1779 
1780 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
1781 	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
1782 		bus_dmamap_sync(di->idi_tag, di->idi_map,
1783 						BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1784 	return (0);
1785 }
1786 
1787 /*********************************************************************
1788  *
1789  *  Allocate memory for rx_buffer structures. Since we use one
1790  *  rx_buffer per received packet, the maximum number of rx_buffers
1791  *  that we'll need is equal to the number of receive descriptors
1792  *  that we've allocated.
1793  *
1794  **********************************************************************/
1795 static int
1796 iflib_rxsd_alloc(iflib_rxq_t rxq)
1797 {
1798 	if_ctx_t ctx = rxq->ifr_ctx;
1799 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1800 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1801 	device_t dev = ctx->ifc_dev;
1802 	iflib_fl_t fl;
1803 	int			err;
1804 
1805 	MPASS(scctx->isc_nrxd[0] > 0);
1806 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
1807 
1808 	fl = rxq->ifr_fl;
1809 	for (int i = 0; i <  rxq->ifr_nfl; i++, fl++) {
1810 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* not necessarily the same as nrxd[0] */
1811 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1812 					 1, 0,			/* alignment, bounds */
1813 					 BUS_SPACE_MAXADDR,	/* lowaddr */
1814 					 BUS_SPACE_MAXADDR,	/* highaddr */
1815 					 NULL, NULL,		/* filter, filterarg */
1816 					 sctx->isc_rx_maxsize,	/* maxsize */
1817 					 sctx->isc_rx_nsegments,	/* nsegments */
1818 					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
1819 					 0,			/* flags */
1820 					 NULL,			/* lockfunc */
1821 					 NULL,			/* lockarg */
1822 					 &fl->ifl_desc_tag);
1823 		if (err) {
1824 			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
1825 				__func__, err);
1826 			goto fail;
1827 		}
1828 		if (!(fl->ifl_sds.ifsd_flags =
1829 		      (uint8_t *) malloc(sizeof(uint8_t) *
1830 					 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1831 			device_printf(dev, "Unable to allocate rx_buffer memory\n");
1832 			err = ENOMEM;
1833 			goto fail;
1834 		}
1835 		if (!(fl->ifl_sds.ifsd_m =
1836 		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
1837 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1838 			device_printf(dev, "Unable to allocate rx_buffer memory\n");
1839 			err = ENOMEM;
1840 			goto fail;
1841 		}
1842 		if (!(fl->ifl_sds.ifsd_cl =
1843 		      (caddr_t *) malloc(sizeof(caddr_t) *
1844 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1845 			device_printf(dev, "Unable to allocate rx_buffer memory\n");
1846 			err = ENOMEM;
1847 			goto fail;
1848 		}
1849 
1850 		/* Create the descriptor buffer dma maps */
1851 #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
1852 		if ((ctx->ifc_flags & IFC_DMAR) == 0)
1853 			continue;
1854 
1855 		if (!(fl->ifl_sds.ifsd_map =
1856 		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1857 			device_printf(dev, "Unable to allocate rx_buffer map memory\n");
1858 			err = ENOMEM;
1859 			goto fail;
1860 		}
1861 
1862 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
1863 			err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
1864 			if (err != 0) {
1865 				device_printf(dev, "Unable to create RX buffer DMA map\n");
1866 				goto fail;
1867 			}
1868 		}
1869 #endif
1870 	}
1871 	return (0);
1872 
1873 fail:
1874 	iflib_rx_structures_free(ctx);
1875 	return (err);
1876 }
1877 
1878 
1879 /*
1880  * Internal service routines
1881  */
1882 
1883 struct rxq_refill_cb_arg {
1884 	int               error;
1885 	bus_dma_segment_t seg;
1886 	int               nseg;
1887 };
1888 
1889 static void
1890 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1891 {
1892 	struct rxq_refill_cb_arg *cb_arg = arg;
1893 
1894 	cb_arg->error = error;
1895 	cb_arg->seg = segs[0];
1896 	cb_arg->nseg = nseg;
1897 }
1898 
1899 
1900 #ifdef ACPI_DMAR
1901 #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR)
1902 #else
1903 #define IS_DMAR(ctx) (0)
1904 #endif
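/*
 * IS_DMAR reports whether the device is behind an active DMA remapping
 * unit.  The refill path below uses it to choose between a cheap
 * pmap_kextract() and a full bus_dmamap_load() for each cluster.
 */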
1905 
1906 /**
1907  *	_iflib_fl_refill - refill an rxq free-buffer list
1908  *	@ctx: the iflib context
1909  *	@fl: the free list to refill
1910  *	@count: the number of new buffers to allocate
1911  *
1912  *	(Re)populate an rxq free-buffer list with up to @count new packet buffers.
1913  *	The caller must assure that @count does not exceed the queue's capacity.
1914  */
1915 static void
1916 _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
1917 {
1918 	struct mbuf *m;
1919 	int idx, frag_idx = fl->ifl_fragidx;
1920 	int pidx = fl->ifl_pidx;
1921 	caddr_t cl, *sd_cl;
1922 	struct mbuf **sd_m;
1923 	uint8_t *sd_flags;
1924 	struct if_rxd_update iru;
1925 	bus_dmamap_t *sd_map;
1926 	int n, i = 0;
1927 	uint64_t bus_addr;
1928 	int err;
1929 	qidx_t credits;
1930 
1931 	sd_m = fl->ifl_sds.ifsd_m;
1932 	sd_map = fl->ifl_sds.ifsd_map;
1933 	sd_cl = fl->ifl_sds.ifsd_cl;
1934 	sd_flags = fl->ifl_sds.ifsd_flags;
1935 	idx = pidx;
1936 	credits = fl->ifl_credits;
1937 
1938 	n  = count;
1939 	MPASS(n > 0);
1940 	MPASS(credits + n <= fl->ifl_size);
1941 
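	/*
	 * Sanity-check the producer index against the consumer index for
	 * each possible ring layout: producer behind the consumer, equal
	 * to it with the ring not yet full, or ahead of it with wraparound.
	 */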
1942 	if (pidx < fl->ifl_cidx)
1943 		MPASS(pidx + n <= fl->ifl_cidx);
1944 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
1945 		MPASS(fl->ifl_gen == 0);
1946 	if (pidx > fl->ifl_cidx)
1947 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
1948 
1949 	DBG_COUNTER_INC(fl_refills);
1950 	if (n > 8)
1951 		DBG_COUNTER_INC(fl_refills_large);
1952 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
1953 	while (n--) {
1954 		/*
1955 		 * We allocate an uninitialized mbuf + cluster; the mbuf is
1956 		 * initialized after rx.
1957 		 *
1958 		 * If the cluster is still set, a minimum-sized packet was received and the cluster can be reused.
1959 		 */
1960 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,  &frag_idx);
1961 		if ((frag_idx < 0) || (frag_idx >= fl->ifl_size))
1962 			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
1963 		if ((cl = sd_cl[frag_idx]) == NULL) {
1964 			if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
1965 				break;
1966 #if MEMORY_LOGGING
1967 			fl->ifl_cl_enqueued++;
1968 #endif
1969 		}
1970 		if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
1971 			break;
1972 		}
1973 #if MEMORY_LOGGING
1974 		fl->ifl_m_enqueued++;
1975 #endif
1976 
1977 		DBG_COUNTER_INC(rx_allocs);
1978 #if defined(__i386__) || defined(__amd64__)
1979 		if (!IS_DMAR(ctx)) {
1980 			bus_addr = pmap_kextract((vm_offset_t)cl);
1981 		} else
1982 #endif
1983 		{
1984 			struct rxq_refill_cb_arg cb_arg;
1985 
1986 			cb_arg.error = 0;
1987 			MPASS(sd_map != NULL);
1988 			MPASS(sd_map[frag_idx] != NULL);
1989 			err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
1990 		         cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
1991 			bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
1992 					BUS_DMASYNC_PREREAD);
1993 
1994 			if (err != 0 || cb_arg.error) {
1995 				/* XXX should !zone_pack clusters be freed as well? */
1996 				if (fl->ifl_zone == zone_pack) {
1997 					uma_zfree(fl->ifl_zone, cl);
1998 					sd_cl[frag_idx] = NULL;
1999 				}
2000 				m_free(m);
2001 				n = 0;
2002 				goto done;
2003 			}
2004 			bus_addr = cb_arg.seg.ds_addr;
2005 		}
2006 		bit_set(fl->ifl_rx_bitmap, frag_idx);
2007 		sd_flags[frag_idx] |= RX_SW_DESC_INUSE;
2008 
2009 		MPASS(sd_m[frag_idx] == NULL);
2010 		sd_cl[frag_idx] = cl;
2011 		sd_m[frag_idx] = m;
2012 		fl->ifl_rxd_idxs[i] = frag_idx;
2013 		fl->ifl_bus_addrs[i] = bus_addr;
2014 		fl->ifl_vm_addrs[i] = cl;
2015 		credits++;
2016 		i++;
2017 		MPASS(credits <= fl->ifl_size);
2018 		if (++idx == fl->ifl_size) {
2019 			fl->ifl_gen = 1;
2020 			idx = 0;
2021 		}
2022 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
2023 			iru.iru_pidx = pidx;
2024 			iru.iru_count = i;
2025 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2026 			i = 0;
2027 			pidx = idx;
2028 			fl->ifl_pidx = idx;
2029 			fl->ifl_credits = credits;
2030 		}
2031 
2032 	}
2033 done:
2034 	if (i) {
2035 		iru.iru_pidx = pidx;
2036 		iru.iru_count = i;
2037 		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2038 		fl->ifl_pidx = idx;
2039 		fl->ifl_credits = credits;
2040 	}
2041 	DBG_COUNTER_INC(rxd_flush);
2042 	if (fl->ifl_pidx == 0)
2043 		pidx = fl->ifl_size - 1;
2044 	else
2045 		pidx = fl->ifl_pidx - 1;
2046 
2047 	if (sd_map)
2048 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2049 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2050 	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
2051 	fl->ifl_fragidx = frag_idx;
2052 }
2053 
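/*
 * Refill fl with at most `max` buffers, further capped by the number of
 * currently unused descriptors.  One slot is always left empty (see the
 * comment below) so that pidx never catches up with cidx.
 */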
2054 static __inline void
2055 __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
2056 {
2057 	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
2058 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
2059 #ifdef INVARIANTS
2060 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
2061 #endif
2062 
2063 	MPASS(fl->ifl_credits <= fl->ifl_size);
2064 	MPASS(reclaimable == delta);
2065 
2066 	if (reclaimable > 0)
2067 		_iflib_fl_refill(ctx, fl, min(max, reclaimable));
2068 }
2069 
2070 static void
2071 iflib_fl_bufs_free(iflib_fl_t fl)
2072 {
2073 	iflib_dma_info_t idi = fl->ifl_ifdi;
2074 	uint32_t i;
2075 
2076 	for (i = 0; i < fl->ifl_size; i++) {
2077 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2078 		uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i];
2079 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
2080 
2081 		if (*sd_flags & RX_SW_DESC_INUSE) {
2082 			if (fl->ifl_sds.ifsd_map != NULL) {
2083 				bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i];
2084 				bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
2085 				if (fl->ifl_rxq->ifr_ctx->ifc_in_detach)
2086 					bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
2087 			}
2088 			if (*sd_m != NULL) {
2089 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2090 				uma_zfree(zone_mbuf, *sd_m);
2091 			}
2092 			if (*sd_cl != NULL)
2093 				uma_zfree(fl->ifl_zone, *sd_cl);
2094 			*sd_flags = 0;
2095 		} else {
2096 			MPASS(*sd_cl == NULL);
2097 			MPASS(*sd_m == NULL);
2098 		}
2099 #if MEMORY_LOGGING
2100 		fl->ifl_m_dequeued++;
2101 		fl->ifl_cl_dequeued++;
2102 #endif
2103 		*sd_cl = NULL;
2104 		*sd_m = NULL;
2105 	}
2106 #ifdef INVARIANTS
2107 	for (i = 0; i < fl->ifl_size; i++) {
2108 		MPASS(fl->ifl_sds.ifsd_flags[i] == 0);
2109 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
2110 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
2111 	}
2112 #endif
2113 	/*
2114 	 * Reset free list values
2115 	 */
2116 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
2117 	bzero(idi->idi_vaddr, idi->idi_size);
2118 }
2119 
2120 /*********************************************************************
2121  *
2122  *  Initialize a receive ring and its buffers.
2123  *
2124  **********************************************************************/
2125 static int
2126 iflib_fl_setup(iflib_fl_t fl)
2127 {
2128 	iflib_rxq_t rxq = fl->ifl_rxq;
2129 	if_ctx_t ctx = rxq->ifr_ctx;
2130 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2131 
2132 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
2133 	/*
2134 	** Free current RX buffer structs and their mbufs
2135 	*/
2136 	iflib_fl_bufs_free(fl);
2137 	/* Now replenish the mbufs */
2138 	MPASS(fl->ifl_credits == 0);
2139 	/*
2140 	 * XXX don't set the max_frame_size larger
2141 	 * than the hardware can handle
2142 	 */
2143 	if (scctx->isc_max_frame_size <= 2048)
2144 		fl->ifl_buf_size = MCLBYTES;
2145 #ifndef CONTIGMALLOC_WORKS
2146 	else
2147 		fl->ifl_buf_size = MJUMPAGESIZE;
2148 #else
2149 	else if (scctx->isc_max_frame_size <= 4096)
2150 		fl->ifl_buf_size = MJUMPAGESIZE;
2151 	else if (scctx->isc_max_frame_size <= 9216)
2152 		fl->ifl_buf_size = MJUM9BYTES;
2153 	else
2154 		fl->ifl_buf_size = MJUM16BYTES;
2155 #endif
2156 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
2157 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
2158 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
2159 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
2160 
2161 	/*
2162 	 * Avoid pre-allocating zillions of clusters to an idle card,
2163 	 * potentially speeding up attach.
2164 	 */
2165 	_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
2166 	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
2167 	/* handle allocation failure */
2168 	if (min(128, fl->ifl_size) != fl->ifl_credits)
2169 		return (ENOBUFS);
2172 	MPASS(rxq != NULL);
2173 	MPASS(fl->ifl_ifdi != NULL);
2174 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2175 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2176 	return (0);
2177 }
2178 
2179 /*********************************************************************
2180  *
2181  *  Free receive ring data structures
2182  *
2183  **********************************************************************/
2184 static void
2185 iflib_rx_sds_free(iflib_rxq_t rxq)
2186 {
2187 	iflib_fl_t fl;
2188 	int i;
2189 
2190 	if (rxq->ifr_fl != NULL) {
2191 		for (i = 0; i < rxq->ifr_nfl; i++) {
2192 			fl = &rxq->ifr_fl[i];
2193 			if (fl->ifl_desc_tag != NULL) {
2194 				bus_dma_tag_destroy(fl->ifl_desc_tag);
2195 				fl->ifl_desc_tag = NULL;
2196 			}
2197 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2198 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2199 			/* XXX destroy maps first */
2200 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2201 			fl->ifl_sds.ifsd_m = NULL;
2202 			fl->ifl_sds.ifsd_cl = NULL;
2203 			fl->ifl_sds.ifsd_map = NULL;
2204 		}
2205 		free(rxq->ifr_fl, M_IFLIB);
2206 		rxq->ifr_fl = NULL;
2207 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
2208 	}
2209 }
2210 
2211 /*
2212  * MI (machine-independent) logic
2213  */
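/*
 * Per-txq watchdog, rearmed every hz/2 ticks.  It gives the driver a
 * chance to poll via IFDI_TIMER(), marks a stalled mp_ring HUNG, and
 * schedules a watchdog reset once a hung queue stops making cleaning
 * progress.
 */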
2215 static void
2216 iflib_timer(void *arg)
2217 {
2218 	iflib_txq_t txq = arg;
2219 	if_ctx_t ctx = txq->ift_ctx;
2220 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2221 	uint64_t this_tick = ticks;
2222 	uint32_t reset_on = hz / 2;
2223 
2224 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
2225 		return;
2226 	/*
2227 	** Check on the state of the TX queue(s); this
2228 	** can be done without the lock because it's RO
2229 	** and the HUNG state will be static if set.
2230 	*/
2231 	if (this_tick - txq->ift_last_timer_tick >= hz / 2) {
2232 		txq->ift_last_timer_tick = this_tick;
2233 		IFDI_TIMER(ctx, txq->ift_id);
2234 		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2235 		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2236 		     (scctx->isc_pause_frames == 0)))
2237 			goto hung;
2238 
2239 		if (ifmp_ring_is_stalled(txq->ift_br))
2240 			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2241 		txq->ift_cleaned_prev = txq->ift_cleaned;
2242 	}
2243 #ifdef DEV_NETMAP
2244 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
2245 		iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
2246 #endif
2247 	/* handle any laggards */
2248 	if (txq->ift_db_pending)
2249 		GROUPTASK_ENQUEUE(&txq->ift_task);
2250 
2251 	scctx->isc_pause_frames = 0;
2252 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2253 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
2254 	return;
2255  hung:
2256 	device_printf(ctx->ifc_dev,  "TX(%d) desc avail = %d, pidx = %d\n",
2257 				  txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
2258 	STATE_LOCK(ctx);
2259 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2260 	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
2261 	iflib_admin_intr_deferred(ctx);
2262 	STATE_UNLOCK(ctx);
2263 }
2264 
2265 static void
2266 iflib_init_locked(if_ctx_t ctx)
2267 {
2268 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2270 	if_t ifp = ctx->ifc_ifp;
2271 	iflib_fl_t fl;
2272 	iflib_txq_t txq;
2273 	iflib_rxq_t rxq;
2274 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
2275 
2276 
2277 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2278 	IFDI_INTR_DISABLE(ctx);
2279 
2280 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
2281 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
2282 	/* Set hardware offload abilities */
2283 	if_clearhwassist(ifp);
2284 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
2285 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
2286 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
2287 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
2288 	if (if_getcapenable(ifp) & IFCAP_TSO4)
2289 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
2290 	if (if_getcapenable(ifp) & IFCAP_TSO6)
2291 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
2292 
2293 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
2294 		CALLOUT_LOCK(txq);
2295 		callout_stop(&txq->ift_timer);
2296 		CALLOUT_UNLOCK(txq);
2297 		iflib_netmap_txq_init(ctx, txq);
2298 	}
2299 #ifdef INVARIANTS
2300 	i = if_getdrvflags(ifp);
2301 #endif
2302 	IFDI_INIT(ctx);
2303 	MPASS(if_getdrvflags(ifp) == i);
2304 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
2305 		/* XXX this should really be done on a per-queue basis */
2306 		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
2307 			MPASS(rxq->ifr_id == i);
2308 			iflib_netmap_rxq_init(ctx, rxq);
2309 			continue;
2310 		}
2311 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
2312 			if (iflib_fl_setup(fl)) {
2313 				device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n");
2314 				goto done;
2315 			}
2316 		}
2317 	}
2318 done:
2319 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2320 	IFDI_INTR_ENABLE(ctx);
2321 	txq = ctx->ifc_txqs;
2322 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++)
2323 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
2324 			txq->ift_timer.c_cpu);
2325 }
2326 
2327 static int
2328 iflib_media_change(if_t ifp)
2329 {
2330 	if_ctx_t ctx = if_getsoftc(ifp);
2331 	int err;
2332 
2333 	CTX_LOCK(ctx);
2334 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
2335 		iflib_init_locked(ctx);
2336 	CTX_UNLOCK(ctx);
2337 	return (err);
2338 }
2339 
2340 static void
2341 iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
2342 {
2343 	if_ctx_t ctx = if_getsoftc(ifp);
2344 
2345 	CTX_LOCK(ctx);
2346 	IFDI_UPDATE_ADMIN_STATUS(ctx);
2347 	IFDI_MEDIA_STATUS(ctx, ifmr);
2348 	CTX_UNLOCK(ctx);
2349 }
2350 
2351 void
2352 iflib_stop(if_ctx_t ctx)
2353 {
2354 	iflib_txq_t txq = ctx->ifc_txqs;
2355 	iflib_rxq_t rxq = ctx->ifc_rxqs;
2356 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2357 	iflib_dma_info_t di;
2358 	iflib_fl_t fl;
2359 	int i, j;
2360 
2361 	/* Tell the stack that the interface is no longer active */
2362 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2363 
2364 	IFDI_INTR_DISABLE(ctx);
2365 	DELAY(1000);
2366 	IFDI_STOP(ctx);
2367 	DELAY(1000);
2368 
2369 	iflib_debug_reset();
2370 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
2371 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
2372 		/* make sure all transmitters have completed before proceeding XXX */
2373 
2374 		CALLOUT_LOCK(txq);
2375 		callout_stop(&txq->ift_timer);
2376 		CALLOUT_UNLOCK(txq);
2377 
2378 		/* clean any enqueued buffers */
2379 		iflib_ifmp_purge(txq);
2380 		/* Free any existing tx buffers. */
2381 		for (j = 0; j < txq->ift_size; j++) {
2382 			iflib_txsd_free(ctx, txq, j);
2383 		}
2384 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2385 		txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
2386 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
2387 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2388 		txq->ift_pullups = 0;
2389 		ifmp_ring_reset_stats(txq->ift_br);
2390 		for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++)
2391 			bzero((void *)di->idi_vaddr, di->idi_size);
2392 	}
2393 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
2394 		/* make sure all receive processing has completed before proceeding XXX */
2395 
2396 		for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++)
2397 			bzero((void *)di->idi_vaddr, di->idi_size);
2398 		/* also resets the free lists pidx/cidx */
2399 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
2400 			iflib_fl_bufs_free(fl);
2401 	}
2402 }
2403 
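/*
 * Compute the address of the receive descriptor one cache line beyond
 * cidx so it can be prefetched, wrapping to the start of the ring at
 * the end.
 */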
2404 static inline caddr_t
2405 calc_next_rxd(iflib_fl_t fl, int cidx)
2406 {
2407 	qidx_t size;
2408 	int nrxd;
2409 	caddr_t start, end, cur, next;
2410 
2411 	nrxd = fl->ifl_size;
2412 	size = fl->ifl_rxd_size;
2413 	start = fl->ifl_ifdi->idi_vaddr;
2414 
2415 	if (__predict_false(size == 0))
2416 		return (start);
2417 	cur = start + size*cidx;
2418 	end = start + size*nrxd;
2419 	next = CACHE_PTR_NEXT(cur);
2420 	return (next < end ? next : start);
2421 }
2422 
2423 static inline void
2424 prefetch_pkts(iflib_fl_t fl, int cidx)
2425 {
2426 	int nextptr;
2427 	int nrxd = fl->ifl_size;
2428 	caddr_t next_rxd;
2429 
2430 
2431 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2432 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2433 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
2434 	next_rxd = calc_next_rxd(fl, cidx);
2435 	prefetch(next_rxd);
2436 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2437 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2438 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2439 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2440 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2441 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2442 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2443 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
2444 }
2445 
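/*
 * Translate a driver-reported receive fragment into its software
 * descriptor state: record the mbuf and cluster pointers in `sd`,
 * sync (and optionally unload) the DMA map, and advance the free
 * list's consumer index.
 */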
2446 static void
2447 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
2448 {
2449 	int flid, cidx;
2450 	bus_dmamap_t map;
2451 	iflib_fl_t fl;
2452 	iflib_dma_info_t di;
2453 	int next;
2454 
2455 	map = NULL;
2456 	flid = irf->irf_flid;
2457 	cidx = irf->irf_idx;
2458 	fl = &rxq->ifr_fl[flid];
2459 	sd->ifsd_fl = fl;
2460 	sd->ifsd_cidx = cidx;
2461 	sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
2462 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
2463 	fl->ifl_credits--;
2464 #if MEMORY_LOGGING
2465 	fl->ifl_m_dequeued++;
2466 #endif
2467 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2468 		prefetch_pkts(fl, cidx);
2469 	if (fl->ifl_sds.ifsd_map != NULL) {
2470 		next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2471 		prefetch(&fl->ifl_sds.ifsd_map[next]);
2472 		map = fl->ifl_sds.ifsd_map[cidx];
2473 		di = fl->ifl_ifdi;
2474 		next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
2475 		prefetch(&fl->ifl_sds.ifsd_flags[next]);
2476 		bus_dmamap_sync(di->idi_tag, di->idi_map,
2477 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2478 
2479 		/* not a valid assert if bxe really does SGE from non-contiguous elements */
2480 		MPASS(fl->ifl_cidx == cidx);
2481 		if (unload)
2482 			bus_dmamap_unload(fl->ifl_desc_tag, map);
2483 	}
2484 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
2485 	if (__predict_false(fl->ifl_cidx == 0))
2486 		fl->ifl_gen = 0;
2487 	if (map != NULL)
2488 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2489 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2490 	bit_clear(fl->ifl_rx_bitmap, cidx);
2491 }
2492 
2493 static struct mbuf *
2494 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
2495 {
2496 	int i, padlen, flags;
2497 	struct mbuf *m, *mh, *mt;
2498 	caddr_t cl;
2499 
2500 	i = 0;
2501 	mh = NULL;
2502 	do {
2503 		rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd);
2504 
2505 		MPASS(*sd->ifsd_cl != NULL);
2506 		MPASS(*sd->ifsd_m != NULL);
2507 
2508 		/* Don't include zero-length frags */
2509 		if (ri->iri_frags[i].irf_len == 0) {
2510 			/* XXX we can save the cluster here, but not the mbuf */
2511 			m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
2512 			m_free(*sd->ifsd_m);
2513 			*sd->ifsd_m = NULL;
2514 			continue;
2515 		}
2516 		m = *sd->ifsd_m;
2517 		*sd->ifsd_m = NULL;
2518 		if (mh == NULL) {
2519 			flags = M_PKTHDR|M_EXT;
2520 			mh = mt = m;
2521 			padlen = ri->iri_pad;
2522 		} else {
2523 			flags = M_EXT;
2524 			mt->m_next = m;
2525 			mt = m;
2526 			/* assuming padding is only on the first fragment */
2527 			padlen = 0;
2528 		}
2529 		cl = *sd->ifsd_cl;
2530 		*sd->ifsd_cl = NULL;
2531 
2532 		/* Can these two be made one? */
2533 		m_init(m, M_NOWAIT, MT_DATA, flags);
2534 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
2535 		/*
2536 		 * These must follow m_init and m_cljset
2537 		 */
2538 		m->m_data += padlen;
2539 		ri->iri_len -= padlen;
2540 		m->m_len = ri->iri_frags[i].irf_len;
2541 	} while (++i < ri->iri_nfrags);
2542 
2543 	return (mh);
2544 }
2545 
2546 /*
2547  * Process one software descriptor
2548  */
2549 static struct mbuf *
2550 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
2551 {
2552 	struct if_rxsd sd;
2553 	struct mbuf *m;
2554 
2555 	/* should I merge this back in now that the two paths are basically duplicated? */
2556 	if (ri->iri_nfrags == 1 &&
2557 	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
2558 		rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
2559 		m = *sd.ifsd_m;
2560 		*sd.ifsd_m = NULL;
2561 		m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
2562 #ifndef __NO_STRICT_ALIGNMENT
2563 		if (!IP_ALIGNED(m))
2564 			m->m_data += 2;
2565 #endif
2566 		memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
2567 		m->m_len = ri->iri_frags[0].irf_len;
2568 	} else {
2569 		m = assemble_segments(rxq, ri, &sd);
2570 	}
2571 	m->m_pkthdr.len = ri->iri_len;
2572 	m->m_pkthdr.rcvif = ri->iri_ifp;
2573 	m->m_flags |= ri->iri_flags;
2574 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
2575 	m->m_pkthdr.flowid = ri->iri_flowid;
2576 	M_HASHTYPE_SET(m, ri->iri_rsstype);
2577 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
2578 	m->m_pkthdr.csum_data = ri->iri_csum_data;
2579 	return (m);
2580 }
2581 
2582 #if defined(INET6) || defined(INET)
2583 static void
2584 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
2585 {
2586 	CURVNET_SET(lc->ifp->if_vnet);
2587 #if defined(INET6)
2588 	*v6 = VNET(ip6_forwarding);
2589 #endif
2590 #if defined(INET)
2591 	*v4 = VNET(ipforwarding);
2592 #endif
2593 	CURVNET_RESTORE();
2594 }
2595 
2596 /*
2597  * Returns true if it's possible this packet could be LROed.
2598  * If it returns false, tcp_lro_rx() is guaranteed not to return
2599  * zero, so the call can be skipped entirely.
2600  */
2601 static bool
2602 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
2603 {
2604 	struct ether_header *eh;
2605 	uint16_t eh_type;
2606 
2607 	eh = mtod(m, struct ether_header *);
2608 	eh_type = ntohs(eh->ether_type);
2609 	switch (eh_type) {
2610 #if defined(INET6)
2611 		case ETHERTYPE_IPV6:
2612 			return (!v6_forwarding);
2613 #endif
2614 #if defined (INET)
2615 		case ETHERTYPE_IP:
2616 			return (!v4_forwarding);
2617 #endif
2618 	}
2619 
2620 	return (false);
2621 }
2622 #else
2623 static void
2624 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
2625 {
2626 }
2627 #endif
2628 
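/*
 * Drain up to `budget` packets from the receive queue: pull completed
 * descriptors through the driver's rxd_pkt_get, assemble mbuf chains,
 * replenish the free lists, and pass packets to LRO or if_input().
 * Returns true if descriptors remain to be processed.
 */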
2629 static bool
2630 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
2631 {
2632 	if_ctx_t ctx = rxq->ifr_ctx;
2633 	if_shared_ctx_t sctx = ctx->ifc_sctx;
2634 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2635 	int avail, i;
2636 	qidx_t *cidxp;
2637 	struct if_rxd_info ri;
2638 	int err, budget_left, rx_bytes, rx_pkts;
2639 	iflib_fl_t fl;
2640 	struct ifnet *ifp;
2641 	int lro_enabled;
2642 	bool v4_forwarding, v6_forwarding, lro_possible;
2643 
2644 	/*
2645 	 * XXX early demux data packets so that if_input processing only handles
2646 	 * acks in interrupt context
2647 	 */
2648 	struct mbuf *m, *mh, *mt, *mf;
2649 
2650 	lro_possible = v4_forwarding = v6_forwarding = false;
2651 	ifp = ctx->ifc_ifp;
2652 	mh = mt = NULL;
2653 	MPASS(budget > 0);
2654 	rx_pkts	= rx_bytes = 0;
2655 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
2656 		cidxp = &rxq->ifr_cq_cidx;
2657 	else
2658 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
2659 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
2660 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2661 			__iflib_fl_refill_lt(ctx, fl, budget + 8);
2662 		DBG_COUNTER_INC(rx_unavail);
2663 		return (false);
2664 	}
2665 
2666 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
2667 		if (__predict_false(!CTX_ACTIVE(ctx))) {
2668 			DBG_COUNTER_INC(rx_ctx_inactive);
2669 			break;
2670 		}
2671 		/*
2672 		 * Reset client-set fields to their default values
2673 		 */
2674 		rxd_info_zero(&ri);
2675 		ri.iri_qsidx = rxq->ifr_id;
2676 		ri.iri_cidx = *cidxp;
2677 		ri.iri_ifp = ifp;
2678 		ri.iri_frags = rxq->ifr_frags;
2679 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
2680 
2681 		if (err)
2682 			goto err;
2683 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
2684 			*cidxp = ri.iri_cidx;
2685 			/* Update our consumer index */
2686 			/* XXX NB: shurd - check if this is still safe */
2687 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
2688 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
2689 				rxq->ifr_cq_gen = 0;
2690 			}
2691 			/* was this only a completion queue message? */
2692 			if (__predict_false(ri.iri_nfrags == 0))
2693 				continue;
2694 		}
2695 		MPASS(ri.iri_nfrags != 0);
2696 		MPASS(ri.iri_len != 0);
2697 
2698 		/* will advance the cidx on the corresponding free lists */
2699 		m = iflib_rxd_pkt_get(rxq, &ri);
2700 		avail--;
2701 		budget_left--;
2702 		if (avail == 0 && budget_left)
2703 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
2704 
2705 		if (__predict_false(m == NULL)) {
2706 			DBG_COUNTER_INC(rx_mbuf_null);
2707 			continue;
2708 		}
2709 		/* imm_pkt: -- cxgb */
2710 		if (mh == NULL)
2711 			mh = mt = m;
2712 		else {
2713 			mt->m_nextpkt = m;
2714 			mt = m;
2715 		}
2716 	}
2717 	/* make sure that we can refill faster than drain */
2718 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2719 		__iflib_fl_refill_lt(ctx, fl, budget + 8);
2720 
2721 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2722 	if (lro_enabled)
2723 		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
2724 	mt = mf = NULL;
2725 	while (mh != NULL) {
2726 		m = mh;
2727 		mh = mh->m_nextpkt;
2728 		m->m_nextpkt = NULL;
2729 #ifndef __NO_STRICT_ALIGNMENT
2730 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
2731 			continue;
2732 #endif
2733 		rx_bytes += m->m_pkthdr.len;
2734 		rx_pkts++;
2735 #if defined(INET6) || defined(INET)
2736 		if (lro_enabled) {
2737 			if (!lro_possible) {
2738 				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
2739 				if (lro_possible && mf != NULL) {
2740 					ifp->if_input(ifp, mf);
2741 					DBG_COUNTER_INC(rx_if_input);
2742 					mt = mf = NULL;
2743 				}
2744 			}
2745 			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
2746 			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
2747 				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
2748 					continue;
2749 			}
2750 		}
2751 #endif
2752 		if (lro_possible) {
2753 			ifp->if_input(ifp, m);
2754 			DBG_COUNTER_INC(rx_if_input);
2755 			continue;
2756 		}
2757 
2758 		if (mf == NULL)
2759 			mf = m;
2760 		if (mt != NULL)
2761 			mt->m_nextpkt = m;
2762 		mt = m;
2763 	}
2764 	if (mf != NULL) {
2765 		ifp->if_input(ifp, mf);
2766 		DBG_COUNTER_INC(rx_if_input);
2767 	}
2768 
2769 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
2770 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
2771 
2772 	/*
2773 	 * Flush any outstanding LRO work
2774 	 */
2775 #if defined(INET6) || defined(INET)
2776 	tcp_lro_flush_all(&rxq->ifr_lc);
2777 #endif
2778 	if (avail)
2779 		return (true);
2780 	return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
2781 err:
2782 	STATE_LOCK(ctx);
2783 	ctx->ifc_flags |= IFC_DO_RESET;
2784 	iflib_admin_intr_deferred(ctx);
2785 	STATE_UNLOCK(ctx);
2786 	return (false);
2787 }
2788 
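/*
 * Doorbell and report-status deferral heuristics: the busier the ring,
 * the longer hardware notification can safely be deferred.  The helpers
 * below scale the deferral window with ring occupancy and fall back to
 * prompt notification when the ring is nearly empty.
 */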
2789 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
2790 static inline qidx_t
2791 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
2792 {
2793 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2794 	qidx_t minthresh = txq->ift_size / 8;
2795 	if (in_use > 4*minthresh)
2796 		return (notify_count);
2797 	if (in_use > 2*minthresh)
2798 		return (notify_count >> 1);
2799 	if (in_use > minthresh)
2800 		return (notify_count >> 3);
2801 	return (0);
2802 }
2803 
2804 static inline qidx_t
2805 txq_max_rs_deferred(iflib_txq_t txq)
2806 {
2807 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2808 	qidx_t minthresh = txq->ift_size / 8;
2809 	if (txq->ift_in_use > 4*minthresh)
2810 		return (notify_count);
2811 	if (txq->ift_in_use > 2*minthresh)
2812 		return (notify_count >> 1);
2813 	if (txq->ift_in_use > minthresh)
2814 		return (notify_count >> 2);
2815 	return (2);
2816 }
2817 
2818 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
2819 #define M_HAS_VLANTAG(m) ((m)->m_flags & M_VLANTAG)
2820 
2821 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
2822 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
2823 #define TXQ_MAX_DB_CONSUMED(size) ((size) >> 4)
2824 
2825 /* forward compatibility for cxgb */
2826 #define FIRST_QSET(ctx) 0
2827 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
2828 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
2829 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & (ctx)->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
2830 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
2831 
2832 /* XXX we should be setting this to something other than zero */
2833 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
2834 #define	MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
2835     (ctx)->ifc_softc_ctx.isc_tx_nsegments)
2836 
2837 static inline bool
2838 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
2839 {
2840 	qidx_t dbval, max;
2841 	bool rang;
2842 
2843 	rang = false;
2844 	max = TXQ_MAX_DB_DEFERRED(txq, in_use);
2845 	if (ring || txq->ift_db_pending >= max) {
2846 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
2847 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
2848 		txq->ift_db_pending = txq->ift_npending = 0;
2849 		rang = true;
2850 	}
2851 	return (rang);
2852 }
2853 
2854 #ifdef PKT_DEBUG
2855 static void
2856 print_pkt(if_pkt_info_t pi)
2857 {
2858 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
2859 	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
2860 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
2861 	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
2862 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
2863 	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
2864 }
2865 #endif
2866 
2867 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
2868 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
2869 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
2870 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
2871 
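/*
 * Parse the Ethernet, IP/IPv6, and (when offload is requested) TCP
 * headers of *mp, recording header lengths, the L3 protocol, and TSO
 * parameters in `pi` for the driver's encap routine.  May replace *mp
 * if the chain has to be duplicated or pulled up.
 */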
2872 static int
2873 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
2874 {
2875 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
2876 	struct ether_vlan_header *eh;
2877 	struct mbuf *m, *n;
2878 
2879 	m = *mp;
2880 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
2881 	    M_WRITABLE(m) == 0) {
2882 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
2883 			return (ENOMEM);
2884 		} else {
2885 			m_freem(*mp);
2886 			*mp = m;
2887 		}
2888 	}
2889 
2890 	/*
2891 	 * Determine where frame payload starts.
2892 	 * Jump over vlan headers if already present,
2893 	 * helpful for QinQ too.
2894 	 */
2895 	if (__predict_false(m->m_len < sizeof(*eh))) {
2896 		txq->ift_pullups++;
2897 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
2898 			return (ENOMEM);
2899 	}
2900 	eh = mtod(m, struct ether_vlan_header *);
2901 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2902 		pi->ipi_etype = ntohs(eh->evl_proto);
2903 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2904 	} else {
2905 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
2906 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
2907 	}
2908 
2909 	switch (pi->ipi_etype) {
2910 #ifdef INET
2911 	case ETHERTYPE_IP:
2912 	{
2913 		struct ip *ip = NULL;
2914 		struct tcphdr *th = NULL;
2915 		int minthlen;
2916 
2917 		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
2918 		if (__predict_false(m->m_len < minthlen)) {
2919 			/*
2920 			 * if this code bloat is causing too much of a hit
2921 			 * move it to a separate function and mark it noinline
2922 			 */
2923 			if (m->m_len == pi->ipi_ehdrlen) {
2924 				n = m->m_next;
2925 				MPASS(n);
2926 				if (n->m_len >= sizeof(*ip))  {
2927 					ip = (struct ip *)n->m_data;
2928 					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2929 						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2930 				} else {
2931 					txq->ift_pullups++;
2932 					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
2933 						return (ENOMEM);
2934 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2935 				}
2936 			} else {
2937 				txq->ift_pullups++;
2938 				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
2939 					return (ENOMEM);
2940 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2941 				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2942 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2943 			}
2944 		} else {
2945 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2946 			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2947 				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2948 		}
2949 		pi->ipi_ip_hlen = ip->ip_hl << 2;
2950 		pi->ipi_ipproto = ip->ip_p;
2951 		pi->ipi_flags |= IPI_TX_IPV4;
2952 
2953 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
2954 			ip->ip_sum = 0;
2955 
2956 		/* TCP checksum offload may require TCP header length */
2957 		if (IS_TX_OFFLOAD4(pi)) {
2958 			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
2959 				if (__predict_false(th == NULL)) {
2960 					txq->ift_pullups++;
2961 					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
2962 						return (ENOMEM);
2963 					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
2964 				}
2965 				pi->ipi_tcp_hflags = th->th_flags;
2966 				pi->ipi_tcp_hlen = th->th_off << 2;
2967 				pi->ipi_tcp_seq = th->th_seq;
2968 			}
2969 			if (IS_TSO4(pi)) {
2970 				if (__predict_false(ip->ip_p != IPPROTO_TCP))
2971 					return (ENXIO);
2972 				th->th_sum = in_pseudo(ip->ip_src.s_addr,
2973 						       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2974 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
2975 				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
2976 					ip->ip_sum = 0;
2977 					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
2978 				}
2979 			}
2980 		}
2981 		break;
2982 	}
2983 #endif
2984 #ifdef INET6
2985 	case ETHERTYPE_IPV6:
2986 	{
2987 		struct ip6_hdr *ip6;
2988 		struct tcphdr *th;
2989 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
2990 
2991 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
2992 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
2993 				return (ENOMEM);
2994 		}
2995 		/* set these after any m_pullup() so they are not left stale */
2996 		ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
2997 		/* XXX-BZ this will go badly in case of ext hdrs. */
2998 		pi->ipi_ipproto = ip6->ip6_nxt;
2999 		pi->ipi_flags |= IPI_TX_IPV6;
3000 
3001 		/* TCP checksum offload may require TCP header length */
3002 		if (IS_TX_OFFLOAD6(pi)) {
3003 			if (pi->ipi_ipproto == IPPROTO_TCP) {
3004 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3005 					txq->ift_pullups++;
3006 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
3007 						return (ENOMEM);
					/* m_pullup() may have moved the data; recompute */
					ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
					th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3008 				}
3009 				pi->ipi_tcp_hflags = th->th_flags;
3010 				pi->ipi_tcp_hlen = th->th_off << 2;
3011 				pi->ipi_tcp_seq = th->th_seq;
3012 			}
3013 			if (IS_TSO6(pi)) {
3014 				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
3015 					return (ENXIO);
3016 				/*
3017 				 * The corresponding flag is set by the stack in the IPv4
3018 				 * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
3019 				 * So, set it here because the rest of the flow requires it.
3020 				 */
3021 				pi->ipi_csum_flags |= CSUM_IP6_TCP;
3022 				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3023 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3024 			}
3025 		}
3026 		break;
3027 	}
3028 #endif
3029 	default:
3030 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3031 		pi->ipi_ip_hlen = 0;
3032 		break;
3033 	}
3034 	*mp = m;
3035 
3036 	return (0);
3037 }
3038 
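/*
 * Strip zero-length mbufs that immediately follow the packet header,
 * then either defragment the chain or migrate the packet header into
 * the first cluster-backed mbuf.
 */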
3039 static __noinline struct mbuf *
3040 collapse_pkthdr(struct mbuf *m0)
3041 {
3042 	struct mbuf *m, *m_next, *tmp;
3043 
3044 	m = m0;
3045 	m_next = m->m_next;
3046 	while (m_next != NULL && m_next->m_len == 0) {
3047 		m = m_next;
3048 		m_next = m_next->m_next;
3049 		m->m_next = NULL;
3050 		m_free(m);
3051 	}
3052 	m = m0;
3053 	m->m_next = m_next;
3054 	if (m_next == NULL)
3055 		return (m);
3056 	if ((m_next->m_flags & M_EXT) == 0) {
3057 		m = m_defrag(m, M_NOWAIT);
3058 	} else {
3059 		tmp = m_next->m_next;
3060 		memcpy(m_next, m, MPKTHSIZE);
3061 		m = m_next;
3062 		m->m_next = tmp;
3063 	}
3064 	return (m);
3065 }
3066 
3067 /*
3068  * If dodgy hardware rejects the scatter gather chain we've handed it
3069  * we'll need to remove the mbuf chain from ifsg_m[] before we can add the
3070  * m_defrag'd mbufs
3071  */
3072 static __noinline struct mbuf *
3073 iflib_remove_mbuf(iflib_txq_t txq)
3074 {
3075 	int ntxd, i, pidx;
3076 	struct mbuf *m, *mh, **ifsd_m;
3077 
3078 	pidx = txq->ift_pidx;
3079 	ifsd_m = txq->ift_sds.ifsd_m;
3080 	ntxd = txq->ift_size;
3081 	mh = m = ifsd_m[pidx];
3082 	ifsd_m[pidx] = NULL;
3083 #if MEMORY_LOGGING
3084 	txq->ift_dequeued++;
3085 #endif
3086 	i = 1;
3087 
3088 	while (m) {
3089 		ifsd_m[(pidx + i) & (ntxd - 1)] = NULL;
3090 #if MEMORY_LOGGING
3091 		txq->ift_dequeued++;
3092 #endif
3093 		m = m->m_next;
3094 		i++;
3095 	}
3096 	return (mh);
3097 }
3098 
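/*
 * DMA-load an mbuf chain for transmit.  When a busdma map is supplied
 * the chain is loaded with bus_dmamap_load_mbuf_sg() and each mbuf is
 * recorded in ifsd_m[] for later reclamation; with no map (no bounce
 * buffering or remapping required) the segment list is synthesized
 * directly from pmap_kextract() on each buffer.
 */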
3099 static int
3100 iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
3101 			  struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
3102 			  int max_segs, int flags)
3103 {
3104 	if_ctx_t ctx;
3105 	if_shared_ctx_t		sctx;
3106 	if_softc_ctx_t		scctx;
3107 	int i, next, pidx, err, ntxd, count;
3108 	struct mbuf *m, *tmp, **ifsd_m;
3109 
3110 	m = *m0;
3111 
3112 	/*
3113 	 * Please don't ever do this
3114 	 */
3115 	if (__predict_false(m->m_len == 0))
3116 		*m0 = collapse_pkthdr(m);
3117 
3118 	ctx = txq->ift_ctx;
3119 	sctx = ctx->ifc_sctx;
3120 	scctx = &ctx->ifc_softc_ctx;
3121 	ifsd_m = txq->ift_sds.ifsd_m;
3122 	ntxd = txq->ift_size;
3123 	pidx = txq->ift_pidx;
3124 	if (map != NULL) {
3125 		uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
3126 
3127 		err = bus_dmamap_load_mbuf_sg(tag, map,
3128 					      *m0, segs, nsegs, BUS_DMA_NOWAIT);
3129 		if (err)
3130 			return (err);
3131 		ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
3132 		count = 0;
3133 		m = *m0;
3134 		do {
3135 			if (__predict_false(m->m_len <= 0)) {
3136 				tmp = m;
3137 				m = m->m_next;
3138 				tmp->m_next = NULL;
3139 				m_free(tmp);
3140 				continue;
3141 			}
3142 			m = m->m_next;
3143 			count++;
3144 		} while (m != NULL);
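		/*
		 * If the chain holds more mbufs than DMA segments, there is
		 * not a ring slot available for every mbuf pointer; mark the
		 * head M_TOOBIG so that reclamation frees the whole chain at
		 * once (see iflib_tx_desc_free()).
		 */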
3145 		if (count > *nsegs) {
3146 			ifsd_m[pidx] = *m0;
3147 			ifsd_m[pidx]->m_flags |= M_TOOBIG;
3148 			return (0);
3149 		}
3150 		m = *m0;
3151 		count = 0;
3152 		do {
3153 			next = (pidx + count) & (ntxd-1);
3154 			MPASS(ifsd_m[next] == NULL);
3155 			ifsd_m[next] = m;
3156 			count++;
3157 			tmp = m;
3158 			m = m->m_next;
3159 		} while (m != NULL);
3160 	} else {
3161 		int buflen, sgsize, maxsegsz, max_sgsize;
3162 		vm_offset_t vaddr;
3163 		vm_paddr_t curaddr;
3164 
3165 		count = i = 0;
3166 		m = *m0;
3167 		if (m->m_pkthdr.csum_flags & CSUM_TSO)
3168 			maxsegsz = scctx->isc_tx_tso_segsize_max;
3169 		else
3170 			maxsegsz = sctx->isc_tx_maxsegsize;
3171 
3172 		do {
3173 			if (__predict_false(m->m_len <= 0)) {
3174 				tmp = m;
3175 				m = m->m_next;
3176 				tmp->m_next = NULL;
3177 				m_free(tmp);
3178 				continue;
3179 			}
3180 			buflen = m->m_len;
3181 			vaddr = (vm_offset_t)m->m_data;
3182 			/*
3183 			 * see if we can't be smarter about physically
3184 			 * contiguous mappings
3185 			 */
3186 			next = (pidx + count) & (ntxd-1);
3187 			MPASS(ifsd_m[next] == NULL);
3188 #if MEMORY_LOGGING
3189 			txq->ift_enqueued++;
3190 #endif
3191 			ifsd_m[next] = m;
3192 			while (buflen > 0) {
3193 				if (i >= max_segs)
3194 					goto err;
3195 				max_sgsize = MIN(buflen, maxsegsz);
3196 				curaddr = pmap_kextract(vaddr);
3197 				sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
3198 				sgsize = MIN(sgsize, max_sgsize);
3199 				segs[i].ds_addr = curaddr;
3200 				segs[i].ds_len = sgsize;
3201 				vaddr += sgsize;
3202 				buflen -= sgsize;
3203 				i++;
3204 			}
3205 			count++;
3206 			tmp = m;
3207 			m = m->m_next;
3208 		} while (m != NULL);
3209 		*nsegs = i;
3210 	}
3211 	return (0);
3212 err:
3213 	*m0 = iflib_remove_mbuf(txq);
3214 	return (EFBIG);
3215 }
3216 
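/*
 * Compute the address of the transmit descriptor one cache line beyond
 * cidx for prefetching, wrapping to the start of the ring at the end.
 */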
3217 static inline caddr_t
3218 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
3219 {
3220 	qidx_t size;
3221 	int ntxd;
3222 	caddr_t start, end, cur, next;
3223 
3224 	ntxd = txq->ift_size;
3225 	size = txq->ift_txd_size[qid];
3226 	start = txq->ift_ifdi[qid].idi_vaddr;
3227 
3228 	if (__predict_false(size == 0))
3229 		return (start);
3230 	cur = start + size*cidx;
3231 	end = start + size*ntxd;
3232 	next = CACHE_PTR_NEXT(cur);
3233 	return (next < end ? next : start);
3234 }
3235 
3236 /*
3237  * Pad an mbuf to ensure a minimum ethernet frame size.
3238  * min_frame_size is the frame size (less CRC) to pad the mbuf to
3239  */
3240 static __noinline int
3241 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3242 {
3243 	/*
3244 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3245 	 * an ARP message is the smallest common payload I can think of.
3246 	 */
3247 	static char pad[18];	/* just zeros */
3248 	int n;
3249 	struct mbuf *new_head;
3250 
3251 	if (!M_WRITABLE(*m_head)) {
3252 		new_head = m_dup(*m_head, M_NOWAIT);
3253 		if (new_head == NULL) {
3254 			m_freem(*m_head);
3255 			device_printf(dev, "cannot pad short frame, m_dup() failed");
3256 			DBG_COUNTER_INC(encap_pad_mbuf_fail);
3257 			return (ENOMEM);
3258 		}
3259 		m_freem(*m_head);
3260 		*m_head = new_head;
3261 	}
3262 
3263 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3264 	     n > 0; n -= sizeof(pad))
3265 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3266 			break;
3267 
3268 	if (n > 0) {
3269 		m_freem(*m_head);
3270 		device_printf(dev, "cannot pad short frame\n");
3271 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
3272 		return (ENOBUFS);
3273 	}
3274 
3275 	return (0);
3276 }
3277 
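/*
 * Prepare and enqueue a single packet: pad runts when the hardware
 * requires it, parse headers for offload state, DMA-load the chain
 * (retrying once with m_collapse() and once with m_defrag() on EFBIG),
 * then hand the segment list to the driver's txd_encap and advance the
 * producer index.
 */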
3278 static int
3279 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
3280 {
3281 	if_ctx_t		ctx;
3282 	if_shared_ctx_t		sctx;
3283 	if_softc_ctx_t		scctx;
3284 	bus_dma_segment_t	*segs;
3285 	struct mbuf		*m_head;
3286 	void			*next_txd;
3287 	bus_dmamap_t		map;
3288 	struct if_pkt_info	pi;
3289 	int remap = 0;
3290 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
3291 	bus_dma_tag_t desc_tag;
3292 
3293 	ctx = txq->ift_ctx;
3294 	sctx = ctx->ifc_sctx;
3295 	scctx = &ctx->ifc_softc_ctx;
3296 	segs = txq->ift_segs;
3297 	ntxd = txq->ift_size;
3298 	m_head = *m_headp;
3299 	map = NULL;
3300 
3301 	/*
3302 	 * If we're doing TSO the next descriptor to clean may be quite far ahead
3303 	 */
3304 	cidx = txq->ift_cidx;
3305 	pidx = txq->ift_pidx;
3306 	if (ctx->ifc_flags & IFC_PREFETCH) {
3307 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
3308 		if (!(sctx->isc_flags & IFLIB_HAS_TXCQ)) {
3309 			next_txd = calc_next_txd(txq, cidx, 0);
3310 			prefetch(next_txd);
3311 		}
3312 
3313 		/* prefetch the next cache line of mbuf pointers and flags */
3314 		prefetch(&txq->ift_sds.ifsd_m[next]);
3315 		if (txq->ift_sds.ifsd_map != NULL) {
3316 			prefetch(&txq->ift_sds.ifsd_map[next]);
3317 			next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
3318 			prefetch(&txq->ift_sds.ifsd_flags[next]);
3319 		}
3320 	} else if (txq->ift_sds.ifsd_map != NULL)
3321 		map = txq->ift_sds.ifsd_map[pidx];
3322 
3323 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3324 		desc_tag = txq->ift_tso_desc_tag;
3325 		max_segs = scctx->isc_tx_tso_segments_max;
3326 		MPASS(desc_tag != NULL);
3327 		MPASS(max_segs > 0);
3328 	} else {
3329 		desc_tag = txq->ift_desc_tag;
3330 		max_segs = scctx->isc_tx_nsegments;
3331 	}
3332 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3333 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3334 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
3335 		if (err)
3336 			return (err);
3337 	}
3338 	m_head = *m_headp;
3339 
3340 	pkt_info_zero(&pi);
3341 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
3342 	pi.ipi_pidx = pidx;
3343 	pi.ipi_qsidx = txq->ift_id;
3344 	pi.ipi_len = m_head->m_pkthdr.len;
3345 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
3346 	pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3347 
3348 	/* deliberate bitwise OR to make one condition */
3349 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
3350 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0))
3351 			return (err);
3352 		m_head = *m_headp;
3353 	}
3354 
3355 retry:
3356 	err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT);
3357 defrag:
3358 	if (__predict_false(err)) {
3359 		switch (err) {
3360 		case EFBIG:
3361 			/* try collapse once and defrag once */
3362 			if (remap == 0) {
3363 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3364 				/* try defrag if collapsing fails */
3365 				if (m_head == NULL)
3366 					remap++;
3367 			}
3368 			if (remap == 1)
3369 				m_head = m_defrag(*m_headp, M_NOWAIT);
3370 			remap++;
3371 			if (__predict_false(m_head == NULL))
3372 				goto defrag_failed;
3373 			txq->ift_mbuf_defrag++;
3374 			*m_headp = m_head;
3375 			goto retry;
3377 		case ENOMEM:
3378 			txq->ift_no_tx_dma_setup++;
3379 			break;
3380 		default:
3381 			txq->ift_no_tx_dma_setup++;
3382 			m_freem(*m_headp);
3383 			DBG_COUNTER_INC(tx_frees);
3384 			*m_headp = NULL;
3385 			break;
3386 		}
3387 		txq->ift_map_failed++;
3388 		DBG_COUNTER_INC(encap_load_mbuf_fail);
3389 		return (err);
3390 	}
3391 
3392 	/*
3393 	 * XXX assumes a 1 to 1 relationship between segments and
3394 	 *        descriptors - this does not hold true on all drivers, e.g.
3395 	 *        cxgb
3396 	 */
3397 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3398 		txq->ift_no_desc_avail++;
3399 		if (map != NULL)
3400 			bus_dmamap_unload(desc_tag, map);
3401 		DBG_COUNTER_INC(encap_txq_avail_fail);
3402 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
3403 			GROUPTASK_ENQUEUE(&txq->ift_task);
3404 		return (ENOBUFS);
3405 	}
3406 	/*
3407 	 * On Intel cards we can greatly reduce the number of TX interrupts
3408 	 * we see by only setting report status on every Nth descriptor.
3409 	 * However, this also means that the driver will need to keep track
3410 	 * of the descriptors that RS was set on to check them for the DD bit.
3411 	 */
3412 	txq->ift_rs_pending += nsegs + 1;
3413 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
3414 	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
3415 		pi.ipi_flags |= IPI_TX_INTR;
3416 		txq->ift_rs_pending = 0;
3417 	}
3418 
3419 	pi.ipi_segs = segs;
3420 	pi.ipi_nsegs = nsegs;
3421 
3422 	MPASS(pidx >= 0 && pidx < txq->ift_size);
3423 #ifdef PKT_DEBUG
3424 	print_pkt(&pi);
3425 #endif
3426 	if (map != NULL)
3427 		bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
3428 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
3429 		if (map != NULL)
3430 			bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3431 					BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3432 		DBG_COUNTER_INC(tx_encap);
3433 		MPASS(pi.ipi_new_pidx < txq->ift_size);
3434 
3435 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
3436 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
3437 			ndesc += txq->ift_size;
3438 			txq->ift_gen = 1;
3439 		}
3440 		/*
3441 		 * drivers can need as many as
3442 		 * two sentinels
3443 		 */
3444 		MPASS(ndesc <= pi.ipi_nsegs + 2);
3445 		MPASS(pi.ipi_new_pidx != pidx);
3446 		MPASS(ndesc > 0);
3447 		txq->ift_in_use += ndesc;
3448 
3449 		/*
3450 		 * We update the last software descriptor again here because there may
3451 		 * be a sentinel and/or there may be more mbufs than segments
3452 		 */
3453 		txq->ift_pidx = pi.ipi_new_pidx;
3454 		txq->ift_npending += pi.ipi_ndescs;
3455 	} else {
3456 		*m_headp = m_head = iflib_remove_mbuf(txq);
3457 		if (err == EFBIG) {
3458 			txq->ift_txd_encap_efbig++;
3459 			if (remap < 2) {
3460 				remap = 1;
3461 				goto defrag;
3462 			}
3463 		}
3464 		DBG_COUNTER_INC(encap_txd_encap_fail);
3465 		goto defrag_failed;
3466 	}
3467 	return (err);
3468 
3469 defrag_failed:
3470 	txq->ift_mbuf_defrag_failed++;
3471 	txq->ift_map_failed++;
3472 	m_freem(*m_headp);
3473 	DBG_COUNTER_INC(tx_frees);
3474 	*m_headp = NULL;
3475 	return (ENOMEM);
3476 }
3477 
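/*
 * Release `n` completed transmit descriptors starting at the consumer
 * index, unloading any DMA maps still marked mapped and freeing the
 * associated mbuf chains.
 */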
3478 static void
3479 iflib_tx_desc_free(iflib_txq_t txq, int n)
3480 {
3481 	int hasmap;
3482 	uint32_t qsize, cidx, mask, gen;
3483 	struct mbuf *m, **ifsd_m;
3484 	uint8_t *ifsd_flags;
3485 	bus_dmamap_t *ifsd_map;
3486 	bool do_prefetch;
3487 
3488 	cidx = txq->ift_cidx;
3489 	gen = txq->ift_gen;
3490 	qsize = txq->ift_size;
3491 	mask = qsize-1;
3492 	hasmap = txq->ift_sds.ifsd_map != NULL;
3493 	ifsd_flags = txq->ift_sds.ifsd_flags;
3494 	ifsd_m = txq->ift_sds.ifsd_m;
3495 	ifsd_map = txq->ift_sds.ifsd_map;
3496 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
3497 
3498 	while (n-- > 0) {
3499 		if (do_prefetch) {
3500 			prefetch(ifsd_m[(cidx + 3) & mask]);
3501 			prefetch(ifsd_m[(cidx + 4) & mask]);
3502 		}
3503 		if (ifsd_m[cidx] != NULL) {
3504 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
3505 			prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]);
3506 			if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) {
3507 				/*
3508 				 * does it matter if it's not the TSO tag? If so we'll
3509 				 * have to add the type to flags
3510 				 */
3511 				bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]);
3512 				ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED;
3513 			}
3514 			if ((m = ifsd_m[cidx]) != NULL) {
3515 				/* XXX we don't support any drivers that batch packets yet */
3516 				MPASS(m->m_nextpkt == NULL);
3517 				/* If the number of clusters exceeds the number of segments,
3518 				 * there won't be space on the ring to save a pointer to
3519 				 * each cluster, so we simply free the list here.
3520 				 */
3521 				if (m->m_flags & M_TOOBIG) {
3522 					m_freem(m);
3523 				} else {
3524 					m_free(m);
3525 				}
3526 				ifsd_m[cidx] = NULL;
3527 #if MEMORY_LOGGING
3528 				txq->ift_dequeued++;
3529 #endif
3530 				DBG_COUNTER_INC(tx_frees);
3531 			}
3532 		}
3533 		if (__predict_false(++cidx == qsize)) {
3534 			cidx = 0;
3535 			gen = 0;
3536 		}
3537 	}
3538 	txq->ift_cidx = cidx;
3539 	txq->ift_gen = gen;
3540 }
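
/*
 * Editor's note: the prefetch indexing above relies on the ring size being a
 * power of two (mask = qsize - 1), so "& mask" is a branch-free modulo.  A
 * minimal standalone sketch of the wrap:
 *
 *	uint32_t qsize = 1024, mask = qsize - 1;
 *	uint32_t cidx = 1023;
 *	cidx = (cidx + 1) & mask;	// 1024 & 1023 == 0: wraps to start
 */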
3541 
3542 static __inline int
3543 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
3544 {
3545 	int reclaim;
3546 	if_ctx_t ctx = txq->ift_ctx;
3547 
3548 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
3549 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
3550 
3551 	/*
3552 	 * Need a rate-limiting check so that this isn't called every time
3553 	 */
3554 	iflib_tx_credits_update(ctx, txq);
3555 	reclaim = DESC_RECLAIMABLE(txq);
3556 
3557 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
3558 #ifdef INVARIANTS
3559 		if (iflib_verbose_debug) {
3560 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
3561 			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
3562 			       reclaim, thresh);
3563 
3564 		}
3565 #endif
3566 		return (0);
3567 	}
3568 	iflib_tx_desc_free(txq, reclaim);
3569 	txq->ift_cleaned += reclaim;
3570 	txq->ift_in_use -= reclaim;
3571 
3572 	return (reclaim);
3573 }
3574 
3575 static struct mbuf **
3576 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
3577 {
3578 	int next, size;
3579 	struct mbuf **items;
3580 
3581 	size = r->size;
3582 	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
3583 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
3584 
3585 	prefetch(items[(cidx + offset) & (size-1)]);
3586 	if (remaining > 1) {
3587 		prefetch2cachelines(&items[next]);
3588 		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
3589 		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
3590 		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
3591 	}
3592 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
3593 }
3594 
3595 static void
3596 iflib_txq_check_drain(iflib_txq_t txq, int budget)
3597 {
3598 
3599 	ifmp_ring_check_drainage(txq->ift_br, budget);
3600 }
3601 
3602 static uint32_t
3603 iflib_txq_can_drain(struct ifmp_ring *r)
3604 {
3605 	iflib_txq_t txq = r->cookie;
3606 	if_ctx_t ctx = txq->ift_ctx;
3607 
3608 	return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
3609 		ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
3610 }
3611 
3612 static uint32_t
3613 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3614 {
3615 	iflib_txq_t txq = r->cookie;
3616 	if_ctx_t ctx = txq->ift_ctx;
3617 	struct ifnet *ifp = ctx->ifc_ifp;
3618 	struct mbuf **mp, *m;
3619 	int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
3620 	int reclaimed, err, in_use_prev, desc_used;
3621 	bool do_prefetch, ring, rang;
3622 
3623 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
3624 			    !LINK_ACTIVE(ctx))) {
3625 		DBG_COUNTER_INC(txq_drain_notready);
3626 		return (0);
3627 	}
3628 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
3629 	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
3630 	avail = IDXDIFF(pidx, cidx, r->size);
3631 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
3632 		DBG_COUNTER_INC(txq_drain_flushing);
3633 		for (i = 0; i < avail; i++) {
3634 			m_free(r->items[(cidx + i) & (r->size-1)]);
3635 			r->items[(cidx + i) & (r->size-1)] = NULL;
3636 		}
3637 		return (avail);
3638 	}
3639 
3640 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3641 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3642 		CALLOUT_LOCK(txq);
3643 		callout_stop(&txq->ift_timer);
3644 		CALLOUT_UNLOCK(txq);
3645 		DBG_COUNTER_INC(txq_drain_oactive);
3646 		return (0);
3647 	}
3648 	if (reclaimed)
3649 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3650 	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
3651 	count = MIN(avail, TX_BATCH_SIZE);
3652 #ifdef INVARIANTS
3653 	if (iflib_verbose_debug)
3654 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3655 		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3656 #endif
3657 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3658 	avail = TXQ_AVAIL(txq);
3659 	err = 0;
3660 	for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
3661 		int rem = do_prefetch ? count - i : 0;
3662 
3663 		mp = _ring_peek_one(r, cidx, i, rem);
3664 		MPASS(mp != NULL && *mp != NULL);
3665 		if (__predict_false(*mp == (struct mbuf *)txq)) {
3666 			consumed++;
3667 			reclaimed++;
3668 			continue;
3669 		}
3670 		in_use_prev = txq->ift_in_use;
3671 		err = iflib_encap(txq, mp);
3672 		if (__predict_false(err)) {
3673 			DBG_COUNTER_INC(txq_drain_encapfail);
3674 			/* no room - bail out */
3675 			if (err == ENOBUFS)
3676 				break;
3677 			consumed++;
3678 			/* we can't send this packet - skip it; the
3679 			 * failure was already counted above */
3680 			continue;
3681 		}
3682 		consumed++;
3683 		pkt_sent++;
3684 		m = *mp;
3685 		DBG_COUNTER_INC(tx_sent);
3686 		bytes_sent += m->m_pkthdr.len;
3687 		mcast_sent += !!(m->m_flags & M_MCAST);
3688 		avail = TXQ_AVAIL(txq);
3689 
3690 		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
3691 		desc_used += (txq->ift_in_use - in_use_prev);
3692 		ETHER_BPF_MTAP(ifp, m);
3693 		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
3694 			break;
3695 		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
3696 	}
3697 
3698 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
3699 	ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
3700 	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
3701 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
3702 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
3703 	if (mcast_sent)
3704 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3705 #ifdef INVARIANTS
3706 	if (iflib_verbose_debug)
3707 		printf("consumed=%d\n", consumed);
3708 #endif
3709 	return (consumed);
3710 }
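
/*
 * Editor's note on the drain loop above: doorbell writes are coalesced via
 * iflib_txd_db_check().  A rough model of the policy, under the assumption
 * that hardware only needs to observe the final producer index of a batch:
 *
 *	while encapsulating packets:
 *		ift_db_pending += descriptors consumed;
 *		ring the doorbell only once enough work has accumulated;
 *	after the loop, ring once more if latency is preferred
 *	(iflib_min_tx_latency), an encap error cut the batch short, or
 *	the ring is nearly full.
 */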
3711 
3712 static uint32_t
3713 iflib_txq_drain_always(struct ifmp_ring *r)
3714 {
3715 	return (1);
3716 }
3717 
3718 static uint32_t
3719 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3720 {
3721 	int i, avail;
3722 	struct mbuf **mp;
3723 	iflib_txq_t txq;
3724 
3725 	txq = r->cookie;
3726 
3727 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3728 	CALLOUT_LOCK(txq);
3729 	callout_stop(&txq->ift_timer);
3730 	CALLOUT_UNLOCK(txq);
3731 
3732 	avail = IDXDIFF(pidx, cidx, r->size);
3733 	for (i = 0; i < avail; i++) {
3734 		mp = _ring_peek_one(r, cidx, i, avail - i);
3735 		if (__predict_false(*mp == (struct mbuf *)txq))
3736 			continue;
3737 		m_freem(*mp);
3738 	}
3739 	MPASS(ifmp_ring_is_stalled(r) == 0);
3740 	return (avail);
3741 }
3742 
3743 static void
3744 iflib_ifmp_purge(iflib_txq_t txq)
3745 {
3746 	struct ifmp_ring *r;
3747 
3748 	r = txq->ift_br;
3749 	r->drain = iflib_txq_drain_free;
3750 	r->can_drain = iflib_txq_drain_always;
3751 
3752 	ifmp_ring_check_drainage(r, r->size);
3753 
3754 	r->drain = iflib_txq_drain;
3755 	r->can_drain = iflib_txq_can_drain;
3756 }
3757 
3758 static void
3759 _task_fn_tx(void *context)
3760 {
3761 	iflib_txq_t txq = context;
3762 	if_ctx_t ctx = txq->ift_ctx;
3763 	struct ifnet *ifp = ctx->ifc_ifp;
3764 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
3765 
3766 #ifdef IFLIB_DIAGNOSTICS
3767 	txq->ift_cpu_exec_count[curcpu]++;
3768 #endif
3769 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3770 		return;
3771 	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
3772 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
3773 			netmap_tx_irq(ifp, txq->ift_id);
3774 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3775 		return;
3776 	}
3777 	if (txq->ift_db_pending)
3778 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
3779 	else if (!abdicate)
3780 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3781 	/*
3782 	 * When abdicating, we always need to check drainage, not just
3783 	 * when we don't enqueue.
3784 	 */
3785 	if (abdicate)
3786 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3787 	if (ctx->ifc_flags & IFC_LEGACY)
3788 		IFDI_INTR_ENABLE(ctx);
3789 	else {
3790 #ifdef INVARIANTS
3791 		int rc =
3792 #endif
3793 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3794 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3795 	}
3796 }
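
/*
 * Editor's note: ifc_sysctl_tx_abdicate consulted above is an operator
 * tunable; it is registered elsewhere in this file under the per-device
 * iflib sysctl node, e.g. (driver name and unit are placeholders):
 *
 *	# sysctl dev.foo.0.iflib.tx_abdicate=1
 *
 * When set, the transmitting thread abdicates the ring to this task
 * instead of draining inline, trading some latency for better batching.
 */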
3797 
3798 static void
3799 _task_fn_rx(void *context)
3800 {
3801 	iflib_rxq_t rxq = context;
3802 	if_ctx_t ctx = rxq->ifr_ctx;
3803 	bool more;
3804 	uint16_t budget;
3805 
3806 #ifdef IFLIB_DIAGNOSTICS
3807 	rxq->ifr_cpu_exec_count[curcpu]++;
3808 #endif
3809 	DBG_COUNTER_INC(task_fn_rxs);
3810 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3811 		return;
3812 	more = true;
3813 #ifdef DEV_NETMAP
3814 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
3815 		u_int work = 0;
3816 		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
3817 			more = false;
3818 		}
3819 	}
3820 #endif
3821 	budget = ctx->ifc_sysctl_rx_budget;
3822 	if (budget == 0)
3823 		budget = 16;	/* XXX */
3824 	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
3825 		if (ctx->ifc_flags & IFC_LEGACY)
3826 			IFDI_INTR_ENABLE(ctx);
3827 		else {
3828 #ifdef INVARIANTS
3829 			int rc =
3830 #endif
3831 				IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
3832 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3833 			DBG_COUNTER_INC(rx_intr_enables);
3834 		}
3835 	}
3836 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3837 		return;
3838 	if (more)
3839 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
3840 }
3841 
3842 static void
3843 _task_fn_admin(void *context)
3844 {
3845 	if_ctx_t ctx = context;
3846 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
3847 	iflib_txq_t txq;
3848 	int i;
3849 	bool oactive, running, do_reset, do_watchdog;
3850 	uint32_t reset_on = hz / 2;
3851 
3852 	STATE_LOCK(ctx);
3853 	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
3854 	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
3855 	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
3856 	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
3857 	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
3858 	STATE_UNLOCK(ctx);
3859 
3860 	if ((!running && !oactive) &&
3861 	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
3862 		return;
3863 
3864 	CTX_LOCK(ctx);
3865 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3866 		CALLOUT_LOCK(txq);
3867 		callout_stop(&txq->ift_timer);
3868 		CALLOUT_UNLOCK(txq);
3869 	}
3870 	if (do_watchdog) {
3871 		ctx->ifc_watchdog_events++;
3872 		IFDI_WATCHDOG_RESET(ctx);
3873 	}
3874 	IFDI_UPDATE_ADMIN_STATUS(ctx);
3875 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3876 #ifdef DEV_NETMAP
3877 		reset_on = hz / 2;
3878 		if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
3879 			iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
3880 #endif
3881 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
3882 	}
3883 	IFDI_LINK_INTR_ENABLE(ctx);
3884 	if (do_reset)
3885 		iflib_if_init_locked(ctx);
3886 	CTX_UNLOCK(ctx);
3887 
3888 	if (LINK_ACTIVE(ctx) == 0)
3889 		return;
3890 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
3891 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
3892 }
3893 
3894 
3895 static void
3896 _task_fn_iov(void *context)
3897 {
3898 	if_ctx_t ctx = context;
3899 
3900 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3901 		return;
3902 
3903 	CTX_LOCK(ctx);
3904 	IFDI_VFLR_HANDLE(ctx);
3905 	CTX_UNLOCK(ctx);
3906 }
3907 
3908 static int
3909 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3910 {
3911 	int err;
3912 	if_int_delay_info_t info;
3913 	if_ctx_t ctx;
3914 
3915 	info = (if_int_delay_info_t)arg1;
3916 	ctx = info->iidi_ctx;
3917 	info->iidi_req = req;
3918 	info->iidi_oidp = oidp;
3919 	CTX_LOCK(ctx);
3920 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
3921 	CTX_UNLOCK(ctx);
3922 	return (err);
3923 }
3924 
3925 /*********************************************************************
3926  *
3927  *  IFNET FUNCTIONS
3928  *
3929  **********************************************************************/
3930 
3931 static void
3932 iflib_if_init_locked(if_ctx_t ctx)
3933 {
3934 	iflib_stop(ctx);
3935 	iflib_init_locked(ctx);
3936 }
3937 
3938 
3939 static void
3940 iflib_if_init(void *arg)
3941 {
3942 	if_ctx_t ctx = arg;
3943 
3944 	CTX_LOCK(ctx);
3945 	iflib_if_init_locked(ctx);
3946 	CTX_UNLOCK(ctx);
3947 }
3948 
3949 static int
3950 iflib_if_transmit(if_t ifp, struct mbuf *m)
3951 {
3952 	if_ctx_t	ctx = if_getsoftc(ifp);
3953 
3954 	iflib_txq_t txq;
3955 	int err, qidx;
3956 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
3957 
3958 	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
3959 		DBG_COUNTER_INC(tx_frees);
3960 		m_freem(m);
3961 		return (ENOBUFS);
3962 	}
3963 
3964 	MPASS(m->m_nextpkt == NULL);
3965 	qidx = 0;
3966 	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
3967 		qidx = QIDX(ctx, m);
3968 	/*
3969 	 * XXX calculate buf_ring based on flowid (divvy up bits?)
3970 	 */
3971 	txq = &ctx->ifc_txqs[qidx];
3972 
3973 #ifdef DRIVER_BACKPRESSURE
3974 	if (txq->ift_closed) {
3975 		while (m != NULL) {
3976 			next = m->m_nextpkt;
3977 			m->m_nextpkt = NULL;
3978 			m_freem(m);
3979 			m = next;
3980 		}
3981 		return (ENOBUFS);
3982 	}
3983 #endif
3984 #ifdef notyet
3985 	qidx = count = 0;
3986 	mp = marr;
3987 	next = m;
3988 	do {
3989 		count++;
3990 		next = next->m_nextpkt;
3991 	} while (next != NULL);
3992 
3993 	if (count > nitems(marr))
3994 		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
3995 			/* XXX check nextpkt */
3996 			m_freem(m);
3997 			/* XXX simplify for now */
3998 			DBG_COUNTER_INC(tx_frees);
3999 			return (ENOBUFS);
4000 		}
4001 	for (next = m, i = 0; next != NULL; i++) {
4002 		mp[i] = next;
4003 		next = next->m_nextpkt;
4004 		mp[i]->m_nextpkt = NULL;
4005 	}
4006 #endif
4007 	DBG_COUNTER_INC(tx_seen);
4008 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
4009 
4010 	if (abdicate)
4011 		GROUPTASK_ENQUEUE(&txq->ift_task);
4012 	if (err) {
4013 		if (!abdicate)
4014 			GROUPTASK_ENQUEUE(&txq->ift_task);
4015 		/* support forthcoming later */
4016 #ifdef DRIVER_BACKPRESSURE
4017 		txq->ift_closed = TRUE;
4018 #endif
4019 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4020 		m_freem(m);
4021 	}
4022 
4023 	return (err);
4024 }
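
/*
 * Editor's note: a simplified model of the queue selection performed in
 * iflib_if_transmit() above.  QIDX() (defined earlier in this file) maps
 * the mbuf's RSS flowid onto the available TX queue sets so that packets
 * of a single flow stay ordered on one ring; approximately:
 *
 *	if (NTXQSETS(ctx) > 1 && M_HASHTYPE_GET(m))
 *		qidx = m->m_pkthdr.flowid % NTXQSETS(ctx);	// sketch only
 *	else
 *		qidx = 0;
 */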
4025 
4026 static void
4027 iflib_if_qflush(if_t ifp)
4028 {
4029 	if_ctx_t ctx = if_getsoftc(ifp);
4030 	iflib_txq_t txq = ctx->ifc_txqs;
4031 	int i;
4032 
4033 	STATE_LOCK(ctx);
4034 	ctx->ifc_flags |= IFC_QFLUSH;
4035 	STATE_UNLOCK(ctx);
4036 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
4037 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
4038 			iflib_txq_check_drain(txq, 0);
4039 	STATE_LOCK(ctx);
4040 	ctx->ifc_flags &= ~IFC_QFLUSH;
4041 	STATE_UNLOCK(ctx);
4042 
4043 	if_qflush(ifp);
4044 }
4045 
4046 
4047 #define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
4048 		     IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
4049 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)
4050 
4051 static int
4052 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
4053 {
4054 	if_ctx_t ctx = if_getsoftc(ifp);
4055 	struct ifreq	*ifr = (struct ifreq *)data;
4056 #if defined(INET) || defined(INET6)
4057 	struct ifaddr	*ifa = (struct ifaddr *)data;
4058 #endif
4059 	bool		avoid_reset = FALSE;
4060 	int		err = 0, reinit = 0, bits;
4061 
4062 	switch (command) {
4063 	case SIOCSIFADDR:
4064 #ifdef INET
4065 		if (ifa->ifa_addr->sa_family == AF_INET)
4066 			avoid_reset = TRUE;
4067 #endif
4068 #ifdef INET6
4069 		if (ifa->ifa_addr->sa_family == AF_INET6)
4070 			avoid_reset = TRUE;
4071 #endif
4072 		/*
4073 		** Calling init results in link renegotiation,
4074 		** so we avoid doing it when possible.
4075 		*/
4076 		if (avoid_reset) {
4077 			if_setflagbits(ifp, IFF_UP, 0);
4078 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
4079 				reinit = 1;
4080 #ifdef INET
4081 			if (!(if_getflags(ifp) & IFF_NOARP))
4082 				arp_ifinit(ifp, ifa);
4083 #endif
4084 		} else
4085 			err = ether_ioctl(ifp, command, data);
4086 		break;
4087 	case SIOCSIFMTU:
4088 		CTX_LOCK(ctx);
4089 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
4090 			CTX_UNLOCK(ctx);
4091 			break;
4092 		}
4093 		bits = if_getdrvflags(ifp);
4094 		/* stop the driver and free any clusters before proceeding */
4095 		iflib_stop(ctx);
4096 
4097 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
4098 			STATE_LOCK(ctx);
4099 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
4100 				ctx->ifc_flags |= IFC_MULTISEG;
4101 			else
4102 				ctx->ifc_flags &= ~IFC_MULTISEG;
4103 			STATE_UNLOCK(ctx);
4104 			err = if_setmtu(ifp, ifr->ifr_mtu);
4105 		}
4106 		iflib_init_locked(ctx);
4107 		STATE_LOCK(ctx);
4108 		if_setdrvflags(ifp, bits);
4109 		STATE_UNLOCK(ctx);
4110 		CTX_UNLOCK(ctx);
4111 		break;
4112 	case SIOCSIFFLAGS:
4113 		CTX_LOCK(ctx);
4114 		if (if_getflags(ifp) & IFF_UP) {
4115 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4116 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4117 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4118 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4119 				}
4120 			} else
4121 				reinit = 1;
4122 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4123 			iflib_stop(ctx);
4124 		}
4125 		ctx->ifc_if_flags = if_getflags(ifp);
4126 		CTX_UNLOCK(ctx);
4127 		break;
4128 	case SIOCADDMULTI:
4129 	case SIOCDELMULTI:
4130 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4131 			CTX_LOCK(ctx);
4132 			IFDI_INTR_DISABLE(ctx);
4133 			IFDI_MULTI_SET(ctx);
4134 			IFDI_INTR_ENABLE(ctx);
4135 			CTX_UNLOCK(ctx);
4136 		}
4137 		break;
4138 	case SIOCSIFMEDIA:
4139 		CTX_LOCK(ctx);
4140 		IFDI_MEDIA_SET(ctx);
4141 		CTX_UNLOCK(ctx);
4142 		/* falls thru */
4143 	case SIOCGIFMEDIA:
4144 	case SIOCGIFXMEDIA:
4145 		err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
4146 		break;
4147 	case SIOCGI2C:
4148 	{
4149 		struct ifi2creq i2c;
4150 
4151 		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
4152 		if (err != 0)
4153 			break;
4154 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4155 			err = EINVAL;
4156 			break;
4157 		}
4158 		if (i2c.len > sizeof(i2c.data)) {
4159 			err = EINVAL;
4160 			break;
4161 		}
4162 
4163 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4164 			err = copyout(&i2c, ifr_data_get_ptr(ifr),
4165 			    sizeof(i2c));
4166 		break;
4167 	}
4168 	case SIOCSIFCAP:
4169 	{
4170 		int mask, setmask;
4171 
4172 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
4173 		setmask = 0;
4174 #ifdef TCP_OFFLOAD
4175 		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
4176 #endif
4177 		setmask |= (mask & IFCAP_FLAGS);
4178 
4179 		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
4180 			setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4181 		if ((mask & IFCAP_WOL) &&
4182 		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0)
4183 			setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC));
4184 		if_vlancap(ifp);
4185 		/*
4186 		 * We want to ensure that traffic has stopped before we change any of the flags.
4187 		 */
4188 		if (setmask) {
4189 			CTX_LOCK(ctx);
4190 			bits = if_getdrvflags(ifp);
4191 			if (bits & IFF_DRV_RUNNING)
4192 				iflib_stop(ctx);
4193 			STATE_LOCK(ctx);
4194 			if_togglecapenable(ifp, setmask);
4195 			STATE_UNLOCK(ctx);
4196 			if (bits & IFF_DRV_RUNNING)
4197 				iflib_init_locked(ctx);
4198 			STATE_LOCK(ctx);
4199 			if_setdrvflags(ifp, bits);
4200 			STATE_UNLOCK(ctx);
4201 			CTX_UNLOCK(ctx);
4202 		}
4203 		break;
4204 	}
4205 	case SIOCGPRIVATE_0:
4206 	case SIOCSDRVSPEC:
4207 	case SIOCGDRVSPEC:
4208 		CTX_LOCK(ctx);
4209 		err = IFDI_PRIV_IOCTL(ctx, command, data);
4210 		CTX_UNLOCK(ctx);
4211 		break;
4212 	default:
4213 		err = ether_ioctl(ifp, command, data);
4214 		break;
4215 	}
4216 	if (reinit)
4217 		iflib_if_init(ctx);
4218 	return (err);
4219 }
4220 
4221 static uint64_t
4222 iflib_if_get_counter(if_t ifp, ift_counter cnt)
4223 {
4224 	if_ctx_t ctx = if_getsoftc(ifp);
4225 
4226 	return (IFDI_GET_COUNTER(ctx, cnt));
4227 }
4228 
4229 /*********************************************************************
4230  *
4231  *  OTHER FUNCTIONS EXPORTED TO THE STACK
4232  *
4233  **********************************************************************/
4234 
4235 static void
4236 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
4237 {
4238 	if_ctx_t ctx = if_getsoftc(ifp);
4239 
4240 	if ((void *)ctx != arg)
4241 		return;
4242 
4243 	if ((vtag == 0) || (vtag > 4095))
4244 		return;
4245 
4246 	CTX_LOCK(ctx);
4247 	IFDI_VLAN_REGISTER(ctx, vtag);
4248 	/* Re-init to load the changes */
4249 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4250 		iflib_if_init_locked(ctx);
4251 	CTX_UNLOCK(ctx);
4252 }
4253 
4254 static void
4255 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
4256 {
4257 	if_ctx_t ctx = if_getsoftc(ifp);
4258 
4259 	if ((void *)ctx != arg)
4260 		return;
4261 
4262 	if ((vtag == 0) || (vtag > 4095))
4263 		return;
4264 
4265 	CTX_LOCK(ctx);
4266 	IFDI_VLAN_UNREGISTER(ctx, vtag);
4267 	/* Re-init to load the changes */
4268 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4269 		iflib_if_init_locked(ctx);
4270 	CTX_UNLOCK(ctx);
4271 }
4272 
4273 static void
4274 iflib_led_func(void *arg, int onoff)
4275 {
4276 	if_ctx_t ctx = arg;
4277 
4278 	CTX_LOCK(ctx);
4279 	IFDI_LED_FUNC(ctx, onoff);
4280 	CTX_UNLOCK(ctx);
4281 }
4282 
4283 /*********************************************************************
4284  *
4285  *  BUS FUNCTION DEFINITIONS
4286  *
4287  **********************************************************************/
4288 
4289 int
4290 iflib_device_probe(device_t dev)
4291 {
4292 	pci_vendor_info_t *ent;
4293 
4294 	uint16_t	pci_vendor_id, pci_device_id;
4295 	uint16_t	pci_subvendor_id, pci_subdevice_id;
4296 	uint16_t	pci_rev_id;
4297 	if_shared_ctx_t sctx;
4298 
4299 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4300 		return (ENOTSUP);
4301 
4302 	pci_vendor_id = pci_get_vendor(dev);
4303 	pci_device_id = pci_get_device(dev);
4304 	pci_subvendor_id = pci_get_subvendor(dev);
4305 	pci_subdevice_id = pci_get_subdevice(dev);
4306 	pci_rev_id = pci_get_revid(dev);
4307 	if (sctx->isc_parse_devinfo != NULL)
4308 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4309 
4310 	ent = sctx->isc_vendor_info;
4311 	while (ent->pvi_vendor_id != 0) {
4312 		if (pci_vendor_id != ent->pvi_vendor_id) {
4313 			ent++;
4314 			continue;
4315 		}
4316 		if ((pci_device_id == ent->pvi_device_id) &&
4317 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4318 		     (ent->pvi_subvendor_id == 0)) &&
4319 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4320 		     (ent->pvi_subdevice_id == 0)) &&
4321 		    ((pci_rev_id == ent->pvi_rev_id) ||
4322 		     (ent->pvi_rev_id == 0))) {
4323 
4324 			device_set_desc_copy(dev, ent->pvi_name);
4325 			/* This needs to be changed to zero if the bus probing code
4326 			 * ever stops re-probing on best match, because the sctx
4327 			 * may have its values overwritten by register calls
4328 			 * in subsequent probes.
4329 			 */
4330 			return (BUS_PROBE_DEFAULT);
4331 		}
4332 		ent++;
4333 	}
4334 	return (ENXIO);
4335 }
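
/*
 * Editor's note: the vendor info table walked in iflib_device_probe() is
 * supplied by the driver.  A sketch of such a table using the PVID()/
 * PVID_END helpers from net/iflib.h (IDs and strings are examples only):
 *
 *	static pci_vendor_info_t foo_vendor_info_array[] = {
 *		PVID(0x8086, 0x10d3, "Intel(R) PRO/1000 Network Connection"),
 *		PVID_END
 *	};
 */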
4336 
4337 static void
4338 iflib_reset_qvalues(if_ctx_t ctx)
4339 {
4340 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4341 	if_shared_ctx_t sctx = ctx->ifc_sctx;
4342 	device_t dev = ctx->ifc_dev;
4343 	int i;
4344 
4345 	scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES;
4346 	scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH;
4347 	/*
4348 	 * XXX sanity check that ntxd & nrxd are a power of 2
4349 	 */
4350 	if (ctx->ifc_sysctl_ntxqs != 0)
4351 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4352 	if (ctx->ifc_sysctl_nrxqs != 0)
4353 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4354 
4355 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4356 		if (ctx->ifc_sysctl_ntxds[i] != 0)
4357 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4358 		else
4359 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4360 	}
4361 
4362 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4363 		if (ctx->ifc_sysctl_nrxds[i] != 0)
4364 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4365 		else
4366 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4367 	}
4368 
4369 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4370 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4371 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4372 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4373 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4374 		}
4375 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4376 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4377 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4378 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4379 		}
4380 	}
4381 
4382 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4383 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4384 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4385 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4386 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4387 		}
4388 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4389 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4390 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4391 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4392 		}
4393 	}
4394 }
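
/*
 * Editor's note: the ifc_sysctl_* overrides consumed by
 * iflib_reset_qvalues() come from loader tunables / sysctls registered in
 * the per-device iflib node elsewhere in this file, e.g. (device name is
 * a placeholder):
 *
 *	dev.foo.0.iflib.override_nrxqs=4
 *	dev.foo.0.iflib.override_ntxds=4096
 *
 * Values outside the driver's isc_n[tr]xd_min/max bounds are clamped with
 * a console message, as implemented above.
 */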
4395 
4396 int
4397 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
4398 {
4399 	int err, rid, msix;
4400 	if_ctx_t ctx;
4401 	if_t ifp;
4402 	if_softc_ctx_t scctx;
4403 	int i;
4404 	uint16_t main_txq;
4405 	uint16_t main_rxq;
4406 
4407 
4408 	ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
4409 
4410 	if (sc == NULL) {
4411 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
4412 		device_set_softc(dev, ctx);
4413 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
4414 	}
4415 
4416 	ctx->ifc_sctx = sctx;
4417 	ctx->ifc_dev = dev;
4418 	ctx->ifc_softc = sc;
4419 
4420 	if ((err = iflib_register(ctx)) != 0) {
4421 		if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4422 			free(sc, M_IFLIB);
4423 		free(ctx, M_IFLIB);
4424 		device_printf(dev, "iflib_register failed %d\n", err);
4425 		return (err);
4426 	}
4427 	iflib_add_device_sysctl_pre(ctx);
4428 
4429 	scctx = &ctx->ifc_softc_ctx;
4430 	ifp = ctx->ifc_ifp;
4431 
4432 	iflib_reset_qvalues(ctx);
4433 	CTX_LOCK(ctx);
4434 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
4435 		CTX_UNLOCK(ctx);
4436 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4437 		return (err);
4438 	}
4439 	_iflib_pre_assert(scctx);
4440 	ctx->ifc_txrx = *scctx->isc_txrx;
4441 
4442 #ifdef INVARIANTS
4443 	MPASS(scctx->isc_capabilities);
4444 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
4445 		MPASS(scctx->isc_tx_csum_flags);
4446 #endif
4447 
4448 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS);
4449 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
4450 
4451 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4452 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4453 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4454 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4455 
4456 #ifdef ACPI_DMAR
4457 	if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
4458 		ctx->ifc_flags |= IFC_DMAR;
4459 #elif !(defined(__i386__) || defined(__amd64__))
4460 	/* set unconditionally for !x86 */
4461 	ctx->ifc_flags |= IFC_DMAR;
4462 #endif
4463 
4464 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4465 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4466 
4467 	/* XXX change for per-queue sizes */
4468 	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
4469 		      scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4470 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4471 		if (!powerof2(scctx->isc_nrxd[i])) {
4472 			/* round down instead? */
4473 			device_printf(dev, "# rx descriptors must be a power of 2\n");
4474 			err = EINVAL;
4475 			goto fail;
4476 		}
4477 	}
4478 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4479 		if (!powerof2(scctx->isc_ntxd[i])) {
4480 			device_printf(dev,
4481 			    "# tx descriptors must be a power of 2");
4482 			err = EINVAL;
4483 			goto fail;
4484 		}
4485 	}
4486 
4487 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4488 	    MAX_SINGLE_PACKET_FRACTION)
4489 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4490 		    MAX_SINGLE_PACKET_FRACTION);
4491 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4492 	    MAX_SINGLE_PACKET_FRACTION)
4493 		scctx->isc_tx_tso_segments_max = max(1,
4494 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4495 
4496 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4497 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
4498 		/*
4499 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4500 		 * but some MACs do.
4501 		 */
4502 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4503 		    IP_MAXPACKET));
4504 		/*
4505 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4506 		 * into account.  In the worst case, each of these calls will
4507 		 * add another mbuf and, thus, the requirement for another DMA
4508 		 * segment.  So for best performance, it doesn't make sense to
4509 		 * advertise a maximum of TSO segments that typically will
4510 		 * require defragmentation in iflib_encap().
4511 		 */
4512 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4513 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4514 	}
4515 	if (scctx->isc_rss_table_size == 0)
4516 		scctx->isc_rss_table_size = 64;
4517 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4518 
4519 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4520 	/* XXX format name */
4521 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
4522 
4523 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
4524 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4525 		device_printf(dev, "Unable to fetch CPU list\n");
4526 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4527 	}
4528 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4529 
4530 	/*
4531 	** Now set up MSI or MSI-X; this should
4532 	** return the number of supported
4533 	** vectors (1 for MSI).
4534 	*/
4535 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
4536 		msix = scctx->isc_vectors;
4537 	} else if (scctx->isc_msix_bar != 0)
4538 	       /*
4539 		* The simple fact that isc_msix_bar is not 0 does not mean
4540 		* we have a good value there that is known to work.
4541 		*/
4542 		msix = iflib_msix_init(ctx);
4543 	else {
4544 		scctx->isc_vectors = 1;
4545 		scctx->isc_ntxqsets = 1;
4546 		scctx->isc_nrxqsets = 1;
4547 		scctx->isc_intr = IFLIB_INTR_LEGACY;
4548 		msix = 0;
4549 	}
4550 	/* Get memory for the station queues */
4551 	if ((err = iflib_queues_alloc(ctx))) {
4552 		device_printf(dev, "Unable to allocate queue memory\n");
4553 		goto fail;
4554 	}
4555 
4556 	if ((err = iflib_qset_structures_setup(ctx)))
4557 		goto fail_queues;
4558 
4559 	/*
4560 	 * Group taskqueues aren't properly set up until SMP is started,
4561 	 * so we disable interrupts until we can handle them post
4562 	 * SI_SUB_SMP.
4563 	 *
4564 	 * XXX: disabling interrupts doesn't actually work, at least for
4565 	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
4566 	 * we do null handling and depend on this not causing too large an
4567 	 * interrupt storm.
4568 	 */
4569 	IFDI_INTR_DISABLE(ctx);
4570 	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
4571 		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
4572 		goto fail_intr_free;
4573 	}
4574 	if (msix <= 1) {
4575 		rid = 0;
4576 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
4577 			MPASS(msix == 1);
4578 			rid = 1;
4579 		}
4580 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
4581 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
4582 			goto fail_intr_free;
4583 		}
4584 	}
4585 
4586 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4587 
4588 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4589 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4590 		goto fail_detach;
4591 	}
4592 
4593 	/*
4594 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4595 	 * This must appear after the call to ether_ifattach() because
4596 	 * ether_ifattach() sets if_hdrlen to the default value.
4597 	 */
4598 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4599 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
4600 
4601 	if ((err = iflib_netmap_attach(ctx))) {
4602 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
4603 		goto fail_detach;
4604 	}
4605 	*ctxp = ctx;
4606 
4607 	NETDUMP_SET(ctx->ifc_ifp, iflib);
4608 
4609 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4610 	iflib_add_device_sysctl_post(ctx);
4611 	ctx->ifc_flags |= IFC_INIT_DONE;
4612 	CTX_UNLOCK(ctx);
4613 	return (0);
4614 fail_detach:
4615 	ether_ifdetach(ctx->ifc_ifp);
4616 fail_intr_free:
4617 	if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
4618 		pci_release_msi(ctx->ifc_dev);
4619 fail_queues:
4620 	iflib_tx_structures_free(ctx);
4621 	iflib_rx_structures_free(ctx);
4622 fail:
4623 	IFDI_DETACH(ctx);
4624 	CTX_UNLOCK(ctx);
4625 	return (err);
4626 }
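
/*
 * Editor's note: drivers typically reach iflib_device_register() through
 * newbus glue along these lines (method and function names below are
 * placeholders, not from this file):
 *
 *	static device_method_t foo_methods[] = {
 *		DEVMETHOD(device_register, foo_register),
 *		DEVMETHOD(device_probe, iflib_device_probe),
 *		DEVMETHOD(device_attach, iflib_device_attach),
 *		DEVMETHOD(device_detach, iflib_device_detach),
 *		DEVMETHOD_END
 *	};
 *
 * where foo_register() returns the driver's if_shared_ctx_t, the value
 * DEVICE_REGISTER() resolves to in the probe/attach paths here.
 */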
4627 
4628 int
4629 iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
4630 					  struct iflib_cloneattach_ctx *clctx)
4631 {
4632 	int err;
4633 	if_ctx_t ctx;
4634 	if_t ifp;
4635 	if_softc_ctx_t scctx;
4636 	int i;
4637 	void *sc;
4638 	uint16_t main_txq;
4639 	uint16_t main_rxq;
4640 
4641 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
4642 	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
4643 	ctx->ifc_flags |= IFC_SC_ALLOCATED;
4644 	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
4645 		ctx->ifc_flags |= IFC_PSEUDO;
4646 
4647 	ctx->ifc_sctx = sctx;
4648 	ctx->ifc_softc = sc;
4649 	ctx->ifc_dev = dev;
4650 
4651 	if ((err = iflib_register(ctx)) != 0) {
4652 		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
4653 		free(sc, M_IFLIB);
4654 		free(ctx, M_IFLIB);
4655 		return (err);
4656 	}
4657 	iflib_add_device_sysctl_pre(ctx);
4658 
4659 	scctx = &ctx->ifc_softc_ctx;
4660 	ifp = ctx->ifc_ifp;
4661 
4662 	/*
4663 	 * XXX sanity check that ntxd & nrxd are a power of 2
4664 	 */
4665 	iflib_reset_qvalues(ctx);
4666 
4667 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
4668 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4669 		return (err);
4670 	}
4671 	if (sctx->isc_flags & IFLIB_GEN_MAC)
4672 		iflib_gen_mac(ctx);
4673 	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
4674 								clctx->cc_params)) != 0) {
4675 		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
4676 		return (err);
4677 	}
4678 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
4679 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
4680 	ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO);
4681 
4682 #ifdef INVARIANTS
4683 	MPASS(scctx->isc_capabilities);
4684 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
4685 		MPASS(scctx->isc_tx_csum_flags);
4686 #endif
4687 
4688 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
4689 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
4690 
4691 	ifp->if_flags |= IFF_NOGROUP;
4692 	if (sctx->isc_flags & IFLIB_PSEUDO) {
4693 		ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4694 
4695 		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4696 			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4697 			goto fail_detach;
4698 		}
4699 		*ctxp = ctx;
4700 
4701 		/*
4702 		 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4703 		 * This must appear after the call to ether_ifattach() because
4704 		 * ether_ifattach() sets if_hdrlen to the default value.
4705 		 */
4706 		if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4707 			if_setifheaderlen(ifp,
4708 			    sizeof(struct ether_vlan_header));
4709 
4710 		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4711 		iflib_add_device_sysctl_post(ctx);
4712 		ctx->ifc_flags |= IFC_INIT_DONE;
4713 		return (0);
4714 	}
4715 	_iflib_pre_assert(scctx);
4716 	ctx->ifc_txrx = *scctx->isc_txrx;
4717 
4718 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4719 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4720 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4721 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4722 
4723 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4724 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4725 
4726 	/* XXX change for per-queue sizes */
4727 	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
4728 		      scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4729 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4730 		if (!powerof2(scctx->isc_nrxd[i])) {
4731 			/* round down instead? */
4732 			device_printf(dev, "# rx descriptors must be a power of 2\n");
4733 			err = EINVAL;
4734 			goto fail;
4735 		}
4736 	}
4737 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4738 		if (!powerof2(scctx->isc_ntxd[i])) {
4739 			device_printf(dev,
4740 			    "# tx descriptors must be a power of 2");
4741 			err = EINVAL;
4742 			goto fail;
4743 		}
4744 	}
4745 
4746 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4747 	    MAX_SINGLE_PACKET_FRACTION)
4748 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4749 		    MAX_SINGLE_PACKET_FRACTION);
4750 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4751 	    MAX_SINGLE_PACKET_FRACTION)
4752 		scctx->isc_tx_tso_segments_max = max(1,
4753 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4754 
4755 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4756 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
4757 		/*
4758 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4759 		 * but some MACs do.
4760 		 */
4761 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4762 		    IP_MAXPACKET));
4763 		/*
4764 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4765 		 * into account.  In the worst case, each of these calls will
4766 		 * add another mbuf and, thus, the requirement for another DMA
4767 		 * segment.  So for best performance, it doesn't make sense to
4768 		 * advertise a maximum of TSO segments that typically will
4769 		 * require defragmentation in iflib_encap().
4770 		 */
4771 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4772 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4773 	}
4774 	if (scctx->isc_rss_table_size == 0)
4775 		scctx->isc_rss_table_size = 64;
4776 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4777 
4778 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4779 	/* XXX format name */
4780 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
4781 
4782 	/* XXX --- can support > 1 -- but keep it simple for now */
4783 	scctx->isc_intr = IFLIB_INTR_LEGACY;
4784 
4785 	/* Get memory for the station queues */
4786 	if ((err = iflib_queues_alloc(ctx))) {
4787 		device_printf(dev, "Unable to allocate queue memory\n");
4788 		goto fail;
4789 	}
4790 
4791 	if ((err = iflib_qset_structures_setup(ctx))) {
4792 		device_printf(dev, "qset structure setup failed %d\n", err);
4793 		goto fail_queues;
4794 	}
4795 
4796 	/*
4797 	 * XXX What if anything do we want to do about interrupts?
4798 	 */
4799 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4800 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4801 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4802 		goto fail_detach;
4803 	}
4804 
4805 	/*
4806 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4807 	 * This must appear after the call to ether_ifattach() because
4808 	 * ether_ifattach() sets if_hdrlen to the default value.
4809 	 */
4810 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4811 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
4812 
4813 	/* XXX handle more than one queue */
4814 	for (i = 0; i < scctx->isc_nrxqsets; i++)
4815 		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
4816 
4817 	*ctxp = ctx;
4818 
4819 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4820 	iflib_add_device_sysctl_post(ctx);
4821 	ctx->ifc_flags |= IFC_INIT_DONE;
4822 	return (0);
4823 fail_detach:
4824 	ether_ifdetach(ctx->ifc_ifp);
4825 fail_queues:
4826 	iflib_tx_structures_free(ctx);
4827 	iflib_rx_structures_free(ctx);
4828 fail:
4829 	IFDI_DETACH(ctx);
4830 	return (err);
4831 }
4832 
4833 int
4834 iflib_pseudo_deregister(if_ctx_t ctx)
4835 {
4836 	if_t ifp = ctx->ifc_ifp;
4837 	iflib_txq_t txq;
4838 	iflib_rxq_t rxq;
4839 	int i, j;
4840 	struct taskqgroup *tqg;
4841 	iflib_fl_t fl;
4842 
4843 	/* Unregister VLAN events */
4844 	if (ctx->ifc_vlan_attach_event != NULL)
4845 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4846 	if (ctx->ifc_vlan_detach_event != NULL)
4847 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4848 
4849 	ether_ifdetach(ifp);
4850 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
4851 	CTX_LOCK_DESTROY(ctx);
4852 	/* XXX drain any dependent tasks */
4853 	tqg = qgroup_if_io_tqg;
4854 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4855 		callout_drain(&txq->ift_timer);
4856 		if (txq->ift_task.gt_uniq != NULL)
4857 			taskqgroup_detach(tqg, &txq->ift_task);
4858 	}
4859 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4860 		if (rxq->ifr_task.gt_uniq != NULL)
4861 			taskqgroup_detach(tqg, &rxq->ifr_task);
4862 
4863 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
4864 			free(fl->ifl_rx_bitmap, M_IFLIB);
4865 	}
4866 	tqg = qgroup_if_config_tqg;
4867 	if (ctx->ifc_admin_task.gt_uniq != NULL)
4868 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
4869 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
4870 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
4871 
4872 	if_free(ifp);
4873 
4874 	iflib_tx_structures_free(ctx);
4875 	iflib_rx_structures_free(ctx);
4876 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4877 		free(ctx->ifc_softc, M_IFLIB);
4878 	free(ctx, M_IFLIB);
4879 	return (0);
4880 }
4881 
4882 int
4883 iflib_device_attach(device_t dev)
4884 {
4885 	if_ctx_t ctx;
4886 	if_shared_ctx_t sctx;
4887 
4888 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4889 		return (ENOTSUP);
4890 
4891 	pci_enable_busmaster(dev);
4892 
4893 	return (iflib_device_register(dev, NULL, sctx, &ctx));
4894 }
4895 
4896 int
4897 iflib_device_deregister(if_ctx_t ctx)
4898 {
4899 	if_t ifp = ctx->ifc_ifp;
4900 	iflib_txq_t txq;
4901 	iflib_rxq_t rxq;
4902 	device_t dev = ctx->ifc_dev;
4903 	int i, j;
4904 	struct taskqgroup *tqg;
4905 	iflib_fl_t fl;
4906 
4907 	/* Make sure VLANS are not using driver */
4908 	if (if_vlantrunkinuse(ifp)) {
4909 		device_printf(dev, "VLAN in use, detach first\n");
4910 		return (EBUSY);
4911 	}
4912 
4913 	CTX_LOCK(ctx);
4914 	ctx->ifc_in_detach = 1;
4915 	iflib_stop(ctx);
4916 	CTX_UNLOCK(ctx);
4917 
4918 	/* Unregister VLAN events */
4919 	if (ctx->ifc_vlan_attach_event != NULL)
4920 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4921 	if (ctx->ifc_vlan_detach_event != NULL)
4922 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4923 
4924 	iflib_netmap_detach(ifp);
4925 	ether_ifdetach(ifp);
4926 	if (ctx->ifc_led_dev != NULL)
4927 		led_destroy(ctx->ifc_led_dev);
4928 	/* XXX drain any dependent tasks */
4929 	tqg = qgroup_if_io_tqg;
4930 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4931 		callout_drain(&txq->ift_timer);
4932 		if (txq->ift_task.gt_uniq != NULL)
4933 			taskqgroup_detach(tqg, &txq->ift_task);
4934 	}
4935 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4936 		if (rxq->ifr_task.gt_uniq != NULL)
4937 			taskqgroup_detach(tqg, &rxq->ifr_task);
4938 
4939 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
4940 			free(fl->ifl_rx_bitmap, M_IFLIB);
4941 
4942 	}
4943 	tqg = qgroup_if_config_tqg;
4944 	if (ctx->ifc_admin_task.gt_uniq != NULL)
4945 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
4946 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
4947 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
4948 	CTX_LOCK(ctx);
4949 	IFDI_DETACH(ctx);
4950 	CTX_UNLOCK(ctx);
4951 
4952 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
4953 	CTX_LOCK_DESTROY(ctx);
4954 	device_set_softc(ctx->ifc_dev, NULL);
4955 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
4956 		pci_release_msi(dev);
4957 	}
4958 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
4959 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
4960 	}
4961 	if (ctx->ifc_msix_mem != NULL) {
4962 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
4963 			ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
4964 		ctx->ifc_msix_mem = NULL;
4965 	}
4966 
4967 	bus_generic_detach(dev);
4968 	if_free(ifp);
4969 
4970 	iflib_tx_structures_free(ctx);
4971 	iflib_rx_structures_free(ctx);
4972 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4973 		free(ctx->ifc_softc, M_IFLIB);
4974 	free(ctx, M_IFLIB);
4975 	return (0);
4976 }
4977 
4978 
4979 int
4980 iflib_device_detach(device_t dev)
4981 {
4982 	if_ctx_t ctx = device_get_softc(dev);
4983 
4984 	return (iflib_device_deregister(ctx));
4985 }
4986 
4987 int
4988 iflib_device_suspend(device_t dev)
4989 {
4990 	if_ctx_t ctx = device_get_softc(dev);
4991 
4992 	CTX_LOCK(ctx);
4993 	IFDI_SUSPEND(ctx);
4994 	CTX_UNLOCK(ctx);
4995 
4996 	return (bus_generic_suspend(dev));
4997 }
4998 int
4999 iflib_device_shutdown(device_t dev)
5000 {
5001 	if_ctx_t ctx = device_get_softc(dev);
5002 
5003 	CTX_LOCK(ctx);
5004 	IFDI_SHUTDOWN(ctx);
5005 	CTX_UNLOCK(ctx);
5006 
5007 	return (bus_generic_suspend(dev));
5008 }
5009 
5010 
5011 int
5012 iflib_device_resume(device_t dev)
5013 {
5014 	if_ctx_t ctx = device_get_softc(dev);
5015 	iflib_txq_t txq = ctx->ifc_txqs;
5016 
5017 	CTX_LOCK(ctx);
5018 	IFDI_RESUME(ctx);
5019 	iflib_init_locked(ctx);
5020 	CTX_UNLOCK(ctx);
5021 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
5022 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
5023 
5024 	return (bus_generic_resume(dev));
5025 }
5026 
5027 int
5028 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
5029 {
5030 	int error;
5031 	if_ctx_t ctx = device_get_softc(dev);
5032 
5033 	CTX_LOCK(ctx);
5034 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
5035 	CTX_UNLOCK(ctx);
5036 
5037 	return (error);
5038 }
5039 
5040 void
5041 iflib_device_iov_uninit(device_t dev)
5042 {
5043 	if_ctx_t ctx = device_get_softc(dev);
5044 
5045 	CTX_LOCK(ctx);
5046 	IFDI_IOV_UNINIT(ctx);
5047 	CTX_UNLOCK(ctx);
5048 }
5049 
5050 int
5051 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
5052 {
5053 	int error;
5054 	if_ctx_t ctx = device_get_softc(dev);
5055 
5056 	CTX_LOCK(ctx);
5057 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
5058 	CTX_UNLOCK(ctx);
5059 
5060 	return (error);
5061 }
5062 
5063 /*********************************************************************
5064  *
5065  *  MODULE FUNCTION DEFINITIONS
5066  *
5067  **********************************************************************/
5068 
5069 /*
5070  * - Start a fast taskqueue thread for each core
5071  * - Start a taskqueue for control operations
5072  */
5073 static int
5074 iflib_module_init(void)
5075 {
5076 	return (0);
5077 }
5078 
5079 static int
5080 iflib_module_event_handler(module_t mod, int what, void *arg)
5081 {
5082 	int err;
5083 
5084 	switch (what) {
5085 	case MOD_LOAD:
5086 		if ((err = iflib_module_init()) != 0)
5087 			return (err);
5088 		break;
5089 	case MOD_UNLOAD:
5090 		return (EBUSY);
5091 	default:
5092 		return (EOPNOTSUPP);
5093 	}
5094 
5095 	return (0);
5096 }
5097 
5098 /*********************************************************************
5099  *
5100  *  PUBLIC FUNCTION DEFINITIONS
5101  *     ordered as in iflib.h
5102  *
5103  **********************************************************************/
5104 
5105 
5106 static void
5107 _iflib_assert(if_shared_ctx_t sctx)
5108 {
5109 	MPASS(sctx->isc_tx_maxsize);
5110 	MPASS(sctx->isc_tx_maxsegsize);
5111 
5112 	MPASS(sctx->isc_rx_maxsize);
5113 	MPASS(sctx->isc_rx_nsegments);
5114 	MPASS(sctx->isc_rx_maxsegsize);
5115 
5116 	MPASS(sctx->isc_nrxd_min[0]);
5117 	MPASS(sctx->isc_nrxd_max[0]);
5118 	MPASS(sctx->isc_nrxd_default[0]);
5119 	MPASS(sctx->isc_ntxd_min[0]);
5120 	MPASS(sctx->isc_ntxd_max[0]);
5121 	MPASS(sctx->isc_ntxd_default[0]);
5122 }
5123 
5124 static void
5125 _iflib_pre_assert(if_softc_ctx_t scctx)
5126 {
5127 
5128 	MPASS(scctx->isc_txrx->ift_txd_encap);
5129 	MPASS(scctx->isc_txrx->ift_txd_flush);
5130 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
5131 	MPASS(scctx->isc_txrx->ift_rxd_available);
5132 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
5133 	MPASS(scctx->isc_txrx->ift_rxd_refill);
5134 	MPASS(scctx->isc_txrx->ift_rxd_flush);
5135 }
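
/*
 * Editor's note: the methods asserted in _iflib_pre_assert() are provided
 * by the driver in its struct if_txrx and published through
 * scctx->isc_txrx from IFDI_ATTACH_PRE.  A sketch with placeholder names:
 *
 *	static struct if_txrx foo_txrx = {
 *		foo_isc_txd_encap,
 *		foo_isc_txd_flush,
 *		foo_isc_txd_credits_update,
 *		foo_isc_rxd_available,
 *		foo_isc_rxd_pkt_get,
 *		foo_isc_rxd_refill,
 *		foo_isc_rxd_flush,
 *		NULL	// ift_legacy_intr, if the driver has one
 *	};
 */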
5136 
5137 static int
5138 iflib_register(if_ctx_t ctx)
5139 {
5140 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5141 	driver_t *driver = sctx->isc_driver;
5142 	device_t dev = ctx->ifc_dev;
5143 	if_t ifp;
5144 
5145 	_iflib_assert(sctx);
5146 
5147 	CTX_LOCK_INIT(ctx);
5148 	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
5149 	ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
5150 	if (ifp == NULL) {
5151 		device_printf(dev, "cannot allocate ifnet structure\n");
5152 		return (ENOMEM);
5153 	}
5154 
5155 	/*
5156 	 * Initialize our context's device specific methods
5157 	 */
5158 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
5159 	kobj_class_compile((kobj_class_t) driver);
5160 	driver->refs++;
5161 
5162 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
5163 	if_setsoftc(ifp, ctx);
5164 	if_setdev(ifp, dev);
5165 	if_setinitfn(ifp, iflib_if_init);
5166 	if_setioctlfn(ifp, iflib_if_ioctl);
5167 	if_settransmitfn(ifp, iflib_if_transmit);
5168 	if_setqflushfn(ifp, iflib_if_qflush);
5169 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
5170 
5171 	ctx->ifc_vlan_attach_event =
5172 		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
5173 							  EVENTHANDLER_PRI_FIRST);
5174 	ctx->ifc_vlan_detach_event =
5175 		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
5176 							  EVENTHANDLER_PRI_FIRST);
5177 
5178 	ifmedia_init(&ctx->ifc_media, IFM_IMASK,
5179 					 iflib_media_change, iflib_media_status);
5180 
5181 	return (0);
5182 }
5183 
5184 
5185 static int
5186 iflib_queues_alloc(if_ctx_t ctx)
5187 {
5188 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5189 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5190 	device_t dev = ctx->ifc_dev;
5191 	int nrxqsets = scctx->isc_nrxqsets;
5192 	int ntxqsets = scctx->isc_ntxqsets;
5193 	iflib_txq_t txq;
5194 	iflib_rxq_t rxq;
5195 	iflib_fl_t fl = NULL;
5196 	int i, j, cpu, err, txconf, rxconf;
5197 	iflib_dma_info_t ifdip;
5198 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
5199 	uint32_t *txqsizes = scctx->isc_txqsizes;
5200 	uint8_t nrxqs = sctx->isc_nrxqs;
5201 	uint8_t ntxqs = sctx->isc_ntxqs;
5202 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
5203 	caddr_t *vaddrs;
5204 	uint64_t *paddrs;
5205 
5206 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
5207 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
5208 
5209 	/* Allocate the TX ring struct memory */
5210 	if (!(ctx->ifc_txqs =
5211 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5212 	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5213 		device_printf(dev, "Unable to allocate TX ring memory\n");
5214 		err = ENOMEM;
5215 		goto fail;
5216 	}
5217 
5218 	/* Now allocate the RX */
5219 	if (!(ctx->ifc_rxqs =
5220 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5221 	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5222 		device_printf(dev, "Unable to allocate RX ring memory\n");
5223 		err = ENOMEM;
5224 		goto rx_fail;
5225 	}
5226 
5227 	txq = ctx->ifc_txqs;
5228 	rxq = ctx->ifc_rxqs;
5229 
5230 	/*
5231 	 * XXX handle allocation failure
5232 	 */
5233 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
5234 		/* Set up some basics */
5235 
5236 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
5237 			device_printf(dev, "failed to allocate iflib_dma_info\n");
5238 			err = ENOMEM;
5239 			goto err_tx_desc;
5240 		}
5241 		txq->ift_ifdi = ifdip;
5242 		for (j = 0; j < ntxqs; j++, ifdip++) {
5243 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
5244 				device_printf(dev, "Unable to allocate Descriptor memory\n");
5245 				err = ENOMEM;
5246 				goto err_tx_desc;
5247 			}
5248 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
5249 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
5250 		}
5251 		txq->ift_ctx = ctx;
5252 		txq->ift_id = i;
5253 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
5254 			txq->ift_br_offset = 1;
5255 		} else {
5256 			txq->ift_br_offset = 0;
5257 		}
5258 		/* XXX fix this */
5259 		txq->ift_timer.c_cpu = cpu;
5260 
5261 		if (iflib_txsd_alloc(txq)) {
5262 			device_printf(dev, "Critical Failure setting up TX buffers\n");
5263 			err = ENOMEM;
5264 			goto err_tx_desc;
5265 		}
5266 
5267 		/* Initialize the TX lock */
5268 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
5269 		    device_get_nameunit(dev), txq->ift_id);
5270 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
5271 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
5272 
5273 		snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
5274 			 device_get_nameunit(dev), txq->ift_id);
5275 
5276 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
5277 				      iflib_txq_can_drain, M_IFLIB, M_WAITOK);
5278 		if (err) {
5279 			/* XXX free any allocated rings */
5280 			device_printf(dev, "Unable to allocate buf_ring\n");
5281 			goto err_tx_desc;
5282 		}
5283 	}
5284 
5285 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
5286 		/* Set up some basics */
5287 
5288 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
5289 			device_printf(dev, "failed to allocate iflib_dma_info\n");
5290 			err = ENOMEM;
5291 			goto err_tx_desc;
5292 		}
5293 
5294 		rxq->ifr_ifdi = ifdip;
5295 		/* XXX this needs to be changed if #rx queues != #tx queues */
5296 		rxq->ifr_ntxqirq = 1;
5297 		rxq->ifr_txqid[0] = i;
5298 		for (j = 0; j < nrxqs; j++, ifdip++) {
5299 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
5300 				device_printf(dev, "Unable to allocate Descriptor memory\n");
5301 				err = ENOMEM;
5302 				goto err_tx_desc;
5303 			}
5304 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
5305 		}
5306 		rxq->ifr_ctx = ctx;
5307 		rxq->ifr_id = i;
5308 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
5309 			rxq->ifr_fl_offset = 1;
5310 		} else {
5311 			rxq->ifr_fl_offset = 0;
5312 		}
5313 		rxq->ifr_nfl = nfree_lists;
5314 		if (!(fl = (iflib_fl_t) malloc(sizeof(struct iflib_fl) *
5315 		    nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
5316 			device_printf(dev, "Unable to allocate free list memory\n");
5317 			err = ENOMEM;
5318 			goto err_tx_desc;
5319 		}
5320 		rxq->ifr_fl = fl;
5321 		for (j = 0; j < nfree_lists; j++) {
5322 			fl[j].ifl_rxq = rxq;
5323 			fl[j].ifl_id = j;
5324 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
5325 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
5326 		}
5327 		/* Allocate receive buffers for the ring */
5328 		if (iflib_rxsd_alloc(rxq)) {
5329 			device_printf(dev,
5330 			    "Critical Failure setting up receive buffers\n");
5331 			err = ENOMEM;
5332 			goto err_rx_desc;
5333 		}
5334 
5335 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
5336 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO);
5337 	}
5338 
5339 	/* TXQs */
5340 	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
5341 	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
5342 	for (i = 0; i < ntxqsets; i++) {
5343 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
5344 
5345 		for (j = 0; j < ntxqs; j++, di++) {
5346 			vaddrs[i*ntxqs + j] = di->idi_vaddr;
5347 			paddrs[i*ntxqs + j] = di->idi_paddr;
5348 		}
5349 	}
5350 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
5351 		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
5352 		iflib_tx_structures_free(ctx);
5353 		free(vaddrs, M_IFLIB);
5354 		free(paddrs, M_IFLIB);
5355 		goto err_rx_desc;
5356 	}
5357 	free(vaddrs, M_IFLIB);
5358 	free(paddrs, M_IFLIB);
5359 
5360 	/* RXQs */
5361 	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
5362 	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
5363 	for (i = 0; i < nrxqsets; i++) {
5364 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
5365 
5366 		for (j = 0; j < nrxqs; j++, di++) {
5367 			vaddrs[i*nrxqs + j] = di->idi_vaddr;
5368 			paddrs[i*nrxqs + j] = di->idi_paddr;
5369 		}
5370 	}
5371 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
5372 		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
5373 		iflib_tx_structures_free(ctx);
5374 		free(vaddrs, M_IFLIB);
5375 		free(paddrs, M_IFLIB);
5376 		goto err_rx_desc;
5377 	}
5378 	free(vaddrs, M_IFLIB);
5379 	free(paddrs, M_IFLIB);
5380 
5381 	return (0);
5382 
5383 /* XXX handle allocation failure changes */
5384 err_rx_desc:
5385 err_tx_desc:
5386 rx_fail:
5387 	if (ctx->ifc_rxqs != NULL)
5388 		free(ctx->ifc_rxqs, M_IFLIB);
5389 	ctx->ifc_rxqs = NULL;
5390 	if (ctx->ifc_txqs != NULL)
5391 		free(ctx->ifc_txqs, M_IFLIB);
5392 	ctx->ifc_txqs = NULL;
5393 fail:
5394 	return (err);
5395 }
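
/*
 * Example: a minimal sketch of how a driver's IFDI_TX_QUEUES_ALLOC
 * method might consume the vaddrs/paddrs arrays built above.  The
 * "mydrv" names and softc layout are hypothetical, not part of iflib,
 * and the sketch assumes ntxqs == 1; in general, entry (i * ntxqs + j)
 * holds the addresses of hardware queue j within queue set i.
 *
 *	static int
 *	mydrv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
 *	    uint64_t *paddrs, int ntxqs, int ntxqsets)
 *	{
 *		struct mydrv_softc *sc = iflib_get_softc(ctx);
 *		int i;
 *
 *		for (i = 0; i < ntxqsets; i++) {
 *			sc->tx_rings[i].desc_base = vaddrs[i * ntxqs];
 *			sc->tx_rings[i].desc_busaddr = paddrs[i * ntxqs];
 *		}
 *		return (0);
 *	}
 */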
5396 
5397 static int
5398 iflib_tx_structures_setup(if_ctx_t ctx)
5399 {
5400 	iflib_txq_t txq = ctx->ifc_txqs;
5401 	int i;
5402 
5403 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
5404 		iflib_txq_setup(txq);
5405 
5406 	return (0);
5407 }
5408 
5409 static void
5410 iflib_tx_structures_free(if_ctx_t ctx)
5411 {
5412 	iflib_txq_t txq = ctx->ifc_txqs;
5413 	int i, j;
5414 
5415 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
5416 		iflib_txq_destroy(txq);
5417 		for (j = 0; j < ctx->ifc_nhwtxqs; j++)
5418 			iflib_dma_free(&txq->ift_ifdi[j]);
5419 	}
5420 	free(ctx->ifc_txqs, M_IFLIB);
5421 	ctx->ifc_txqs = NULL;
5422 	IFDI_QUEUES_FREE(ctx);
5423 }
5424 
5425 /*********************************************************************
5426  *
5427  *  Initialize all receive rings.
5428  *
5429  **********************************************************************/
5430 static int
5431 iflib_rx_structures_setup(if_ctx_t ctx)
5432 {
5433 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5434 	int q;
5435 #if defined(INET6) || defined(INET)
5436 	int i, err;
5437 #endif
5438 
5439 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5440 #if defined(INET6) || defined(INET)
5441 		tcp_lro_free(&rxq->ifr_lc);
5442 		if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
5443 		    TCP_LRO_ENTRIES, min(1024,
5444 		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
5445 			device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
5446 			goto fail;
5447 		}
5448 		rxq->ifr_lro_enabled = TRUE;
5449 #endif
5450 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
5451 	}
5452 	return (0);
5453 #if defined(INET6) || defined(INET)
5454 fail:
5455 	/*
5456 	 * Free the RX software descriptors allocated so far; we will only
5457 	 * handle the rings that completed, as the failing case will have
5458 	 * cleaned up after itself. 'q' failed, so it is the terminus.
5459 	 */
5460 	rxq = ctx->ifc_rxqs;
5461 	for (i = 0; i < q; ++i, rxq++) {
5462 		iflib_rx_sds_free(rxq);
5463 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
5464 	}
5465 	return (err);
5466 #endif
5467 }
5468 
5469 /*********************************************************************
5470  *
5471  *  Free all receive rings.
5472  *
5473  **********************************************************************/
5474 static void
5475 iflib_rx_structures_free(if_ctx_t ctx)
5476 {
5477 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5478 
5479 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
5480 		iflib_rx_sds_free(rxq);
5481 	}
5482 }
5483 
5484 static int
5485 iflib_qset_structures_setup(if_ctx_t ctx)
5486 {
5487 	int err;
5488 
5489 	/*
5490 	 * It is expected that the caller takes care of freeing queues if this
5491 	 * fails.
5492 	 */
5493 	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
5494 		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
5495 		return (err);
5496 	}
5497 
5498 	if ((err = iflib_rx_structures_setup(ctx)) != 0)
5499 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
5500 
5501 	return (err);
5502 }
5503 
5504 int
5505 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
5506 		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
5507 {
5508 
5509 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
5510 }
5511 
5512 #ifdef SMP
5513 static int
5514 find_nth(if_ctx_t ctx, int qid)
5515 {
5516 	cpuset_t cpus;
5517 	int i, cpuid, eqid, count;
5518 
5519 	CPU_COPY(&ctx->ifc_cpus, &cpus);
5520 	count = CPU_COUNT(&cpus);
5521 	eqid = qid % count;
5522 	/* clear up to the qid'th bit */
5523 	for (i = 0; i < eqid; i++) {
5524 		cpuid = CPU_FFS(&cpus);
5525 		MPASS(cpuid != 0);
5526 		CPU_CLR(cpuid-1, &cpus);
5527 	}
5528 	cpuid = CPU_FFS(&cpus);
5529 	MPASS(cpuid != 0);
5530 	return (cpuid-1);
5531 }
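
/*
 * Worked example of the bit scan above: with ifc_cpus = { 0, 2, 4, 6 }
 * and qid = 5, count is 4 and eqid is 5 % 4 = 1; one set bit (CPU 0)
 * is cleared, CPU_FFS() then reports CPU 2, and find_nth() returns 2.
 */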
5532 
5533 #ifdef SCHED_ULE
5534 extern struct cpu_group *cpu_top;              /* CPU topology */
5535 
5536 static int
5537 find_child_with_core(int cpu, struct cpu_group *grp)
5538 {
5539 	int i;
5540 
5541 	if (grp->cg_children == 0)
5542 		return -1;
5543 
5544 	MPASS(grp->cg_child);
5545 	for (i = 0; i < grp->cg_children; i++) {
5546 		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
5547 			return i;
5548 	}
5549 
5550 	return -1;
5551 }
5552 
5553 /*
5554  * Find the nth "close" core to the specified core.
5555  * "close" is defined as the deepest level that shares
5556  * at least an L2 cache.  With threads, this will be
5557  * threads on the same core.  If the shared cache is L3
5558  * or higher, this simply returns the same core.
5559  */
5560 static int
5561 find_close_core(int cpu, int core_offset)
5562 {
5563 	struct cpu_group *grp;
5564 	int i;
5565 	int fcpu;
5566 	cpuset_t cs;
5567 
5568 	grp = cpu_top;
5569 	if (grp == NULL)
5570 		return cpu;
5571 	i = 0;
5572 	while ((i = find_child_with_core(cpu, grp)) != -1) {
5573 		/* If the child only has one cpu, don't descend */
5574 		if (grp->cg_child[i].cg_count <= 1)
5575 			break;
5576 		grp = &grp->cg_child[i];
5577 	}
5578 
5579 	/* If they don't share at least an L2 cache, use the same CPU */
5580 	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
5581 		return cpu;
5582 
5583 	/* Now pick one */
5584 	CPU_COPY(&grp->cg_mask, &cs);
5585 
5586 	/* Add the selected CPU offset to core offset. */
5587 	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
5588 		if (fcpu - 1 == cpu)
5589 			break;
5590 		CPU_CLR(fcpu - 1, &cs);
5591 	}
5592 	MPASS(fcpu);
5593 
5594 	core_offset += i;
5595 
5596 	CPU_COPY(&grp->cg_mask, &cs);
5597 	for (i = core_offset % grp->cg_count; i > 0; i--) {
5598 		MPASS(CPU_FFS(&cs));
5599 		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5600 	}
5601 	MPASS(CPU_FFS(&cs));
5602 	return CPU_FFS(&cs) - 1;
5603 }
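
/*
 * Worked example (assuming a hypothetical 2-way SMT topology): for
 * find_close_core(0, 1), the loop descends to the group holding CPU 0
 * and its sibling thread.  That group shares at least an L2 cache, so
 * the CG_SHARE_L2 test passes; CPU 0 sits at offset 0 in cg_mask,
 * core_offset becomes 1, and the sibling thread's CPU id is returned.
 * If only an L3 cache were shared, the test above would fail and CPU 0
 * itself would be returned.
 */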
5604 #else
5605 static int
5606 find_close_core(int cpu, int core_offset __unused)
5607 {
5608 	return cpu;
5609 }
5610 #endif
5611 
5612 static int
5613 get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5614 {
5615 	switch (type) {
5616 	case IFLIB_INTR_TX:
5617 		/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
5618 		/* XXX handle multiple RX threads per core and more than two cores per L2 group */
5619 		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5620 	case IFLIB_INTR_RX:
5621 	case IFLIB_INTR_RXTX:
5622 		/* RX queues get the specified core */
5623 		return qid / CPU_COUNT(&ctx->ifc_cpus);
5624 	default:
5625 		return -1;
5626 	}
5627 }
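
/*
 * Worked example: with 8 CPUs in ifc_cpus, RX queue 3 gets offset
 * 3 / 8 = 0 (the CPU chosen by find_nth()), while TX queue 3 gets
 * offset 3 / 8 + 1 = 1, so find_close_core() pushes the TX task onto
 * a core sharing at least an L2 cache with the matching RX queue
 * rather than onto the same core.
 */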
5628 #else
5629 #define get_core_offset(ctx, type, qid)	CPU_FIRST()
5630 #define find_close_core(cpuid, tid)	CPU_FIRST()
5631 #define find_nth(ctx, gid)		CPU_FIRST()
5632 #endif
5633 
5634 /* Just to avoid copy/paste */
5635 static inline int
5636 iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
5637     struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name)
5638 {
5639 	int cpuid;
5640 	int err, tid;
5641 
5642 	cpuid = find_nth(ctx, qid);
5643 	tid = get_core_offset(ctx, type, qid);
5644 	MPASS(tid >= 0);
5645 	cpuid = find_close_core(cpuid, tid);
5646 	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name);
5647 	if (err) {
5648 		device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err);
5649 		return (err);
5650 	}
5651 #ifdef notyet
5652 	if (cpuid > ctx->ifc_cpuid_highest)
5653 		ctx->ifc_cpuid_highest = cpuid;
5654 #endif
5655 	return 0;
5656 }
5657 
5658 int
5659 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
5660 			iflib_intr_type_t type, driver_filter_t *filter,
5661 			void *filter_arg, int qid, const char *name)
5662 {
5663 	struct grouptask *gtask;
5664 	struct taskqgroup *tqg;
5665 	iflib_filter_info_t info;
5666 	gtask_fn_t *fn;
5667 	int tqrid, err;
5668 	driver_filter_t *intr_fast;
5669 	void *q;
5670 
5671 	info = &ctx->ifc_filter_info;
5672 	tqrid = rid;
5673 
5674 	switch (type) {
5675 	/* XXX merge tx/rx for netmap? */
5676 	case IFLIB_INTR_TX:
5677 		q = &ctx->ifc_txqs[qid];
5678 		info = &ctx->ifc_txqs[qid].ift_filter_info;
5679 		gtask = &ctx->ifc_txqs[qid].ift_task;
5680 		tqg = qgroup_if_io_tqg;
5681 		fn = _task_fn_tx;
5682 		intr_fast = iflib_fast_intr;
5683 		GROUPTASK_INIT(gtask, 0, fn, q);
5684 		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
5685 		break;
5686 	case IFLIB_INTR_RX:
5687 		q = &ctx->ifc_rxqs[qid];
5688 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
5689 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5690 		tqg = qgroup_if_io_tqg;
5691 		fn = _task_fn_rx;
5692 		intr_fast = iflib_fast_intr;
5693 		GROUPTASK_INIT(gtask, 0, fn, q);
5694 		break;
5695 	case IFLIB_INTR_RXTX:
5696 		q = &ctx->ifc_rxqs[qid];
5697 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
5698 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5699 		tqg = qgroup_if_io_tqg;
5700 		fn = _task_fn_rx;
5701 		intr_fast = iflib_fast_intr_rxtx;
5702 		GROUPTASK_INIT(gtask, 0, fn, q);
5703 		break;
5704 	case IFLIB_INTR_ADMIN:
5705 		q = ctx;
5706 		tqrid = -1;
5707 		info = &ctx->ifc_filter_info;
5708 		gtask = &ctx->ifc_admin_task;
5709 		tqg = qgroup_if_config_tqg;
5710 		fn = _task_fn_admin;
5711 		intr_fast = iflib_fast_intr_ctx;
5712 		break;
5713 	default:
5714 		panic("unknown net intr type");
5715 	}
5716 
5717 	info->ifi_filter = filter;
5718 	info->ifi_filter_arg = filter_arg;
5719 	info->ifi_task = gtask;
5720 	info->ifi_ctx = q;
5721 
5722 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
5723 	if (err != 0) {
5724 		device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
5725 		return (err);
5726 	}
5727 	if (type == IFLIB_INTR_ADMIN)
5728 		return (0);
5729 
5730 	if (tqrid != -1) {
5731 		err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name);
5732 		if (err)
5733 			return (err);
5734 	} else {
5735 		taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
5736 	}
5737 
5738 	return (0);
5739 }
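
/*
 * Example: a minimal sketch of how a driver's IFDI_MSIX_INTR_ASSIGN
 * method typically calls the function above, one vector per RX queue.
 * The "mydrv" names, the per-queue filter, and the softc layout are
 * assumptions for illustration only.
 *
 *	int i, vector, rid, err;
 *	char buf[16];
 *
 *	for (i = 0, vector = 0; i < scctx->isc_nrxqsets; i++, vector++) {
 *		rid = vector + 1;
 *		snprintf(buf, sizeof(buf), "rxq%d", i);
 *		err = iflib_irq_alloc_generic(ctx, &sc->queues[i].irq,
 *		    rid, IFLIB_INTR_RXTX, mydrv_msix_que,
 *		    &sc->queues[i], i, buf);
 *		if (err != 0)
 *			return (err);
 *	}
 */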
5740 
5741 void
5742 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,  void *arg, int qid, const char *name)
5743 {
5744 	struct grouptask *gtask;
5745 	struct taskqgroup *tqg;
5746 	gtask_fn_t *fn;
5747 	void *q;
5748 	int irq_num = -1;
5749 	int err;
5750 
5751 	switch (type) {
5752 	case IFLIB_INTR_TX:
5753 		q = &ctx->ifc_txqs[qid];
5754 		gtask = &ctx->ifc_txqs[qid].ift_task;
5755 		tqg = qgroup_if_io_tqg;
5756 		fn = _task_fn_tx;
5757 		if (irq != NULL)
5758 			irq_num = rman_get_start(irq->ii_res);
5759 		break;
5760 	case IFLIB_INTR_RX:
5761 		q = &ctx->ifc_rxqs[qid];
5762 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5763 		tqg = qgroup_if_io_tqg;
5764 		fn = _task_fn_rx;
5765 		if (irq != NULL)
5766 			irq_num = rman_get_start(irq->ii_res);
5767 		break;
5768 	case IFLIB_INTR_IOV:
5769 		q = ctx;
5770 		gtask = &ctx->ifc_vflr_task;
5771 		tqg = qgroup_if_config_tqg;
5772 		fn = _task_fn_iov;
5773 		break;
5774 	default:
5775 		panic("unknown net intr type");
5776 	}
5777 	GROUPTASK_INIT(gtask, 0, fn, q);
5778 	if (irq_num != -1) {
5779 		err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name);
5780 		if (err)
5781 			taskqgroup_attach(tqg, gtask, q, irq_num, name);
5782 	} else {
5784 		taskqgroup_attach(tqg, gtask, q, irq_num, name);
5785 	}
5786 }
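
/*
 * Example (hypothetical driver code): when TX completions are handled
 * from the RX interrupt (IFLIB_INTR_RXTX above), the TX task has no
 * vector of its own and is typically set up as a softirq instead:
 *
 *	iflib_softirq_alloc_generic(ctx, &sc->queues[i].irq,
 *	    IFLIB_INTR_TX, &sc->txqs[i], i, "tx");
 *
 * Passing a NULL irq is also legal; irq_num then stays -1 and the task
 * is attached without binding to an interrupt's CPU.
 */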
5787 
5788 void
5789 iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
5790 {
5791 	if (irq->ii_tag)
5792 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
5793 
5794 	if (irq->ii_res)
5795 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res);
5796 }
5797 
5798 static int
5799 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
5800 {
5801 	iflib_txq_t txq = ctx->ifc_txqs;
5802 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5803 	if_irq_t irq = &ctx->ifc_legacy_irq;
5804 	iflib_filter_info_t info;
5805 	struct grouptask *gtask;
5806 	struct taskqgroup *tqg;
5807 	gtask_fn_t *fn;
5808 	int tqrid;
5809 	void *q;
5810 	int err;
5811 
5812 	q = &ctx->ifc_rxqs[0];
5813 	info = &rxq[0].ifr_filter_info;
5814 	gtask = &rxq[0].ifr_task;
5815 	tqg = qgroup_if_io_tqg;
5816 	tqrid = irq->ii_rid = *rid;
5817 	fn = _task_fn_rx;
5818 
5819 	ctx->ifc_flags |= IFC_LEGACY;
5820 	info->ifi_filter = filter;
5821 	info->ifi_filter_arg = filter_arg;
5822 	info->ifi_task = gtask;
5823 	info->ifi_ctx = ctx;
5824 
5825 	/* We allocate a single interrupt resource */
5826 	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
5827 		return (err);
5828 	GROUPTASK_INIT(gtask, 0, fn, q);
5829 	taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
5830 
5831 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
5832 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx");
5833 	return (0);
5834 }
5835 
5836 void
5837 iflib_led_create(if_ctx_t ctx)
5838 {
5839 
5840 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
5841 	    device_get_nameunit(ctx->ifc_dev));
5842 }
5843 
5844 void
5845 iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
5846 {
5847 
5848 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
5849 }
5850 
5851 void
5852 iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
5853 {
5854 
5855 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
5856 }
5857 
5858 void
5859 iflib_admin_intr_deferred(if_ctx_t ctx)
5860 {
5861 #ifdef INVARIANTS
5862 	struct grouptask *gtask;
5863 
5864 	gtask = &ctx->ifc_admin_task;
5865 	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
5866 #endif
5867 
5868 	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
5869 }
5870 
5871 void
5872 iflib_iov_intr_deferred(if_ctx_t ctx)
5873 {
5874 
5875 	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
5876 }
5877 
5878 void
5879 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
5880 {
5881 
5882 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
5883 }
5884 
5885 void
5886 iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
5887 	const char *name)
5888 {
5889 
5890 	GROUPTASK_INIT(gtask, 0, fn, ctx);
5891 	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
5892 }
5893 
5894 void
5895 iflib_config_gtask_deinit(struct grouptask *gtask)
5896 {
5897 
5898 	taskqgroup_detach(qgroup_if_config_tqg, gtask);
5899 }
5900 
5901 void
5902 iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
5903 {
5904 	if_t ifp = ctx->ifc_ifp;
5905 	iflib_txq_t txq = ctx->ifc_txqs;
5906 
5907 	if_setbaudrate(ifp, baudrate);
5908 	if (baudrate >= IF_Gbps(10)) {
5909 		STATE_LOCK(ctx);
5910 		ctx->ifc_flags |= IFC_PREFETCH;
5911 		STATE_UNLOCK(ctx);
5912 	}
5913 	/* If link down, disable watchdog */
5914 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
5915 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
5916 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
5917 	}
5918 	ctx->ifc_link_state = link_state;
5919 	if_link_state_change(ifp, link_state);
5920 }
5921 
5922 static int
5923 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
5924 {
5925 	int credits;
5926 #ifdef INVARIANTS
5927 	int credits_pre = txq->ift_cidx_processed;
5928 #endif
5929 
5930 	if (ctx->isc_txd_credits_update == NULL)
5931 		return (0);
5932 
5933 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
5934 		return (0);
5935 
5936 	txq->ift_processed += credits;
5937 	txq->ift_cidx_processed += credits;
5938 
5939 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
5940 	if (txq->ift_cidx_processed >= txq->ift_size)
5941 		txq->ift_cidx_processed -= txq->ift_size;
5942 	return (credits);
5943 }
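
/*
 * Worked example of the wrap above: with ift_size = 1024,
 * ift_cidx_processed = 1000, and 100 credits returned, the index
 * advances to 1100 and is wrapped back to 1100 - 1024 = 76, keeping it
 * inside the descriptor ring.
 */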
5944 
5945 static int
5946 iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
5947 {
5948 
5949 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
5950 	    budget));
5951 }
5952 
5953 void
5954 iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
5955 	const char *description, if_int_delay_info_t info,
5956 	int offset, int value)
5957 {
5958 	info->iidi_ctx = ctx;
5959 	info->iidi_offset = offset;
5960 	info->iidi_value = value;
5961 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
5962 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
5963 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5964 	    info, 0, iflib_sysctl_int_delay, "I", description);
5965 }
5966 
5967 struct sx *
5968 iflib_ctx_lock_get(if_ctx_t ctx)
5969 {
5970 
5971 	return (&ctx->ifc_ctx_sx);
5972 }
5973 
5974 static int
5975 iflib_msix_init(if_ctx_t ctx)
5976 {
5977 	device_t dev = ctx->ifc_dev;
5978 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5979 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5980 	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
5981 	int iflib_num_tx_queues, iflib_num_rx_queues;
5982 	int err, admincnt, bar;
5983 
5984 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
5985 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
5986 
5987 	device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
5988 
5989 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
5990 	admincnt = sctx->isc_admin_intrcnt;
5991 	/* Allow override via tunable */
5992 	if (scctx->isc_disable_msix)
5993 		goto msi;
5994 
5995 	/*
5996 	 * bar == -1 => "trust me I know what I'm doing"
5997 	 * Some drivers are for hardware that is so shoddily
5998 	 * documented that no one knows which bars are which
5999 	 * so the developer has to map all bars. This hack
6000 	 * allows shoddy garbage to use MSI-X in this framework.
6001 	 */
6002 	if (bar != -1) {
6003 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
6004 		    SYS_RES_MEMORY, &bar, RF_ACTIVE);
6005 		if (ctx->ifc_msix_mem == NULL) {
6006 			/* May not be enabled */
6007 			device_printf(dev, "Unable to map MSI-X table\n");
6008 			goto msi;
6009 		}
6010 	}
6011 	/* First try MSI/X */
6012 	if ((msgs = pci_msix_count(dev)) == 0) { /* system has MSI-X disabled */
6013 		device_printf(dev, "System has MSI-X disabled\n");
6014 		bus_release_resource(dev, SYS_RES_MEMORY,
6015 		    bar, ctx->ifc_msix_mem);
6016 		ctx->ifc_msix_mem = NULL;
6017 		goto msi;
6018 	}
6019 #if IFLIB_DEBUG
6020 	/* use only 1 qset in debug mode */
6021 	queuemsgs = min(msgs - admincnt, 1);
6022 #else
6023 	queuemsgs = msgs - admincnt;
6024 #endif
6025 #ifdef RSS
6026 	queues = imin(queuemsgs, rss_getnumbuckets());
6027 #else
6028 	queues = queuemsgs;
6029 #endif
6030 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6031 	device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n",
6032 				  CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
6033 #ifdef  RSS
6034 	/* If we're doing RSS, clamp at the number of RSS buckets */
6035 	if (queues > rss_getnumbuckets())
6036 		queues = rss_getnumbuckets();
6037 #endif
6038 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs)
6039 		rx_queues = iflib_num_rx_queues;
6040 	else
6041 		rx_queues = queues;
6042 
6043 	if (rx_queues > scctx->isc_nrxqsets)
6044 		rx_queues = scctx->isc_nrxqsets;
6045 
6046 	/*
6047 	 * We want this to be all logical CPUs by default
6048 	 */
6049 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
6050 		tx_queues = iflib_num_tx_queues;
6051 	else
6052 		tx_queues = mp_ncpus;
6053 
6054 	if (tx_queues > scctx->isc_ntxqsets)
6055 		tx_queues = scctx->isc_ntxqsets;
6056 
6057 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
6058 #ifdef INVARIANTS
6059 		if (tx_queues != rx_queues)
6060 			device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
6061 				      min(rx_queues, tx_queues), min(rx_queues, tx_queues));
6062 #endif
6063 		tx_queues = min(rx_queues, tx_queues);
6064 		rx_queues = min(rx_queues, tx_queues);
6065 	}
6066 
6067 	device_printf(dev, "using %d rx queues %d tx queues\n", rx_queues, tx_queues);
6068 
6069 	vectors = rx_queues + admincnt;
6070 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
6071 		device_printf(dev,
6072 		    "Using MSI-X interrupts with %d vectors\n", vectors);
6073 		scctx->isc_vectors = vectors;
6074 		scctx->isc_nrxqsets = rx_queues;
6075 		scctx->isc_ntxqsets = tx_queues;
6076 		scctx->isc_intr = IFLIB_INTR_MSIX;
6077 
6078 		return (vectors);
6079 	} else {
6080 		device_printf(dev, "failed to allocate %d MSI-X vectors, err: %d - falling back to MSI\n", vectors, err);
6081 		bus_release_resource(dev, SYS_RES_MEMORY, bar,
6082 		    ctx->ifc_msix_mem);
6083 		ctx->ifc_msix_mem = NULL;
6084 	}
6085 msi:
6086 	vectors = pci_msi_count(dev);
6087 	scctx->isc_nrxqsets = 1;
6088 	scctx->isc_ntxqsets = 1;
6089 	scctx->isc_vectors = vectors;
6090 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
6091 		device_printf(dev,"Using an MSI interrupt\n");
6092 		scctx->isc_intr = IFLIB_INTR_MSI;
6093 	} else {
6094 		scctx->isc_vectors = 1;
6095 		device_printf(dev,"Using a Legacy interrupt\n");
6096 		scctx->isc_intr = IFLIB_INTR_LEGACY;
6097 	}
6098 
6099 	return (vectors);
6100 }
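
/*
 * Worked example of the vector math above: on a 16-CPU system whose
 * device exposes 64 MSI-X messages with isc_admin_intrcnt = 1,
 * queuemsgs = 63 and queues = min(16, 63) = 16.  Absent sysctl
 * overrides, rx_queues and tx_queues are both capped to 16 (and to the
 * isc_nrxqsets/isc_ntxqsets limits), so pci_alloc_msix() is asked for
 * rx_queues + admincnt = 17 vectors.
 */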
6101 
6102 static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
6103 
6104 static int
6105 mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
6106 {
6107 	int rc;
6108 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
6109 	struct sbuf *sb;
6110 	const char *ring_state = "UNKNOWN";
6111 
6112 	/* XXX needed ? */
6113 	rc = sysctl_wire_old_buffer(req, 0);
6114 	MPASS(rc == 0);
6115 	if (rc != 0)
6116 		return (rc);
6117 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
6118 	MPASS(sb != NULL);
6119 	if (sb == NULL)
6120 		return (ENOMEM);
6121 	if (state[3] < nitems(ring_states))
6122 		ring_state = ring_states[state[3]];
6123 
6124 	sbuf_printf(sb, "pidx_head: %04hu pidx_tail: %04hu cidx: %04hu state: %s",
6125 		    state[0], state[1], state[2], ring_state);
6126 	rc = sbuf_finish(sb);
6127 	sbuf_delete(sb);
6128 	return (rc);
6129 }
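
/*
 * Example output from the handler above, as read via sysctl(8):
 *
 *	pidx_head: 0012 pidx_tail: 0012 cidx: 0007 state: BUSY
 */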
6130 
6131 enum iflib_ndesc_handler {
6132 	IFLIB_NTXD_HANDLER,
6133 	IFLIB_NRXD_HANDLER,
6134 };
6135 
6136 static int
6137 mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
6138 {
6139 	if_ctx_t ctx = (void *)arg1;
6140 	enum iflib_ndesc_handler type = arg2;
6141 	char buf[256] = {0};
6142 	qidx_t *ndesc;
6143 	char *p, *next;
6144 	int nqs, rc, i;
6145 
6146 	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
6147 
6148 	nqs = 8;
6149 	switch (type) {
6150 	case IFLIB_NTXD_HANDLER:
6151 		ndesc = ctx->ifc_sysctl_ntxds;
6152 		if (ctx->ifc_sctx)
6153 			nqs = ctx->ifc_sctx->isc_ntxqs;
6154 		break;
6155 	case IFLIB_NRXD_HANDLER:
6156 		ndesc = ctx->ifc_sysctl_nrxds;
6157 		if (ctx->ifc_sctx)
6158 			nqs = ctx->ifc_sctx->isc_nrxqs;
6159 		break;
6160 	default:
6161 		panic("unhandled type");
6162 	}
6163 	if (nqs == 0)
6164 		nqs = 8;
6165 
6166 	for (i = 0; i < 8; i++) {
6167 		if (i >= nqs)
6168 			break;
6169 		if (i)
6170 			strcat(buf, ",");
6171 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
6172 	}
6173 
6174 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
6175 	if (rc || req->newptr == NULL)
6176 		return rc;
6177 
6178 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
6179 	    i++, p = strsep(&next, " ,")) {
6180 		ndesc[i] = strtoul(p, NULL, 10);
6181 	}
6182 
6183 	return(rc);
6184 }
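
/*
 * Example: the handler above backs the override_ntxds/override_nrxds
 * sysctls added below.  Since they are CTLFLAG_RWTUN, a (hypothetical)
 * device can be tuned from loader.conf(5) with one comma-separated
 * value per hardware queue in the set, e.g.:
 *
 *	dev.mydrv.0.iflib.override_ntxds="4096"
 *	dev.mydrv.0.iflib.override_nrxds="2048,2048"
 */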
6185 
6186 #define NAME_BUFLEN 32
6187 static void
6188 iflib_add_device_sysctl_pre(if_ctx_t ctx)
6189 {
6190 	device_t dev = iflib_get_dev(ctx);
6191 	struct sysctl_oid_list *child, *oid_list;
6192 	struct sysctl_ctx_list *ctx_list;
6193 	struct sysctl_oid *node;
6194 
6195 	ctx_list = device_get_sysctl_ctx(dev);
6196 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
6197 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
6198 						      CTLFLAG_RD, NULL, "IFLIB fields");
6199 	oid_list = SYSCTL_CHILDREN(node);
6200 
6201 	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
6202 		       CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
6203 		       "driver version");
6204 
6205 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
6206 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
6207 			"# of txqs to use, 0 => use default #");
6208 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
6209 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
6210 			"# of rxqs to use, 0 => use default #");
6211 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
6212 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
6213                        "permit #txq != #rxq");
6214 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6215                       CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6216                       "disable MSIX (default 0)");
6217 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6218 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
6219                        "set the rx budget");
6220 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6221 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
6222 		       "cause tx to abdicate instead of running to completion");
6223 
6224 	/* XXX change for per-queue sizes */
6225 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
6226 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
6227                        mp_ndesc_handler, "A",
6228                        "list of # of tx descriptors to use, 0 = use default #");
6229 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
6230 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
6231                        mp_ndesc_handler, "A",
6232                        "list of # of rx descriptors to use, 0 = use default #");
6233 }
6234 
6235 static void
6236 iflib_add_device_sysctl_post(if_ctx_t ctx)
6237 {
6238 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6239 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6240 	device_t dev = iflib_get_dev(ctx);
6241 	struct sysctl_oid_list *child;
6242 	struct sysctl_ctx_list *ctx_list;
6243 	iflib_fl_t fl;
6244 	iflib_txq_t txq;
6245 	iflib_rxq_t rxq;
6246 	int i, j;
6247 	char namebuf[NAME_BUFLEN];
6248 	char *qfmt;
6249 	struct sysctl_oid *queue_node, *fl_node, *node;
6250 	struct sysctl_oid_list *queue_list, *fl_list;
6251 	ctx_list = device_get_sysctl_ctx(dev);
6252 
6253 	node = ctx->ifc_sysctl_node;
6254 	child = SYSCTL_CHILDREN(node);
6255 
6256 	if (scctx->isc_ntxqsets > 100)
6257 		qfmt = "txq%03d";
6258 	else if (scctx->isc_ntxqsets > 10)
6259 		qfmt = "txq%02d";
6260 	else
6261 		qfmt = "txq%d";
6262 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
6263 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6264 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6265 					     CTLFLAG_RD, NULL, "Queue Name");
6266 		queue_list = SYSCTL_CHILDREN(queue_node);
6267 #if MEMORY_LOGGING
6268 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
6269 				CTLFLAG_RD,
6270 				&txq->ift_dequeued, "total mbufs freed");
6271 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
6272 				CTLFLAG_RD,
6273 				&txq->ift_enqueued, "total mbufs enqueued");
6274 #endif
6275 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
6276 				   CTLFLAG_RD,
6277 				   &txq->ift_mbuf_defrag, "# of times m_defrag was called");
6278 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
6279 				   CTLFLAG_RD,
6280 				   &txq->ift_pullups, "# of times m_pullup was called");
6281 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
6282 				   CTLFLAG_RD,
6283 				   &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
6284 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
6285 				   CTLFLAG_RD,
6286 				   &txq->ift_no_desc_avail, "# of times no descriptors were available");
6287 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
6288 				   CTLFLAG_RD,
6289 				   &txq->ift_map_failed, "# of times dma map failed");
6290 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
6291 				   CTLFLAG_RD,
6292 				   &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
6293 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
6294 				   CTLFLAG_RD,
6295 				   &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
6296 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
6297 				   CTLFLAG_RD,
6298 				   &txq->ift_pidx, 1, "Producer Index");
6299 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
6300 				   CTLFLAG_RD,
6301 				   &txq->ift_cidx, 1, "Consumer Index");
6302 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
6303 				   CTLFLAG_RD,
6304 				   &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
6305 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
6306 				   CTLFLAG_RD,
6307 				   &txq->ift_in_use, 1, "descriptors in use");
6308 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
6309 				   CTLFLAG_RD,
6310 				   &txq->ift_processed, "descriptors processed for clean");
6311 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
6312 				   CTLFLAG_RD,
6313 				   &txq->ift_cleaned, "total cleaned");
6314 		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
6315 				CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
6316 				0, mp_ring_state_handler, "A", "soft ring state");
6317 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
6318 				       CTLFLAG_RD, &txq->ift_br->enqueues,
6319 				       "# of enqueues to the mp_ring for this queue");
6320 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
6321 				       CTLFLAG_RD, &txq->ift_br->drops,
6322 				       "# of drops in the mp_ring for this queue");
6323 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
6324 				       CTLFLAG_RD, &txq->ift_br->starts,
6325 				       "# of normal consumer starts in the mp_ring for this queue");
6326 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
6327 				       CTLFLAG_RD, &txq->ift_br->stalls,
6328 					       "# of consumer stalls in the mp_ring for this queue");
6329 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
6330 			       CTLFLAG_RD, &txq->ift_br->restarts,
6331 				       "# of consumer restarts in the mp_ring for this queue");
6332 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
6333 				       CTLFLAG_RD, &txq->ift_br->abdications,
6334 				       "# of consumer abdications in the mp_ring for this queue");
6335 	}
6336 
6337 	if (scctx->isc_nrxqsets > 100)
6338 		qfmt = "rxq%03d";
6339 	else if (scctx->isc_nrxqsets > 10)
6340 		qfmt = "rxq%02d";
6341 	else
6342 		qfmt = "rxq%d";
6343 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
6344 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6345 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6346 					     CTLFLAG_RD, NULL, "Queue Name");
6347 		queue_list = SYSCTL_CHILDREN(queue_node);
6348 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
6349 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
6350 				       CTLFLAG_RD,
6351 				       &rxq->ifr_cq_pidx, 1, "Producer Index");
6352 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
6353 				       CTLFLAG_RD,
6354 				       &rxq->ifr_cq_cidx, 1, "Consumer Index");
6355 		}
6356 
6357 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
6358 			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
6359 			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
6360 						     CTLFLAG_RD, NULL, "freelist Name");
6361 			fl_list = SYSCTL_CHILDREN(fl_node);
6362 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
6363 				       CTLFLAG_RD,
6364 				       &fl->ifl_pidx, 1, "Producer Index");
6365 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
6366 				       CTLFLAG_RD,
6367 				       &fl->ifl_cidx, 1, "Consumer Index");
6368 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
6369 				       CTLFLAG_RD,
6370 				       &fl->ifl_credits, 1, "credits available");
6371 #if MEMORY_LOGGING
6372 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
6373 					CTLFLAG_RD,
6374 					&fl->ifl_m_enqueued, "mbufs allocated");
6375 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
6376 					CTLFLAG_RD,
6377 					&fl->ifl_m_dequeued, "mbufs freed");
6378 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
6379 					CTLFLAG_RD,
6380 					&fl->ifl_cl_enqueued, "clusters allocated");
6381 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
6382 					CTLFLAG_RD,
6383 					&fl->ifl_cl_dequeued, "clusters freed");
6384 #endif
6385 
6386 		}
6387 	}
6388 
6389 }
6390 
6391 #ifndef __NO_STRICT_ALIGNMENT
6392 static struct mbuf *
6393 iflib_fixup_rx(struct mbuf *m)
6394 {
6395 	struct mbuf *n;
6396 
6397 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
6398 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
6399 		m->m_data += ETHER_HDR_LEN;
6400 		n = m;
6401 	} else {
6402 		MGETHDR(n, M_NOWAIT, MT_DATA);
6403 		if (n == NULL) {
6404 			m_freem(m);
6405 			return (NULL);
6406 		}
6407 		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
6408 		m->m_data += ETHER_HDR_LEN;
6409 		m->m_len -= ETHER_HDR_LEN;
6410 		n->m_len = ETHER_HDR_LEN;
6411 		M_MOVE_PKTHDR(n, m);
6412 		n->m_next = m;
6413 	}
6414 	return (n);
6415 }
6416 #endif
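
/*
 * Alignment arithmetic for the in-place path above: with a
 * 4-byte-aligned receive buffer, the IP header that follows the
 * 14-byte Ethernet header initially sits on a 2-byte boundary.
 * Shifting the frame forward by ETHER_HDR_LEN moves that header to
 * offset 28 from the buffer start, restoring 32-bit alignment on
 * strict-alignment machines.  The oversize path instead moves the
 * Ethernet header into a freshly allocated mbuf, since the cluster
 * has no room left for an in-place shift.
 */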
6417 
6418 #ifdef NETDUMP
6419 static void
6420 iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
6421 {
6422 	if_ctx_t ctx;
6423 
6424 	ctx = if_getsoftc(ifp);
6425 	CTX_LOCK(ctx);
6426 	*nrxr = NRXQSETS(ctx);
6427 	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
6428 	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
6429 	CTX_UNLOCK(ctx);
6430 }
6431 
6432 static void
6433 iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
6434 {
6435 	if_ctx_t ctx;
6436 	if_softc_ctx_t scctx;
6437 	iflib_fl_t fl;
6438 	iflib_rxq_t rxq;
6439 	int i, j;
6440 
6441 	ctx = if_getsoftc(ifp);
6442 	scctx = &ctx->ifc_softc_ctx;
6443 
6444 	switch (event) {
6445 	case NETDUMP_START:
6446 		for (i = 0; i < scctx->isc_nrxqsets; i++) {
6447 			rxq = &ctx->ifc_rxqs[i];
6448 			for (j = 0; j < rxq->ifr_nfl; j++) {
6449 				fl = rxq->ifr_fl;
6450 				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
6451 			}
6452 		}
6453 		iflib_no_tx_batch = 1;
6454 		break;
6455 	default:
6456 		break;
6457 	}
6458 }
6459 
6460 static int
6461 iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
6462 {
6463 	if_ctx_t ctx;
6464 	iflib_txq_t txq;
6465 	int error;
6466 
6467 	ctx = if_getsoftc(ifp);
6468 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6469 	    IFF_DRV_RUNNING)
6470 		return (EBUSY);
6471 
6472 	txq = &ctx->ifc_txqs[0];
6473 	error = iflib_encap(txq, &m);
6474 	if (error == 0)
6475 		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
6476 	return (error);
6477 }
6478 
6479 static int
6480 iflib_netdump_poll(struct ifnet *ifp, int count)
6481 {
6482 	if_ctx_t ctx;
6483 	if_softc_ctx_t scctx;
6484 	iflib_txq_t txq;
6485 	int i;
6486 
6487 	ctx = if_getsoftc(ifp);
6488 	scctx = &ctx->ifc_softc_ctx;
6489 
6490 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6491 	    IFF_DRV_RUNNING)
6492 		return (EBUSY);
6493 
6494 	txq = &ctx->ifc_txqs[0];
6495 	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
6496 
6497 	for (i = 0; i < scctx->isc_nrxqsets; i++)
6498 		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
6499 	return (0);
6500 }
6501 #endif /* NETDUMP */
6502