xref: /freebsd/sys/net/iflib.c (revision 97cb52fa9aefd90fad38790fded50905aeeb9b9e)
1 /*-
2  * Copyright (c) 2014-2017, Matthew Macy <mmacy@nextbsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  *  1. Redistributions of source code must retain the above copyright notice,
9  *     this list of conditions and the following disclaimer.
10  *
11  *  2. Neither the name of Matthew Macy nor the names of its
12  *     contributors may be used to endorse or promote products derived from
13  *     this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_acpi.h"
34 
35 #include <sys/param.h>
36 #include <sys/types.h>
37 #include <sys/bus.h>
38 #include <sys/eventhandler.h>
39 #include <sys/sockio.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/module.h>
44 #include <sys/kobj.h>
45 #include <sys/rman.h>
46 #include <sys/sbuf.h>
47 #include <sys/smp.h>
48 #include <sys/socket.h>
49 #include <sys/sysctl.h>
50 #include <sys/syslog.h>
51 #include <sys/taskqueue.h>
52 #include <sys/limits.h>
53 
54 
55 #include <net/if.h>
56 #include <net/if_var.h>
57 #include <net/if_types.h>
58 #include <net/if_media.h>
59 #include <net/bpf.h>
60 #include <net/ethernet.h>
61 #include <net/mp_ring.h>
62 #include <net/vnet.h>
63 
64 #include <netinet/in.h>
65 #include <netinet/in_pcb.h>
66 #include <netinet/tcp_lro.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/if_ether.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip6.h>
71 #include <netinet/tcp.h>
72 #include <netinet/ip_var.h>
73 #include <netinet6/ip6_var.h>
74 
75 #include <machine/bus.h>
76 #include <machine/in_cksum.h>
77 
78 #include <vm/vm.h>
79 #include <vm/pmap.h>
80 
81 #include <dev/led/led.h>
82 #include <dev/pci/pcireg.h>
83 #include <dev/pci/pcivar.h>
84 #include <dev/pci/pci_private.h>
85 
86 #include <net/iflib.h>
87 
88 #include "ifdi_if.h"
89 
90 #if defined(__i386__) || defined(__amd64__)
91 #include <sys/memdesc.h>
92 #include <machine/bus.h>
93 #include <machine/md_var.h>
94 #include <machine/specialreg.h>
95 #include <x86/include/busdma_impl.h>
96 #include <x86/iommu/busdma_dmar.h>
97 #endif
98 
99 #include <sys/bitstring.h>
100 /*
101  * enable accounting of every mbuf as it comes in to and goes out of
102  * iflib's software descriptor references
103  */
104 #define MEMORY_LOGGING 0
105 /*
106  * Enable mbuf vectors for compressing long mbuf chains
107  */
108 
109 /*
110  * NB:
111  * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
112  *   we prefetch needs to be determined by the time spent in m_free vis a vis
113  *   the cost of a prefetch. This will of course vary based on the workload:
114  *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
115  *        is quite expensive, thus suggesting very little prefetch.
116  *      - small packet forwarding which is just returning a single mbuf to
117  *        UMA will typically be very fast vis a vis the cost of a memory
118  *        access.
119  */
120 
121 
122 /*
123  * File organization:
124  *  - private structures
125  *  - iflib private utility functions
126  *  - ifnet functions
127  *  - vlan registry and other exported functions
128  *  - iflib public core functions
129  *
130  *
131  */
132 static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
133 
134 struct iflib_txq;
135 typedef struct iflib_txq *iflib_txq_t;
136 struct iflib_rxq;
137 typedef struct iflib_rxq *iflib_rxq_t;
138 struct iflib_fl;
139 typedef struct iflib_fl *iflib_fl_t;
140 
141 struct iflib_ctx;
142 
143 static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
144 
145 typedef struct iflib_filter_info {
146 	driver_filter_t *ifi_filter;
147 	void *ifi_filter_arg;
148 	struct grouptask *ifi_task;
149 	void *ifi_ctx;
150 } *iflib_filter_info_t;
151 
152 struct iflib_ctx {
153 	KOBJ_FIELDS;
154    /*
155    * Pointer to hardware driver's softc
156    */
157 	void *ifc_softc;
158 	device_t ifc_dev;
159 	if_t ifc_ifp;
160 
161 	cpuset_t ifc_cpus;
162 	if_shared_ctx_t ifc_sctx;
163 	struct if_softc_ctx ifc_softc_ctx;
164 
165 	struct mtx ifc_mtx;
166 
167 	uint16_t ifc_nhwtxqs;
168 	uint16_t ifc_nhwrxqs;
169 
170 	iflib_txq_t ifc_txqs;
171 	iflib_rxq_t ifc_rxqs;
172 	uint32_t ifc_if_flags;
173 	uint32_t ifc_flags;
174 	uint32_t ifc_max_fl_buf_size;
175 	int ifc_in_detach;
176 
177 	int ifc_link_state;
178 	int ifc_link_irq;
179 	int ifc_watchdog_events;
180 	struct cdev *ifc_led_dev;
181 	struct resource *ifc_msix_mem;
182 
183 	struct if_irq ifc_legacy_irq;
184 	struct grouptask ifc_admin_task;
185 	struct grouptask ifc_vflr_task;
186 	struct iflib_filter_info ifc_filter_info;
187 	struct ifmedia	ifc_media;
188 
189 	struct sysctl_oid *ifc_sysctl_node;
190 	uint16_t ifc_sysctl_ntxqs;
191 	uint16_t ifc_sysctl_nrxqs;
192 	uint16_t ifc_sysctl_qs_eq_override;
193 	uint16_t ifc_sysctl_rx_budget;
194 
195 	qidx_t ifc_sysctl_ntxds[8];
196 	qidx_t ifc_sysctl_nrxds[8];
197 	struct if_txrx ifc_txrx;
198 #define isc_txd_encap  ifc_txrx.ift_txd_encap
199 #define isc_txd_flush  ifc_txrx.ift_txd_flush
200 #define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
201 #define isc_rxd_available ifc_txrx.ift_rxd_available
202 #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
203 #define isc_rxd_refill ifc_txrx.ift_rxd_refill
204 #define isc_rxd_flush ifc_txrx.ift_rxd_flush
205 #define isc_rxd_refill ifc_txrx.ift_rxd_refill
206 #define isc_rxd_refill ifc_txrx.ift_rxd_refill
207 #define isc_legacy_intr ifc_txrx.ift_legacy_intr
208 	eventhandler_tag ifc_vlan_attach_event;
209 	eventhandler_tag ifc_vlan_detach_event;
210 	uint8_t ifc_mac[ETHER_ADDR_LEN];
211 	char ifc_mtx_name[16];
212 };
213 
214 
215 void *
216 iflib_get_softc(if_ctx_t ctx)
217 {
218 
219 	return (ctx->ifc_softc);
220 }
221 
222 device_t
223 iflib_get_dev(if_ctx_t ctx)
224 {
225 
226 	return (ctx->ifc_dev);
227 }
228 
229 if_t
230 iflib_get_ifp(if_ctx_t ctx)
231 {
232 
233 	return (ctx->ifc_ifp);
234 }
235 
236 struct ifmedia *
237 iflib_get_media(if_ctx_t ctx)
238 {
239 
240 	return (&ctx->ifc_media);
241 }
242 
243 void
244 iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
245 {
246 
247 	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
248 }
249 
250 if_softc_ctx_t
251 iflib_get_softc_ctx(if_ctx_t ctx)
252 {
253 
254 	return (&ctx->ifc_softc_ctx);
255 }
256 
257 if_shared_ctx_t
258 iflib_get_sctx(if_ctx_t ctx)
259 {
260 
261 	return (ctx->ifc_sctx);
262 }
263 
264 #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
265 #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
266 #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & (CACHE_LINE_SIZE-1)))
267 
268 #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
269 #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
270 
271 #define RX_SW_DESC_MAP_CREATED	(1 << 0)
272 #define TX_SW_DESC_MAP_CREATED	(1 << 1)
273 #define RX_SW_DESC_INUSE        (1 << 3)
274 #define TX_SW_DESC_MAPPED       (1 << 4)
275 
276 #define	M_TOOBIG		M_PROTO1
277 
278 typedef struct iflib_sw_rx_desc_array {
279 	bus_dmamap_t	*ifsd_map;         /* bus_dma maps for packet */
280 	struct mbuf	**ifsd_m;           /* pkthdr mbufs */
281 	caddr_t		*ifsd_cl;          /* direct cluster pointer for rx */
282 	uint8_t		*ifsd_flags;
283 } iflib_rxsd_array_t;
284 
285 typedef struct iflib_sw_tx_desc_array {
286 	bus_dmamap_t    *ifsd_map;         /* bus_dma maps for packet */
287 	struct mbuf    **ifsd_m;           /* pkthdr mbufs */
288 	uint8_t		*ifsd_flags;
289 } if_txsd_vec_t;
290 
291 
292 /* magic number that should be high enough for any hardware */
293 #define IFLIB_MAX_TX_SEGS		128
294 /* bnxt supports 64 with hardware LRO enabled */
295 #define IFLIB_MAX_RX_SEGS		64
296 #define IFLIB_RX_COPY_THRESH		128
297 #define IFLIB_MAX_RX_REFRESH		32
298 /* The minimum descriptors per second before we start coalescing */
299 #define IFLIB_MIN_DESC_SEC		16384
300 #define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
301 #define IFLIB_QUEUE_IDLE		0
302 #define IFLIB_QUEUE_HUNG		1
303 #define IFLIB_QUEUE_WORKING		2
304 /* maximum number of txqs that can share an rx interrupt */
305 #define IFLIB_MAX_TX_SHARED_INTR	4
306 
307 /* this should really scale with ring size - this is a fairly arbitrary value */
308 #define TX_BATCH_SIZE			32
309 
310 #define IFLIB_RESTART_BUDGET		8
311 
312 #define	IFC_LEGACY		0x001
313 #define	IFC_QFLUSH		0x002
314 #define	IFC_MULTISEG		0x004
315 #define	IFC_DMAR		0x008
316 #define	IFC_SC_ALLOCATED	0x010
317 #define	IFC_INIT_DONE		0x020
318 #define	IFC_PREFETCH		0x040
319 #define	IFC_DO_RESET		0x080
320 #define	IFC_CHECK_HUNG		0x100
321 
322 #define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
323 				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
324 				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
325 struct iflib_txq {
326 	qidx_t		ift_in_use;
327 	qidx_t		ift_cidx;
328 	qidx_t		ift_cidx_processed;
329 	qidx_t		ift_pidx;
330 	uint8_t		ift_gen;
331 	uint8_t		ift_br_offset;
332 	uint16_t	ift_npending;
333 	uint16_t	ift_db_pending;
334 	uint16_t	ift_rs_pending;
335 	/* implicit pad */
336 	uint8_t		ift_txd_size[8];
337 	uint64_t	ift_processed;
338 	uint64_t	ift_cleaned;
339 	uint64_t	ift_cleaned_prev;
340 #if MEMORY_LOGGING
341 	uint64_t	ift_enqueued;
342 	uint64_t	ift_dequeued;
343 #endif
344 	uint64_t	ift_no_tx_dma_setup;
345 	uint64_t	ift_no_desc_avail;
346 	uint64_t	ift_mbuf_defrag_failed;
347 	uint64_t	ift_mbuf_defrag;
348 	uint64_t	ift_map_failed;
349 	uint64_t	ift_txd_encap_efbig;
350 	uint64_t	ift_pullups;
351 
352 	struct mtx	ift_mtx;
353 	struct mtx	ift_db_mtx;
354 
355 	/* constant values */
356 	if_ctx_t	ift_ctx;
357 	struct ifmp_ring        *ift_br;
358 	struct grouptask	ift_task;
359 	qidx_t		ift_size;
360 	uint16_t	ift_id;
361 	struct callout	ift_timer;
362 
363 	if_txsd_vec_t	ift_sds;
364 	uint8_t		ift_qstatus;
365 	uint8_t		ift_closed;
366 	uint8_t		ift_update_freq;
367 	struct iflib_filter_info ift_filter_info;
368 	bus_dma_tag_t		ift_desc_tag;
369 	bus_dma_tag_t		ift_tso_desc_tag;
370 	iflib_dma_info_t	ift_ifdi;
371 #define MTX_NAME_LEN 16
372 	char                    ift_mtx_name[MTX_NAME_LEN];
373 	char                    ift_db_mtx_name[MTX_NAME_LEN];
374 	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
375 #ifdef IFLIB_DIAGNOSTICS
376 	uint64_t ift_cpu_exec_count[256];
377 #endif
378 } __aligned(CACHE_LINE_SIZE);
379 
380 struct iflib_fl {
381 	qidx_t		ifl_cidx;
382 	qidx_t		ifl_pidx;
383 	qidx_t		ifl_credits;
384 	uint8_t		ifl_gen;
385 	uint8_t		ifl_rxd_size;
386 #if MEMORY_LOGGING
387 	uint64_t	ifl_m_enqueued;
388 	uint64_t	ifl_m_dequeued;
389 	uint64_t	ifl_cl_enqueued;
390 	uint64_t	ifl_cl_dequeued;
391 #endif
392 	/* implicit pad */
393 
394 	bitstr_t 	*ifl_rx_bitmap;
395 	qidx_t		ifl_fragidx;
396 	/* constant */
397 	qidx_t		ifl_size;
398 	uint16_t	ifl_buf_size;
399 	uint16_t	ifl_cltype;
400 	uma_zone_t	ifl_zone;
401 	iflib_rxsd_array_t	ifl_sds;
402 	iflib_rxq_t	ifl_rxq;
403 	uint8_t		ifl_id;
404 	bus_dma_tag_t           ifl_desc_tag;
405 	iflib_dma_info_t	ifl_ifdi;
406 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
407 	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
408 	qidx_t	ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
409 }  __aligned(CACHE_LINE_SIZE);
410 
411 static inline qidx_t
412 get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
413 {
414 	qidx_t used;
415 
416 	if (pidx > cidx)
417 		used = pidx - cidx;
418 	else if (pidx < cidx)
419 		used = size - cidx + pidx;
420 	else if (gen == 0 && pidx == cidx)
421 		used = 0;
422 	else if (gen == 1 && pidx == cidx)
423 		used = size;
424 	else
425 		panic("bad state");
426 
427 	return (used);
428 }
429 
430 #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
431 
432 #define IDXDIFF(head, tail, wrap) \
433 	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
434 
435 struct iflib_rxq {
436 	/* If there is a separate completion queue -
437 	 * these are the cq cidx and pidx. Otherwise
438 	 * these are unused.
439 	 */
440 	qidx_t		ifr_size;
441 	qidx_t		ifr_cq_cidx;
442 	qidx_t		ifr_cq_pidx;
443 	uint8_t		ifr_cq_gen;
444 	uint8_t		ifr_fl_offset;
445 
446 	if_ctx_t	ifr_ctx;
447 	iflib_fl_t	ifr_fl;
448 	uint64_t	ifr_rx_irq;
449 	uint16_t	ifr_id;
450 	uint8_t		ifr_lro_enabled;
451 	uint8_t		ifr_nfl;
452 	uint8_t		ifr_ntxqirq;
453 	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
454 	struct lro_ctrl			ifr_lc;
455 	struct grouptask        ifr_task;
456 	struct iflib_filter_info ifr_filter_info;
457 	iflib_dma_info_t		ifr_ifdi;
458 
459 	/* dynamically allocate if any drivers need a value substantially larger than this */
460 	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
461 #ifdef IFLIB_DIAGNOSTICS
462 	uint64_t ifr_cpu_exec_count[256];
463 #endif
464 }  __aligned(CACHE_LINE_SIZE);
465 
466 typedef struct if_rxsd {
467 	caddr_t *ifsd_cl;
468 	struct mbuf **ifsd_m;
469 	iflib_fl_t ifsd_fl;
470 	qidx_t ifsd_cidx;
471 } *if_rxsd_t;
472 
473 /* multiple of word size */
474 #ifdef __LP64__
475 #define PKT_INFO_SIZE	6
476 #define RXD_INFO_SIZE	5
477 #define PKT_TYPE uint64_t
478 #else
479 #define PKT_INFO_SIZE	11
480 #define RXD_INFO_SIZE	8
481 #define PKT_TYPE uint32_t
482 #endif
483 #define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
484 #define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
485 
486 typedef struct if_pkt_info_pad {
487 	PKT_TYPE pkt_val[PKT_INFO_SIZE];
488 } *if_pkt_info_pad_t;
489 typedef struct if_rxd_info_pad {
490 	PKT_TYPE rxd_val[RXD_INFO_SIZE];
491 } *if_rxd_info_pad_t;
492 
493 CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
494 CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
495 
496 
497 static inline void
498 pkt_info_zero(if_pkt_info_t pi)
499 {
500 	if_pkt_info_pad_t pi_pad;
501 
502 	pi_pad = (if_pkt_info_pad_t)pi;
503 	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
504 	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
505 #ifndef __LP64__
506 	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
507 	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
508 #endif
509 }
510 
511 static inline void
512 rxd_info_zero(if_rxd_info_t ri)
513 {
514 	if_rxd_info_pad_t ri_pad;
515 	int i;
516 
517 	ri_pad = (if_rxd_info_pad_t)ri;
518 	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
519 		ri_pad->rxd_val[i] = 0;
520 		ri_pad->rxd_val[i+1] = 0;
521 		ri_pad->rxd_val[i+2] = 0;
522 		ri_pad->rxd_val[i+3] = 0;
523 	}
524 #ifdef __LP64__
525 	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
526 #endif
527 }
528 
529 /*
530  * Only allow a single packet to take up most 1/nth of the tx ring
531  */
532 #define MAX_SINGLE_PACKET_FRACTION 12
533 #define IF_BAD_DMA (bus_addr_t)-1
534 
535 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
536 
537 #define CTX_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_mtx, _name, "iflib ctx lock", MTX_DEF)
538 
539 #define CTX_LOCK(ctx) mtx_lock(&(ctx)->ifc_mtx)
540 #define CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_mtx)
541 #define CTX_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_mtx)
542 
543 
544 #define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
545 #define CALLOUT_UNLOCK(txq) 	mtx_unlock(&txq->ift_mtx)
546 
547 
548 /* Our boot-time initialization hook */
549 static int	iflib_module_event_handler(module_t, int, void *);
550 
551 static moduledata_t iflib_moduledata = {
552 	"iflib",
553 	iflib_module_event_handler,
554 	NULL
555 };
556 
557 DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
558 MODULE_VERSION(iflib, 1);
559 
560 MODULE_DEPEND(iflib, pci, 1, 1, 1);
561 MODULE_DEPEND(iflib, ether, 1, 1, 1);
562 
563 TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
564 TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
565 
566 #ifndef IFLIB_DEBUG_COUNTERS
567 #ifdef INVARIANTS
568 #define IFLIB_DEBUG_COUNTERS 1
569 #else
570 #define IFLIB_DEBUG_COUNTERS 0
571 #endif /* !INVARIANTS */
572 #endif
573 
574 static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
575                    "iflib driver parameters");
576 
577 /*
578  * XXX need to ensure that this can't accidentally cause the head to be moved backwards
579  */
580 static int iflib_min_tx_latency = 0;
581 SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
582 		   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
583 static int iflib_no_tx_batch = 0;
584 SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
585 		   &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput");
586 
587 
588 #if IFLIB_DEBUG_COUNTERS
589 
590 static int iflib_tx_seen;
591 static int iflib_tx_sent;
592 static int iflib_tx_encap;
593 static int iflib_rx_allocs;
594 static int iflib_fl_refills;
595 static int iflib_fl_refills_large;
596 static int iflib_tx_frees;
597 
598 SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
599 		   &iflib_tx_seen, 0, "# tx mbufs seen");
600 SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
601 		   &iflib_tx_sent, 0, "# tx mbufs sent");
602 SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
603 		   &iflib_tx_encap, 0, "# tx mbufs encapped");
604 SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
605 		   &iflib_tx_frees, 0, "# tx frees");
606 SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
607 		   &iflib_rx_allocs, 0, "# rx allocations");
608 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
609 		   &iflib_fl_refills, 0, "# refills");
610 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
611 		   &iflib_fl_refills_large, 0, "# large refills");
612 
613 
614 static int iflib_txq_drain_flushing;
615 static int iflib_txq_drain_oactive;
616 static int iflib_txq_drain_notready;
617 static int iflib_txq_drain_encapfail;
618 
619 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
620 		   &iflib_txq_drain_flushing, 0, "# drain flushes");
621 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
622 		   &iflib_txq_drain_oactive, 0, "# drain oactives");
623 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
624 		   &iflib_txq_drain_notready, 0, "# drain notready");
625 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
626 		   &iflib_txq_drain_encapfail, 0, "# drain encap fails");
627 
628 
629 static int iflib_encap_load_mbuf_fail;
630 static int iflib_encap_pad_mbuf_fail;
631 static int iflib_encap_txq_avail_fail;
632 static int iflib_encap_txd_encap_fail;
633 
634 SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
635 		   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
636 SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
637 		   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
638 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
639 		   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
640 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
641 		   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
642 
643 static int iflib_task_fn_rxs;
644 static int iflib_rx_intr_enables;
645 static int iflib_fast_intrs;
646 static int iflib_intr_link;
647 static int iflib_intr_msix;
648 static int iflib_rx_unavail;
649 static int iflib_rx_ctx_inactive;
650 static int iflib_rx_zero_len;
651 static int iflib_rx_if_input;
652 static int iflib_rx_mbuf_null;
653 static int iflib_rxd_flush;
654 
655 static int iflib_verbose_debug;
656 
657 SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
658 		   &iflib_intr_link, 0, "# intr link calls");
659 SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
660 		   &iflib_intr_msix, 0, "# intr msix calls");
661 SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
662 		   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
663 SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
664 		   &iflib_rx_intr_enables, 0, "# rx intr enables");
665 SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
666 		   &iflib_fast_intrs, 0, "# fast_intr calls");
667 SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
668 		   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
669 SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
670 		   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
671 SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
672 		   &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
673 SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
674 		   &iflib_rx_if_input, 0, "# times rxeof called if_input");
675 SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
676 		   &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
677 SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
678 	         &iflib_rxd_flush, 0, "# times rxd_flush called");
679 SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
680 		   &iflib_verbose_debug, 0, "enable verbose debugging");
681 
682 #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
683 static void
684 iflib_debug_reset(void)
685 {
686 	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
687 		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
688 		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
689 		iflib_txq_drain_notready = iflib_txq_drain_encapfail =
690 		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
691 		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
692 		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
693 		iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
694 		iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
695 		iflib_rx_mbuf_null = iflib_rxd_flush = 0;
696 }
697 
698 #else
699 #define DBG_COUNTER_INC(name)
700 static void iflib_debug_reset(void) {}
701 #endif
702 
703 
704 
705 #define IFLIB_DEBUG 0
706 
707 static void iflib_tx_structures_free(if_ctx_t ctx);
708 static void iflib_rx_structures_free(if_ctx_t ctx);
709 static int iflib_queues_alloc(if_ctx_t ctx);
710 static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
711 static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
712 static int iflib_qset_structures_setup(if_ctx_t ctx);
713 static int iflib_msix_init(if_ctx_t ctx);
714 static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str);
715 static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
716 static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
717 static int iflib_register(if_ctx_t);
718 static void iflib_init_locked(if_ctx_t ctx);
719 static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
720 static void iflib_add_device_sysctl_post(if_ctx_t ctx);
721 static void iflib_ifmp_purge(iflib_txq_t txq);
722 static void _iflib_pre_assert(if_softc_ctx_t scctx);
723 static void iflib_stop(if_ctx_t ctx);
724 static void iflib_if_init_locked(if_ctx_t ctx);
725 #ifndef __NO_STRICT_ALIGNMENT
726 static struct mbuf * iflib_fixup_rx(struct mbuf *m);
727 #endif
728 
729 #ifdef DEV_NETMAP
730 #include <sys/selinfo.h>
731 #include <net/netmap.h>
732 #include <dev/netmap/netmap_kern.h>
733 
734 MODULE_DEPEND(iflib, netmap, 1, 1, 1);
735 
736 static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);
737 
738 /*
739  * device-specific sysctl variables:
740  *
741  * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
742  *	During regular operations the CRC is stripped, but on some
743  *	hardware reception of frames not multiple of 64 is slower,
744  *	so using crcstrip=0 helps in benchmarks.
745  *
746  * iflib_rx_miss, iflib_rx_miss_bufs:
747  *	count packets that might be missed due to lost interrupts.
748  */
749 SYSCTL_DECL(_dev_netmap);
750 /*
751  * The xl driver by default strips CRCs and we do not override it.
752  */
753 
754 int iflib_crcstrip = 1;
755 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
756     CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");
757 
758 int iflib_rx_miss, iflib_rx_miss_bufs;
759 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
760     CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
761 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
762     CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");
763 
764 /*
765  * Register/unregister. We are already under netmap lock.
766  * Only called on the first register or the last unregister.
767  */
768 static int
769 iflib_netmap_register(struct netmap_adapter *na, int onoff)
770 {
771 	struct ifnet *ifp = na->ifp;
772 	if_ctx_t ctx = ifp->if_softc;
773 	int status;
774 
775 	CTX_LOCK(ctx);
776 	IFDI_INTR_DISABLE(ctx);
777 
778 	/* Tell the stack that the interface is no longer active */
779 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
780 
781 	if (!CTX_IS_VF(ctx))
782 		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
783 
784 	/* enable or disable flags and callbacks in na and ifp */
785 	if (onoff) {
786 		nm_set_native_flags(na);
787 	} else {
788 		nm_clear_native_flags(na);
789 	}
790 	iflib_stop(ctx);
791 	iflib_init_locked(ctx);
792 	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
793 	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
794 	if (status)
795 		nm_clear_native_flags(na);
796 	CTX_UNLOCK(ctx);
797 	return (status);
798 }
799 
800 static int
801 netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
802 {
803 	struct netmap_adapter *na = kring->na;
804 	u_int const lim = kring->nkr_num_slots - 1;
805 	u_int head = kring->rhead;
806 	struct netmap_ring *ring = kring->ring;
807 	bus_dmamap_t *map;
808 	struct if_rxd_update iru;
809 	if_ctx_t ctx = rxq->ifr_ctx;
810 	iflib_fl_t fl = &rxq->ifr_fl[0];
811 	uint32_t refill_pidx, nic_i;
812 
813 	if (nm_i == head && __predict_true(!init))
814 		return 0;
815 	iru_init(&iru, rxq, 0 /* flid */);
816 	map = fl->ifl_sds.ifsd_map;
817 	refill_pidx = netmap_idx_k2n(kring, nm_i);
818 	/*
819 	 * IMPORTANT: we must leave one free slot in the ring,
820 	 * so move head back by one unit
821 	 */
822 	head = nm_prev(head, lim);
823 	while (nm_i != head) {
824 		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
825 			struct netmap_slot *slot = &ring->slot[nm_i];
826 			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
827 			uint32_t nic_i_dma = refill_pidx;
828 			nic_i = netmap_idx_k2n(kring, nm_i);
829 
830 			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
831 
832 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
833 			        return netmap_ring_reinit(kring);
834 
835 			fl->ifl_vm_addrs[tmp_pidx] = addr;
836 			if (__predict_false(init) && map) {
837 				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
838 			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
839 				/* buffer has changed, reload map */
840 				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
841 			}
842 			slot->flags &= ~NS_BUF_CHANGED;
843 
844 			nm_i = nm_next(nm_i, lim);
845 			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
846 			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
847 				continue;
848 
849 			iru.iru_pidx = refill_pidx;
850 			iru.iru_count = tmp_pidx+1;
851 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
852 
853 			refill_pidx = nic_i;
854 			if (map == NULL)
855 				continue;
856 
857 			for (int n = 0; n < iru.iru_count; n++) {
858 				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
859 						BUS_DMASYNC_PREREAD);
860 				/* XXX - change this to not use the netmap func*/
861 				nic_i_dma = nm_next(nic_i_dma, lim);
862 			}
863 		}
864 	}
865 	kring->nr_hwcur = head;
866 
867 	if (map)
868 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
869 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
870 	ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
871 	return (0);
872 }
873 
874 /*
875  * Reconcile kernel and user view of the transmit ring.
876  *
877  * All information is in the kring.
878  * Userspace wants to send packets up to the one before kring->rhead,
879  * kernel knows kring->nr_hwcur is the first unsent packet.
880  *
881  * Here we push packets out (as many as possible), and possibly
882  * reclaim buffers from previously completed transmission.
883  *
884  * The caller (netmap) guarantees that there is only one instance
885  * running at any time. Any interference with other driver
886  * methods should be handled by the individual drivers.
887  */
888 static int
889 iflib_netmap_txsync(struct netmap_kring *kring, int flags)
890 {
891 	struct netmap_adapter *na = kring->na;
892 	struct ifnet *ifp = na->ifp;
893 	struct netmap_ring *ring = kring->ring;
894 	u_int nm_i;	/* index into the netmap ring */
895 	u_int nic_i;	/* index into the NIC ring */
896 	u_int n;
897 	u_int const lim = kring->nkr_num_slots - 1;
898 	u_int const head = kring->rhead;
899 	struct if_pkt_info pi;
900 
901 	/*
902 	 * interrupts on every tx packet are expensive so request
903 	 * them every half ring, or where NS_REPORT is set
904 	 */
905 	u_int report_frequency = kring->nkr_num_slots >> 1;
906 	/* device-specific */
907 	if_ctx_t ctx = ifp->if_softc;
908 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
909 
910 	if (txq->ift_sds.ifsd_map)
911 		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
912 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
913 
914 
915 	/*
916 	 * First part: process new packets to send.
917 	 * nm_i is the current index in the netmap ring,
918 	 * nic_i is the corresponding index in the NIC ring.
919 	 *
920 	 * If we have packets to send (nm_i != head)
921 	 * iterate over the netmap ring, fetch length and update
922 	 * the corresponding slot in the NIC ring. Some drivers also
923 	 * need to update the buffer's physical address in the NIC slot
924 	 * even NS_BUF_CHANGED is not set (PNMB computes the addresses).
925 	 *
926 	 * The netmap_reload_map() calls is especially expensive,
927 	 * even when (as in this case) the tag is 0, so do only
928 	 * when the buffer has actually changed.
929 	 *
930 	 * If possible do not set the report/intr bit on all slots,
931 	 * but only a few times per ring or when NS_REPORT is set.
932 	 *
933 	 * Finally, on 10G and faster drivers, it might be useful
934 	 * to prefetch the next slot and txr entry.
935 	 */
936 
937 	nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);
938 	pkt_info_zero(&pi);
939 	pi.ipi_segs = txq->ift_segs;
940 	pi.ipi_qsidx = kring->ring_id;
941 	if (nm_i != head) {	/* we have new packets to send */
942 		nic_i = netmap_idx_k2n(kring, nm_i);
943 
944 		__builtin_prefetch(&ring->slot[nm_i]);
945 		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
946 		if (txq->ift_sds.ifsd_map)
947 			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
948 
949 		for (n = 0; nm_i != head; n++) {
950 			struct netmap_slot *slot = &ring->slot[nm_i];
951 			u_int len = slot->len;
952 			uint64_t paddr;
953 			void *addr = PNMB(na, slot, &paddr);
954 			int flags = (slot->flags & NS_REPORT ||
955 				nic_i == 0 || nic_i == report_frequency) ?
956 				IPI_TX_INTR : 0;
957 
958 			/* device-specific */
959 			pi.ipi_len = len;
960 			pi.ipi_segs[0].ds_addr = paddr;
961 			pi.ipi_segs[0].ds_len = len;
962 			pi.ipi_nsegs = 1;
963 			pi.ipi_ndescs = 0;
964 			pi.ipi_pidx = nic_i;
965 			pi.ipi_flags = flags;
966 
967 			/* Fill the slot in the NIC ring. */
968 			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
969 
970 			/* prefetch for next round */
971 			__builtin_prefetch(&ring->slot[nm_i + 1]);
972 			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
973 			if (txq->ift_sds.ifsd_map) {
974 				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
975 
976 				NM_CHECK_ADDR_LEN(na, addr, len);
977 
978 				if (slot->flags & NS_BUF_CHANGED) {
979 					/* buffer has changed, reload map */
980 					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
981 				}
982 				/* make sure changes to the buffer are synced */
983 				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
984 						BUS_DMASYNC_PREWRITE);
985 			}
986 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
987 			nm_i = nm_next(nm_i, lim);
988 			nic_i = nm_next(nic_i, lim);
989 		}
990 		kring->nr_hwcur = head;
991 
992 		/* synchronize the NIC ring */
993 		if (txq->ift_sds.ifsd_map)
994 			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
995 						BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
996 
997 		/* (re)start the tx unit up to slot nic_i (excluded) */
998 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
999 	}
1000 
1001 	/*
1002 	 * Second part: reclaim buffers for completed transmissions.
1003 	 */
1004 	if (iflib_tx_credits_update(ctx, txq)) {
1005 		/* some tx completed, increment avail */
1006 		nic_i = txq->ift_cidx_processed;
1007 		kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
1008 	}
1009 	return (0);
1010 }
1011 
1012 /*
1013  * Reconcile kernel and user view of the receive ring.
1014  * Same as for the txsync, this routine must be efficient.
1015  * The caller guarantees a single invocations, but races against
1016  * the rest of the driver should be handled here.
1017  *
1018  * On call, kring->rhead is the first packet that userspace wants
1019  * to keep, and kring->rcur is the wakeup point.
1020  * The kernel has previously reported packets up to kring->rtail.
1021  *
1022  * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
1023  * of whether or not we received an interrupt.
1024  */
1025 static int
1026 iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
1027 {
1028 	struct netmap_adapter *na = kring->na;
1029 	struct netmap_ring *ring = kring->ring;
1030 	uint32_t nm_i;	/* index into the netmap ring */
1031 	uint32_t nic_i;	/* index into the NIC ring */
1032 	u_int i, n;
1033 	u_int const lim = kring->nkr_num_slots - 1;
1034 	u_int const head = netmap_idx_n2k(kring, kring->rhead);
1035 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1036 	struct if_rxd_info ri;
1037 
1038 	struct ifnet *ifp = na->ifp;
1039 	if_ctx_t ctx = ifp->if_softc;
1040 	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
1041 	iflib_fl_t fl = rxq->ifr_fl;
1042 	if (head > lim)
1043 		return netmap_ring_reinit(kring);
1044 
1045 	/* XXX check sync modes */
1046 	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
1047 		if (fl->ifl_sds.ifsd_map == NULL)
1048 			continue;
1049 		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
1050 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1051 	}
1052 	/*
1053 	 * First part: import newly received packets.
1054 	 *
1055 	 * nm_i is the index of the next free slot in the netmap ring,
1056 	 * nic_i is the index of the next received packet in the NIC ring,
1057 	 * and they may differ in case if_init() has been called while
1058 	 * in netmap mode. For the receive ring we have
1059 	 *
1060 	 *	nic_i = rxr->next_check;
1061 	 *	nm_i = kring->nr_hwtail (previous)
1062 	 * and
1063 	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1064 	 *
1065 	 * rxr->next_check is set to 0 on a ring reinit
1066 	 */
1067 	if (netmap_no_pendintr || force_update) {
1068 		int crclen = iflib_crcstrip ? 0 : 4;
1069 		int error, avail;
1070 		uint16_t slot_flags = kring->nkr_slot_flags;
1071 
1072 		for (i = 0; i < rxq->ifr_nfl; i++) {
1073 			fl = &rxq->ifr_fl[i];
1074 			nic_i = fl->ifl_cidx;
1075 			nm_i = netmap_idx_n2k(kring, nic_i);
1076 			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
1077 			for (n = 0; avail > 0; n++, avail--) {
1078 				rxd_info_zero(&ri);
1079 				ri.iri_frags = rxq->ifr_frags;
1080 				ri.iri_qsidx = kring->ring_id;
1081 				ri.iri_ifp = ctx->ifc_ifp;
1082 				ri.iri_cidx = nic_i;
1083 
1084 				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1085 				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
1086 				ring->slot[nm_i].flags = slot_flags;
1087 				if (fl->ifl_sds.ifsd_map)
1088 					bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
1089 							fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
1090 				nm_i = nm_next(nm_i, lim);
1091 				nic_i = nm_next(nic_i, lim);
1092 			}
1093 			if (n) { /* update the state variables */
1094 				if (netmap_no_pendintr && !force_update) {
1095 					/* diagnostics */
1096 					iflib_rx_miss ++;
1097 					iflib_rx_miss_bufs += n;
1098 				}
1099 				fl->ifl_cidx = nic_i;
1100 				kring->nr_hwtail = netmap_idx_k2n(kring, nm_i);
1101 			}
1102 			kring->nr_kflags &= ~NKR_PENDINTR;
1103 		}
1104 	}
1105 	/*
1106 	 * Second part: skip past packets that userspace has released.
1107 	 * (kring->nr_hwcur to head excluded),
1108 	 * and make the buffers available for reception.
1109 	 * As usual nm_i is the index in the netmap ring,
1110 	 * nic_i is the index in the NIC ring, and
1111 	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1112 	 */
1113 	/* XXX not sure how this will work with multiple free lists */
1114 	nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);
1115 
1116 	return (netmap_fl_refill(rxq, kring, nm_i, false));
1117 }
1118 
1119 static void
1120 iflib_netmap_intr(struct netmap_adapter *na, int onoff)
1121 {
1122 	struct ifnet *ifp = na->ifp;
1123 	if_ctx_t ctx = ifp->if_softc;
1124 
1125 	CTX_LOCK(ctx);
1126 	if (onoff) {
1127 		IFDI_INTR_ENABLE(ctx);
1128 	} else {
1129 		IFDI_INTR_DISABLE(ctx);
1130 	}
1131 	CTX_UNLOCK(ctx);
1132 }
1133 
1134 
1135 static int
1136 iflib_netmap_attach(if_ctx_t ctx)
1137 {
1138 	struct netmap_adapter na;
1139 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1140 
1141 	bzero(&na, sizeof(na));
1142 
1143 	na.ifp = ctx->ifc_ifp;
1144 	na.na_flags = NAF_BDG_MAYSLEEP;
1145 	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
1146 	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
1147 
1148 	na.num_tx_desc = scctx->isc_ntxd[0];
1149 	na.num_rx_desc = scctx->isc_nrxd[0];
1150 	na.nm_txsync = iflib_netmap_txsync;
1151 	na.nm_rxsync = iflib_netmap_rxsync;
1152 	na.nm_register = iflib_netmap_register;
1153 	na.nm_intr = iflib_netmap_intr;
1154 	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
1155 	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
1156 	return (netmap_attach(&na));
1157 }
1158 
1159 static void
1160 iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
1161 {
1162 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1163 	struct netmap_slot *slot;
1164 
1165 	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1166 	if (slot == NULL)
1167 		return;
1168 	if (txq->ift_sds.ifsd_map == NULL)
1169 		return;
1170 
1171 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
1172 
1173 		/*
1174 		 * In netmap mode, set the map for the packet buffer.
1175 		 * NOTE: Some drivers (not this one) also need to set
1176 		 * the physical buffer address in the NIC ring.
1177 		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
1178 		 * netmap slot index, si
1179 		 */
1180 		int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i);
1181 		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
1182 	}
1183 }
1184 
1185 static void
1186 iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
1187 {
1188 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1189 	struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id];
1190 	struct netmap_slot *slot;
1191 	uint32_t nm_i;
1192 
1193 	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1194 	if (slot == NULL)
1195 		return;
1196 	nm_i = netmap_idx_n2k(kring, 0);
1197 	netmap_fl_refill(rxq, kring, nm_i, true);
1198 }
1199 
1200 #define iflib_netmap_detach(ifp) netmap_detach(ifp)
1201 
1202 #else
1203 #define iflib_netmap_txq_init(ctx, txq)
1204 #define iflib_netmap_rxq_init(ctx, rxq)
1205 #define iflib_netmap_detach(ifp)
1206 
1207 #define iflib_netmap_attach(ctx) (0)
1208 #define netmap_rx_irq(ifp, qid, budget) (0)
1209 #define netmap_tx_irq(ifp, qid) do {} while (0)
1210 
1211 #endif
1212 
1213 #if defined(__i386__) || defined(__amd64__)
1214 static __inline void
1215 prefetch(void *x)
1216 {
1217 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1218 }
1219 static __inline void
1220 prefetch2cachelines(void *x)
1221 {
1222 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1223 #if (CACHE_LINE_SIZE < 128)
1224 	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
1225 #endif
1226 }
1227 #else
1228 #define prefetch(x)
1229 #define prefetch2cachelines(x)
1230 #endif
1231 
1232 static void
1233 iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
1234 {
1235 	iflib_fl_t fl;
1236 
1237 	fl = &rxq->ifr_fl[flid];
1238 	iru->iru_paddrs = fl->ifl_bus_addrs;
1239 	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
1240 	iru->iru_idxs = fl->ifl_rxd_idxs;
1241 	iru->iru_qsidx = rxq->ifr_id;
1242 	iru->iru_buf_size = fl->ifl_buf_size;
1243 	iru->iru_flidx = fl->ifl_id;
1244 }
1245 
1246 static void
1247 _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
1248 {
1249 	if (err)
1250 		return;
1251 	*(bus_addr_t *) arg = segs[0].ds_addr;
1252 }
1253 
1254 int
1255 iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
1256 {
1257 	int err;
1258 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1259 	device_t dev = ctx->ifc_dev;
1260 
1261 	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
1262 
1263 	err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1264 				sctx->isc_q_align, 0,	/* alignment, bounds */
1265 				BUS_SPACE_MAXADDR,	/* lowaddr */
1266 				BUS_SPACE_MAXADDR,	/* highaddr */
1267 				NULL, NULL,		/* filter, filterarg */
1268 				size,			/* maxsize */
1269 				1,			/* nsegments */
1270 				size,			/* maxsegsize */
1271 				BUS_DMA_ALLOCNOW,	/* flags */
1272 				NULL,			/* lockfunc */
1273 				NULL,			/* lockarg */
1274 				&dma->idi_tag);
1275 	if (err) {
1276 		device_printf(dev,
1277 		    "%s: bus_dma_tag_create failed: %d\n",
1278 		    __func__, err);
1279 		goto fail_0;
1280 	}
1281 
1282 	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
1283 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
1284 	if (err) {
1285 		device_printf(dev,
1286 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
1287 		    __func__, (uintmax_t)size, err);
1288 		goto fail_1;
1289 	}
1290 
1291 	dma->idi_paddr = IF_BAD_DMA;
1292 	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
1293 	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
1294 	if (err || dma->idi_paddr == IF_BAD_DMA) {
1295 		device_printf(dev,
1296 		    "%s: bus_dmamap_load failed: %d\n",
1297 		    __func__, err);
1298 		goto fail_2;
1299 	}
1300 
1301 	dma->idi_size = size;
1302 	return (0);
1303 
1304 fail_2:
1305 	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1306 fail_1:
1307 	bus_dma_tag_destroy(dma->idi_tag);
1308 fail_0:
1309 	dma->idi_tag = NULL;
1310 
1311 	return (err);
1312 }
1313 
1314 int
1315 iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
1316 {
1317 	int i, err;
1318 	iflib_dma_info_t *dmaiter;
1319 
1320 	dmaiter = dmalist;
1321 	for (i = 0; i < count; i++, dmaiter++) {
1322 		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
1323 			break;
1324 	}
1325 	if (err)
1326 		iflib_dma_free_multi(dmalist, i);
1327 	return (err);
1328 }
1329 
1330 void
1331 iflib_dma_free(iflib_dma_info_t dma)
1332 {
1333 	if (dma->idi_tag == NULL)
1334 		return;
1335 	if (dma->idi_paddr != IF_BAD_DMA) {
1336 		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
1337 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1338 		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
1339 		dma->idi_paddr = IF_BAD_DMA;
1340 	}
1341 	if (dma->idi_vaddr != NULL) {
1342 		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1343 		dma->idi_vaddr = NULL;
1344 	}
1345 	bus_dma_tag_destroy(dma->idi_tag);
1346 	dma->idi_tag = NULL;
1347 }
1348 
1349 void
1350 iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
1351 {
1352 	int i;
1353 	iflib_dma_info_t *dmaiter = dmalist;
1354 
1355 	for (i = 0; i < count; i++, dmaiter++)
1356 		iflib_dma_free(*dmaiter);
1357 }
1358 
1359 #ifdef EARLY_AP_STARTUP
1360 static const int iflib_started = 1;
1361 #else
1362 /*
1363  * We used to abuse the smp_started flag to decide if the queues have been
1364  * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
1365  * That gave bad races, since the SYSINIT() runs strictly after smp_started
1366  * is set.  Run a SYSINIT() strictly after that to just set a usable
1367  * completion flag.
1368  */
1369 
1370 static int iflib_started;
1371 
1372 static void
1373 iflib_record_started(void *arg)
1374 {
1375 	iflib_started = 1;
1376 }
1377 
1378 SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
1379 	iflib_record_started, NULL);
1380 #endif
1381 
1382 static int
1383 iflib_fast_intr(void *arg)
1384 {
1385 	iflib_filter_info_t info = arg;
1386 	struct grouptask *gtask = info->ifi_task;
1387 	if (!iflib_started)
1388 		return (FILTER_HANDLED);
1389 
1390 	DBG_COUNTER_INC(fast_intrs);
1391 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
1392 		return (FILTER_HANDLED);
1393 
1394 	GROUPTASK_ENQUEUE(gtask);
1395 	return (FILTER_HANDLED);
1396 }
1397 
1398 static int
1399 iflib_fast_intr_rxtx(void *arg)
1400 {
1401 	iflib_filter_info_t info = arg;
1402 	struct grouptask *gtask = info->ifi_task;
1403 	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
1404 	if_ctx_t ctx;
1405 	int i, cidx;
1406 
1407 	if (!iflib_started)
1408 		return (FILTER_HANDLED);
1409 
1410 	DBG_COUNTER_INC(fast_intrs);
1411 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
1412 		return (FILTER_HANDLED);
1413 
1414 	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
1415 		qidx_t txqid = rxq->ifr_txqid[i];
1416 
1417 		ctx = rxq->ifr_ctx;
1418 
1419 		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
1420 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
1421 			continue;
1422 		}
1423 		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
1424 	}
1425 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
1426 		cidx = rxq->ifr_cq_cidx;
1427 	else
1428 		cidx = rxq->ifr_fl[0].ifl_cidx;
1429 	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
1430 		GROUPTASK_ENQUEUE(gtask);
1431 	else
1432 		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
1433 	return (FILTER_HANDLED);
1434 }
1435 
1436 
1437 static int
1438 iflib_fast_intr_ctx(void *arg)
1439 {
1440 	iflib_filter_info_t info = arg;
1441 	struct grouptask *gtask = info->ifi_task;
1442 
1443 	if (!iflib_started)
1444 		return (FILTER_HANDLED);
1445 
1446 	DBG_COUNTER_INC(fast_intrs);
1447 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
1448 		return (FILTER_HANDLED);
1449 
1450 	GROUPTASK_ENQUEUE(gtask);
1451 	return (FILTER_HANDLED);
1452 }
1453 
1454 static int
1455 _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
1456 	driver_filter_t filter, driver_intr_t handler, void *arg,
1457 				 char *name)
1458 {
1459 	int rc, flags;
1460 	struct resource *res;
1461 	void *tag = NULL;
1462 	device_t dev = ctx->ifc_dev;
1463 
1464 	flags = RF_ACTIVE;
1465 	if (ctx->ifc_flags & IFC_LEGACY)
1466 		flags |= RF_SHAREABLE;
1467 	MPASS(rid < 512);
1468 	irq->ii_rid = rid;
1469 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
1470 	if (res == NULL) {
1471 		device_printf(dev,
1472 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
1473 		return (ENOMEM);
1474 	}
1475 	irq->ii_res = res;
1476 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
1477 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
1478 						filter, handler, arg, &tag);
1479 	if (rc != 0) {
1480 		device_printf(dev,
1481 		    "failed to setup interrupt for rid %d, name %s: %d\n",
1482 					  rid, name ? name : "unknown", rc);
1483 		return (rc);
1484 	} else if (name)
1485 		bus_describe_intr(dev, res, tag, "%s", name);
1486 
1487 	irq->ii_tag = tag;
1488 	return (0);
1489 }
1490 
1491 
1492 /*********************************************************************
1493  *
1494  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1495  *  the information needed to transmit a packet on the wire. This is
1496  *  called only once at attach, setup is done every reset.
1497  *
1498  **********************************************************************/
1499 
1500 static int
1501 iflib_txsd_alloc(iflib_txq_t txq)
1502 {
1503 	if_ctx_t ctx = txq->ift_ctx;
1504 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1505 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1506 	device_t dev = ctx->ifc_dev;
1507 	int err, nsegments, ntsosegments;
1508 
1509 	nsegments = scctx->isc_tx_nsegments;
1510 	ntsosegments = scctx->isc_tx_tso_segments_max;
1511 	MPASS(scctx->isc_ntxd[0] > 0);
1512 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
1513 	MPASS(nsegments > 0);
1514 	MPASS(ntsosegments > 0);
1515 	/*
1516 	 * Setup DMA descriptor areas.
1517 	 */
1518 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1519 			       1, 0,			/* alignment, bounds */
1520 			       BUS_SPACE_MAXADDR,	/* lowaddr */
1521 			       BUS_SPACE_MAXADDR,	/* highaddr */
1522 			       NULL, NULL,		/* filter, filterarg */
1523 			       sctx->isc_tx_maxsize,		/* maxsize */
1524 			       nsegments,	/* nsegments */
1525 			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
1526 			       0,			/* flags */
1527 			       NULL,			/* lockfunc */
1528 			       NULL,			/* lockfuncarg */
1529 			       &txq->ift_desc_tag))) {
1530 		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
1531 		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
1532 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
1533 		goto fail;
1534 	}
1535 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1536 			       1, 0,			/* alignment, bounds */
1537 			       BUS_SPACE_MAXADDR,	/* lowaddr */
1538 			       BUS_SPACE_MAXADDR,	/* highaddr */
1539 			       NULL, NULL,		/* filter, filterarg */
1540 			       scctx->isc_tx_tso_size_max,		/* maxsize */
1541 			       ntsosegments,	/* nsegments */
1542 			       scctx->isc_tx_tso_segsize_max,	/* maxsegsize */
1543 			       0,			/* flags */
1544 			       NULL,			/* lockfunc */
1545 			       NULL,			/* lockfuncarg */
1546 			       &txq->ift_tso_desc_tag))) {
1547 		device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);
1548 
1549 		goto fail;
1550 	}
1551 	if (!(txq->ift_sds.ifsd_flags =
1552 	    (uint8_t *) malloc(sizeof(uint8_t) *
1553 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1554 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
1555 		err = ENOMEM;
1556 		goto fail;
1557 	}
1558 	if (!(txq->ift_sds.ifsd_m =
1559 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1560 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1561 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
1562 		err = ENOMEM;
1563 		goto fail;
1564 	}
1565 
1566         /* Create the descriptor buffer dma maps */
1567 #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
1568 	if ((ctx->ifc_flags & IFC_DMAR) == 0)
1569 		return (0);
1570 
1571 	if (!(txq->ift_sds.ifsd_map =
1572 	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1573 		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
1574 		err = ENOMEM;
1575 		goto fail;
1576 	}
1577 
1578 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1579 		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
1580 		if (err != 0) {
1581 			device_printf(dev, "Unable to create TX DMA map\n");
1582 			goto fail;
1583 		}
1584 	}
1585 #endif
1586 	return (0);
1587 fail:
1588 	/* We free all, it handles case where we are in the middle */
1589 	iflib_tx_structures_free(ctx);
1590 	return (err);
1591 }
1592 
1593 static void
1594 iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
1595 {
1596 	bus_dmamap_t map;
1597 
1598 	map = NULL;
1599 	if (txq->ift_sds.ifsd_map != NULL)
1600 		map = txq->ift_sds.ifsd_map[i];
1601 	if (map != NULL) {
1602 		bus_dmamap_unload(txq->ift_desc_tag, map);
1603 		bus_dmamap_destroy(txq->ift_desc_tag, map);
1604 		txq->ift_sds.ifsd_map[i] = NULL;
1605 	}
1606 }
1607 
1608 static void
1609 iflib_txq_destroy(iflib_txq_t txq)
1610 {
1611 	if_ctx_t ctx = txq->ift_ctx;
1612 
1613 	for (int i = 0; i < txq->ift_size; i++)
1614 		iflib_txsd_destroy(ctx, txq, i);
1615 	if (txq->ift_sds.ifsd_map != NULL) {
1616 		free(txq->ift_sds.ifsd_map, M_IFLIB);
1617 		txq->ift_sds.ifsd_map = NULL;
1618 	}
1619 	if (txq->ift_sds.ifsd_m != NULL) {
1620 		free(txq->ift_sds.ifsd_m, M_IFLIB);
1621 		txq->ift_sds.ifsd_m = NULL;
1622 	}
1623 	if (txq->ift_sds.ifsd_flags != NULL) {
1624 		free(txq->ift_sds.ifsd_flags, M_IFLIB);
1625 		txq->ift_sds.ifsd_flags = NULL;
1626 	}
1627 	if (txq->ift_desc_tag != NULL) {
1628 		bus_dma_tag_destroy(txq->ift_desc_tag);
1629 		txq->ift_desc_tag = NULL;
1630 	}
1631 	if (txq->ift_tso_desc_tag != NULL) {
1632 		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
1633 		txq->ift_tso_desc_tag = NULL;
1634 	}
1635 }
1636 
1637 static void
1638 iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
1639 {
1640 	struct mbuf **mp;
1641 
1642 	mp = &txq->ift_sds.ifsd_m[i];
1643 	if (*mp == NULL)
1644 		return;
1645 
1646 	if (txq->ift_sds.ifsd_map != NULL) {
1647 		bus_dmamap_sync(txq->ift_desc_tag,
1648 				txq->ift_sds.ifsd_map[i],
1649 				BUS_DMASYNC_POSTWRITE);
1650 		bus_dmamap_unload(txq->ift_desc_tag,
1651 				  txq->ift_sds.ifsd_map[i]);
1652 	}
1653 	m_free(*mp);
1654 	DBG_COUNTER_INC(tx_frees);
1655 	*mp = NULL;
1656 }
1657 
1658 static int
1659 iflib_txq_setup(iflib_txq_t txq)
1660 {
1661 	if_ctx_t ctx = txq->ift_ctx;
1662 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1663 	iflib_dma_info_t di;
1664 	int i;
1665 
1666 	/* Set number of descriptors available */
1667 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
1668 	/* XXX make configurable */
1669 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
1670 
1671 	/* Reset indices */
1672 	txq->ift_cidx_processed = 0;
1673 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
1674 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
1675 
1676 	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
1677 		bzero((void *)di->idi_vaddr, di->idi_size);
1678 
1679 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
1680 	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
1681 		bus_dmamap_sync(di->idi_tag, di->idi_map,
1682 						BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1683 	return (0);
1684 }
1685 
1686 /*********************************************************************
1687  *
1688  *  Allocate memory for rx_buffer structures. Since we use one
1689  *  rx_buffer per received packet, the maximum number of rx_buffer's
1690  *  that we'll need is equal to the number of receive descriptors
1691  *  that we've allocated.
1692  *
1693  **********************************************************************/
1694 static int
1695 iflib_rxsd_alloc(iflib_rxq_t rxq)
1696 {
1697 	if_ctx_t ctx = rxq->ifr_ctx;
1698 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1699 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1700 	device_t dev = ctx->ifc_dev;
1701 	iflib_fl_t fl;
1702 	int			err;
1703 
1704 	MPASS(scctx->isc_nrxd[0] > 0);
1705 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
1706 
1707 	fl = rxq->ifr_fl;
1708 	for (int i = 0; i <  rxq->ifr_nfl; i++, fl++) {
1709 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
1710 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1711 					 1, 0,			/* alignment, bounds */
1712 					 BUS_SPACE_MAXADDR,	/* lowaddr */
1713 					 BUS_SPACE_MAXADDR,	/* highaddr */
1714 					 NULL, NULL,		/* filter, filterarg */
1715 					 sctx->isc_rx_maxsize,	/* maxsize */
1716 					 sctx->isc_rx_nsegments,	/* nsegments */
1717 					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
1718 					 0,			/* flags */
1719 					 NULL,			/* lockfunc */
1720 					 NULL,			/* lockarg */
1721 					 &fl->ifl_desc_tag);
1722 		if (err) {
1723 			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
1724 				__func__, err);
1725 			goto fail;
1726 		}
1727 		if (!(fl->ifl_sds.ifsd_flags =
1728 		      (uint8_t *) malloc(sizeof(uint8_t) *
1729 					 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1730 			device_printf(dev, "Unable to allocate tx_buffer memory\n");
1731 			err = ENOMEM;
1732 			goto fail;
1733 		}
1734 		if (!(fl->ifl_sds.ifsd_m =
1735 		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
1736 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1737 			device_printf(dev, "Unable to allocate tx_buffer memory\n");
1738 			err = ENOMEM;
1739 			goto fail;
1740 		}
1741 		if (!(fl->ifl_sds.ifsd_cl =
1742 		      (caddr_t *) malloc(sizeof(caddr_t) *
1743 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1744 			device_printf(dev, "Unable to allocate tx_buffer memory\n");
1745 			err = ENOMEM;
1746 			goto fail;
1747 		}
1748 
1749 		/* Create the descriptor buffer dma maps */
1750 #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
1751 		if ((ctx->ifc_flags & IFC_DMAR) == 0)
1752 			continue;
1753 
1754 		if (!(fl->ifl_sds.ifsd_map =
1755 		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1756 			device_printf(dev, "Unable to allocate tx_buffer map memory\n");
1757 			err = ENOMEM;
1758 			goto fail;
1759 		}
1760 
1761 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
1762 			err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
1763 			if (err != 0) {
1764 				device_printf(dev, "Unable to create RX buffer DMA map\n");
1765 				goto fail;
1766 			}
1767 		}
1768 #endif
1769 	}
1770 	return (0);
1771 
1772 fail:
1773 	iflib_rx_structures_free(ctx);
1774 	return (err);
1775 }
1776 
1777 
1778 /*
1779  * Internal service routines
1780  */
1781 
1782 struct rxq_refill_cb_arg {
1783 	int               error;
1784 	bus_dma_segment_t seg;
1785 	int               nseg;
1786 };
1787 
1788 static void
1789 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1790 {
1791 	struct rxq_refill_cb_arg *cb_arg = arg;
1792 
1793 	cb_arg->error = error;
1794 	cb_arg->seg = segs[0];
1795 	cb_arg->nseg = nseg;
1796 }
1797 
1798 
1799 #ifdef ACPI_DMAR
1800 #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR)
1801 #else
1802 #define IS_DMAR(ctx) (0)
1803 #endif
1804 
1805 /**
1806  *	rxq_refill - refill an rxq  free-buffer list
1807  *	@ctx: the iflib context
1808  *	@rxq: the free-list to refill
1809  *	@n: the number of new buffers to allocate
1810  *
1811  *	(Re)populate an rxq free-buffer list with up to @n new packet buffers.
1812  *	The caller must assure that @n does not exceed the queue's capacity.
1813  */
1814 static void
1815 _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
1816 {
1817 	struct mbuf *m;
1818 	int idx, frag_idx = fl->ifl_fragidx;
1819         int pidx = fl->ifl_pidx;
1820 	caddr_t cl, *sd_cl;
1821 	struct mbuf **sd_m;
1822 	uint8_t *sd_flags;
1823 	struct if_rxd_update iru;
1824 	bus_dmamap_t *sd_map;
1825 	int n, i = 0;
1826 	uint64_t bus_addr;
1827 	int err;
1828 	qidx_t credits;
1829 
1830 	sd_m = fl->ifl_sds.ifsd_m;
1831 	sd_map = fl->ifl_sds.ifsd_map;
1832 	sd_cl = fl->ifl_sds.ifsd_cl;
1833 	sd_flags = fl->ifl_sds.ifsd_flags;
1834 	idx = pidx;
1835 	credits = fl->ifl_credits;
1836 
1837 	n  = count;
1838 	MPASS(n > 0);
1839 	MPASS(credits + n <= fl->ifl_size);
1840 
1841 	if (pidx < fl->ifl_cidx)
1842 		MPASS(pidx + n <= fl->ifl_cidx);
1843 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
1844 		MPASS(fl->ifl_gen == 0);
1845 	if (pidx > fl->ifl_cidx)
1846 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
1847 
1848 	DBG_COUNTER_INC(fl_refills);
1849 	if (n > 8)
1850 		DBG_COUNTER_INC(fl_refills_large);
1851 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
1852 	while (n--) {
1853 		/*
1854 		 * We allocate an uninitialized mbuf + cluster, mbuf is
1855 		 * initialized after rx.
1856 		 *
1857 		 * If the cluster is still set then we know a minimum sized packet was received
1858 		 */
1859 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,  &frag_idx);
1860 		if ((frag_idx < 0) || (frag_idx >= fl->ifl_size))
1861                 	bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
1862 		if ((cl = sd_cl[frag_idx]) == NULL) {
1863                        if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
1864 				break;
1865 #if MEMORY_LOGGING
1866 			fl->ifl_cl_enqueued++;
1867 #endif
1868 		}
1869 		if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
1870 			break;
1871 		}
1872 #if MEMORY_LOGGING
1873 		fl->ifl_m_enqueued++;
1874 #endif
1875 
1876 		DBG_COUNTER_INC(rx_allocs);
1877 #if defined(__i386__) || defined(__amd64__)
1878 		if (!IS_DMAR(ctx)) {
1879 			bus_addr = pmap_kextract((vm_offset_t)cl);
1880 		} else
1881 #endif
1882 		{
1883 			struct rxq_refill_cb_arg cb_arg;
1884 			iflib_rxq_t q;
1885 
1886 			cb_arg.error = 0;
1887 			q = fl->ifl_rxq;
1888 			MPASS(sd_map != NULL);
1889 			MPASS(sd_map[frag_idx] != NULL);
1890 			err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
1891 		         cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
1892 			bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
1893 					BUS_DMASYNC_PREREAD);
1894 
1895 			if (err != 0 || cb_arg.error) {
1896 				/*
1897 				 * !zone_pack ?
1898 				 */
1899 				if (fl->ifl_zone == zone_pack)
1900 					uma_zfree(fl->ifl_zone, cl);
1901 				m_free(m);
1902 				n = 0;
1903 				goto done;
1904 			}
1905 			bus_addr = cb_arg.seg.ds_addr;
1906 		}
1907                 bit_set(fl->ifl_rx_bitmap, frag_idx);
1908 		sd_flags[frag_idx] |= RX_SW_DESC_INUSE;
1909 
1910 		MPASS(sd_m[frag_idx] == NULL);
1911 		sd_cl[frag_idx] = cl;
1912 		sd_m[frag_idx] = m;
1913 		fl->ifl_rxd_idxs[i] = frag_idx;
1914 		fl->ifl_bus_addrs[i] = bus_addr;
1915 		fl->ifl_vm_addrs[i] = cl;
1916 		credits++;
1917 		i++;
1918 		MPASS(credits <= fl->ifl_size);
1919 		if (++idx == fl->ifl_size) {
1920 			fl->ifl_gen = 1;
1921 			idx = 0;
1922 		}
1923 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
1924 			iru.iru_pidx = pidx;
1925 			iru.iru_count = i;
1926 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
1927 			i = 0;
1928 			pidx = idx;
1929 			fl->ifl_pidx = idx;
1930 			fl->ifl_credits = credits;
1931 		}
1932 
1933 	}
1934 done:
1935 	if (i) {
1936 		iru.iru_pidx = pidx;
1937 		iru.iru_count = i;
1938 		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
1939 		fl->ifl_pidx = idx;
1940 		fl->ifl_credits = credits;
1941 	}
1942 	DBG_COUNTER_INC(rxd_flush);
1943 	if (fl->ifl_pidx == 0)
1944 		pidx = fl->ifl_size - 1;
1945 	else
1946 		pidx = fl->ifl_pidx - 1;
1947 
1948 	if (sd_map)
1949 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
1950 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1951 	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
1952 	fl->ifl_fragidx = frag_idx;
1953 }
1954 
1955 static __inline void
1956 __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
1957 {
1958 	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
1959 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
1960 #ifdef INVARIANTS
1961 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
1962 #endif
1963 
1964 	MPASS(fl->ifl_credits <= fl->ifl_size);
1965 	MPASS(reclaimable == delta);
1966 
1967 	if (reclaimable > 0)
1968 		_iflib_fl_refill(ctx, fl, min(max, reclaimable));
1969 }
1970 
1971 static void
1972 iflib_fl_bufs_free(iflib_fl_t fl)
1973 {
1974 	iflib_dma_info_t idi = fl->ifl_ifdi;
1975 	uint32_t i;
1976 
1977 	for (i = 0; i < fl->ifl_size; i++) {
1978 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
1979 		uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i];
1980 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
1981 
1982 		if (*sd_flags & RX_SW_DESC_INUSE) {
1983 			if (fl->ifl_sds.ifsd_map != NULL) {
1984 				bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i];
1985 				bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
1986 				bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
1987 			}
1988 			if (*sd_m != NULL) {
1989 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
1990 				uma_zfree(zone_mbuf, *sd_m);
1991 			}
1992 			if (*sd_cl != NULL)
1993 				uma_zfree(fl->ifl_zone, *sd_cl);
1994 			*sd_flags = 0;
1995 		} else {
1996 			MPASS(*sd_cl == NULL);
1997 			MPASS(*sd_m == NULL);
1998 		}
1999 #if MEMORY_LOGGING
2000 		fl->ifl_m_dequeued++;
2001 		fl->ifl_cl_dequeued++;
2002 #endif
2003 		*sd_cl = NULL;
2004 		*sd_m = NULL;
2005 	}
2006 #ifdef INVARIANTS
2007 	for (i = 0; i < fl->ifl_size; i++) {
2008 		MPASS(fl->ifl_sds.ifsd_flags[i] == 0);
2009 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
2010 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
2011 	}
2012 #endif
2013 	/*
2014 	 * Reset free list values
2015 	 */
2016 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
2017 	bzero(idi->idi_vaddr, idi->idi_size);
2018 }
2019 
2020 /*********************************************************************
2021  *
2022  *  Initialize a receive ring and its buffers.
2023  *
2024  **********************************************************************/
2025 static int
2026 iflib_fl_setup(iflib_fl_t fl)
2027 {
2028 	iflib_rxq_t rxq = fl->ifl_rxq;
2029 	if_ctx_t ctx = rxq->ifr_ctx;
2030 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2031 
2032 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
2033 	/*
2034 	** Free current RX buffer structs and their mbufs
2035 	*/
2036 	iflib_fl_bufs_free(fl);
2037 	/* Now replenish the mbufs */
2038 	MPASS(fl->ifl_credits == 0);
2039 	/*
2040 	 * XXX don't set the max_frame_size to larger
2041 	 * than the hardware can handle
2042 	 */
2043 	if (sctx->isc_max_frame_size <= 2048)
2044 		fl->ifl_buf_size = MCLBYTES;
2045 #ifndef CONTIGMALLOC_WORKS
2046 	else
2047 		fl->ifl_buf_size = MJUMPAGESIZE;
2048 #else
2049 	else if (sctx->isc_max_frame_size <= 4096)
2050 		fl->ifl_buf_size = MJUMPAGESIZE;
2051 	else if (sctx->isc_max_frame_size <= 9216)
2052 		fl->ifl_buf_size = MJUM9BYTES;
2053 	else
2054 		fl->ifl_buf_size = MJUM16BYTES;
2055 #endif
2056 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
2057 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
2058 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
2059 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
2060 
2061 
2062 	/* avoid pre-allocating zillions of clusters to an idle card
2063 	 * potentially speeding up attach
2064 	 */
2065 	_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
2066 	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
2067 	if (min(128, fl->ifl_size) != fl->ifl_credits)
2068 		return (ENOBUFS);
2069 	/*
2070 	 * handle failure
2071 	 */
2072 	MPASS(rxq != NULL);
2073 	MPASS(fl->ifl_ifdi != NULL);
2074 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2075 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2076 	return (0);
2077 }
2078 
2079 /*********************************************************************
2080  *
2081  *  Free receive ring data structures
2082  *
2083  **********************************************************************/
2084 static void
2085 iflib_rx_sds_free(iflib_rxq_t rxq)
2086 {
2087 	iflib_fl_t fl;
2088 	int i;
2089 
2090 	if (rxq->ifr_fl != NULL) {
2091 		for (i = 0; i < rxq->ifr_nfl; i++) {
2092 			fl = &rxq->ifr_fl[i];
2093 			if (fl->ifl_desc_tag != NULL) {
2094 				bus_dma_tag_destroy(fl->ifl_desc_tag);
2095 				fl->ifl_desc_tag = NULL;
2096 			}
2097 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2098 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2099 			/* XXX destroy maps first */
2100 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2101 			fl->ifl_sds.ifsd_m = NULL;
2102 			fl->ifl_sds.ifsd_cl = NULL;
2103 			fl->ifl_sds.ifsd_map = NULL;
2104 		}
2105 		free(rxq->ifr_fl, M_IFLIB);
2106 		rxq->ifr_fl = NULL;
2107 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
2108 	}
2109 }
2110 
2111 /*
2112  * MI independent logic
2113  *
2114  */
2115 static void
2116 iflib_timer(void *arg)
2117 {
2118 	iflib_txq_t txq = arg;
2119 	if_ctx_t ctx = txq->ift_ctx;
2120 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2121 
2122 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
2123 		return;
2124 	/*
2125 	** Check on the state of the TX queue(s), this
2126 	** can be done without the lock because its RO
2127 	** and the HUNG state will be static if set.
2128 	*/
2129 	IFDI_TIMER(ctx, txq->ift_id);
2130 	if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2131 	    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2132 	     (sctx->isc_pause_frames == 0)))
2133 		goto hung;
2134 
2135 	if (ifmp_ring_is_stalled(txq->ift_br))
2136 		txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2137 	txq->ift_cleaned_prev = txq->ift_cleaned;
2138 	/* handle any laggards */
2139 	if (txq->ift_db_pending)
2140 		GROUPTASK_ENQUEUE(&txq->ift_task);
2141 
2142 	sctx->isc_pause_frames = 0;
2143 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2144 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
2145 	return;
2146 hung:
2147 	CTX_LOCK(ctx);
2148 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2149 	device_printf(ctx->ifc_dev,  "TX(%d) desc avail = %d, pidx = %d\n",
2150 				  txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
2151 
2152 	IFDI_WATCHDOG_RESET(ctx);
2153 	ctx->ifc_watchdog_events++;
2154 
2155 	ctx->ifc_flags |= IFC_DO_RESET;
2156 	iflib_admin_intr_deferred(ctx);
2157 	CTX_UNLOCK(ctx);
2158 }
2159 
2160 static void
2161 iflib_init_locked(if_ctx_t ctx)
2162 {
2163 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2164 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2165 	if_t ifp = ctx->ifc_ifp;
2166 	iflib_fl_t fl;
2167 	iflib_txq_t txq;
2168 	iflib_rxq_t rxq;
2169 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
2170 
2171 
2172 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2173 	IFDI_INTR_DISABLE(ctx);
2174 
2175 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
2176 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
2177 	/* Set hardware offload abilities */
2178 	if_clearhwassist(ifp);
2179 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
2180 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
2181 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
2182 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
2183 	if (if_getcapenable(ifp) & IFCAP_TSO4)
2184 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
2185 	if (if_getcapenable(ifp) & IFCAP_TSO6)
2186 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
2187 
2188 	for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
2189 		CALLOUT_LOCK(txq);
2190 		callout_stop(&txq->ift_timer);
2191 		CALLOUT_UNLOCK(txq);
2192 		iflib_netmap_txq_init(ctx, txq);
2193 	}
2194 #ifdef INVARIANTS
2195 	i = if_getdrvflags(ifp);
2196 #endif
2197 	IFDI_INIT(ctx);
2198 	MPASS(if_getdrvflags(ifp) == i);
2199 	for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
2200 		/* XXX this should really be done on a per-queue basis */
2201 		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
2202 			MPASS(rxq->ifr_id == i);
2203 			iflib_netmap_rxq_init(ctx, rxq);
2204 			continue;
2205 		}
2206 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
2207 			if (iflib_fl_setup(fl)) {
2208 				device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n");
2209 				goto done;
2210 			}
2211 		}
2212 	}
2213 	done:
2214 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2215 	IFDI_INTR_ENABLE(ctx);
2216 	txq = ctx->ifc_txqs;
2217 	for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
2218 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
2219 			txq->ift_timer.c_cpu);
2220 }
2221 
2222 static int
2223 iflib_media_change(if_t ifp)
2224 {
2225 	if_ctx_t ctx = if_getsoftc(ifp);
2226 	int err;
2227 
2228 	CTX_LOCK(ctx);
2229 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
2230 		iflib_init_locked(ctx);
2231 	CTX_UNLOCK(ctx);
2232 	return (err);
2233 }
2234 
2235 static void
2236 iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
2237 {
2238 	if_ctx_t ctx = if_getsoftc(ifp);
2239 
2240 	CTX_LOCK(ctx);
2241 	IFDI_UPDATE_ADMIN_STATUS(ctx);
2242 	IFDI_MEDIA_STATUS(ctx, ifmr);
2243 	CTX_UNLOCK(ctx);
2244 }
2245 
2246 static void
2247 iflib_stop(if_ctx_t ctx)
2248 {
2249 	iflib_txq_t txq = ctx->ifc_txqs;
2250 	iflib_rxq_t rxq = ctx->ifc_rxqs;
2251 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2252 	iflib_dma_info_t di;
2253 	iflib_fl_t fl;
2254 	int i, j;
2255 
2256 	/* Tell the stack that the interface is no longer active */
2257 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2258 
2259 	IFDI_INTR_DISABLE(ctx);
2260 	DELAY(1000);
2261 	IFDI_STOP(ctx);
2262 	DELAY(1000);
2263 
2264 	iflib_debug_reset();
2265 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
2266 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
2267 		/* make sure all transmitters have completed before proceeding XXX */
2268 
2269 		/* clean any enqueued buffers */
2270 		iflib_ifmp_purge(txq);
2271 		/* Free any existing tx buffers. */
2272 		for (j = 0; j < txq->ift_size; j++) {
2273 			iflib_txsd_free(ctx, txq, j);
2274 		}
2275 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2276 		txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
2277 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
2278 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2279 		txq->ift_pullups = 0;
2280 		ifmp_ring_reset_stats(txq->ift_br);
2281 		for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++)
2282 			bzero((void *)di->idi_vaddr, di->idi_size);
2283 	}
2284 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
2285 		/* make sure all transmitters have completed before proceeding XXX */
2286 
2287 		for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwrxqs; j++, di++)
2288 			bzero((void *)di->idi_vaddr, di->idi_size);
2289 		/* also resets the free lists pidx/cidx */
2290 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
2291 			iflib_fl_bufs_free(fl);
2292 	}
2293 }
2294 
2295 static inline caddr_t
2296 calc_next_rxd(iflib_fl_t fl, int cidx)
2297 {
2298 	qidx_t size;
2299 	int nrxd;
2300 	caddr_t start, end, cur, next;
2301 
2302 	nrxd = fl->ifl_size;
2303 	size = fl->ifl_rxd_size;
2304 	start = fl->ifl_ifdi->idi_vaddr;
2305 
2306 	if (__predict_false(size == 0))
2307 		return (start);
2308 	cur = start + size*cidx;
2309 	end = start + size*nrxd;
2310 	next = CACHE_PTR_NEXT(cur);
2311 	return (next < end ? next : start);
2312 }
2313 
2314 static inline void
2315 prefetch_pkts(iflib_fl_t fl, int cidx)
2316 {
2317 	int nextptr;
2318 	int nrxd = fl->ifl_size;
2319 	caddr_t next_rxd;
2320 
2321 
2322 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2323 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2324 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
2325 	next_rxd = calc_next_rxd(fl, cidx);
2326 	prefetch(next_rxd);
2327 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2328 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2329 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2330 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2331 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2332 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2333 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2334 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
2335 }
2336 
2337 static void
2338 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
2339 {
2340 	int flid, cidx;
2341 	bus_dmamap_t map;
2342 	iflib_fl_t fl;
2343 	iflib_dma_info_t di;
2344 	int next;
2345 
2346 	map = NULL;
2347 	flid = irf->irf_flid;
2348 	cidx = irf->irf_idx;
2349 	fl = &rxq->ifr_fl[flid];
2350 	sd->ifsd_fl = fl;
2351 	sd->ifsd_cidx = cidx;
2352 	sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
2353 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
2354 	fl->ifl_credits--;
2355 #if MEMORY_LOGGING
2356 	fl->ifl_m_dequeued++;
2357 #endif
2358 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2359 		prefetch_pkts(fl, cidx);
2360 	if (fl->ifl_sds.ifsd_map != NULL) {
2361 		next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2362 		prefetch(&fl->ifl_sds.ifsd_map[next]);
2363 		map = fl->ifl_sds.ifsd_map[cidx];
2364 		di = fl->ifl_ifdi;
2365 		next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
2366 		prefetch(&fl->ifl_sds.ifsd_flags[next]);
2367 		bus_dmamap_sync(di->idi_tag, di->idi_map,
2368 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2369 
2370 	/* not valid assert if bxe really does SGE from non-contiguous elements */
2371 		MPASS(fl->ifl_cidx == cidx);
2372 		if (unload)
2373 			bus_dmamap_unload(fl->ifl_desc_tag, map);
2374 	}
2375 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
2376 	if (__predict_false(fl->ifl_cidx == 0))
2377 		fl->ifl_gen = 0;
2378 	if (map != NULL)
2379 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2380 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2381         bit_clear(fl->ifl_rx_bitmap, cidx);
2382 }
2383 
2384 static struct mbuf *
2385 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
2386 {
2387 	int i, padlen , flags;
2388 	struct mbuf *m, *mh, *mt;
2389 	caddr_t cl;
2390 
2391 	i = 0;
2392 	mh = NULL;
2393 	do {
2394 		rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd);
2395 
2396 		MPASS(*sd->ifsd_cl != NULL);
2397 		MPASS(*sd->ifsd_m != NULL);
2398 
2399 		/* Don't include zero-length frags */
2400 		if (ri->iri_frags[i].irf_len == 0) {
2401 			/* XXX we can save the cluster here, but not the mbuf */
2402 			m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
2403 			m_free(*sd->ifsd_m);
2404 			*sd->ifsd_m = NULL;
2405 			continue;
2406 		}
2407 		m = *sd->ifsd_m;
2408 		*sd->ifsd_m = NULL;
2409 		if (mh == NULL) {
2410 			flags = M_PKTHDR|M_EXT;
2411 			mh = mt = m;
2412 			padlen = ri->iri_pad;
2413 		} else {
2414 			flags = M_EXT;
2415 			mt->m_next = m;
2416 			mt = m;
2417 			/* assuming padding is only on the first fragment */
2418 			padlen = 0;
2419 		}
2420 		cl = *sd->ifsd_cl;
2421 		*sd->ifsd_cl = NULL;
2422 
2423 		/* Can these two be made one ? */
2424 		m_init(m, M_NOWAIT, MT_DATA, flags);
2425 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
2426 		/*
2427 		 * These must follow m_init and m_cljset
2428 		 */
2429 		m->m_data += padlen;
2430 		ri->iri_len -= padlen;
2431 		m->m_len = ri->iri_frags[i].irf_len;
2432 	} while (++i < ri->iri_nfrags);
2433 
2434 	return (mh);
2435 }
2436 
2437 /*
2438  * Process one software descriptor
2439  */
2440 static struct mbuf *
2441 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
2442 {
2443 	struct if_rxsd sd;
2444 	struct mbuf *m;
2445 
2446 	/* should I merge this back in now that the two paths are basically duplicated? */
2447 	if (ri->iri_nfrags == 1 &&
2448 	    ri->iri_frags[0].irf_len <= IFLIB_RX_COPY_THRESH) {
2449 		rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
2450 		m = *sd.ifsd_m;
2451 		*sd.ifsd_m = NULL;
2452 		m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
2453 #ifndef __NO_STRICT_ALIGNMENT
2454 		if (!IP_ALIGNED(m))
2455 			m->m_data += 2;
2456 #endif
2457 		memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
2458 		m->m_len = ri->iri_frags[0].irf_len;
2459        } else {
2460 		m = assemble_segments(rxq, ri, &sd);
2461 	}
2462 	m->m_pkthdr.len = ri->iri_len;
2463 	m->m_pkthdr.rcvif = ri->iri_ifp;
2464 	m->m_flags |= ri->iri_flags;
2465 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
2466 	m->m_pkthdr.flowid = ri->iri_flowid;
2467 	M_HASHTYPE_SET(m, ri->iri_rsstype);
2468 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
2469 	m->m_pkthdr.csum_data = ri->iri_csum_data;
2470 	return (m);
2471 }
2472 
2473 #if defined(INET6) || defined(INET)
2474 static void
2475 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
2476 {
2477 	CURVNET_SET(lc->ifp->if_vnet);
2478 #if defined(INET6)
2479 	*v6 = VNET(ip6_forwarding);
2480 #endif
2481 #if defined(INET)
2482 	*v4 = VNET(ipforwarding);
2483 #endif
2484 	CURVNET_RESTORE();
2485 }
2486 
2487 /*
2488  * Returns true if it's possible this packet could be LROed.
2489  * if it returns false, it is guaranteed that tcp_lro_rx()
2490  * would not return zero.
2491  */
2492 static bool
2493 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
2494 {
2495 	struct ether_header *eh;
2496 	uint16_t eh_type;
2497 
2498 	eh = mtod(m, struct ether_header *);
2499 	eh_type = ntohs(eh->ether_type);
2500 	switch (eh_type) {
2501 #if defined(INET6)
2502 		case ETHERTYPE_IPV6:
2503 			return !v6_forwarding;
2504 #endif
2505 #if defined (INET)
2506 		case ETHERTYPE_IP:
2507 			return !v4_forwarding;
2508 #endif
2509 	}
2510 
2511 	return false;
2512 }
2513 #else
2514 static void
2515 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
2516 {
2517 }
2518 #endif
2519 
2520 static bool
2521 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
2522 {
2523 	if_ctx_t ctx = rxq->ifr_ctx;
2524 	if_shared_ctx_t sctx = ctx->ifc_sctx;
2525 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2526 	int avail, i;
2527 	qidx_t *cidxp;
2528 	struct if_rxd_info ri;
2529 	int err, budget_left, rx_bytes, rx_pkts;
2530 	iflib_fl_t fl;
2531 	struct ifnet *ifp;
2532 	int lro_enabled;
2533 	bool lro_possible = false;
2534 	bool v4_forwarding, v6_forwarding;
2535 
2536 	/*
2537 	 * XXX early demux data packets so that if_input processing only handles
2538 	 * acks in interrupt context
2539 	 */
2540 	struct mbuf *m, *mh, *mt, *mf;
2541 
2542 	ifp = ctx->ifc_ifp;
2543 	mh = mt = NULL;
2544 	MPASS(budget > 0);
2545 	rx_pkts	= rx_bytes = 0;
2546 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
2547 		cidxp = &rxq->ifr_cq_cidx;
2548 	else
2549 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
2550 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
2551 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2552 			__iflib_fl_refill_lt(ctx, fl, budget + 8);
2553 		DBG_COUNTER_INC(rx_unavail);
2554 		return (false);
2555 	}
2556 
2557 	for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) {
2558 		if (__predict_false(!CTX_ACTIVE(ctx))) {
2559 			DBG_COUNTER_INC(rx_ctx_inactive);
2560 			break;
2561 		}
2562 		/*
2563 		 * Reset client set fields to their default values
2564 		 */
2565 		rxd_info_zero(&ri);
2566 		ri.iri_qsidx = rxq->ifr_id;
2567 		ri.iri_cidx = *cidxp;
2568 		ri.iri_ifp = ifp;
2569 		ri.iri_frags = rxq->ifr_frags;
2570 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
2571 
2572 		if (err)
2573 			goto err;
2574 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
2575 			*cidxp = ri.iri_cidx;
2576 			/* Update our consumer index */
2577 			/* XXX NB: shurd - check if this is still safe */
2578 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
2579 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
2580 				rxq->ifr_cq_gen = 0;
2581 			}
2582 			/* was this only a completion queue message? */
2583 			if (__predict_false(ri.iri_nfrags == 0))
2584 				continue;
2585 		}
2586 		MPASS(ri.iri_nfrags != 0);
2587 		MPASS(ri.iri_len != 0);
2588 
2589 		/* will advance the cidx on the corresponding free lists */
2590 		m = iflib_rxd_pkt_get(rxq, &ri);
2591 		if (avail == 0 && budget_left)
2592 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
2593 
2594 		if (__predict_false(m == NULL)) {
2595 			DBG_COUNTER_INC(rx_mbuf_null);
2596 			continue;
2597 		}
2598 		/* imm_pkt: -- cxgb */
2599 		if (mh == NULL)
2600 			mh = mt = m;
2601 		else {
2602 			mt->m_nextpkt = m;
2603 			mt = m;
2604 		}
2605 	}
2606 	/* make sure that we can refill faster than drain */
2607 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2608 		__iflib_fl_refill_lt(ctx, fl, budget + 8);
2609 
2610 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2611 	if (lro_enabled)
2612 		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
2613 	mt = mf = NULL;
2614 	while (mh != NULL) {
2615 		m = mh;
2616 		mh = mh->m_nextpkt;
2617 		m->m_nextpkt = NULL;
2618 #ifndef __NO_STRICT_ALIGNMENT
2619 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
2620 			continue;
2621 #endif
2622 		rx_bytes += m->m_pkthdr.len;
2623 		rx_pkts++;
2624 #if defined(INET6) || defined(INET)
2625 		if (lro_enabled) {
2626 			if (!lro_possible) {
2627 				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
2628 				if (lro_possible && mf != NULL) {
2629 					ifp->if_input(ifp, mf);
2630 					DBG_COUNTER_INC(rx_if_input);
2631 					mt = mf = NULL;
2632 				}
2633 			}
2634 			if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
2635 				continue;
2636 		}
2637 #endif
2638 		if (lro_possible) {
2639 			ifp->if_input(ifp, m);
2640 			DBG_COUNTER_INC(rx_if_input);
2641 			continue;
2642 		}
2643 
2644 		if (mf == NULL)
2645 			mf = m;
2646 		if (mt != NULL)
2647 			mt->m_nextpkt = m;
2648 		mt = m;
2649 	}
2650 	if (mf != NULL) {
2651 		ifp->if_input(ifp, mf);
2652 		DBG_COUNTER_INC(rx_if_input);
2653 	}
2654 
2655 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
2656 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
2657 
2658 	/*
2659 	 * Flush any outstanding LRO work
2660 	 */
2661 #if defined(INET6) || defined(INET)
2662 	tcp_lro_flush_all(&rxq->ifr_lc);
2663 #endif
2664 	if (avail)
2665 		return true;
2666 	return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
2667 err:
2668 	CTX_LOCK(ctx);
2669 	ctx->ifc_flags |= IFC_DO_RESET;
2670 	iflib_admin_intr_deferred(ctx);
2671 	CTX_UNLOCK(ctx);
2672 	return (false);
2673 }
2674 
2675 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
2676 static inline qidx_t
2677 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
2678 {
2679 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2680 	qidx_t minthresh = txq->ift_size / 8;
2681 	if (in_use > 4*minthresh)
2682 		return (notify_count);
2683 	if (in_use > 2*minthresh)
2684 		return (notify_count >> 1);
2685 	if (in_use > minthresh)
2686 		return (notify_count >> 3);
2687 	return (0);
2688 }
2689 
2690 static inline qidx_t
2691 txq_max_rs_deferred(iflib_txq_t txq)
2692 {
2693 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2694 	qidx_t minthresh = txq->ift_size / 8;
2695 	if (txq->ift_in_use > 4*minthresh)
2696 		return (notify_count);
2697 	if (txq->ift_in_use > 2*minthresh)
2698 		return (notify_count >> 1);
2699 	if (txq->ift_in_use > minthresh)
2700 		return (notify_count >> 2);
2701 	return (2);
2702 }
2703 
2704 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
2705 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
2706 
2707 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
2708 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
2709 #define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
2710 
2711 /* forward compatibility for cxgb */
2712 #define FIRST_QSET(ctx) 0
2713 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
2714 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
2715 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
2716 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
2717 
2718 /* XXX we should be setting this to something other than zero */
2719 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
2720 #define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max)
2721 
2722 static inline bool
2723 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
2724 {
2725 	qidx_t dbval, max;
2726 	bool rang;
2727 
2728 	rang = false;
2729 	max = TXQ_MAX_DB_DEFERRED(txq, in_use);
2730 	if (ring || txq->ift_db_pending >= max) {
2731 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
2732 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
2733 		txq->ift_db_pending = txq->ift_npending = 0;
2734 		rang = true;
2735 	}
2736 	return (rang);
2737 }
2738 
2739 #ifdef PKT_DEBUG
2740 static void
2741 print_pkt(if_pkt_info_t pi)
2742 {
2743 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
2744 	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
2745 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
2746 	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
2747 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
2748 	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
2749 }
2750 #endif
2751 
2752 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
2753 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
2754 
2755 static int
2756 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
2757 {
2758 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
2759 	struct ether_vlan_header *eh;
2760 	struct mbuf *m, *n;
2761 
2762 	n = m = *mp;
2763 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
2764 	    M_WRITABLE(m) == 0) {
2765 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
2766 			return (ENOMEM);
2767 		} else {
2768 			m_freem(*mp);
2769 			n = *mp = m;
2770 		}
2771 	}
2772 
2773 	/*
2774 	 * Determine where frame payload starts.
2775 	 * Jump over vlan headers if already present,
2776 	 * helpful for QinQ too.
2777 	 */
2778 	if (__predict_false(m->m_len < sizeof(*eh))) {
2779 		txq->ift_pullups++;
2780 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
2781 			return (ENOMEM);
2782 	}
2783 	eh = mtod(m, struct ether_vlan_header *);
2784 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2785 		pi->ipi_etype = ntohs(eh->evl_proto);
2786 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2787 	} else {
2788 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
2789 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
2790 	}
2791 
2792 	switch (pi->ipi_etype) {
2793 #ifdef INET
2794 	case ETHERTYPE_IP:
2795 	{
2796 		struct ip *ip = NULL;
2797 		struct tcphdr *th = NULL;
2798 		int minthlen;
2799 
2800 		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
2801 		if (__predict_false(m->m_len < minthlen)) {
2802 			/*
2803 			 * if this code bloat is causing too much of a hit
2804 			 * move it to a separate function and mark it noinline
2805 			 */
2806 			if (m->m_len == pi->ipi_ehdrlen) {
2807 				n = m->m_next;
2808 				MPASS(n);
2809 				if (n->m_len >= sizeof(*ip))  {
2810 					ip = (struct ip *)n->m_data;
2811 					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2812 						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2813 				} else {
2814 					txq->ift_pullups++;
2815 					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
2816 						return (ENOMEM);
2817 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2818 				}
2819 			} else {
2820 				txq->ift_pullups++;
2821 				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
2822 					return (ENOMEM);
2823 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2824 				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2825 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2826 			}
2827 		} else {
2828 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2829 			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2830 				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2831 		}
2832 		pi->ipi_ip_hlen = ip->ip_hl << 2;
2833 		pi->ipi_ipproto = ip->ip_p;
2834 		pi->ipi_flags |= IPI_TX_IPV4;
2835 
2836 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
2837                        ip->ip_sum = 0;
2838 
2839 		if (IS_TSO4(pi)) {
2840 			if (pi->ipi_ipproto == IPPROTO_TCP) {
2841 				if (__predict_false(th == NULL)) {
2842 					txq->ift_pullups++;
2843 					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
2844 						return (ENOMEM);
2845 					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
2846 				}
2847 				pi->ipi_tcp_hflags = th->th_flags;
2848 				pi->ipi_tcp_hlen = th->th_off << 2;
2849 				pi->ipi_tcp_seq = th->th_seq;
2850 			}
2851 			if (__predict_false(ip->ip_p != IPPROTO_TCP))
2852 				return (ENXIO);
2853 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
2854 					       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2855 			pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
2856 			if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
2857 				ip->ip_sum = 0;
2858 				ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
2859 			}
2860 		}
2861 		break;
2862 	}
2863 #endif
2864 #ifdef INET6
2865 	case ETHERTYPE_IPV6:
2866 	{
2867 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
2868 		struct tcphdr *th;
2869 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
2870 
2871 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
2872 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
2873 				return (ENOMEM);
2874 		}
2875 		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
2876 
2877 		/* XXX-BZ this will go badly in case of ext hdrs. */
2878 		pi->ipi_ipproto = ip6->ip6_nxt;
2879 		pi->ipi_flags |= IPI_TX_IPV6;
2880 
2881 		if (IS_TSO6(pi)) {
2882 			if (pi->ipi_ipproto == IPPROTO_TCP) {
2883 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
2884 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
2885 						return (ENOMEM);
2886 				}
2887 				pi->ipi_tcp_hflags = th->th_flags;
2888 				pi->ipi_tcp_hlen = th->th_off << 2;
2889 			}
2890 
2891 			if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
2892 				return (ENXIO);
2893 			/*
2894 			 * The corresponding flag is set by the stack in the IPv4
2895 			 * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
2896 			 * So, set it here because the rest of the flow requires it.
2897 			 */
2898 			pi->ipi_csum_flags |= CSUM_TCP_IPV6;
2899 			th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
2900 			pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
2901 		}
2902 		break;
2903 	}
2904 #endif
2905 	default:
2906 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
2907 		pi->ipi_ip_hlen = 0;
2908 		break;
2909 	}
2910 	*mp = m;
2911 
2912 	return (0);
2913 }
2914 
2915 static  __noinline  struct mbuf *
2916 collapse_pkthdr(struct mbuf *m0)
2917 {
2918 	struct mbuf *m, *m_next, *tmp;
2919 
2920 	m = m0;
2921 	m_next = m->m_next;
2922 	while (m_next != NULL && m_next->m_len == 0) {
2923 		m = m_next;
2924 		m->m_next = NULL;
2925 		m_free(m);
2926 		m_next = m_next->m_next;
2927 	}
2928 	m = m0;
2929 	m->m_next = m_next;
2930 	if ((m_next->m_flags & M_EXT) == 0) {
2931 		m = m_defrag(m, M_NOWAIT);
2932 	} else {
2933 		tmp = m_next->m_next;
2934 		memcpy(m_next, m, MPKTHSIZE);
2935 		m = m_next;
2936 		m->m_next = tmp;
2937 	}
2938 	return (m);
2939 }
2940 
2941 /*
2942  * If dodgy hardware rejects the scatter gather chain we've handed it
2943  * we'll need to remove the mbuf chain from ifsg_m[] before we can add the
2944  * m_defrag'd mbufs
2945  */
2946 static __noinline struct mbuf *
2947 iflib_remove_mbuf(iflib_txq_t txq)
2948 {
2949 	int ntxd, i, pidx;
2950 	struct mbuf *m, *mh, **ifsd_m;
2951 
2952 	pidx = txq->ift_pidx;
2953 	ifsd_m = txq->ift_sds.ifsd_m;
2954 	ntxd = txq->ift_size;
2955 	mh = m = ifsd_m[pidx];
2956 	ifsd_m[pidx] = NULL;
2957 #if MEMORY_LOGGING
2958 	txq->ift_dequeued++;
2959 #endif
2960 	i = 1;
2961 
2962 	while (m) {
2963 		ifsd_m[(pidx + i) & (ntxd -1)] = NULL;
2964 #if MEMORY_LOGGING
2965 		txq->ift_dequeued++;
2966 #endif
2967 		m = m->m_next;
2968 		i++;
2969 	}
2970 	return (mh);
2971 }
2972 
2973 static int
2974 iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
2975 			  struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
2976 			  int max_segs, int flags)
2977 {
2978 	if_ctx_t ctx;
2979 	if_shared_ctx_t		sctx;
2980 	if_softc_ctx_t		scctx;
2981 	int i, next, pidx, err, ntxd, count;
2982 	struct mbuf *m, *tmp, **ifsd_m;
2983 
2984 	m = *m0;
2985 
2986 	/*
2987 	 * Please don't ever do this
2988 	 */
2989 	if (__predict_false(m->m_len == 0))
2990 		*m0 = m = collapse_pkthdr(m);
2991 
2992 	ctx = txq->ift_ctx;
2993 	sctx = ctx->ifc_sctx;
2994 	scctx = &ctx->ifc_softc_ctx;
2995 	ifsd_m = txq->ift_sds.ifsd_m;
2996 	ntxd = txq->ift_size;
2997 	pidx = txq->ift_pidx;
2998 	if (map != NULL) {
2999 		uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
3000 
3001 		err = bus_dmamap_load_mbuf_sg(tag, map,
3002 					      *m0, segs, nsegs, BUS_DMA_NOWAIT);
3003 		if (err)
3004 			return (err);
3005 		ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
3006 		count = 0;
3007 		m = *m0;
3008 		do {
3009 			if (__predict_false(m->m_len <= 0)) {
3010 				tmp = m;
3011 				m = m->m_next;
3012 				tmp->m_next = NULL;
3013 				m_free(tmp);
3014 				continue;
3015 			}
3016 			m = m->m_next;
3017 			count++;
3018 		} while (m != NULL);
3019 		if (count > *nsegs) {
3020 			ifsd_m[pidx] = *m0;
3021 			ifsd_m[pidx]->m_flags |= M_TOOBIG;
3022 			return (0);
3023 		}
3024 		m = *m0;
3025 		count = 0;
3026 		do {
3027 			next = (pidx + count) & (ntxd-1);
3028 			MPASS(ifsd_m[next] == NULL);
3029 			ifsd_m[next] = m;
3030 			count++;
3031 			tmp = m;
3032 			m = m->m_next;
3033 		} while (m != NULL);
3034 	} else {
3035 		int buflen, sgsize, maxsegsz, max_sgsize;
3036 		vm_offset_t vaddr;
3037 		vm_paddr_t curaddr;
3038 
3039 		count = i = 0;
3040 		m = *m0;
3041 		if (m->m_pkthdr.csum_flags & CSUM_TSO)
3042 			maxsegsz = scctx->isc_tx_tso_segsize_max;
3043 		else
3044 			maxsegsz = sctx->isc_tx_maxsegsize;
3045 
3046 		do {
3047 			if (__predict_false(m->m_len <= 0)) {
3048 				tmp = m;
3049 				m = m->m_next;
3050 				tmp->m_next = NULL;
3051 				m_free(tmp);
3052 				continue;
3053 			}
3054 			buflen = m->m_len;
3055 			vaddr = (vm_offset_t)m->m_data;
3056 			/*
3057 			 * see if we can't be smarter about physically
3058 			 * contiguous mappings
3059 			 */
3060 			next = (pidx + count) & (ntxd-1);
3061 			MPASS(ifsd_m[next] == NULL);
3062 #if MEMORY_LOGGING
3063 			txq->ift_enqueued++;
3064 #endif
3065 			ifsd_m[next] = m;
3066 			while (buflen > 0) {
3067 				if (i >= max_segs)
3068 					goto err;
3069 				max_sgsize = MIN(buflen, maxsegsz);
3070 				curaddr = pmap_kextract(vaddr);
3071 				sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
3072 				sgsize = MIN(sgsize, max_sgsize);
3073 				segs[i].ds_addr = curaddr;
3074 				segs[i].ds_len = sgsize;
3075 				vaddr += sgsize;
3076 				buflen -= sgsize;
3077 				i++;
3078 			}
3079 			count++;
3080 			tmp = m;
3081 			m = m->m_next;
3082 		} while (m != NULL);
3083 		*nsegs = i;
3084 	}
3085 	return (0);
3086 err:
3087 	*m0 = iflib_remove_mbuf(txq);
3088 	return (EFBIG);
3089 }
3090 
3091 static inline caddr_t
3092 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
3093 {
3094 	qidx_t size;
3095 	int ntxd;
3096 	caddr_t start, end, cur, next;
3097 
3098 	ntxd = txq->ift_size;
3099 	size = txq->ift_txd_size[qid];
3100 	start = txq->ift_ifdi[qid].idi_vaddr;
3101 
3102 	if (__predict_false(size == 0))
3103 		return (start);
3104 	cur = start + size*cidx;
3105 	end = start + size*ntxd;
3106 	next = CACHE_PTR_NEXT(cur);
3107 	return (next < end ? next : start);
3108 }
3109 
3110 /*
3111  * Pad an mbuf to ensure a minimum ethernet frame size.
3112  * min_frame_size is the frame size (less CRC) to pad the mbuf to
3113  */
3114 static __noinline int
3115 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3116 {
3117 	/*
3118 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3119 	 * and ARP message is the smallest common payload I can think of
3120 	 */
3121 	static char pad[18];	/* just zeros */
3122 	int n;
3123 	struct mbuf *new_head;
3124 
3125 	if (!M_WRITABLE(*m_head)) {
3126 		new_head = m_dup(*m_head, M_NOWAIT);
3127 		if (new_head == NULL) {
3128 			device_printf(dev, "cannot pad short frame, m_dup() failed");
3129 			return ENOMEM;
3130 		}
3131 		m_freem(*m_head);
3132 		*m_head = new_head;
3133 	}
3134 
3135 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3136 	     n > 0; n -= sizeof(pad))
3137 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3138 			break;
3139 
3140 	if (n > 0) {
3141 		m_freem(*m_head);
3142 		device_printf(dev, "cannot pad short frame\n");
3143 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
3144 		return (ENOBUFS);
3145 	}
3146 
3147 	return 0;
3148 }
3149 
3150 static int
3151 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
3152 {
3153 	if_ctx_t		ctx;
3154 	if_shared_ctx_t		sctx;
3155 	if_softc_ctx_t		scctx;
3156 	bus_dma_segment_t	*segs;
3157 	struct mbuf		*m_head;
3158 	void			*next_txd;
3159 	bus_dmamap_t		map;
3160 	struct if_pkt_info	pi;
3161 	int remap = 0;
3162 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
3163 	bus_dma_tag_t desc_tag;
3164 
3165 	segs = txq->ift_segs;
3166 	ctx = txq->ift_ctx;
3167 	sctx = ctx->ifc_sctx;
3168 	scctx = &ctx->ifc_softc_ctx;
3169 	segs = txq->ift_segs;
3170 	ntxd = txq->ift_size;
3171 	m_head = *m_headp;
3172 	map = NULL;
3173 
3174 	/*
3175 	 * If we're doing TSO the next descriptor to clean may be quite far ahead
3176 	 */
3177 	cidx = txq->ift_cidx;
3178 	pidx = txq->ift_pidx;
3179 	if (ctx->ifc_flags & IFC_PREFETCH) {
3180 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
3181 		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
3182 			next_txd = calc_next_txd(txq, cidx, 0);
3183 			prefetch(next_txd);
3184 		}
3185 
3186 		/* prefetch the next cache line of mbuf pointers and flags */
3187 		prefetch(&txq->ift_sds.ifsd_m[next]);
3188 		if (txq->ift_sds.ifsd_map != NULL) {
3189 			prefetch(&txq->ift_sds.ifsd_map[next]);
3190 			next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
3191 			prefetch(&txq->ift_sds.ifsd_flags[next]);
3192 		}
3193 	} else if (txq->ift_sds.ifsd_map != NULL)
3194 		map = txq->ift_sds.ifsd_map[pidx];
3195 
3196 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3197 		desc_tag = txq->ift_tso_desc_tag;
3198 		max_segs = scctx->isc_tx_tso_segments_max;
3199 	} else {
3200 		desc_tag = txq->ift_desc_tag;
3201 		max_segs = scctx->isc_tx_nsegments;
3202 	}
3203 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3204 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3205 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
3206 		if (err)
3207 			return err;
3208 	}
3209 	m_head = *m_headp;
3210 
3211 	pkt_info_zero(&pi);
3212 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
3213 	pi.ipi_pidx = pidx;
3214 	pi.ipi_qsidx = txq->ift_id;
3215 	pi.ipi_len = m_head->m_pkthdr.len;
3216 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
3217 	pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3218 
3219 	/* deliberate bitwise OR to make one condition */
3220 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
3221 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0))
3222 			return (err);
3223 		m_head = *m_headp;
3224 	}
3225 
3226 retry:
3227 	err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT);
3228 defrag:
3229 	if (__predict_false(err)) {
3230 		switch (err) {
3231 		case EFBIG:
3232 			/* try collapse once and defrag once */
3233 			if (remap == 0)
3234 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3235 			if (remap == 1)
3236 				m_head = m_defrag(*m_headp, M_NOWAIT);
3237 			remap++;
3238 			if (__predict_false(m_head == NULL))
3239 				goto defrag_failed;
3240 			txq->ift_mbuf_defrag++;
3241 			*m_headp = m_head;
3242 			goto retry;
3243 			break;
3244 		case ENOMEM:
3245 			txq->ift_no_tx_dma_setup++;
3246 			break;
3247 		default:
3248 			txq->ift_no_tx_dma_setup++;
3249 			m_freem(*m_headp);
3250 			DBG_COUNTER_INC(tx_frees);
3251 			*m_headp = NULL;
3252 			break;
3253 		}
3254 		txq->ift_map_failed++;
3255 		DBG_COUNTER_INC(encap_load_mbuf_fail);
3256 		return (err);
3257 	}
3258 
3259 	/*
3260 	 * XXX assumes a 1 to 1 relationship between segments and
3261 	 *        descriptors - this does not hold true on all drivers, e.g.
3262 	 *        cxgb
3263 	 */
3264 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3265 		txq->ift_no_desc_avail++;
3266 		if (map != NULL)
3267 			bus_dmamap_unload(desc_tag, map);
3268 		DBG_COUNTER_INC(encap_txq_avail_fail);
3269 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
3270 			GROUPTASK_ENQUEUE(&txq->ift_task);
3271 		return (ENOBUFS);
3272 	}
3273 	/*
3274 	 * On Intel cards we can greatly reduce the number of TX interrupts
3275 	 * we see by only setting report status on every Nth descriptor.
3276 	 * However, this also means that the driver will need to keep track
3277 	 * of the descriptors that RS was set on to check them for the DD bit.
3278 	 */
3279 	txq->ift_rs_pending += nsegs + 1;
3280 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
3281 	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs - 1) <= MAX_TX_DESC(ctx)) {
3282 		pi.ipi_flags |= IPI_TX_INTR;
3283 		txq->ift_rs_pending = 0;
3284 	}
3285 
3286 	pi.ipi_segs = segs;
3287 	pi.ipi_nsegs = nsegs;
3288 
3289 	MPASS(pidx >= 0 && pidx < txq->ift_size);
3290 #ifdef PKT_DEBUG
3291 	print_pkt(&pi);
3292 #endif
3293 	if (map != NULL)
3294 		bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
3295 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
3296 		if (map != NULL)
3297 			bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3298 					BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3299 		DBG_COUNTER_INC(tx_encap);
3300 		MPASS(pi.ipi_new_pidx < txq->ift_size);
3301 
3302 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
3303 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
3304 			ndesc += txq->ift_size;
3305 			txq->ift_gen = 1;
3306 		}
3307 		/*
3308 		 * drivers can need as many as
3309 		 * two sentinels
3310 		 */
3311 		MPASS(ndesc <= pi.ipi_nsegs + 2);
3312 		MPASS(pi.ipi_new_pidx != pidx);
3313 		MPASS(ndesc > 0);
3314 		txq->ift_in_use += ndesc;
3315 
3316 		/*
3317 		 * We update the last software descriptor again here because there may
3318 		 * be a sentinel and/or there may be more mbufs than segments
3319 		 */
3320 		txq->ift_pidx = pi.ipi_new_pidx;
3321 		txq->ift_npending += pi.ipi_ndescs;
3322 	} else if (__predict_false(err == EFBIG && remap < 2)) {
3323 		*m_headp = m_head = iflib_remove_mbuf(txq);
3324 		remap = 1;
3325 		txq->ift_txd_encap_efbig++;
3326 		goto defrag;
3327 	} else
3328 		DBG_COUNTER_INC(encap_txd_encap_fail);
3329 	return (err);
3330 
3331 defrag_failed:
3332 	txq->ift_mbuf_defrag_failed++;
3333 	txq->ift_map_failed++;
3334 	m_freem(*m_headp);
3335 	DBG_COUNTER_INC(tx_frees);
3336 	*m_headp = NULL;
3337 	return (ENOMEM);
3338 }
3339 
3340 static void
3341 iflib_tx_desc_free(iflib_txq_t txq, int n)
3342 {
3343 	int hasmap;
3344 	uint32_t qsize, cidx, mask, gen;
3345 	struct mbuf *m, **ifsd_m;
3346 	uint8_t *ifsd_flags;
3347 	bus_dmamap_t *ifsd_map;
3348 	bool do_prefetch;
3349 
3350 	cidx = txq->ift_cidx;
3351 	gen = txq->ift_gen;
3352 	qsize = txq->ift_size;
3353 	mask = qsize-1;
3354 	hasmap = txq->ift_sds.ifsd_map != NULL;
3355 	ifsd_flags = txq->ift_sds.ifsd_flags;
3356 	ifsd_m = txq->ift_sds.ifsd_m;
3357 	ifsd_map = txq->ift_sds.ifsd_map;
3358 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
3359 
3360 	while (n--) {
3361 		if (do_prefetch) {
3362 			prefetch(ifsd_m[(cidx + 3) & mask]);
3363 			prefetch(ifsd_m[(cidx + 4) & mask]);
3364 		}
3365 		if (ifsd_m[cidx] != NULL) {
3366 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
3367 			prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]);
3368 			if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) {
3369 				/*
3370 				 * does it matter if it's not the TSO tag? If so we'll
3371 				 * have to add the type to flags
3372 				 */
3373 				bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]);
3374 				ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED;
3375 			}
3376 			if ((m = ifsd_m[cidx]) != NULL) {
3377 				/* XXX we don't support any drivers that batch packets yet */
3378 				MPASS(m->m_nextpkt == NULL);
3379 				/* if the number of clusters exceeds the number of segments
3380 				 * there won't be space on the ring to save a pointer to each
3381 				 * cluster so we simply free the list here
3382 				 */
3383 				if (m->m_flags & M_TOOBIG) {
3384 					m_freem(m);
3385 				} else {
3386 					m_free(m);
3387 				}
3388 				ifsd_m[cidx] = NULL;
3389 #if MEMORY_LOGGING
3390 				txq->ift_dequeued++;
3391 #endif
3392 				DBG_COUNTER_INC(tx_frees);
3393 			}
3394 		}
3395 		if (__predict_false(++cidx == qsize)) {
3396 			cidx = 0;
3397 			gen = 0;
3398 		}
3399 	}
3400 	txq->ift_cidx = cidx;
3401 	txq->ift_gen = gen;
3402 }
3403 
3404 static __inline int
3405 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
3406 {
3407 	int reclaim;
3408 	if_ctx_t ctx = txq->ift_ctx;
3409 
3410 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
3411 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
3412 
3413 	/*
3414 	 * Need a rate-limiting check so that this isn't called every time
3415 	 */
3416 	iflib_tx_credits_update(ctx, txq);
3417 	reclaim = DESC_RECLAIMABLE(txq);
3418 
3419 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
3420 #ifdef INVARIANTS
3421 		if (iflib_verbose_debug) {
3422 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
3423 			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
3424 			       reclaim, thresh);
3425 
3426 		}
3427 #endif
3428 		return (0);
3429 	}
3430 	iflib_tx_desc_free(txq, reclaim);
3431 	txq->ift_cleaned += reclaim;
3432 	txq->ift_in_use -= reclaim;
3433 
3434 	return (reclaim);
3435 }
3436 
3437 static struct mbuf **
3438 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
3439 {
3440 	int next, size;
3441 	struct mbuf **items;
3442 
3443 	size = r->size;
3444 	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
3445 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
3446 
3447 	prefetch(items[(cidx + offset) & (size-1)]);
3448 	if (remaining > 1) {
3449 		prefetch2cachelines(&items[next]);
3450 		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
3451 		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
3452 		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
3453 	}
3454 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
3455 }
3456 
3457 static void
3458 iflib_txq_check_drain(iflib_txq_t txq, int budget)
3459 {
3460 
3461 	ifmp_ring_check_drainage(txq->ift_br, budget);
3462 }
3463 
3464 static uint32_t
3465 iflib_txq_can_drain(struct ifmp_ring *r)
3466 {
3467 	iflib_txq_t txq = r->cookie;
3468 	if_ctx_t ctx = txq->ift_ctx;
3469 
3470 	return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
3471 		ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
3472 }
3473 
3474 static uint32_t
3475 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3476 {
3477 	iflib_txq_t txq = r->cookie;
3478 	if_ctx_t ctx = txq->ift_ctx;
3479 	struct ifnet *ifp = ctx->ifc_ifp;
3480 	struct mbuf **mp, *m;
3481 	int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
3482 	int reclaimed, err, in_use_prev, desc_used;
3483 	bool do_prefetch, ring, rang;
3484 
3485 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
3486 			    !LINK_ACTIVE(ctx))) {
3487 		DBG_COUNTER_INC(txq_drain_notready);
3488 		return (0);
3489 	}
3490 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
3491 	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
3492 	avail = IDXDIFF(pidx, cidx, r->size);
3493 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
3494 		DBG_COUNTER_INC(txq_drain_flushing);
3495 		for (i = 0; i < avail; i++) {
3496 			m_free(r->items[(cidx + i) & (r->size-1)]);
3497 			r->items[(cidx + i) & (r->size-1)] = NULL;
3498 		}
3499 		return (avail);
3500 	}
3501 
3502 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3503 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3504 		CALLOUT_LOCK(txq);
3505 		callout_stop(&txq->ift_timer);
3506 		CALLOUT_UNLOCK(txq);
3507 		DBG_COUNTER_INC(txq_drain_oactive);
3508 		return (0);
3509 	}
3510 	if (reclaimed)
3511 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3512 	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
3513 	count = MIN(avail, TX_BATCH_SIZE);
3514 #ifdef INVARIANTS
3515 	if (iflib_verbose_debug)
3516 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3517 		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3518 #endif
3519 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3520 	avail = TXQ_AVAIL(txq);
3521 	for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
3522 		int pidx_prev, rem = do_prefetch ? count - i : 0;
3523 
3524 		mp = _ring_peek_one(r, cidx, i, rem);
3525 		MPASS(mp != NULL && *mp != NULL);
3526 		if (__predict_false(*mp == (struct mbuf *)txq)) {
3527 			consumed++;
3528 			reclaimed++;
3529 			continue;
3530 		}
3531 		in_use_prev = txq->ift_in_use;
3532 		pidx_prev = txq->ift_pidx;
3533 		err = iflib_encap(txq, mp);
3534 		if (__predict_false(err)) {
3535 			DBG_COUNTER_INC(txq_drain_encapfail);
3536 			/* no room - bail out */
3537 			if (err == ENOBUFS)
3538 				break;
3539 			consumed++;
3540 			DBG_COUNTER_INC(txq_drain_encapfail);
3541 			/* we can't send this packet - skip it */
3542 			continue;
3543 		}
3544 		consumed++;
3545 		pkt_sent++;
3546 		m = *mp;
3547 		DBG_COUNTER_INC(tx_sent);
3548 		bytes_sent += m->m_pkthdr.len;
3549 		mcast_sent += !!(m->m_flags & M_MCAST);
3550 		avail = TXQ_AVAIL(txq);
3551 
3552 		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
3553 		desc_used += (txq->ift_in_use - in_use_prev);
3554 		ETHER_BPF_MTAP(ifp, m);
3555 		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
3556 			break;
3557 		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
3558 	}
3559 
3560 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
3561 	ring = rang ? false  : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
3562 	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
3563 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
3564 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
3565 	if (mcast_sent)
3566 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3567 #ifdef INVARIANTS
3568 	if (iflib_verbose_debug)
3569 		printf("consumed=%d\n", consumed);
3570 #endif
3571 	return (consumed);
3572 }
3573 
3574 static uint32_t
3575 iflib_txq_drain_always(struct ifmp_ring *r)
3576 {
3577 	return (1);
3578 }
3579 
3580 static uint32_t
3581 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3582 {
3583 	int i, avail;
3584 	struct mbuf **mp;
3585 	iflib_txq_t txq;
3586 
3587 	txq = r->cookie;
3588 
3589 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3590 	CALLOUT_LOCK(txq);
3591 	callout_stop(&txq->ift_timer);
3592 	CALLOUT_UNLOCK(txq);
3593 
3594 	avail = IDXDIFF(pidx, cidx, r->size);
3595 	for (i = 0; i < avail; i++) {
3596 		mp = _ring_peek_one(r, cidx, i, avail - i);
3597 		if (__predict_false(*mp == (struct mbuf *)txq))
3598 			continue;
3599 		m_freem(*mp);
3600 	}
3601 	MPASS(ifmp_ring_is_stalled(r) == 0);
3602 	return (avail);
3603 }
3604 
3605 static void
3606 iflib_ifmp_purge(iflib_txq_t txq)
3607 {
3608 	struct ifmp_ring *r;
3609 
3610 	r = txq->ift_br;
3611 	r->drain = iflib_txq_drain_free;
3612 	r->can_drain = iflib_txq_drain_always;
3613 
3614 	ifmp_ring_check_drainage(r, r->size);
3615 
3616 	r->drain = iflib_txq_drain;
3617 	r->can_drain = iflib_txq_can_drain;
3618 }
3619 
3620 static void
3621 _task_fn_tx(void *context)
3622 {
3623 	iflib_txq_t txq = context;
3624 	if_ctx_t ctx = txq->ift_ctx;
3625 	struct ifnet *ifp = ctx->ifc_ifp;
3626 	int rc;
3627 
3628 #ifdef IFLIB_DIAGNOSTICS
3629 	txq->ift_cpu_exec_count[curcpu]++;
3630 #endif
3631 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3632 		return;
3633 	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
3634 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
3635 			netmap_tx_irq(ifp, txq->ift_id);
3636 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3637 		return;
3638 	}
3639 	if (txq->ift_db_pending)
3640 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE);
3641 	ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3642 	if (ctx->ifc_flags & IFC_LEGACY)
3643 		IFDI_INTR_ENABLE(ctx);
3644 	else {
3645 		rc = IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3646 		KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3647 	}
3648 }
3649 
3650 static void
3651 _task_fn_rx(void *context)
3652 {
3653 	iflib_rxq_t rxq = context;
3654 	if_ctx_t ctx = rxq->ifr_ctx;
3655 	bool more;
3656 	int rc;
3657 	uint16_t budget;
3658 
3659 #ifdef IFLIB_DIAGNOSTICS
3660 	rxq->ifr_cpu_exec_count[curcpu]++;
3661 #endif
3662 	DBG_COUNTER_INC(task_fn_rxs);
3663 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3664 		return;
3665 	more = true;
3666 #ifdef DEV_NETMAP
3667 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
3668 		u_int work = 0;
3669 		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
3670 			more = false;
3671 		}
3672 	}
3673 #endif
3674 	budget = ctx->ifc_sysctl_rx_budget;
3675 	if (budget == 0)
3676 		budget = 16;	/* XXX */
3677 	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
3678 		if (ctx->ifc_flags & IFC_LEGACY)
3679 			IFDI_INTR_ENABLE(ctx);
3680 		else {
3681 			DBG_COUNTER_INC(rx_intr_enables);
3682 			rc = IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
3683 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3684 		}
3685 	}
3686 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3687 		return;
3688 	if (more)
3689 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
3690 }
3691 
3692 static void
3693 _task_fn_admin(void *context)
3694 {
3695 	if_ctx_t ctx = context;
3696 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
3697 	iflib_txq_t txq;
3698 	int i;
3699 
3700 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) {
3701 		if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3702 			return;
3703 		}
3704 	}
3705 
3706 	CTX_LOCK(ctx);
3707 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3708 		CALLOUT_LOCK(txq);
3709 		callout_stop(&txq->ift_timer);
3710 		CALLOUT_UNLOCK(txq);
3711 	}
3712 	IFDI_UPDATE_ADMIN_STATUS(ctx);
3713 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
3714 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
3715 	IFDI_LINK_INTR_ENABLE(ctx);
3716 	if (ctx->ifc_flags & IFC_DO_RESET) {
3717 		ctx->ifc_flags &= ~IFC_DO_RESET;
3718 		iflib_if_init_locked(ctx);
3719 	}
3720 	CTX_UNLOCK(ctx);
3721 
3722 	if (LINK_ACTIVE(ctx) == 0)
3723 		return;
3724 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
3725 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
3726 }
3727 
3728 
3729 static void
3730 _task_fn_iov(void *context)
3731 {
3732 	if_ctx_t ctx = context;
3733 
3734 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3735 		return;
3736 
3737 	CTX_LOCK(ctx);
3738 	IFDI_VFLR_HANDLE(ctx);
3739 	CTX_UNLOCK(ctx);
3740 }
3741 
3742 static int
3743 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3744 {
3745 	int err;
3746 	if_int_delay_info_t info;
3747 	if_ctx_t ctx;
3748 
3749 	info = (if_int_delay_info_t)arg1;
3750 	ctx = info->iidi_ctx;
3751 	info->iidi_req = req;
3752 	info->iidi_oidp = oidp;
3753 	CTX_LOCK(ctx);
3754 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
3755 	CTX_UNLOCK(ctx);
3756 	return (err);
3757 }
3758 
3759 /*********************************************************************
3760  *
3761  *  IFNET FUNCTIONS
3762  *
3763  **********************************************************************/
3764 
3765 static void
3766 iflib_if_init_locked(if_ctx_t ctx)
3767 {
3768 	iflib_stop(ctx);
3769 	iflib_init_locked(ctx);
3770 }
3771 
3772 
3773 static void
3774 iflib_if_init(void *arg)
3775 {
3776 	if_ctx_t ctx = arg;
3777 
3778 	CTX_LOCK(ctx);
3779 	iflib_if_init_locked(ctx);
3780 	CTX_UNLOCK(ctx);
3781 }
3782 
3783 static int
3784 iflib_if_transmit(if_t ifp, struct mbuf *m)
3785 {
3786 	if_ctx_t	ctx = if_getsoftc(ifp);
3787 
3788 	iflib_txq_t txq;
3789 	int err, qidx;
3790 
3791 	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
3792 		DBG_COUNTER_INC(tx_frees);
3793 		m_freem(m);
3794 		return (ENOBUFS);
3795 	}
3796 
3797 	MPASS(m->m_nextpkt == NULL);
3798 	qidx = 0;
3799 	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
3800 		qidx = QIDX(ctx, m);
3801 	/*
3802 	 * XXX calculate buf_ring based on flowid (divvy up bits?)
3803 	 */
3804 	txq = &ctx->ifc_txqs[qidx];
3805 
3806 #ifdef DRIVER_BACKPRESSURE
3807 	if (txq->ift_closed) {
3808 		while (m != NULL) {
3809 			next = m->m_nextpkt;
3810 			m->m_nextpkt = NULL;
3811 			m_freem(m);
3812 			m = next;
3813 		}
3814 		return (ENOBUFS);
3815 	}
3816 #endif
3817 #ifdef notyet
3818 	qidx = count = 0;
3819 	mp = marr;
3820 	next = m;
3821 	do {
3822 		count++;
3823 		next = next->m_nextpkt;
3824 	} while (next != NULL);
3825 
3826 	if (count > nitems(marr))
3827 		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
3828 			/* XXX check nextpkt */
3829 			m_freem(m);
3830 			/* XXX simplify for now */
3831 			DBG_COUNTER_INC(tx_frees);
3832 			return (ENOBUFS);
3833 		}
3834 	for (next = m, i = 0; next != NULL; i++) {
3835 		mp[i] = next;
3836 		next = next->m_nextpkt;
3837 		mp[i]->m_nextpkt = NULL;
3838 	}
3839 #endif
3840 	DBG_COUNTER_INC(tx_seen);
3841 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE);
3842 
3843 	GROUPTASK_ENQUEUE(&txq->ift_task);
3844 	if (err) {
3845 		/* support forthcoming later */
3846 #ifdef DRIVER_BACKPRESSURE
3847 		txq->ift_closed = TRUE;
3848 #endif
3849 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3850 		m_freem(m);
3851 	}
3852 
3853 	return (err);
3854 }
3855 
3856 static void
3857 iflib_if_qflush(if_t ifp)
3858 {
3859 	if_ctx_t ctx = if_getsoftc(ifp);
3860 	iflib_txq_t txq = ctx->ifc_txqs;
3861 	int i;
3862 
3863 	CTX_LOCK(ctx);
3864 	ctx->ifc_flags |= IFC_QFLUSH;
3865 	CTX_UNLOCK(ctx);
3866 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
3867 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
3868 			iflib_txq_check_drain(txq, 0);
3869 	CTX_LOCK(ctx);
3870 	ctx->ifc_flags &= ~IFC_QFLUSH;
3871 	CTX_UNLOCK(ctx);
3872 
3873 	if_qflush(ifp);
3874 }
3875 
3876 
3877 #define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
3878 		     IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
3879 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)
3880 
3881 static int
3882 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
3883 {
3884 	if_ctx_t ctx = if_getsoftc(ifp);
3885 	struct ifreq	*ifr = (struct ifreq *)data;
3886 #if defined(INET) || defined(INET6)
3887 	struct ifaddr	*ifa = (struct ifaddr *)data;
3888 #endif
3889 	bool		avoid_reset = FALSE;
3890 	int		err = 0, reinit = 0, bits;
3891 
3892 	switch (command) {
3893 	case SIOCSIFADDR:
3894 #ifdef INET
3895 		if (ifa->ifa_addr->sa_family == AF_INET)
3896 			avoid_reset = TRUE;
3897 #endif
3898 #ifdef INET6
3899 		if (ifa->ifa_addr->sa_family == AF_INET6)
3900 			avoid_reset = TRUE;
3901 #endif
3902 		/*
3903 		** Calling init results in link renegotiation,
3904 		** so we avoid doing it when possible.
3905 		*/
3906 		if (avoid_reset) {
3907 			if_setflagbits(ifp, IFF_UP,0);
3908 			if (!(if_getdrvflags(ifp)& IFF_DRV_RUNNING))
3909 				reinit = 1;
3910 #ifdef INET
3911 			if (!(if_getflags(ifp) & IFF_NOARP))
3912 				arp_ifinit(ifp, ifa);
3913 #endif
3914 		} else
3915 			err = ether_ioctl(ifp, command, data);
3916 		break;
3917 	case SIOCSIFMTU:
3918 		CTX_LOCK(ctx);
3919 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
3920 			CTX_UNLOCK(ctx);
3921 			break;
3922 		}
3923 		bits = if_getdrvflags(ifp);
3924 		/* stop the driver and free any clusters before proceeding */
3925 		iflib_stop(ctx);
3926 
3927 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
3928 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
3929 				ctx->ifc_flags |= IFC_MULTISEG;
3930 			else
3931 				ctx->ifc_flags &= ~IFC_MULTISEG;
3932 			err = if_setmtu(ifp, ifr->ifr_mtu);
3933 		}
3934 		iflib_init_locked(ctx);
3935 		if_setdrvflags(ifp, bits);
3936 		CTX_UNLOCK(ctx);
3937 		break;
3938 	case SIOCSIFFLAGS:
3939 		CTX_LOCK(ctx);
3940 		if (if_getflags(ifp) & IFF_UP) {
3941 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
3942 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
3943 				    (IFF_PROMISC | IFF_ALLMULTI)) {
3944 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
3945 				}
3946 			} else
3947 				reinit = 1;
3948 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
3949 			iflib_stop(ctx);
3950 		}
3951 		ctx->ifc_if_flags = if_getflags(ifp);
3952 		CTX_UNLOCK(ctx);
3953 		break;
3954 	case SIOCADDMULTI:
3955 	case SIOCDELMULTI:
3956 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
3957 			CTX_LOCK(ctx);
3958 			IFDI_INTR_DISABLE(ctx);
3959 			IFDI_MULTI_SET(ctx);
3960 			IFDI_INTR_ENABLE(ctx);
3961 			CTX_UNLOCK(ctx);
3962 		}
3963 		break;
3964 	case SIOCSIFMEDIA:
3965 		CTX_LOCK(ctx);
3966 		IFDI_MEDIA_SET(ctx);
3967 		CTX_UNLOCK(ctx);
3968 		/* falls thru */
3969 	case SIOCGIFMEDIA:
3970 	case SIOCGIFXMEDIA:
3971 		err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
3972 		break;
3973 	case SIOCGI2C:
3974 	{
3975 		struct ifi2creq i2c;
3976 
3977 		err = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
3978 		if (err != 0)
3979 			break;
3980 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
3981 			err = EINVAL;
3982 			break;
3983 		}
3984 		if (i2c.len > sizeof(i2c.data)) {
3985 			err = EINVAL;
3986 			break;
3987 		}
3988 
3989 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
3990 			err = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
3991 		break;
3992 	}
3993 	case SIOCSIFCAP:
3994 	{
3995 		int mask, setmask;
3996 
3997 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
3998 		setmask = 0;
3999 #ifdef TCP_OFFLOAD
4000 		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
4001 #endif
4002 		setmask |= (mask & IFCAP_FLAGS);
4003 
4004 		if (setmask  & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
4005 			setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4006 		if ((mask & IFCAP_WOL) &&
4007 		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0)
4008 			setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC));
4009 		if_vlancap(ifp);
4010 		/*
4011 		 * want to ensure that traffic has stopped before we change any of the flags
4012 		 */
4013 		if (setmask) {
4014 			CTX_LOCK(ctx);
4015 			bits = if_getdrvflags(ifp);
4016 			if (bits & IFF_DRV_RUNNING)
4017 				iflib_stop(ctx);
4018 			if_togglecapenable(ifp, setmask);
4019 			if (bits & IFF_DRV_RUNNING)
4020 				iflib_init_locked(ctx);
4021 			if_setdrvflags(ifp, bits);
4022 			CTX_UNLOCK(ctx);
4023 		}
4024 		break;
4025 	    }
4026 	case SIOCGPRIVATE_0:
4027 	case SIOCSDRVSPEC:
4028 	case SIOCGDRVSPEC:
4029 		CTX_LOCK(ctx);
4030 		err = IFDI_PRIV_IOCTL(ctx, command, data);
4031 		CTX_UNLOCK(ctx);
4032 		break;
4033 	default:
4034 		err = ether_ioctl(ifp, command, data);
4035 		break;
4036 	}
4037 	if (reinit)
4038 		iflib_if_init(ctx);
4039 	return (err);
4040 }
4041 
4042 static uint64_t
4043 iflib_if_get_counter(if_t ifp, ift_counter cnt)
4044 {
4045 	if_ctx_t ctx = if_getsoftc(ifp);
4046 
4047 	return (IFDI_GET_COUNTER(ctx, cnt));
4048 }
4049 
4050 /*********************************************************************
4051  *
4052  *  OTHER FUNCTIONS EXPORTED TO THE STACK
4053  *
4054  **********************************************************************/
4055 
4056 static void
4057 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
4058 {
4059 	if_ctx_t ctx = if_getsoftc(ifp);
4060 
4061 	if ((void *)ctx != arg)
4062 		return;
4063 
4064 	if ((vtag == 0) || (vtag > 4095))
4065 		return;
4066 
4067 	CTX_LOCK(ctx);
4068 	IFDI_VLAN_REGISTER(ctx, vtag);
4069 	/* Re-init to load the changes */
4070 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4071 		iflib_if_init_locked(ctx);
4072 	CTX_UNLOCK(ctx);
4073 }
4074 
4075 static void
4076 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
4077 {
4078 	if_ctx_t ctx = if_getsoftc(ifp);
4079 
4080 	if ((void *)ctx != arg)
4081 		return;
4082 
4083 	if ((vtag == 0) || (vtag > 4095))
4084 		return;
4085 
4086 	CTX_LOCK(ctx);
4087 	IFDI_VLAN_UNREGISTER(ctx, vtag);
4088 	/* Re-init to load the changes */
4089 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4090 		iflib_if_init_locked(ctx);
4091 	CTX_UNLOCK(ctx);
4092 }
4093 
4094 static void
4095 iflib_led_func(void *arg, int onoff)
4096 {
4097 	if_ctx_t ctx = arg;
4098 
4099 	CTX_LOCK(ctx);
4100 	IFDI_LED_FUNC(ctx, onoff);
4101 	CTX_UNLOCK(ctx);
4102 }
4103 
4104 /*********************************************************************
4105  *
4106  *  BUS FUNCTION DEFINITIONS
4107  *
4108  **********************************************************************/
4109 
4110 int
4111 iflib_device_probe(device_t dev)
4112 {
4113 	pci_vendor_info_t *ent;
4114 
4115 	uint16_t	pci_vendor_id, pci_device_id;
4116 	uint16_t	pci_subvendor_id, pci_subdevice_id;
4117 	uint16_t	pci_rev_id;
4118 	if_shared_ctx_t sctx;
4119 
4120 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4121 		return (ENOTSUP);
4122 
4123 	pci_vendor_id = pci_get_vendor(dev);
4124 	pci_device_id = pci_get_device(dev);
4125 	pci_subvendor_id = pci_get_subvendor(dev);
4126 	pci_subdevice_id = pci_get_subdevice(dev);
4127 	pci_rev_id = pci_get_revid(dev);
4128 	if (sctx->isc_parse_devinfo != NULL)
4129 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4130 
4131 	ent = sctx->isc_vendor_info;
4132 	while (ent->pvi_vendor_id != 0) {
4133 		if (pci_vendor_id != ent->pvi_vendor_id) {
4134 			ent++;
4135 			continue;
4136 		}
4137 		if ((pci_device_id == ent->pvi_device_id) &&
4138 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4139 		     (ent->pvi_subvendor_id == 0)) &&
4140 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4141 		     (ent->pvi_subdevice_id == 0)) &&
4142 		    ((pci_rev_id == ent->pvi_rev_id) ||
4143 		     (ent->pvi_rev_id == 0))) {
4144 
4145 			device_set_desc_copy(dev, ent->pvi_name);
4146 			/* this needs to be changed to zero if the bus probing code
4147 			 * ever stops re-probing on best match because the sctx
4148 			 * may have its values over written by register calls
4149 			 * in subsequent probes
4150 			 */
4151 			return (BUS_PROBE_DEFAULT);
4152 		}
4153 		ent++;
4154 	}
4155 	return (ENXIO);
4156 }
4157 
4158 int
4159 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
4160 {
4161 	int err, rid, msix, msix_bar;
4162 	if_ctx_t ctx;
4163 	if_t ifp;
4164 	if_softc_ctx_t scctx;
4165 	int i;
4166 	uint16_t main_txq;
4167 	uint16_t main_rxq;
4168 
4169 
4170 	ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
4171 
4172 	if (sc == NULL) {
4173 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
4174 		device_set_softc(dev, ctx);
4175 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
4176 	}
4177 
4178 	ctx->ifc_sctx = sctx;
4179 	ctx->ifc_dev = dev;
4180 	ctx->ifc_softc = sc;
4181 
4182 	if ((err = iflib_register(ctx)) != 0) {
4183 		device_printf(dev, "iflib_register failed %d\n", err);
4184 		return (err);
4185 	}
4186 	iflib_add_device_sysctl_pre(ctx);
4187 
4188 	scctx = &ctx->ifc_softc_ctx;
4189 	ifp = ctx->ifc_ifp;
4190 
4191 	/*
4192 	 * XXX sanity check that ntxd & nrxd are a power of 2
4193 	 */
4194 	if (ctx->ifc_sysctl_ntxqs != 0)
4195 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4196 	if (ctx->ifc_sysctl_nrxqs != 0)
4197 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4198 
4199 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4200 		if (ctx->ifc_sysctl_ntxds[i] != 0)
4201 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4202 		else
4203 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4204 	}
4205 
4206 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4207 		if (ctx->ifc_sysctl_nrxds[i] != 0)
4208 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4209 		else
4210 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4211 	}
4212 
4213 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4214 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4215 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4216 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4217 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4218 		}
4219 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4220 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4221 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4222 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4223 		}
4224 	}
4225 
4226 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4227 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4228 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4229 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4230 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4231 		}
4232 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4233 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4234 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4235 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4236 		}
4237 	}
4238 
4239 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
4240 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4241 		return (err);
4242 	}
4243 	_iflib_pre_assert(scctx);
4244 	ctx->ifc_txrx = *scctx->isc_txrx;
4245 
4246 #ifdef INVARIANTS
4247 	MPASS(scctx->isc_capenable);
4248 	if (scctx->isc_capenable & IFCAP_TXCSUM)
4249 		MPASS(scctx->isc_tx_csum_flags);
4250 #endif
4251 
4252 	if_setcapabilities(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
4253 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
4254 
4255 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4256 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4257 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4258 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4259 
4260 #ifdef ACPI_DMAR
4261 	if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
4262 		ctx->ifc_flags |= IFC_DMAR;
4263 #elif !(defined(__i386__) || defined(__amd64__))
4264 	/* set unconditionally for !x86 */
4265 	ctx->ifc_flags |= IFC_DMAR;
4266 #endif
4267 
4268 	msix_bar = scctx->isc_msix_bar;
4269 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4270 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4271 
4272 	/* XXX change for per-queue sizes */
4273 	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
4274 		      scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4275 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4276 		if (!powerof2(scctx->isc_nrxd[i])) {
4277 			/* round down instead? */
4278 			device_printf(dev, "# rx descriptors must be a power of 2\n");
4279 			err = EINVAL;
4280 			goto fail;
4281 		}
4282 	}
4283 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4284 		if (!powerof2(scctx->isc_ntxd[i])) {
4285 			device_printf(dev,
4286 			    "# tx descriptors must be a power of 2");
4287 			err = EINVAL;
4288 			goto fail;
4289 		}
4290 	}
4291 
4292 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4293 	    MAX_SINGLE_PACKET_FRACTION)
4294 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4295 		    MAX_SINGLE_PACKET_FRACTION);
4296 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4297 	    MAX_SINGLE_PACKET_FRACTION)
4298 		scctx->isc_tx_tso_segments_max = max(1,
4299 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4300 
4301 	/*
4302 	 * Protect the stack against modern hardware
4303 	 */
4304 	if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX)
4305 		scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX;
4306 
4307 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4308 	ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max;
4309 	ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max;
4310 	ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max;
4311 	if (scctx->isc_rss_table_size == 0)
4312 		scctx->isc_rss_table_size = 64;
4313 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4314 
4315 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4316 	/* XXX format name */
4317 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
4318 
4319 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
4320 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4321 		device_printf(dev, "Unable to fetch CPU list\n");
4322 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4323 	}
4324 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4325 
4326 	/*
4327 	** Now setup MSI or MSI/X, should
4328 	** return us the number of supported
4329 	** vectors. (Will be 1 for MSI)
4330 	*/
4331 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
4332 		msix = scctx->isc_vectors;
4333 	} else if (scctx->isc_msix_bar != 0)
4334 	       /*
4335 		* The simple fact that isc_msix_bar is not 0 does not mean we
4336 		* we have a good value there that is known to work.
4337 		*/
4338 		msix = iflib_msix_init(ctx);
4339 	else {
4340 		scctx->isc_vectors = 1;
4341 		scctx->isc_ntxqsets = 1;
4342 		scctx->isc_nrxqsets = 1;
4343 		scctx->isc_intr = IFLIB_INTR_LEGACY;
4344 		msix = 0;
4345 	}
4346 	/* Get memory for the station queues */
4347 	if ((err = iflib_queues_alloc(ctx))) {
4348 		device_printf(dev, "Unable to allocate queue memory\n");
4349 		goto fail;
4350 	}
4351 
4352 	if ((err = iflib_qset_structures_setup(ctx))) {
4353 		device_printf(dev, "qset structure setup failed %d\n", err);
4354 		goto fail_queues;
4355 	}
4356 
4357 	/*
4358 	 * Group taskqueues aren't properly set up until SMP is started,
4359 	 * so we disable interrupts until we can handle them post
4360 	 * SI_SUB_SMP.
4361 	 *
4362 	 * XXX: disabling interrupts doesn't actually work, at least for
4363 	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
4364 	 * we do null handling and depend on this not causing too large an
4365 	 * interrupt storm.
4366 	 */
4367 	IFDI_INTR_DISABLE(ctx);
4368 	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
4369 		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
4370 		goto fail_intr_free;
4371 	}
4372 	if (msix <= 1) {
4373 		rid = 0;
4374 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
4375 			MPASS(msix == 1);
4376 			rid = 1;
4377 		}
4378 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
4379 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
4380 			goto fail_intr_free;
4381 		}
4382 	}
4383 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4384 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4385 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4386 		goto fail_detach;
4387 	}
4388 	if ((err = iflib_netmap_attach(ctx))) {
4389 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
4390 		goto fail_detach;
4391 	}
4392 	*ctxp = ctx;
4393 
4394 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4395 	iflib_add_device_sysctl_post(ctx);
4396 	ctx->ifc_flags |= IFC_INIT_DONE;
4397 	return (0);
4398 fail_detach:
4399 	ether_ifdetach(ctx->ifc_ifp);
4400 fail_intr_free:
4401 	if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
4402 		pci_release_msi(ctx->ifc_dev);
4403 fail_queues:
4404 	/* XXX free queues */
4405 fail:
4406 	IFDI_DETACH(ctx);
4407 	return (err);
4408 }
4409 
4410 int
4411 iflib_device_attach(device_t dev)
4412 {
4413 	if_ctx_t ctx;
4414 	if_shared_ctx_t sctx;
4415 
4416 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4417 		return (ENOTSUP);
4418 
4419 	pci_enable_busmaster(dev);
4420 
4421 	return (iflib_device_register(dev, NULL, sctx, &ctx));
4422 }
4423 
4424 int
4425 iflib_device_deregister(if_ctx_t ctx)
4426 {
4427 	if_t ifp = ctx->ifc_ifp;
4428 	iflib_txq_t txq;
4429 	iflib_rxq_t rxq;
4430 	device_t dev = ctx->ifc_dev;
4431 	int i, j;
4432 	struct taskqgroup *tqg;
4433 	iflib_fl_t fl;
4434 
4435 	/* Make sure VLANS are not using driver */
4436 	if (if_vlantrunkinuse(ifp)) {
4437 		device_printf(dev,"Vlan in use, detach first\n");
4438 		return (EBUSY);
4439 	}
4440 
4441 	CTX_LOCK(ctx);
4442 	ctx->ifc_in_detach = 1;
4443 	iflib_stop(ctx);
4444 	CTX_UNLOCK(ctx);
4445 
4446 	/* Unregister VLAN events */
4447 	if (ctx->ifc_vlan_attach_event != NULL)
4448 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4449 	if (ctx->ifc_vlan_detach_event != NULL)
4450 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4451 
4452 	iflib_netmap_detach(ifp);
4453 	ether_ifdetach(ifp);
4454 	/* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/
4455 	CTX_LOCK_DESTROY(ctx);
4456 	if (ctx->ifc_led_dev != NULL)
4457 		led_destroy(ctx->ifc_led_dev);
4458 	/* XXX drain any dependent tasks */
4459 	tqg = qgroup_if_io_tqg;
4460 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4461 		callout_drain(&txq->ift_timer);
4462 		if (txq->ift_task.gt_uniq != NULL)
4463 			taskqgroup_detach(tqg, &txq->ift_task);
4464 	}
4465 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4466 		if (rxq->ifr_task.gt_uniq != NULL)
4467 			taskqgroup_detach(tqg, &rxq->ifr_task);
4468 
4469 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
4470 			free(fl->ifl_rx_bitmap, M_IFLIB);
4471 
4472 	}
4473 	tqg = qgroup_if_config_tqg;
4474 	if (ctx->ifc_admin_task.gt_uniq != NULL)
4475 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
4476 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
4477 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
4478 
4479 	IFDI_DETACH(ctx);
4480 	device_set_softc(ctx->ifc_dev, NULL);
4481 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
4482 		pci_release_msi(dev);
4483 	}
4484 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
4485 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
4486 	}
4487 	if (ctx->ifc_msix_mem != NULL) {
4488 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
4489 			ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
4490 		ctx->ifc_msix_mem = NULL;
4491 	}
4492 
4493 	bus_generic_detach(dev);
4494 	if_free(ifp);
4495 
4496 	iflib_tx_structures_free(ctx);
4497 	iflib_rx_structures_free(ctx);
4498 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4499 		free(ctx->ifc_softc, M_IFLIB);
4500 	free(ctx, M_IFLIB);
4501 	return (0);
4502 }
4503 
4504 
4505 int
4506 iflib_device_detach(device_t dev)
4507 {
4508 	if_ctx_t ctx = device_get_softc(dev);
4509 
4510 	return (iflib_device_deregister(ctx));
4511 }
4512 
4513 int
4514 iflib_device_suspend(device_t dev)
4515 {
4516 	if_ctx_t ctx = device_get_softc(dev);
4517 
4518 	CTX_LOCK(ctx);
4519 	IFDI_SUSPEND(ctx);
4520 	CTX_UNLOCK(ctx);
4521 
4522 	return bus_generic_suspend(dev);
4523 }
4524 int
4525 iflib_device_shutdown(device_t dev)
4526 {
4527 	if_ctx_t ctx = device_get_softc(dev);
4528 
4529 	CTX_LOCK(ctx);
4530 	IFDI_SHUTDOWN(ctx);
4531 	CTX_UNLOCK(ctx);
4532 
4533 	return bus_generic_suspend(dev);
4534 }
4535 
4536 
4537 int
4538 iflib_device_resume(device_t dev)
4539 {
4540 	if_ctx_t ctx = device_get_softc(dev);
4541 	iflib_txq_t txq = ctx->ifc_txqs;
4542 
4543 	CTX_LOCK(ctx);
4544 	IFDI_RESUME(ctx);
4545 	iflib_init_locked(ctx);
4546 	CTX_UNLOCK(ctx);
4547 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
4548 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
4549 
4550 	return (bus_generic_resume(dev));
4551 }
4552 
4553 int
4554 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
4555 {
4556 	int error;
4557 	if_ctx_t ctx = device_get_softc(dev);
4558 
4559 	CTX_LOCK(ctx);
4560 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
4561 	CTX_UNLOCK(ctx);
4562 
4563 	return (error);
4564 }
4565 
4566 void
4567 iflib_device_iov_uninit(device_t dev)
4568 {
4569 	if_ctx_t ctx = device_get_softc(dev);
4570 
4571 	CTX_LOCK(ctx);
4572 	IFDI_IOV_UNINIT(ctx);
4573 	CTX_UNLOCK(ctx);
4574 }
4575 
4576 int
4577 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
4578 {
4579 	int error;
4580 	if_ctx_t ctx = device_get_softc(dev);
4581 
4582 	CTX_LOCK(ctx);
4583 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
4584 	CTX_UNLOCK(ctx);
4585 
4586 	return (error);
4587 }
4588 
4589 /*********************************************************************
4590  *
4591  *  MODULE FUNCTION DEFINITIONS
4592  *
4593  **********************************************************************/
4594 
4595 /*
4596  * - Start a fast taskqueue thread for each core
4597  * - Start a taskqueue for control operations
4598  */
4599 static int
4600 iflib_module_init(void)
4601 {
4602 	return (0);
4603 }
4604 
4605 static int
4606 iflib_module_event_handler(module_t mod, int what, void *arg)
4607 {
4608 	int err;
4609 
4610 	switch (what) {
4611 	case MOD_LOAD:
4612 		if ((err = iflib_module_init()) != 0)
4613 			return (err);
4614 		break;
4615 	case MOD_UNLOAD:
4616 		return (EBUSY);
4617 	default:
4618 		return (EOPNOTSUPP);
4619 	}
4620 
4621 	return (0);
4622 }
4623 
4624 /*********************************************************************
4625  *
4626  *  PUBLIC FUNCTION DEFINITIONS
4627  *     ordered as in iflib.h
4628  *
4629  **********************************************************************/
4630 
4631 
4632 static void
4633 _iflib_assert(if_shared_ctx_t sctx)
4634 {
4635 	MPASS(sctx->isc_tx_maxsize);
4636 	MPASS(sctx->isc_tx_maxsegsize);
4637 
4638 	MPASS(sctx->isc_rx_maxsize);
4639 	MPASS(sctx->isc_rx_nsegments);
4640 	MPASS(sctx->isc_rx_maxsegsize);
4641 
4642 	MPASS(sctx->isc_nrxd_min[0]);
4643 	MPASS(sctx->isc_nrxd_max[0]);
4644 	MPASS(sctx->isc_nrxd_default[0]);
4645 	MPASS(sctx->isc_ntxd_min[0]);
4646 	MPASS(sctx->isc_ntxd_max[0]);
4647 	MPASS(sctx->isc_ntxd_default[0]);
4648 }
4649 
4650 static void
4651 _iflib_pre_assert(if_softc_ctx_t scctx)
4652 {
4653 
4654 	MPASS(scctx->isc_txrx->ift_txd_encap);
4655 	MPASS(scctx->isc_txrx->ift_txd_flush);
4656 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
4657 	MPASS(scctx->isc_txrx->ift_rxd_available);
4658 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
4659 	MPASS(scctx->isc_txrx->ift_rxd_refill);
4660 	MPASS(scctx->isc_txrx->ift_rxd_flush);
4661 }
4662 
4663 static int
4664 iflib_register(if_ctx_t ctx)
4665 {
4666 	if_shared_ctx_t sctx = ctx->ifc_sctx;
4667 	driver_t *driver = sctx->isc_driver;
4668 	device_t dev = ctx->ifc_dev;
4669 	if_t ifp;
4670 
4671 	_iflib_assert(sctx);
4672 
4673 	CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
4674 
4675 	ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
4676 	if (ifp == NULL) {
4677 		device_printf(dev, "can not allocate ifnet structure\n");
4678 		return (ENOMEM);
4679 	}
4680 
4681 	/*
4682 	 * Initialize our context's device specific methods
4683 	 */
4684 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
4685 	kobj_class_compile((kobj_class_t) driver);
4686 	driver->refs++;
4687 
4688 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
4689 	if_setsoftc(ifp, ctx);
4690 	if_setdev(ifp, dev);
4691 	if_setinitfn(ifp, iflib_if_init);
4692 	if_setioctlfn(ifp, iflib_if_ioctl);
4693 	if_settransmitfn(ifp, iflib_if_transmit);
4694 	if_setqflushfn(ifp, iflib_if_qflush);
4695 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
4696 
4697 	ctx->ifc_vlan_attach_event =
4698 		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
4699 							  EVENTHANDLER_PRI_FIRST);
4700 	ctx->ifc_vlan_detach_event =
4701 		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
4702 							  EVENTHANDLER_PRI_FIRST);
4703 
4704 	ifmedia_init(&ctx->ifc_media, IFM_IMASK,
4705 					 iflib_media_change, iflib_media_status);
4706 
4707 	return (0);
4708 }
4709 
4710 
4711 static int
4712 iflib_queues_alloc(if_ctx_t ctx)
4713 {
4714 	if_shared_ctx_t sctx = ctx->ifc_sctx;
4715 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4716 	device_t dev = ctx->ifc_dev;
4717 	int nrxqsets = scctx->isc_nrxqsets;
4718 	int ntxqsets = scctx->isc_ntxqsets;
4719 	iflib_txq_t txq;
4720 	iflib_rxq_t rxq;
4721 	iflib_fl_t fl = NULL;
4722 	int i, j, cpu, err, txconf, rxconf;
4723 	iflib_dma_info_t ifdip;
4724 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
4725 	uint32_t *txqsizes = scctx->isc_txqsizes;
4726 	uint8_t nrxqs = sctx->isc_nrxqs;
4727 	uint8_t ntxqs = sctx->isc_ntxqs;
4728 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
4729 	caddr_t *vaddrs;
4730 	uint64_t *paddrs;
4731 	struct ifmp_ring **brscp;
4732 
4733 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
4734 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
4735 
4736 	brscp = NULL;
4737 	txq = NULL;
4738 	rxq = NULL;
4739 
4740 /* Allocate the TX ring struct memory */
4741 	if (!(txq =
4742 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
4743 	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
4744 		device_printf(dev, "Unable to allocate TX ring memory\n");
4745 		err = ENOMEM;
4746 		goto fail;
4747 	}
4748 
4749 	/* Now allocate the RX */
4750 	if (!(rxq =
4751 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
4752 	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
4753 		device_printf(dev, "Unable to allocate RX ring memory\n");
4754 		err = ENOMEM;
4755 		goto rx_fail;
4756 	}
4757 
4758 	ctx->ifc_txqs = txq;
4759 	ctx->ifc_rxqs = rxq;
4760 
4761 	/*
4762 	 * XXX handle allocation failure
4763 	 */
4764 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
4765 		/* Set up some basics */
4766 
4767 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
4768 			device_printf(dev, "failed to allocate iflib_dma_info\n");
4769 			err = ENOMEM;
4770 			goto err_tx_desc;
4771 		}
4772 		txq->ift_ifdi = ifdip;
4773 		for (j = 0; j < ntxqs; j++, ifdip++) {
4774 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
4775 				device_printf(dev, "Unable to allocate Descriptor memory\n");
4776 				err = ENOMEM;
4777 				goto err_tx_desc;
4778 			}
4779 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
4780 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
4781 		}
4782 		txq->ift_ctx = ctx;
4783 		txq->ift_id = i;
4784 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
4785 			txq->ift_br_offset = 1;
4786 		} else {
4787 			txq->ift_br_offset = 0;
4788 		}
4789 		/* XXX fix this */
4790 		txq->ift_timer.c_cpu = cpu;
4791 
4792 		if (iflib_txsd_alloc(txq)) {
4793 			device_printf(dev, "Critical Failure setting up TX buffers\n");
4794 			err = ENOMEM;
4795 			goto err_tx_desc;
4796 		}
4797 
4798 		/* Initialize the TX lock */
4799 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
4800 		    device_get_nameunit(dev), txq->ift_id);
4801 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
4802 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
4803 
4804 		snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
4805 			 device_get_nameunit(dev), txq->ift_id);
4806 
4807 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
4808 				      iflib_txq_can_drain, M_IFLIB, M_WAITOK);
4809 		if (err) {
4810 			/* XXX free any allocated rings */
4811 			device_printf(dev, "Unable to allocate buf_ring\n");
4812 			goto err_tx_desc;
4813 		}
4814 	}
4815 
4816 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
4817 		/* Set up some basics */
4818 
4819 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
4820 			device_printf(dev, "failed to allocate iflib_dma_info\n");
4821 			err = ENOMEM;
4822 			goto err_tx_desc;
4823 		}
4824 
4825 		rxq->ifr_ifdi = ifdip;
4826 		/* XXX this needs to be changed if #rx queues != #tx queues */
4827 		rxq->ifr_ntxqirq = 1;
4828 		rxq->ifr_txqid[0] = i;
4829 		for (j = 0; j < nrxqs; j++, ifdip++) {
4830 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
4831 				device_printf(dev, "Unable to allocate Descriptor memory\n");
4832 				err = ENOMEM;
4833 				goto err_tx_desc;
4834 			}
4835 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
4836 		}
4837 		rxq->ifr_ctx = ctx;
4838 		rxq->ifr_id = i;
4839 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
4840 			rxq->ifr_fl_offset = 1;
4841 		} else {
4842 			rxq->ifr_fl_offset = 0;
4843 		}
4844 		rxq->ifr_nfl = nfree_lists;
4845 		if (!(fl =
4846 			  (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
4847 			device_printf(dev, "Unable to allocate free list memory\n");
4848 			err = ENOMEM;
4849 			goto err_tx_desc;
4850 		}
4851 		rxq->ifr_fl = fl;
4852 		for (j = 0; j < nfree_lists; j++) {
4853 			fl[j].ifl_rxq = rxq;
4854 			fl[j].ifl_id = j;
4855 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
4856 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
4857 		}
4858         /* Allocate receive buffers for the ring*/
4859 		if (iflib_rxsd_alloc(rxq)) {
4860 			device_printf(dev,
4861 			    "Critical Failure setting up receive buffers\n");
4862 			err = ENOMEM;
4863 			goto err_rx_desc;
4864 		}
4865 
4866 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
4867 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO);
4868 	}
4869 
4870 	/* TXQs */
4871 	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
4872 	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
4873 	for (i = 0; i < ntxqsets; i++) {
4874 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
4875 
4876 		for (j = 0; j < ntxqs; j++, di++) {
4877 			vaddrs[i*ntxqs + j] = di->idi_vaddr;
4878 			paddrs[i*ntxqs + j] = di->idi_paddr;
4879 		}
4880 	}
4881 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
4882 		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
4883 		iflib_tx_structures_free(ctx);
4884 		free(vaddrs, M_IFLIB);
4885 		free(paddrs, M_IFLIB);
4886 		goto err_rx_desc;
4887 	}
4888 	free(vaddrs, M_IFLIB);
4889 	free(paddrs, M_IFLIB);
4890 
4891 	/* RXQs */
4892 	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
4893 	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
4894 	for (i = 0; i < nrxqsets; i++) {
4895 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
4896 
4897 		for (j = 0; j < nrxqs; j++, di++) {
4898 			vaddrs[i*nrxqs + j] = di->idi_vaddr;
4899 			paddrs[i*nrxqs + j] = di->idi_paddr;
4900 		}
4901 	}
4902 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
4903 		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
4904 		iflib_tx_structures_free(ctx);
4905 		free(vaddrs, M_IFLIB);
4906 		free(paddrs, M_IFLIB);
4907 		goto err_rx_desc;
4908 	}
4909 	free(vaddrs, M_IFLIB);
4910 	free(paddrs, M_IFLIB);
4911 
4912 	return (0);
4913 
4914 /* XXX handle allocation failure changes */
4915 err_rx_desc:
4916 err_tx_desc:
4917 	if (ctx->ifc_rxqs != NULL)
4918 		free(ctx->ifc_rxqs, M_IFLIB);
4919 	ctx->ifc_rxqs = NULL;
4920 	if (ctx->ifc_txqs != NULL)
4921 		free(ctx->ifc_txqs, M_IFLIB);
4922 	ctx->ifc_txqs = NULL;
4923 rx_fail:
4924 	if (brscp != NULL)
4925 		free(brscp, M_IFLIB);
4926 	if (rxq != NULL)
4927 		free(rxq, M_IFLIB);
4928 	if (txq != NULL)
4929 		free(txq, M_IFLIB);
4930 fail:
4931 	return (err);
4932 }
4933 
4934 static int
4935 iflib_tx_structures_setup(if_ctx_t ctx)
4936 {
4937 	iflib_txq_t txq = ctx->ifc_txqs;
4938 	int i;
4939 
4940 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
4941 		iflib_txq_setup(txq);
4942 
4943 	return (0);
4944 }
4945 
4946 static void
4947 iflib_tx_structures_free(if_ctx_t ctx)
4948 {
4949 	iflib_txq_t txq = ctx->ifc_txqs;
4950 	int i, j;
4951 
4952 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
4953 		iflib_txq_destroy(txq);
4954 		for (j = 0; j < ctx->ifc_nhwtxqs; j++)
4955 			iflib_dma_free(&txq->ift_ifdi[j]);
4956 	}
4957 	free(ctx->ifc_txqs, M_IFLIB);
4958 	ctx->ifc_txqs = NULL;
4959 	IFDI_QUEUES_FREE(ctx);
4960 }
4961 
4962 /*********************************************************************
4963  *
4964  *  Initialize all receive rings.
4965  *
4966  **********************************************************************/
4967 static int
4968 iflib_rx_structures_setup(if_ctx_t ctx)
4969 {
4970 	iflib_rxq_t rxq = ctx->ifc_rxqs;
4971 	int q;
4972 #if defined(INET6) || defined(INET)
4973 	int i, err;
4974 #endif
4975 
4976 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
4977 #if defined(INET6) || defined(INET)
4978 		tcp_lro_free(&rxq->ifr_lc);
4979 		if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
4980 		    TCP_LRO_ENTRIES, min(1024,
4981 		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
4982 			device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
4983 			goto fail;
4984 		}
4985 		rxq->ifr_lro_enabled = TRUE;
4986 #endif
4987 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
4988 	}
4989 	return (0);
4990 #if defined(INET6) || defined(INET)
4991 fail:
4992 	/*
4993 	 * Free RX software descriptors allocated so far, we will only handle
4994 	 * the rings that completed, the failing case will have
4995 	 * cleaned up for itself. 'q' failed, so its the terminus.
4996 	 */
4997 	rxq = ctx->ifc_rxqs;
4998 	for (i = 0; i < q; ++i, rxq++) {
4999 		iflib_rx_sds_free(rxq);
5000 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
5001 	}
5002 	return (err);
5003 #endif
5004 }
5005 
5006 /*********************************************************************
5007  *
5008  *  Free all receive rings.
5009  *
5010  **********************************************************************/
5011 static void
5012 iflib_rx_structures_free(if_ctx_t ctx)
5013 {
5014 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5015 
5016 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
5017 		iflib_rx_sds_free(rxq);
5018 	}
5019 }
5020 
5021 static int
5022 iflib_qset_structures_setup(if_ctx_t ctx)
5023 {
5024 	int err;
5025 
5026 	if ((err = iflib_tx_structures_setup(ctx)) != 0)
5027 		return (err);
5028 
5029 	if ((err = iflib_rx_structures_setup(ctx)) != 0) {
5030 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
5031 		iflib_tx_structures_free(ctx);
5032 		iflib_rx_structures_free(ctx);
5033 	}
5034 	return (err);
5035 }
5036 
5037 int
5038 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
5039 				driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, char *name)
5040 {
5041 
5042 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
5043 }
5044 
5045 static int
5046 find_nth(if_ctx_t ctx, cpuset_t *cpus, int qid)
5047 {
5048 	int i, cpuid, eqid, count;
5049 
5050 	CPU_COPY(&ctx->ifc_cpus, cpus);
5051 	count = CPU_COUNT(cpus);
5052 	eqid = qid % count;
5053 	/* clear up to the qid'th bit */
5054 	for (i = 0; i < eqid; i++) {
5055 		cpuid = CPU_FFS(cpus);
5056 		MPASS(cpuid != 0);
5057 		CPU_CLR(cpuid-1, cpus);
5058 	}
5059 	cpuid = CPU_FFS(cpus);
5060 	MPASS(cpuid != 0);
5061 	return (cpuid-1);
5062 }
5063 
5064 int
5065 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
5066 						iflib_intr_type_t type, driver_filter_t *filter,
5067 						void *filter_arg, int qid, char *name)
5068 {
5069 	struct grouptask *gtask;
5070 	struct taskqgroup *tqg;
5071 	iflib_filter_info_t info;
5072 	cpuset_t cpus;
5073 	gtask_fn_t *fn;
5074 	int tqrid, err, cpuid;
5075 	driver_filter_t *intr_fast;
5076 	void *q;
5077 
5078 	info = &ctx->ifc_filter_info;
5079 	tqrid = rid;
5080 
5081 	switch (type) {
5082 	/* XXX merge tx/rx for netmap? */
5083 	case IFLIB_INTR_TX:
5084 		q = &ctx->ifc_txqs[qid];
5085 		info = &ctx->ifc_txqs[qid].ift_filter_info;
5086 		gtask = &ctx->ifc_txqs[qid].ift_task;
5087 		tqg = qgroup_if_io_tqg;
5088 		fn = _task_fn_tx;
5089 		intr_fast = iflib_fast_intr;
5090 		GROUPTASK_INIT(gtask, 0, fn, q);
5091 		break;
5092 	case IFLIB_INTR_RX:
5093 		q = &ctx->ifc_rxqs[qid];
5094 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
5095 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5096 		tqg = qgroup_if_io_tqg;
5097 		fn = _task_fn_rx;
5098 		intr_fast = iflib_fast_intr;
5099 		GROUPTASK_INIT(gtask, 0, fn, q);
5100 		break;
5101 	case IFLIB_INTR_RXTX:
5102 		q = &ctx->ifc_rxqs[qid];
5103 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
5104 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5105 		tqg = qgroup_if_io_tqg;
5106 		fn = _task_fn_rx;
5107 		intr_fast = iflib_fast_intr_rxtx;
5108 		GROUPTASK_INIT(gtask, 0, fn, q);
5109 		break;
5110 	case IFLIB_INTR_ADMIN:
5111 		q = ctx;
5112 		tqrid = -1;
5113 		info = &ctx->ifc_filter_info;
5114 		gtask = &ctx->ifc_admin_task;
5115 		tqg = qgroup_if_config_tqg;
5116 		fn = _task_fn_admin;
5117 		intr_fast = iflib_fast_intr_ctx;
5118 		break;
5119 	default:
5120 		panic("unknown net intr type");
5121 	}
5122 
5123 	info->ifi_filter = filter;
5124 	info->ifi_filter_arg = filter_arg;
5125 	info->ifi_task = gtask;
5126 	info->ifi_ctx = q;
5127 
5128 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info,  name);
5129 	if (err != 0) {
5130 		device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
5131 		return (err);
5132 	}
5133 	if (type == IFLIB_INTR_ADMIN)
5134 		return (0);
5135 
5136 	if (tqrid != -1) {
5137 		cpuid = find_nth(ctx, &cpus, qid);
5138 		taskqgroup_attach_cpu(tqg, gtask, q, cpuid, rman_get_start(irq->ii_res), name);
5139 	} else {
5140 		taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
5141 	}
5142 
5143 	return (0);
5144 }
5145 
5146 void
5147 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,  void *arg, int qid, char *name)
5148 {
5149 	struct grouptask *gtask;
5150 	struct taskqgroup *tqg;
5151 	gtask_fn_t *fn;
5152 	void *q;
5153 	int irq_num = -1;
5154 
5155 	switch (type) {
5156 	case IFLIB_INTR_TX:
5157 		q = &ctx->ifc_txqs[qid];
5158 		gtask = &ctx->ifc_txqs[qid].ift_task;
5159 		tqg = qgroup_if_io_tqg;
5160 		fn = _task_fn_tx;
5161 		if (irq != NULL)
5162 			irq_num = rman_get_start(irq->ii_res);
5163 		break;
5164 	case IFLIB_INTR_RX:
5165 		q = &ctx->ifc_rxqs[qid];
5166 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5167 		tqg = qgroup_if_io_tqg;
5168 		fn = _task_fn_rx;
5169 		if (irq != NULL)
5170 			irq_num = rman_get_start(irq->ii_res);
5171 		break;
5172 	case IFLIB_INTR_IOV:
5173 		q = ctx;
5174 		gtask = &ctx->ifc_vflr_task;
5175 		tqg = qgroup_if_config_tqg;
5176 		fn = _task_fn_iov;
5177 		break;
5178 	default:
5179 		panic("unknown net intr type");
5180 	}
5181 	GROUPTASK_INIT(gtask, 0, fn, q);
5182 	taskqgroup_attach(tqg, gtask, q, irq_num, name);
5183 }
5184 
5185 void
5186 iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
5187 {
5188 	if (irq->ii_tag)
5189 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
5190 
5191 	if (irq->ii_res)
5192 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res);
5193 }
5194 
5195 static int
5196 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, char *name)
5197 {
5198 	iflib_txq_t txq = ctx->ifc_txqs;
5199 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5200 	if_irq_t irq = &ctx->ifc_legacy_irq;
5201 	iflib_filter_info_t info;
5202 	struct grouptask *gtask;
5203 	struct taskqgroup *tqg;
5204 	gtask_fn_t *fn;
5205 	int tqrid;
5206 	void *q;
5207 	int err;
5208 
5209 	q = &ctx->ifc_rxqs[0];
5210 	info = &rxq[0].ifr_filter_info;
5211 	gtask = &rxq[0].ifr_task;
5212 	tqg = qgroup_if_io_tqg;
5213 	tqrid = irq->ii_rid = *rid;
5214 	fn = _task_fn_rx;
5215 
5216 	ctx->ifc_flags |= IFC_LEGACY;
5217 	info->ifi_filter = filter;
5218 	info->ifi_filter_arg = filter_arg;
5219 	info->ifi_task = gtask;
5220 	info->ifi_ctx = ctx;
5221 
5222 	/* We allocate a single interrupt resource */
5223 	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
5224 		return (err);
5225 	GROUPTASK_INIT(gtask, 0, fn, q);
5226 	taskqgroup_attach(tqg, gtask, q, tqrid, name);
5227 
5228 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
5229 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, tqrid, "tx");
5230 	return (0);
5231 }
5232 
5233 void
5234 iflib_led_create(if_ctx_t ctx)
5235 {
5236 
5237 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
5238 	    device_get_nameunit(ctx->ifc_dev));
5239 }
5240 
5241 void
5242 iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
5243 {
5244 
5245 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
5246 }
5247 
5248 void
5249 iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
5250 {
5251 
5252 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
5253 }
5254 
5255 void
5256 iflib_admin_intr_deferred(if_ctx_t ctx)
5257 {
5258 #ifdef INVARIANTS
5259 	struct grouptask *gtask;
5260 
5261 	gtask = &ctx->ifc_admin_task;
5262 	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
5263 #endif
5264 
5265 	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
5266 }
5267 
5268 void
5269 iflib_iov_intr_deferred(if_ctx_t ctx)
5270 {
5271 
5272 	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
5273 }
5274 
5275 void
5276 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
5277 {
5278 
5279 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
5280 }
5281 
5282 void
5283 iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn,
5284 	char *name)
5285 {
5286 
5287 	GROUPTASK_INIT(gtask, 0, fn, ctx);
5288 	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
5289 }
5290 
5291 void
5292 iflib_config_gtask_deinit(struct grouptask *gtask)
5293 {
5294 
5295 	taskqgroup_detach(qgroup_if_config_tqg, gtask);
5296 }
5297 
5298 void
5299 iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
5300 {
5301 	if_t ifp = ctx->ifc_ifp;
5302 	iflib_txq_t txq = ctx->ifc_txqs;
5303 
5304 	if_setbaudrate(ifp, baudrate);
5305 	if (baudrate >= IF_Gbps(10))
5306 		ctx->ifc_flags |= IFC_PREFETCH;
5307 
5308 	/* If link down, disable watchdog */
5309 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
5310 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
5311 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
5312 	}
5313 	ctx->ifc_link_state = link_state;
5314 	if_link_state_change(ifp, link_state);
5315 }
5316 
5317 static int
5318 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
5319 {
5320 	int credits;
5321 #ifdef INVARIANTS
5322 	int credits_pre = txq->ift_cidx_processed;
5323 #endif
5324 
5325 	if (ctx->isc_txd_credits_update == NULL)
5326 		return (0);
5327 
5328 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
5329 		return (0);
5330 
5331 	txq->ift_processed += credits;
5332 	txq->ift_cidx_processed += credits;
5333 
5334 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
5335 	if (txq->ift_cidx_processed >= txq->ift_size)
5336 		txq->ift_cidx_processed -= txq->ift_size;
5337 	return (credits);
5338 }
5339 
5340 static int
5341 iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
5342 {
5343 
5344 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
5345 	    budget));
5346 }
5347 
5348 void
5349 iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
5350 	const char *description, if_int_delay_info_t info,
5351 	int offset, int value)
5352 {
5353 	info->iidi_ctx = ctx;
5354 	info->iidi_offset = offset;
5355 	info->iidi_value = value;
5356 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
5357 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
5358 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5359 	    info, 0, iflib_sysctl_int_delay, "I", description);
5360 }
5361 
5362 struct mtx *
5363 iflib_ctx_lock_get(if_ctx_t ctx)
5364 {
5365 
5366 	return (&ctx->ifc_mtx);
5367 }
5368 
5369 static int
5370 iflib_msix_init(if_ctx_t ctx)
5371 {
5372 	device_t dev = ctx->ifc_dev;
5373 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5374 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5375 	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
5376 	int iflib_num_tx_queues, iflib_num_rx_queues;
5377 	int err, admincnt, bar;
5378 
5379 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
5380 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
5381 
5382 	device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
5383 
5384 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
5385 	admincnt = sctx->isc_admin_intrcnt;
5386 	/* Override by global tuneable */
5387 	{
5388 		int i;
5389 		size_t len = sizeof(i);
5390 		err = kernel_sysctlbyname(curthread, "hw.pci.enable_msix", &i, &len, NULL, 0, NULL, 0);
5391 		if (err == 0) {
5392 			if (i == 0)
5393 				goto msi;
5394 		}
5395 		else {
5396 			device_printf(dev, "unable to read hw.pci.enable_msix.");
5397 		}
5398 	}
5399 	/* Override by tuneable */
5400 	if (scctx->isc_disable_msix)
5401 		goto msi;
5402 
5403 	/*
5404 	** When used in a virtualized environment
5405 	** PCI BUSMASTER capability may not be set
5406 	** so explicity set it here and rewrite
5407 	** the ENABLE in the MSIX control register
5408 	** at this point to cause the host to
5409 	** successfully initialize us.
5410 	*/
5411 	{
5412 		int msix_ctrl, rid;
5413 
5414  		pci_enable_busmaster(dev);
5415 		rid = 0;
5416 		if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0 && rid != 0) {
5417 			rid += PCIR_MSIX_CTRL;
5418 			msix_ctrl = pci_read_config(dev, rid, 2);
5419 			msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
5420 			pci_write_config(dev, rid, msix_ctrl, 2);
5421 		} else {
5422 			device_printf(dev, "PCIY_MSIX capability not found; "
5423 			                   "or rid %d == 0.\n", rid);
5424 			goto msi;
5425 		}
5426 	}
5427 
5428 	/*
5429 	 * bar == -1 => "trust me I know what I'm doing"
5430 	 * Some drivers are for hardware that is so shoddily
5431 	 * documented that no one knows which bars are which
5432 	 * so the developer has to map all bars. This hack
5433 	 * allows shoddy garbage to use msix in this framework.
5434 	 */
5435 	if (bar != -1) {
5436 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
5437 	            SYS_RES_MEMORY, &bar, RF_ACTIVE);
5438 		if (ctx->ifc_msix_mem == NULL) {
5439 			/* May not be enabled */
5440 			device_printf(dev, "Unable to map MSIX table \n");
5441 			goto msi;
5442 		}
5443 	}
5444 	/* First try MSI/X */
5445 	if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */
5446 		device_printf(dev, "System has MSIX disabled \n");
5447 		bus_release_resource(dev, SYS_RES_MEMORY,
5448 		    bar, ctx->ifc_msix_mem);
5449 		ctx->ifc_msix_mem = NULL;
5450 		goto msi;
5451 	}
5452 #if IFLIB_DEBUG
5453 	/* use only 1 qset in debug mode */
5454 	queuemsgs = min(msgs - admincnt, 1);
5455 #else
5456 	queuemsgs = msgs - admincnt;
5457 #endif
5458 #ifdef RSS
5459 	queues = imin(queuemsgs, rss_getnumbuckets());
5460 #else
5461 	queues = queuemsgs;
5462 #endif
5463 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
5464 	device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n",
5465 				  CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
5466 #ifdef  RSS
5467 	/* If we're doing RSS, clamp at the number of RSS buckets */
5468 	if (queues > rss_getnumbuckets())
5469 		queues = rss_getnumbuckets();
5470 #endif
5471 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
5472 		rx_queues = iflib_num_rx_queues;
5473 	else
5474 		rx_queues = queues;
5475 
5476 	if (rx_queues > scctx->isc_nrxqsets)
5477 		rx_queues = scctx->isc_nrxqsets;
5478 
5479 	/*
5480 	 * We want this to be all logical CPUs by default
5481 	 */
5482 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
5483 		tx_queues = iflib_num_tx_queues;
5484 	else
5485 		tx_queues = mp_ncpus;
5486 
5487 	if (tx_queues > scctx->isc_ntxqsets)
5488 		tx_queues = scctx->isc_ntxqsets;
5489 
5490 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
5491 #ifdef INVARIANTS
5492 		if (tx_queues != rx_queues)
5493 			device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
5494 				      min(rx_queues, tx_queues), min(rx_queues, tx_queues));
5495 #endif
5496 		tx_queues = min(rx_queues, tx_queues);
5497 		rx_queues = min(rx_queues, tx_queues);
5498 	}
5499 
5500 	device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues);
5501 
5502 	vectors = rx_queues + admincnt;
5503 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
5504 		device_printf(dev,
5505 					  "Using MSIX interrupts with %d vectors\n", vectors);
5506 		scctx->isc_vectors = vectors;
5507 		scctx->isc_nrxqsets = rx_queues;
5508 		scctx->isc_ntxqsets = tx_queues;
5509 		scctx->isc_intr = IFLIB_INTR_MSIX;
5510 
5511 		return (vectors);
5512 	} else {
5513 		device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err);
5514 	}
5515 msi:
5516 	vectors = pci_msi_count(dev);
5517 	scctx->isc_nrxqsets = 1;
5518 	scctx->isc_ntxqsets = 1;
5519 	scctx->isc_vectors = vectors;
5520 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
5521 		device_printf(dev,"Using an MSI interrupt\n");
5522 		scctx->isc_intr = IFLIB_INTR_MSI;
5523 	} else {
5524 		device_printf(dev,"Using a Legacy interrupt\n");
5525 		scctx->isc_intr = IFLIB_INTR_LEGACY;
5526 	}
5527 
5528 	return (vectors);
5529 }
5530 
5531 char * ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
5532 
5533 static int
5534 mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
5535 {
5536 	int rc;
5537 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
5538 	struct sbuf *sb;
5539 	char *ring_state = "UNKNOWN";
5540 
5541 	/* XXX needed ? */
5542 	rc = sysctl_wire_old_buffer(req, 0);
5543 	MPASS(rc == 0);
5544 	if (rc != 0)
5545 		return (rc);
5546 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
5547 	MPASS(sb != NULL);
5548 	if (sb == NULL)
5549 		return (ENOMEM);
5550 	if (state[3] <= 3)
5551 		ring_state = ring_states[state[3]];
5552 
5553 	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
5554 		    state[0], state[1], state[2], ring_state);
5555 	rc = sbuf_finish(sb);
5556 	sbuf_delete(sb);
5557         return(rc);
5558 }
5559 
5560 enum iflib_ndesc_handler {
5561 	IFLIB_NTXD_HANDLER,
5562 	IFLIB_NRXD_HANDLER,
5563 };
5564 
5565 static int
5566 mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
5567 {
5568 	if_ctx_t ctx = (void *)arg1;
5569 	enum iflib_ndesc_handler type = arg2;
5570 	char buf[256] = {0};
5571 	qidx_t *ndesc;
5572 	char *p, *next;
5573 	int nqs, rc, i;
5574 
5575 	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
5576 
5577 	nqs = 8;
5578 	switch(type) {
5579 	case IFLIB_NTXD_HANDLER:
5580 		ndesc = ctx->ifc_sysctl_ntxds;
5581 		if (ctx->ifc_sctx)
5582 			nqs = ctx->ifc_sctx->isc_ntxqs;
5583 		break;
5584 	case IFLIB_NRXD_HANDLER:
5585 		ndesc = ctx->ifc_sysctl_nrxds;
5586 		if (ctx->ifc_sctx)
5587 			nqs = ctx->ifc_sctx->isc_nrxqs;
5588 		break;
5589 	}
5590 	if (nqs == 0)
5591 		nqs = 8;
5592 
5593 	for (i=0; i<8; i++) {
5594 		if (i >= nqs)
5595 			break;
5596 		if (i)
5597 			strcat(buf, ",");
5598 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
5599 	}
5600 
5601 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
5602 	if (rc || req->newptr == NULL)
5603 		return rc;
5604 
5605 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
5606 	    i++, p = strsep(&next, " ,")) {
5607 		ndesc[i] = strtoul(p, NULL, 10);
5608 	}
5609 
5610 	return(rc);
5611 }
5612 
5613 #define NAME_BUFLEN 32
5614 static void
5615 iflib_add_device_sysctl_pre(if_ctx_t ctx)
5616 {
5617         device_t dev = iflib_get_dev(ctx);
5618 	struct sysctl_oid_list *child, *oid_list;
5619 	struct sysctl_ctx_list *ctx_list;
5620 	struct sysctl_oid *node;
5621 
5622 	ctx_list = device_get_sysctl_ctx(dev);
5623 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
5624 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
5625 						      CTLFLAG_RD, NULL, "IFLIB fields");
5626 	oid_list = SYSCTL_CHILDREN(node);
5627 
5628 	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
5629 		       CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
5630 		       "driver version");
5631 
5632 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
5633 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
5634 			"# of txqs to use, 0 => use default #");
5635 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
5636 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
5637 			"# of rxqs to use, 0 => use default #");
5638 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
5639 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
5640                        "permit #txq != #rxq");
5641 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
5642                       CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
5643                       "disable MSIX (default 0)");
5644 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
5645 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
5646                        "set the rx budget");
5647 
5648 	/* XXX change for per-queue sizes */
5649 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
5650 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
5651                        mp_ndesc_handler, "A",
5652                        "list of # of tx descriptors to use, 0 = use default #");
5653 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
5654 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
5655                        mp_ndesc_handler, "A",
5656                        "list of # of rx descriptors to use, 0 = use default #");
5657 }
5658 
5659 static void
5660 iflib_add_device_sysctl_post(if_ctx_t ctx)
5661 {
5662 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5663 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5664         device_t dev = iflib_get_dev(ctx);
5665 	struct sysctl_oid_list *child;
5666 	struct sysctl_ctx_list *ctx_list;
5667 	iflib_fl_t fl;
5668 	iflib_txq_t txq;
5669 	iflib_rxq_t rxq;
5670 	int i, j;
5671 	char namebuf[NAME_BUFLEN];
5672 	char *qfmt;
5673 	struct sysctl_oid *queue_node, *fl_node, *node;
5674 	struct sysctl_oid_list *queue_list, *fl_list;
5675 	ctx_list = device_get_sysctl_ctx(dev);
5676 
5677 	node = ctx->ifc_sysctl_node;
5678 	child = SYSCTL_CHILDREN(node);
5679 
5680 	if (scctx->isc_ntxqsets > 100)
5681 		qfmt = "txq%03d";
5682 	else if (scctx->isc_ntxqsets > 10)
5683 		qfmt = "txq%02d";
5684 	else
5685 		qfmt = "txq%d";
5686 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
5687 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
5688 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
5689 					     CTLFLAG_RD, NULL, "Queue Name");
5690 		queue_list = SYSCTL_CHILDREN(queue_node);
5691 #if MEMORY_LOGGING
5692 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
5693 				CTLFLAG_RD,
5694 				&txq->ift_dequeued, "total mbufs freed");
5695 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
5696 				CTLFLAG_RD,
5697 				&txq->ift_enqueued, "total mbufs enqueued");
5698 #endif
5699 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
5700 				   CTLFLAG_RD,
5701 				   &txq->ift_mbuf_defrag, "# of times m_defrag was called");
5702 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
5703 				   CTLFLAG_RD,
5704 				   &txq->ift_pullups, "# of times m_pullup was called");
5705 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
5706 				   CTLFLAG_RD,
5707 				   &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
5708 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
5709 				   CTLFLAG_RD,
5710 				   &txq->ift_no_desc_avail, "# of times no descriptors were available");
5711 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
5712 				   CTLFLAG_RD,
5713 				   &txq->ift_map_failed, "# of times dma map failed");
5714 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
5715 				   CTLFLAG_RD,
5716 				   &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
5717 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
5718 				   CTLFLAG_RD,
5719 				   &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
5720 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
5721 				   CTLFLAG_RD,
5722 				   &txq->ift_pidx, 1, "Producer Index");
5723 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
5724 				   CTLFLAG_RD,
5725 				   &txq->ift_cidx, 1, "Consumer Index");
5726 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
5727 				   CTLFLAG_RD,
5728 				   &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
5729 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
5730 				   CTLFLAG_RD,
5731 				   &txq->ift_in_use, 1, "descriptors in use");
5732 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
5733 				   CTLFLAG_RD,
5734 				   &txq->ift_processed, "descriptors procesed for clean");
5735 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
5736 				   CTLFLAG_RD,
5737 				   &txq->ift_cleaned, "total cleaned");
5738 		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
5739 				CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
5740 				0, mp_ring_state_handler, "A", "soft ring state");
5741 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
5742 				       CTLFLAG_RD, &txq->ift_br->enqueues,
5743 				       "# of enqueues to the mp_ring for this queue");
5744 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
5745 				       CTLFLAG_RD, &txq->ift_br->drops,
5746 				       "# of drops in the mp_ring for this queue");
5747 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
5748 				       CTLFLAG_RD, &txq->ift_br->starts,
5749 				       "# of normal consumer starts in the mp_ring for this queue");
5750 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
5751 				       CTLFLAG_RD, &txq->ift_br->stalls,
5752 					       "# of consumer stalls in the mp_ring for this queue");
5753 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
5754 			       CTLFLAG_RD, &txq->ift_br->restarts,
5755 				       "# of consumer restarts in the mp_ring for this queue");
5756 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
5757 				       CTLFLAG_RD, &txq->ift_br->abdications,
5758 				       "# of consumer abdications in the mp_ring for this queue");
5759 	}
5760 
5761 	if (scctx->isc_nrxqsets > 100)
5762 		qfmt = "rxq%03d";
5763 	else if (scctx->isc_nrxqsets > 10)
5764 		qfmt = "rxq%02d";
5765 	else
5766 		qfmt = "rxq%d";
5767 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
5768 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
5769 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
5770 					     CTLFLAG_RD, NULL, "Queue Name");
5771 		queue_list = SYSCTL_CHILDREN(queue_node);
5772 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
5773 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
5774 				       CTLFLAG_RD,
5775 				       &rxq->ifr_cq_pidx, 1, "Producer Index");
5776 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
5777 				       CTLFLAG_RD,
5778 				       &rxq->ifr_cq_cidx, 1, "Consumer Index");
5779 		}
5780 
5781 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
5782 			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
5783 			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
5784 						     CTLFLAG_RD, NULL, "freelist Name");
5785 			fl_list = SYSCTL_CHILDREN(fl_node);
5786 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
5787 				       CTLFLAG_RD,
5788 				       &fl->ifl_pidx, 1, "Producer Index");
5789 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
5790 				       CTLFLAG_RD,
5791 				       &fl->ifl_cidx, 1, "Consumer Index");
5792 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
5793 				       CTLFLAG_RD,
5794 				       &fl->ifl_credits, 1, "credits available");
5795 #if MEMORY_LOGGING
5796 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
5797 					CTLFLAG_RD,
5798 					&fl->ifl_m_enqueued, "mbufs allocated");
5799 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
5800 					CTLFLAG_RD,
5801 					&fl->ifl_m_dequeued, "mbufs freed");
5802 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
5803 					CTLFLAG_RD,
5804 					&fl->ifl_cl_enqueued, "clusters allocated");
5805 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
5806 					CTLFLAG_RD,
5807 					&fl->ifl_cl_dequeued, "clusters freed");
5808 #endif
5809 
5810 		}
5811 	}
5812 
5813 }
5814 
5815 #ifndef __NO_STRICT_ALIGNMENT
5816 static struct mbuf *
5817 iflib_fixup_rx(struct mbuf *m)
5818 {
5819 	struct mbuf *n;
5820 
5821 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
5822 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
5823 		m->m_data += ETHER_HDR_LEN;
5824 		n = m;
5825 	} else {
5826 		MGETHDR(n, M_NOWAIT, MT_DATA);
5827 		if (n == NULL) {
5828 			m_freem(m);
5829 			return (NULL);
5830 		}
5831 		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
5832 		m->m_data += ETHER_HDR_LEN;
5833 		m->m_len -= ETHER_HDR_LEN;
5834 		n->m_len = ETHER_HDR_LEN;
5835 		M_MOVE_PKTHDR(n, m);
5836 		n->m_next = m;
5837 	}
5838 	return (n);
5839 }
5840 #endif
5841