xref: /freebsd/sys/net/iflib.c (revision eb81f38a62c9ae246955feceedb8c043e78f871f)
1 /*-
2  * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  *  1. Redistributions of source code must retain the above copyright notice,
9  *     this list of conditions and the following disclaimer.
10  *
11  *  2. Neither the name of Matthew Macy nor the names of its
12  *     contributors may be used to endorse or promote products derived from
13  *     this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_acpi.h"
34 #include "opt_sched.h"
35 
36 #include <sys/param.h>
37 #include <sys/types.h>
38 #include <sys/bus.h>
39 #include <sys/eventhandler.h>
40 #include <sys/jail.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/md5.h>
44 #include <sys/mutex.h>
45 #include <sys/module.h>
46 #include <sys/kobj.h>
47 #include <sys/rman.h>
48 #include <sys/proc.h>
49 #include <sys/sbuf.h>
50 #include <sys/smp.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/taskqueue.h>
56 #include <sys/limits.h>
57 
58 #include <net/if.h>
59 #include <net/if_var.h>
60 #include <net/if_types.h>
61 #include <net/if_media.h>
62 #include <net/bpf.h>
63 #include <net/ethernet.h>
64 #include <net/mp_ring.h>
65 #include <net/vnet.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet/tcp_lro.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip6.h>
74 #include <netinet/tcp.h>
75 #include <netinet/ip_var.h>
76 #include <netinet/netdump/netdump.h>
77 #include <netinet6/ip6_var.h>
78 
79 #include <machine/bus.h>
80 #include <machine/in_cksum.h>
81 
82 #include <vm/vm.h>
83 #include <vm/pmap.h>
84 
85 #include <dev/led/led.h>
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/pci_private.h>
89 
90 #include <net/iflib.h>
91 #include <net/iflib_private.h>
92 
93 #include "ifdi_if.h"
94 
95 #if defined(__i386__) || defined(__amd64__)
96 #include <sys/memdesc.h>
97 #include <machine/bus.h>
98 #include <machine/md_var.h>
99 #include <machine/specialreg.h>
100 #include <x86/include/busdma_impl.h>
101 #include <x86/iommu/busdma_dmar.h>
102 #endif
103 
104 #include <sys/bitstring.h>
105 /*
106  * Enable accounting of every mbuf as it comes into and goes out of
107  * iflib's software descriptor references.
108  */
109 #define MEMORY_LOGGING 0
110 /*
111  * Enable mbuf vectors for compressing long mbuf chains
112  */
113 
114 /*
115  * NB:
116  * - Prefetching in tx cleaning should perhaps be a tunable. The distance
117  *   ahead we prefetch needs to be determined by the time spent in m_free
118  *   relative to the cost of a prefetch, which will of course vary based on
119  *   the workload:
120  *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation,
121  *        which is quite expensive, thus suggesting very little prefetch.
122  *      - small packet forwarding, which just returns a single mbuf to UMA,
123  *        will typically be very fast relative to the cost of a memory access.
124  */
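/*
 * An illustrative sketch only (not compiled in): a fixed-distance prefetch
 * while freeing completed tx mbufs.  TX_PREFETCH_DIST and the loop shape
 * are hypothetical, not iflib's actual reclaim path:
 *
 *	#define TX_PREFETCH_DIST	4
 *	for (i = cidx; i != pidx; i = (i + 1) & mask) {
 *		prefetch(ifsd_m[(i + TX_PREFETCH_DIST) & mask]);
 *		m_free(ifsd_m[i]);
 *		ifsd_m[i] = NULL;
 *	}
 */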
125 
126 
127 /*
128  * File organization:
129  *  - private structures
130  *  - iflib private utility functions
131  *  - ifnet functions
132  *  - vlan registry and other exported functions
133  *  - iflib public core functions
134  */
137 MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
138 
139 struct iflib_txq;
140 typedef struct iflib_txq *iflib_txq_t;
141 struct iflib_rxq;
142 typedef struct iflib_rxq *iflib_rxq_t;
143 struct iflib_fl;
144 typedef struct iflib_fl *iflib_fl_t;
145 
146 struct iflib_ctx;
147 
148 static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
149 static void iflib_timer(void *arg);
150 
151 typedef struct iflib_filter_info {
152 	driver_filter_t *ifi_filter;
153 	void *ifi_filter_arg;
154 	struct grouptask *ifi_task;
155 	void *ifi_ctx;
156 } *iflib_filter_info_t;
157 
158 struct iflib_ctx {
159 	KOBJ_FIELDS;
160 	/*
161 	 * Pointer to hardware driver's softc
162 	 */
163 	void *ifc_softc;
164 	device_t ifc_dev;
165 	if_t ifc_ifp;
166 
167 	cpuset_t ifc_cpus;
168 	if_shared_ctx_t ifc_sctx;
169 	struct if_softc_ctx ifc_softc_ctx;
170 
171 	struct sx ifc_ctx_sx;
172 	struct mtx ifc_state_mtx;
173 
174 	uint16_t ifc_nhwtxqs;
175 
176 	iflib_txq_t ifc_txqs;
177 	iflib_rxq_t ifc_rxqs;
178 	uint32_t ifc_if_flags;
179 	uint32_t ifc_flags;
180 	uint32_t ifc_max_fl_buf_size;
181 	int ifc_in_detach;
182 
183 	int ifc_link_state;
184 	int ifc_link_irq;
185 	int ifc_watchdog_events;
186 	struct cdev *ifc_led_dev;
187 	struct resource *ifc_msix_mem;
188 
189 	struct if_irq ifc_legacy_irq;
190 	struct grouptask ifc_admin_task;
191 	struct grouptask ifc_vflr_task;
192 	struct iflib_filter_info ifc_filter_info;
193 	struct ifmedia	ifc_media;
194 
195 	struct sysctl_oid *ifc_sysctl_node;
196 	uint16_t ifc_sysctl_ntxqs;
197 	uint16_t ifc_sysctl_nrxqs;
198 	uint16_t ifc_sysctl_qs_eq_override;
199 	uint16_t ifc_sysctl_rx_budget;
200 	uint16_t ifc_sysctl_tx_abdicate;
201 
202 	qidx_t ifc_sysctl_ntxds[8];
203 	qidx_t ifc_sysctl_nrxds[8];
204 	struct if_txrx ifc_txrx;
205 #define isc_txd_encap  ifc_txrx.ift_txd_encap
206 #define isc_txd_flush  ifc_txrx.ift_txd_flush
207 #define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
208 #define isc_rxd_available ifc_txrx.ift_rxd_available
209 #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
210 #define isc_rxd_refill ifc_txrx.ift_rxd_refill
211 #define isc_rxd_flush ifc_txrx.ift_rxd_flush
214 #define isc_legacy_intr ifc_txrx.ift_legacy_intr
215 	eventhandler_tag ifc_vlan_attach_event;
216 	eventhandler_tag ifc_vlan_detach_event;
217 	uint8_t ifc_mac[ETHER_ADDR_LEN];
218 	char ifc_mtx_name[16];
219 };
220 
221 
222 void *
223 iflib_get_softc(if_ctx_t ctx)
224 {
225 
226 	return (ctx->ifc_softc);
227 }
228 
229 device_t
230 iflib_get_dev(if_ctx_t ctx)
231 {
232 
233 	return (ctx->ifc_dev);
234 }
235 
236 if_t
237 iflib_get_ifp(if_ctx_t ctx)
238 {
239 
240 	return (ctx->ifc_ifp);
241 }
242 
243 struct ifmedia *
244 iflib_get_media(if_ctx_t ctx)
245 {
246 
247 	return (&ctx->ifc_media);
248 }
249 
250 uint32_t
251 iflib_get_flags(if_ctx_t ctx)
252 {
253 	return (ctx->ifc_flags);
254 }
255 
256 void
257 iflib_set_detach(if_ctx_t ctx)
258 {
259 	ctx->ifc_in_detach = 1;
260 }
261 
262 void
263 iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
264 {
265 
266 	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
267 }
268 
269 if_softc_ctx_t
270 iflib_get_softc_ctx(if_ctx_t ctx)
271 {
272 
273 	return (&ctx->ifc_softc_ctx);
274 }
275 
276 if_shared_ctx_t
277 iflib_get_sctx(if_ctx_t ctx)
278 {
279 
280 	return (ctx->ifc_sctx);
281 }
282 
283 #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
284 #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
285 #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
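/*
 * IP_ALIGNED() checks that m_data ends in 0x2 so that, after the 14-byte
 * Ethernet header, the IP header starts on a 4-byte boundary.
 * CACHE_PTR_NEXT() rounds a pointer up to the next cache line boundary;
 * e.g. with CACHE_LINE_SIZE 64, 0x1001 rounds up to 0x1040, while an
 * already-aligned pointer is unchanged.
 */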
286 
287 #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
288 #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
289 
290 #define RX_SW_DESC_MAP_CREATED	(1 << 0)
291 #define TX_SW_DESC_MAP_CREATED	(1 << 1)
292 #define RX_SW_DESC_INUSE        (1 << 3)
293 #define TX_SW_DESC_MAPPED       (1 << 4)
294 
295 #define	M_TOOBIG		M_PROTO1
296 
297 typedef struct iflib_sw_rx_desc_array {
298 	bus_dmamap_t	*ifsd_map;         /* bus_dma maps for packet */
299 	struct mbuf	**ifsd_m;           /* pkthdr mbufs */
300 	caddr_t		*ifsd_cl;          /* direct cluster pointer for rx */
301 	uint8_t		*ifsd_flags;
302 } iflib_rxsd_array_t;
303 
304 typedef struct iflib_sw_tx_desc_array {
305 	bus_dmamap_t    *ifsd_map;         /* bus_dma maps for packet */
306 	struct mbuf    **ifsd_m;           /* pkthdr mbufs */
307 	uint8_t		*ifsd_flags;
308 } if_txsd_vec_t;
309 
310 
311 /* magic number that should be high enough for any hardware */
312 #define IFLIB_MAX_TX_SEGS		128
313 /* bnxt supports 64 with hardware LRO enabled */
314 #define IFLIB_MAX_RX_SEGS		64
315 #define IFLIB_RX_COPY_THRESH		128
316 #define IFLIB_MAX_RX_REFRESH		32
317 /* The minimum descriptors per second before we start coalescing */
318 #define IFLIB_MIN_DESC_SEC		16384
319 #define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
320 #define IFLIB_QUEUE_IDLE		0
321 #define IFLIB_QUEUE_HUNG		1
322 #define IFLIB_QUEUE_WORKING		2
323 /* maximum number of txqs that can share an rx interrupt */
324 #define IFLIB_MAX_TX_SHARED_INTR	4
325 
326 /* this should really scale with ring size - this is a fairly arbitrary value */
327 #define TX_BATCH_SIZE			32
328 
329 #define IFLIB_RESTART_BUDGET		8
330 
331 
332 #define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
333 				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
334 				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
335 struct iflib_txq {
336 	qidx_t		ift_in_use;
337 	qidx_t		ift_cidx;
338 	qidx_t		ift_cidx_processed;
339 	qidx_t		ift_pidx;
340 	uint8_t		ift_gen;
341 	uint8_t		ift_br_offset;
342 	uint16_t	ift_npending;
343 	uint16_t	ift_db_pending;
344 	uint16_t	ift_rs_pending;
345 	/* implicit pad */
346 	uint8_t		ift_txd_size[8];
347 	uint64_t	ift_processed;
348 	uint64_t	ift_cleaned;
349 	uint64_t	ift_cleaned_prev;
350 #if MEMORY_LOGGING
351 	uint64_t	ift_enqueued;
352 	uint64_t	ift_dequeued;
353 #endif
354 	uint64_t	ift_no_tx_dma_setup;
355 	uint64_t	ift_no_desc_avail;
356 	uint64_t	ift_mbuf_defrag_failed;
357 	uint64_t	ift_mbuf_defrag;
358 	uint64_t	ift_map_failed;
359 	uint64_t	ift_txd_encap_efbig;
360 	uint64_t	ift_pullups;
361 	uint64_t	ift_last_timer_tick;
362 
363 	struct mtx	ift_mtx;
364 	struct mtx	ift_db_mtx;
365 
366 	/* constant values */
367 	if_ctx_t	ift_ctx;
368 	struct ifmp_ring        *ift_br;
369 	struct grouptask	ift_task;
370 	qidx_t		ift_size;
371 	uint16_t	ift_id;
372 	struct callout	ift_timer;
373 
374 	if_txsd_vec_t	ift_sds;
375 	uint8_t		ift_qstatus;
376 	uint8_t		ift_closed;
377 	uint8_t		ift_update_freq;
378 	struct iflib_filter_info ift_filter_info;
379 	bus_dma_tag_t		ift_desc_tag;
380 	bus_dma_tag_t		ift_tso_desc_tag;
381 	iflib_dma_info_t	ift_ifdi;
382 #define MTX_NAME_LEN 16
383 	char                    ift_mtx_name[MTX_NAME_LEN];
384 	char                    ift_db_mtx_name[MTX_NAME_LEN];
385 	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
386 #ifdef IFLIB_DIAGNOSTICS
387 	uint64_t ift_cpu_exec_count[256];
388 #endif
389 } __aligned(CACHE_LINE_SIZE);
390 
391 struct iflib_fl {
392 	qidx_t		ifl_cidx;
393 	qidx_t		ifl_pidx;
394 	qidx_t		ifl_credits;
395 	uint8_t		ifl_gen;
396 	uint8_t		ifl_rxd_size;
397 #if MEMORY_LOGGING
398 	uint64_t	ifl_m_enqueued;
399 	uint64_t	ifl_m_dequeued;
400 	uint64_t	ifl_cl_enqueued;
401 	uint64_t	ifl_cl_dequeued;
402 #endif
403 	/* implicit pad */
404 
405 	bitstr_t 	*ifl_rx_bitmap;
406 	qidx_t		ifl_fragidx;
407 	/* constant */
408 	qidx_t		ifl_size;
409 	uint16_t	ifl_buf_size;
410 	uint16_t	ifl_cltype;
411 	uma_zone_t	ifl_zone;
412 	iflib_rxsd_array_t	ifl_sds;
413 	iflib_rxq_t	ifl_rxq;
414 	uint8_t		ifl_id;
415 	bus_dma_tag_t           ifl_desc_tag;
416 	iflib_dma_info_t	ifl_ifdi;
417 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
418 	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
419 	qidx_t	ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
420 }  __aligned(CACHE_LINE_SIZE);
421 
422 static inline qidx_t
423 get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
424 {
425 	qidx_t used;
426 
427 	if (pidx > cidx)
428 		used = pidx - cidx;
429 	else if (pidx < cidx)
430 		used = size - cidx + pidx;
431 	else if (gen == 0 && pidx == cidx)
432 		used = 0;
433 	else if (gen == 1 && pidx == cidx)
434 		used = size;
435 	else
436 		panic("bad state");
437 
438 	return (used);
439 }
440 
441 #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
442 
443 #define IDXDIFF(head, tail, wrap) \
444 	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
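/*
 * A worked example of the accounting above, for a ring of size 8: with
 * cidx == 2 and pidx == 5, get_inuse() returns 3; with pidx == 1 and
 * cidx == 5 (the producer has wrapped), it returns 8 - 5 + 1 = 4.  When
 * pidx == cidx the generation bit disambiguates an empty ring (gen == 0,
 * 0 in use) from a full one (gen == 1, all 8 in use).  IDXDIFF()
 * computes the same forward distance for a pair of ring indices.
 */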
445 
446 struct iflib_rxq {
447 	/*
448 	 * If there is a separate completion queue, these are the cq cidx
449 	 * and pidx.  Otherwise these are unused.
450 	 */
451 	qidx_t		ifr_size;
452 	qidx_t		ifr_cq_cidx;
453 	qidx_t		ifr_cq_pidx;
454 	uint8_t		ifr_cq_gen;
455 	uint8_t		ifr_fl_offset;
456 
457 	if_ctx_t	ifr_ctx;
458 	iflib_fl_t	ifr_fl;
459 	uint64_t	ifr_rx_irq;
460 	uint16_t	ifr_id;
461 	uint8_t		ifr_lro_enabled;
462 	uint8_t		ifr_nfl;
463 	uint8_t		ifr_ntxqirq;
464 	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
465 	struct lro_ctrl			ifr_lc;
466 	struct grouptask        ifr_task;
467 	struct iflib_filter_info ifr_filter_info;
468 	iflib_dma_info_t		ifr_ifdi;
469 
470 	/* dynamically allocate if any drivers need a value substantially larger than this */
471 	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
472 #ifdef IFLIB_DIAGNOSTICS
473 	uint64_t ifr_cpu_exec_count[256];
474 #endif
475 }  __aligned(CACHE_LINE_SIZE);
476 
477 typedef struct if_rxsd {
478 	caddr_t *ifsd_cl;
479 	struct mbuf **ifsd_m;
480 	iflib_fl_t ifsd_fl;
481 	qidx_t ifsd_cidx;
482 } *if_rxsd_t;
483 
484 /* multiple of word size */
485 #ifdef __LP64__
486 #define PKT_INFO_SIZE	6
487 #define RXD_INFO_SIZE	5
488 #define PKT_TYPE uint64_t
489 #else
490 #define PKT_INFO_SIZE	11
491 #define RXD_INFO_SIZE	8
492 #define PKT_TYPE uint32_t
493 #endif
494 #define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
495 #define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
496 
497 typedef struct if_pkt_info_pad {
498 	PKT_TYPE pkt_val[PKT_INFO_SIZE];
499 } *if_pkt_info_pad_t;
500 typedef struct if_rxd_info_pad {
501 	PKT_TYPE rxd_val[RXD_INFO_SIZE];
502 } *if_rxd_info_pad_t;
503 
504 CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
505 CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
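/*
 * The *_pad structs overlay if_pkt_info and if_rxd_info so that they can
 * be cleared with a few word-sized stores instead of a bzero() call; the
 * CTASSERTs above keep the overlays in sync with the real structs.  If a
 * field is added to if_pkt_info without growing PKT_INFO_SIZE to match,
 * the build fails here.
 */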
506 
507 
508 static inline void
509 pkt_info_zero(if_pkt_info_t pi)
510 {
511 	if_pkt_info_pad_t pi_pad;
512 
513 	pi_pad = (if_pkt_info_pad_t)pi;
514 	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
515 	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
516 #ifndef __LP64__
517 	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
518 	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
519 #endif
520 }
521 
522 static device_method_t iflib_pseudo_methods[] = {
523 	DEVMETHOD(device_attach, noop_attach),
524 	DEVMETHOD(device_detach, iflib_pseudo_detach),
525 	DEVMETHOD_END
526 };
527 
528 driver_t iflib_pseudodriver = {
529 	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
530 };
531 
532 static inline void
533 rxd_info_zero(if_rxd_info_t ri)
534 {
535 	if_rxd_info_pad_t ri_pad;
536 	int i;
537 
538 	ri_pad = (if_rxd_info_pad_t)ri;
539 	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
540 		ri_pad->rxd_val[i] = 0;
541 		ri_pad->rxd_val[i+1] = 0;
542 		ri_pad->rxd_val[i+2] = 0;
543 		ri_pad->rxd_val[i+3] = 0;
544 	}
545 #ifdef __LP64__
546 	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
547 #endif
548 }
549 
550 /*
551  * Only allow a single packet to take up at most 1/nth of the tx ring
552  */
553 #define MAX_SINGLE_PACKET_FRACTION 12
554 #define IF_BAD_DMA (bus_addr_t)-1
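/*
 * For example, with a 1024-descriptor tx ring and
 * MAX_SINGLE_PACKET_FRACTION of 12, a single packet is limited to
 * 1024 / 12 = 85 descriptors.
 */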
555 
556 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
557 
558 #define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
559 #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
560 #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
561 #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
562 
563 
564 #define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
565 #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
566 #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
567 #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
568 
569 
570 
571 #define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
572 #define CALLOUT_UNLOCK(txq) 	mtx_unlock(&txq->ift_mtx)
573 
574 
575 /* Our boot-time initialization hook */
576 static int	iflib_module_event_handler(module_t, int, void *);
577 
578 static moduledata_t iflib_moduledata = {
579 	"iflib",
580 	iflib_module_event_handler,
581 	NULL
582 };
583 
584 DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
585 MODULE_VERSION(iflib, 1);
586 
587 MODULE_DEPEND(iflib, pci, 1, 1, 1);
588 MODULE_DEPEND(iflib, ether, 1, 1, 1);
589 
590 TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
591 TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
592 
593 #ifndef IFLIB_DEBUG_COUNTERS
594 #ifdef INVARIANTS
595 #define IFLIB_DEBUG_COUNTERS 1
596 #else
597 #define IFLIB_DEBUG_COUNTERS 0
598 #endif /* !INVARIANTS */
599 #endif
600 
601 static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
602                    "iflib driver parameters");
603 
604 /*
605  * XXX need to ensure that this can't accidentally cause the head to be moved backwards
606  */
607 static int iflib_min_tx_latency = 0;
608 SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
609 		   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
610 static int iflib_no_tx_batch = 0;
611 SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
612 		   &iflib_no_tx_batch, 0, "avoid batching transmits at the possible expense of throughput");
613 
614 
615 #if IFLIB_DEBUG_COUNTERS
616 
617 static int iflib_tx_seen;
618 static int iflib_tx_sent;
619 static int iflib_tx_encap;
620 static int iflib_rx_allocs;
621 static int iflib_fl_refills;
622 static int iflib_fl_refills_large;
623 static int iflib_tx_frees;
624 
625 SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
626 		   &iflib_tx_seen, 0, "# tx mbufs seen");
627 SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
628 		   &iflib_tx_sent, 0, "# tx mbufs sent");
629 SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
630 		   &iflib_tx_encap, 0, "# tx mbufs encapped");
631 SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
632 		   &iflib_tx_frees, 0, "# tx frees");
633 SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
634 		   &iflib_rx_allocs, 0, "# rx allocations");
635 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
636 		   &iflib_fl_refills, 0, "# refills");
637 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
638 		   &iflib_fl_refills_large, 0, "# large refills");
639 
640 
641 static int iflib_txq_drain_flushing;
642 static int iflib_txq_drain_oactive;
643 static int iflib_txq_drain_notready;
644 static int iflib_txq_drain_encapfail;
645 
646 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
647 		   &iflib_txq_drain_flushing, 0, "# drain flushes");
648 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
649 		   &iflib_txq_drain_oactive, 0, "# drain oactives");
650 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
651 		   &iflib_txq_drain_notready, 0, "# drain notready");
652 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
653 		   &iflib_txq_drain_encapfail, 0, "# drain encap fails");
654 
655 
656 static int iflib_encap_load_mbuf_fail;
657 static int iflib_encap_pad_mbuf_fail;
658 static int iflib_encap_txq_avail_fail;
659 static int iflib_encap_txd_encap_fail;
660 
661 SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
662 		   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
663 SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
664 		   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
665 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
666 		   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
667 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
668 		   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
669 
670 static int iflib_task_fn_rxs;
671 static int iflib_rx_intr_enables;
672 static int iflib_fast_intrs;
673 static int iflib_intr_link;
674 static int iflib_intr_msix;
675 static int iflib_rx_unavail;
676 static int iflib_rx_ctx_inactive;
677 static int iflib_rx_zero_len;
678 static int iflib_rx_if_input;
679 static int iflib_rx_mbuf_null;
680 static int iflib_rxd_flush;
681 
682 static int iflib_verbose_debug;
683 
684 SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
685 		   &iflib_intr_link, 0, "# intr link calls");
686 SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
687 		   &iflib_intr_msix, 0, "# intr msix calls");
688 SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
689 		   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
690 SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
691 		   &iflib_rx_intr_enables, 0, "# rx intr enables");
692 SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
693 		   &iflib_fast_intrs, 0, "# fast_intr calls");
694 SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
695 		   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
696 SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
697 		   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
698 SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
699 		   &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
700 SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
701 		   &iflib_rx_if_input, 0, "# times rxeof called if_input");
702 SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
703 		   &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
704 SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
705 	         &iflib_rxd_flush, 0, "# times rxd_flush called");
706 SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
707 		   &iflib_verbose_debug, 0, "enable verbose debugging");
708 
709 #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
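/*
 * For example, DBG_COUNTER_INC(tx_seen) atomically increments
 * iflib_tx_seen.  When IFLIB_DEBUG_COUNTERS is 0, the variant of this
 * macro defined below expands to nothing, so the counters cost nothing
 * in production builds.
 */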
710 static void
711 iflib_debug_reset(void)
712 {
713 	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
714 		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
715 		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
716 		iflib_txq_drain_notready = iflib_txq_drain_encapfail =
717 		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
718 		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
719 		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
720 		iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
721 		iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
722 		iflib_rx_mbuf_null = iflib_rxd_flush = 0;
723 }
724 
725 #else
726 #define DBG_COUNTER_INC(name)
727 static void iflib_debug_reset(void) {}
728 #endif
729 
730 #define IFLIB_DEBUG 0
731 
732 static void iflib_tx_structures_free(if_ctx_t ctx);
733 static void iflib_rx_structures_free(if_ctx_t ctx);
734 static int iflib_queues_alloc(if_ctx_t ctx);
735 static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
736 static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
737 static int iflib_qset_structures_setup(if_ctx_t ctx);
738 static int iflib_msix_init(if_ctx_t ctx);
739 static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
740 static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
741 static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
742 #ifdef ALTQ
743 static void iflib_altq_if_start(if_t ifp);
744 static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
745 #endif
746 static int iflib_register(if_ctx_t);
747 static void iflib_init_locked(if_ctx_t ctx);
748 static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
749 static void iflib_add_device_sysctl_post(if_ctx_t ctx);
750 static void iflib_ifmp_purge(iflib_txq_t txq);
751 static void _iflib_pre_assert(if_softc_ctx_t scctx);
752 static void iflib_if_init_locked(if_ctx_t ctx);
753 #ifndef __NO_STRICT_ALIGNMENT
754 static struct mbuf * iflib_fixup_rx(struct mbuf *m);
755 #endif
756 
757 NETDUMP_DEFINE(iflib);
758 
759 #ifdef DEV_NETMAP
760 #include <sys/selinfo.h>
761 #include <net/netmap.h>
762 #include <dev/netmap/netmap_kern.h>
763 
764 MODULE_DEPEND(iflib, netmap, 1, 1, 1);
765 
766 static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);
767 
768 /*
769  * device-specific sysctl variables:
770  *
771  * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
772  *	During regular operations the CRC is stripped, but on some
773  *	hardware reception of frames not multiple of 64 is slower,
774  *	so using crcstrip=0 helps in benchmarks.
775  *
776  * iflib_rx_miss, iflib_rx_miss_bufs:
777  *	count packets that might be missed due to lost interrupts.
778  */
779 SYSCTL_DECL(_dev_netmap);
780 /*
781  * The xl driver by default strips CRCs and we do not override it.
782  */
783 
784 int iflib_crcstrip = 1;
785 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
786     CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");
787 
788 int iflib_rx_miss, iflib_rx_miss_bufs;
789 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
790     CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
791 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
792     CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");
793 
794 /*
795  * Register/unregister. We are already under netmap lock.
796  * Only called on the first register or the last unregister.
797  */
798 static int
799 iflib_netmap_register(struct netmap_adapter *na, int onoff)
800 {
801 	struct ifnet *ifp = na->ifp;
802 	if_ctx_t ctx = ifp->if_softc;
803 	int status;
804 
805 	CTX_LOCK(ctx);
806 	IFDI_INTR_DISABLE(ctx);
807 
808 	/* Tell the stack that the interface is no longer active */
809 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
810 
811 	if (!CTX_IS_VF(ctx))
812 		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
813 
814 	/* enable or disable flags and callbacks in na and ifp */
815 	if (onoff) {
816 		nm_set_native_flags(na);
817 	} else {
818 		nm_clear_native_flags(na);
819 	}
820 	iflib_stop(ctx);
821 	iflib_init_locked(ctx);
822 	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);	/* XXX why twice? */
823 	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
824 	if (status)
825 		nm_clear_native_flags(na);
826 	CTX_UNLOCK(ctx);
827 	return (status);
828 }
829 
830 static int
831 netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
832 {
833 	struct netmap_adapter *na = kring->na;
834 	u_int const lim = kring->nkr_num_slots - 1;
835 	u_int head = kring->rhead;
836 	struct netmap_ring *ring = kring->ring;
837 	bus_dmamap_t *map;
838 	struct if_rxd_update iru;
839 	if_ctx_t ctx = rxq->ifr_ctx;
840 	iflib_fl_t fl = &rxq->ifr_fl[0];
841 	uint32_t refill_pidx, nic_i;
842 
843 	if (nm_i == head && __predict_true(!init))
844 		return (0);
845 	iru_init(&iru, rxq, 0 /* flid */);
846 	map = fl->ifl_sds.ifsd_map;
847 	refill_pidx = netmap_idx_k2n(kring, nm_i);
848 	/*
849 	 * IMPORTANT: we must leave one free slot in the ring,
850 	 * so move head back by one unit
851 	 */
852 	head = nm_prev(head, lim);
853 	nic_i = UINT_MAX;
854 	while (nm_i != head) {
855 		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
856 			struct netmap_slot *slot = &ring->slot[nm_i];
857 			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
858 			uint32_t nic_i_dma = refill_pidx;
859 			nic_i = netmap_idx_k2n(kring, nm_i);
860 
861 			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
862 
863 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
864 				return (netmap_ring_reinit(kring));
865 
866 			fl->ifl_vm_addrs[tmp_pidx] = addr;
867 			if (__predict_false(init) && map) {
868 				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
869 			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
870 				/* buffer has changed, reload map */
871 				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
872 			}
873 			slot->flags &= ~NS_BUF_CHANGED;
874 
875 			nm_i = nm_next(nm_i, lim);
876 			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
877 			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
878 				continue;
879 
880 			iru.iru_pidx = refill_pidx;
881 			iru.iru_count = tmp_pidx+1;
882 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
883 
884 			refill_pidx = nic_i;
885 			if (map == NULL)
886 				continue;
887 
888 			for (int n = 0; n < iru.iru_count; n++) {
889 				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
890 						BUS_DMASYNC_PREREAD);
891 				/* XXX - change this to not use the netmap func*/
892 				nic_i_dma = nm_next(nic_i_dma, lim);
893 			}
894 		}
895 	}
896 	kring->nr_hwcur = head;
897 
898 	if (map)
899 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
900 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
901 	if (__predict_true(nic_i != UINT_MAX))
902 		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
903 	return (0);
904 }
905 
906 /*
907  * Reconcile kernel and user view of the transmit ring.
908  *
909  * All information is in the kring.
910  * Userspace wants to send packets up to the one before kring->rhead,
911  * kernel knows kring->nr_hwcur is the first unsent packet.
912  *
913  * Here we push packets out (as many as possible), and possibly
914  * reclaim buffers from previously completed transmission.
915  *
916  * The caller (netmap) guarantees that there is only one instance
917  * running at any time. Any interference with other driver
918  * methods should be handled by the individual drivers.
919  */
920 static int
921 iflib_netmap_txsync(struct netmap_kring *kring, int flags)
922 {
923 	struct netmap_adapter *na = kring->na;
924 	struct ifnet *ifp = na->ifp;
925 	struct netmap_ring *ring = kring->ring;
926 	u_int nm_i;	/* index into the netmap kring */
927 	u_int nic_i;	/* index into the NIC ring */
928 	u_int n;
929 	u_int const lim = kring->nkr_num_slots - 1;
930 	u_int const head = kring->rhead;
931 	struct if_pkt_info pi;
932 
933 	/*
934 	 * interrupts on every tx packet are expensive so request
935 	 * them every half ring, or where NS_REPORT is set
936 	 */
937 	u_int report_frequency = kring->nkr_num_slots >> 1;
938 	/* device-specific */
939 	if_ctx_t ctx = ifp->if_softc;
940 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
941 
942 	if (txq->ift_sds.ifsd_map)
943 		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
944 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
945 
946 
947 	/*
948 	 * First part: process new packets to send.
949 	 * nm_i is the current index in the netmap kring,
950 	 * nic_i is the corresponding index in the NIC ring.
951 	 *
952 	 * If we have packets to send (nm_i != head)
953 	 * iterate over the netmap ring, fetch length and update
954 	 * the corresponding slot in the NIC ring. Some drivers also
955 	 * need to update the buffer's physical address in the NIC slot
956 	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
957 	 *
958 	 * The netmap_reload_map() call is especially expensive, even
959 	 * when (as in this case) the tag is 0, so only do it when the
960 	 * buffer has actually changed.
961 	 *
962 	 * If possible do not set the report/intr bit on all slots,
963 	 * but only a few times per ring or when NS_REPORT is set.
964 	 *
965 	 * Finally, on 10G and faster drivers, it might be useful
966 	 * to prefetch the next slot and txr entry.
967 	 */
968 
969 	nm_i = kring->nr_hwcur;
970 	if (nm_i != head) {	/* we have new packets to send */
971 		pkt_info_zero(&pi);
972 		pi.ipi_segs = txq->ift_segs;
973 		pi.ipi_qsidx = kring->ring_id;
974 		nic_i = netmap_idx_k2n(kring, nm_i);
975 
976 		__builtin_prefetch(&ring->slot[nm_i]);
977 		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
978 		if (txq->ift_sds.ifsd_map)
979 			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
980 
981 		for (n = 0; nm_i != head; n++) {
982 			struct netmap_slot *slot = &ring->slot[nm_i];
983 			u_int len = slot->len;
984 			uint64_t paddr;
985 			void *addr = PNMB(na, slot, &paddr);
986 			int flags = (slot->flags & NS_REPORT ||
987 				nic_i == 0 || nic_i == report_frequency) ?
988 				IPI_TX_INTR : 0;
989 
990 			/* device-specific */
991 			pi.ipi_len = len;
992 			pi.ipi_segs[0].ds_addr = paddr;
993 			pi.ipi_segs[0].ds_len = len;
994 			pi.ipi_nsegs = 1;
995 			pi.ipi_ndescs = 0;
996 			pi.ipi_pidx = nic_i;
997 			pi.ipi_flags = flags;
998 
999 			/* Fill the slot in the NIC ring. */
1000 			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
1001 
1002 			/* prefetch for next round */
1003 			__builtin_prefetch(&ring->slot[nm_i + 1]);
1004 			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
1005 			if (txq->ift_sds.ifsd_map) {
1006 				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
1007 
1008 				NM_CHECK_ADDR_LEN(na, addr, len);
1009 
1010 				if (slot->flags & NS_BUF_CHANGED) {
1011 					/* buffer has changed, reload map */
1012 					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
1013 				}
1014 				/* make sure changes to the buffer are synced */
1015 				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
1016 						BUS_DMASYNC_PREWRITE);
1017 			}
1018 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
1019 			nm_i = nm_next(nm_i, lim);
1020 			nic_i = nm_next(nic_i, lim);
1021 		}
1022 		kring->nr_hwcur = nm_i;
1023 
1024 		/* synchronize the NIC ring */
1025 		if (txq->ift_sds.ifsd_map)
1026 			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
1027 						BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1028 
1029 		/* (re)start the tx unit up to slot nic_i (excluded) */
1030 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
1031 	}
1032 
1033 	/*
1034 	 * Second part: reclaim buffers for completed transmissions.
1035 	 *
1036 	 * If there are unclaimed buffers, attempt to reclaim them.
1037 	 * If none are reclaimed, and TX IRQs are not in use, do an initial
1038 	 * minimal delay, then trigger the tx handler which will spin in the
1039 	 * group task queue.
1040 	 */
1041 	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1042 		if (iflib_tx_credits_update(ctx, txq)) {
1043 			/* some tx completed, increment avail */
1044 			nic_i = txq->ift_cidx_processed;
1045 			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
1046 		}
1047 	}
1048 	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
1049 		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1050 			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
1051 			    iflib_timer, txq, txq->ift_timer.c_cpu);
1052 		}
1053 	return (0);
1054 }
1055 
1056 /*
1057  * Reconcile kernel and user view of the receive ring.
1058  * Same as for the txsync, this routine must be efficient.
1059  * The caller guarantees a single invocation, but races against
1060  * the rest of the driver should be handled here.
1061  *
1062  * On call, kring->rhead is the first packet that userspace wants
1063  * to keep, and kring->rcur is the wakeup point.
1064  * The kernel has previously reported packets up to kring->rtail.
1065  *
1066  * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
1067  * of whether or not we received an interrupt.
1068  */
1069 static int
1070 iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
1071 {
1072 	struct netmap_adapter *na = kring->na;
1073 	struct netmap_ring *ring = kring->ring;
1074 	uint32_t nm_i;	/* index into the netmap ring */
1075 	uint32_t nic_i;	/* index into the NIC ring */
1076 	u_int i, n;
1077 	u_int const lim = kring->nkr_num_slots - 1;
1078 	u_int const head = kring->rhead;
1079 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1080 	struct if_rxd_info ri;
1081 
1082 	struct ifnet *ifp = na->ifp;
1083 	if_ctx_t ctx = ifp->if_softc;
1084 	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
1085 	iflib_fl_t fl = rxq->ifr_fl;
1086 	if (head > lim)
1087 		return (netmap_ring_reinit(kring));
1088 
1089 	/* XXX check sync modes */
1090 	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
1091 		if (fl->ifl_sds.ifsd_map == NULL)
1092 			continue;
1093 		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
1094 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1095 	}
1096 	/*
1097 	 * First part: import newly received packets.
1098 	 *
1099 	 * nm_i is the index of the next free slot in the netmap ring,
1100 	 * nic_i is the index of the next received packet in the NIC ring,
1101 	 * and they may differ if if_init() has been called while
1102 	 * in netmap mode. For the receive ring we have
1103 	 *
1104 	 *	nic_i = rxr->next_check;
1105 	 *	nm_i = kring->nr_hwtail (previous)
1106 	 * and
1107 	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1108 	 *
1109 	 * rxr->next_check is set to 0 on a ring reinit
1110 	 */
1111 	if (netmap_no_pendintr || force_update) {
1112 		int crclen = iflib_crcstrip ? 0 : 4;
1113 		int error, avail;
1114 
1115 		for (i = 0; i < rxq->ifr_nfl; i++) {
1116 			fl = &rxq->ifr_fl[i];
1117 			nic_i = fl->ifl_cidx;
1118 			nm_i = netmap_idx_n2k(kring, nic_i);
1119 			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
1120 			for (n = 0; avail > 0; n++, avail--) {
1121 				rxd_info_zero(&ri);
1122 				ri.iri_frags = rxq->ifr_frags;
1123 				ri.iri_qsidx = kring->ring_id;
1124 				ri.iri_ifp = ctx->ifc_ifp;
1125 				ri.iri_cidx = nic_i;
1126 
1127 				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1128 				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
1129 				ring->slot[nm_i].flags = 0;
1130 				if (fl->ifl_sds.ifsd_map)
1131 					bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
1132 							fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
1133 				nm_i = nm_next(nm_i, lim);
1134 				nic_i = nm_next(nic_i, lim);
1135 			}
1136 			if (n) { /* update the state variables */
1137 				if (netmap_no_pendintr && !force_update) {
1138 					/* diagnostics */
1139 					iflib_rx_miss++;
1140 					iflib_rx_miss_bufs += n;
1141 				}
1142 				fl->ifl_cidx = nic_i;
1143 				kring->nr_hwtail = nm_i;
1144 			}
1145 			kring->nr_kflags &= ~NKR_PENDINTR;
1146 		}
1147 	}
1148 	/*
1149 	 * Second part: skip past packets that userspace has released.
1150 	 * (kring->nr_hwcur to head excluded),
1151 	 * and make the buffers available for reception.
1152 	 * As usual nm_i is the index in the netmap ring,
1153 	 * nic_i is the index in the NIC ring, and
1154 	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1155 	 */
1156 	/* XXX not sure how this will work with multiple free lists */
1157 	nm_i = kring->nr_hwcur;
1158 
1159 	return (netmap_fl_refill(rxq, kring, nm_i, false));
1160 }
1161 
1162 static void
1163 iflib_netmap_intr(struct netmap_adapter *na, int onoff)
1164 {
1165 	struct ifnet *ifp = na->ifp;
1166 	if_ctx_t ctx = ifp->if_softc;
1167 
1168 	CTX_LOCK(ctx);
1169 	if (onoff) {
1170 		IFDI_INTR_ENABLE(ctx);
1171 	} else {
1172 		IFDI_INTR_DISABLE(ctx);
1173 	}
1174 	CTX_UNLOCK(ctx);
1175 }
1176 
1177 
1178 static int
1179 iflib_netmap_attach(if_ctx_t ctx)
1180 {
1181 	struct netmap_adapter na;
1182 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1183 
1184 	bzero(&na, sizeof(na));
1185 
1186 	na.ifp = ctx->ifc_ifp;
1187 	na.na_flags = NAF_BDG_MAYSLEEP;
1188 	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
1189 	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
1190 
1191 	na.num_tx_desc = scctx->isc_ntxd[0];
1192 	na.num_rx_desc = scctx->isc_nrxd[0];
1193 	na.nm_txsync = iflib_netmap_txsync;
1194 	na.nm_rxsync = iflib_netmap_rxsync;
1195 	na.nm_register = iflib_netmap_register;
1196 	na.nm_intr = iflib_netmap_intr;
1197 	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
1198 	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
1199 	return (netmap_attach(&na));
1200 }
1201 
1202 static void
1203 iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
1204 {
1205 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1206 	struct netmap_slot *slot;
1207 
1208 	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1209 	if (slot == NULL)
1210 		return;
1211 	if (txq->ift_sds.ifsd_map == NULL)
1212 		return;
1213 
1214 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
1215 
1216 		/*
1217 		 * In netmap mode, set the map for the packet buffer.
1218 		 * NOTE: Some drivers (not this one) also need to set
1219 		 * the physical buffer address in the NIC ring.
1220 		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
1221 		 * netmap slot index, si
1222 		 */
1223 		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
1224 		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
1225 	}
1226 }
1227 
1228 static void
1229 iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
1230 {
1231 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1232 	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
1233 	struct netmap_slot *slot;
1234 	uint32_t nm_i;
1235 
1236 	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1237 	if (slot == NULL)
1238 		return;
1239 	nm_i = netmap_idx_n2k(kring, 0);
1240 	netmap_fl_refill(rxq, kring, nm_i, true);
1241 }
1242 
1243 static void
1244 iflib_netmap_timer_adjust(if_ctx_t ctx, uint16_t txqid, uint32_t *reset_on)
1245 {
1246 	struct netmap_kring *kring;
1247 
1248 	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];
1249 
1250 	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
1251 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
1252 			netmap_tx_irq(ctx->ifc_ifp, txqid);
1253 		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
1254 			if (hz < 2000)
1255 				*reset_on = 1;
1256 			else
1257 				*reset_on = hz / 1000;
1258 		}
1259 	}
1260 }
1261 
1262 #define iflib_netmap_detach(ifp) netmap_detach(ifp)
1263 
1264 #else
1265 #define iflib_netmap_txq_init(ctx, txq)
1266 #define iflib_netmap_rxq_init(ctx, rxq)
1267 #define iflib_netmap_detach(ifp)
1268 
1269 #define iflib_netmap_attach(ctx) (0)
1270 #define netmap_rx_irq(ifp, qid, budget) (0)
1271 #define netmap_tx_irq(ifp, qid) do {} while (0)
1272 #define iflib_netmap_timer_adjust(ctx, txqid, reset_on)
1273 
1274 #endif
1275 
1276 #if defined(__i386__) || defined(__amd64__)
1277 static __inline void
1278 prefetch(void *x)
1279 {
1280 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1281 }
1282 static __inline void
1283 prefetch2cachelines(void *x)
1284 {
1285 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1286 #if (CACHE_LINE_SIZE < 128)
1287 	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
1288 #endif
1289 }
1290 #else
1291 #define prefetch(x)
1292 #define prefetch2cachelines(x)
1293 #endif
1294 
1295 static void
1296 iflib_gen_mac(if_ctx_t ctx)
1297 {
1298 	struct thread *td;
1299 	MD5_CTX mdctx;
1300 	char uuid[HOSTUUIDLEN+1];
1301 	char buf[HOSTUUIDLEN+16];
1302 	uint8_t *mac;
1303 	unsigned char digest[16];
1304 
1305 	td = curthread;
1306 	mac = ctx->ifc_mac;
1307 	uuid[HOSTUUIDLEN] = 0;
1308 	bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
1309 	snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
1310 	/*
1311 	 * Generate a pseudo-random, deterministic MAC
1312 	 * address based on the UUID and unit number.
1313 	 * The FreeBSD Foundation OUI of 58-9C-FC is used.
1314 	 */
1315 	MD5Init(&mdctx);
1316 	MD5Update(&mdctx, buf, strlen(buf));
1317 	MD5Final(digest, &mdctx);
1318 
1319 	mac[0] = 0x58;
1320 	mac[1] = 0x9C;
1321 	mac[2] = 0xFC;
1322 	mac[3] = digest[0];
1323 	mac[4] = digest[1];
1324 	mac[5] = digest[2];
1325 }
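/*
 * The resulting address is 58:9c:fc:XX:XX:XX, where the XX bytes are the
 * first three bytes of MD5(hostuuid "-" nameunit).  It is therefore
 * stable across reboots for a given host uuid and device name, but
 * differs per device instance.
 */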
1326 
1327 static void
1328 iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
1329 {
1330 	iflib_fl_t fl;
1331 
1332 	fl = &rxq->ifr_fl[flid];
1333 	iru->iru_paddrs = fl->ifl_bus_addrs;
1334 	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
1335 	iru->iru_idxs = fl->ifl_rxd_idxs;
1336 	iru->iru_qsidx = rxq->ifr_id;
1337 	iru->iru_buf_size = fl->ifl_buf_size;
1338 	iru->iru_flidx = fl->ifl_id;
1339 }
1340 
1341 static void
1342 _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
1343 {
1344 	if (err)
1345 		return;
1346 	*(bus_addr_t *) arg = segs[0].ds_addr;
1347 }
1348 
1349 int
1350 iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
1351 {
1352 	int err;
1353 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1354 	device_t dev = ctx->ifc_dev;
1355 
1356 	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
1357 
1358 	err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1359 				sctx->isc_q_align, 0,	/* alignment, bounds */
1360 				BUS_SPACE_MAXADDR,	/* lowaddr */
1361 				BUS_SPACE_MAXADDR,	/* highaddr */
1362 				NULL, NULL,		/* filter, filterarg */
1363 				size,			/* maxsize */
1364 				1,			/* nsegments */
1365 				size,			/* maxsegsize */
1366 				BUS_DMA_ALLOCNOW,	/* flags */
1367 				NULL,			/* lockfunc */
1368 				NULL,			/* lockarg */
1369 				&dma->idi_tag);
1370 	if (err) {
1371 		device_printf(dev,
1372 		    "%s: bus_dma_tag_create failed: %d\n",
1373 		    __func__, err);
1374 		goto fail_0;
1375 	}
1376 
1377 	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
1378 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
1379 	if (err) {
1380 		device_printf(dev,
1381 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
1382 		    __func__, (uintmax_t)size, err);
1383 		goto fail_1;
1384 	}
1385 
1386 	dma->idi_paddr = IF_BAD_DMA;
1387 	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
1388 	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
1389 	if (err || dma->idi_paddr == IF_BAD_DMA) {
1390 		device_printf(dev,
1391 		    "%s: bus_dmamap_load failed: %d\n",
1392 		    __func__, err);
1393 		goto fail_2;
1394 	}
1395 
1396 	dma->idi_size = size;
1397 	return (0);
1398 
1399 fail_2:
1400 	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1401 fail_1:
1402 	bus_dma_tag_destroy(dma->idi_tag);
1403 fail_0:
1404 	dma->idi_tag = NULL;
1405 
1406 	return (err);
1407 }
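/*
 * A minimal usage sketch; "struct mydesc" and the softc fields are
 * hypothetical, not part of iflib:
 *
 *	struct iflib_dma_info di;
 *
 *	if (iflib_dma_alloc(ctx, sizeof(struct mydesc) * nrxd, &di,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	sc->rx_ring = (struct mydesc *)di.idi_vaddr;
 *	sc->rx_ring_paddr = di.idi_paddr;
 *	...
 *	iflib_dma_free(&di);
 */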
1408 
1409 int
1410 iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
1411 {
1412 	int i, err = 0;
1413 	iflib_dma_info_t *dmaiter;
1414 
1415 	dmaiter = dmalist;
1416 	for (i = 0; i < count; i++, dmaiter++) {
1417 		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
1418 			break;
1419 	}
1420 	if (err)
1421 		iflib_dma_free_multi(dmalist, i);
1422 	return (err);
1423 }
1424 
1425 void
1426 iflib_dma_free(iflib_dma_info_t dma)
1427 {
1428 	if (dma->idi_tag == NULL)
1429 		return;
1430 	if (dma->idi_paddr != IF_BAD_DMA) {
1431 		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
1432 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1433 		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
1434 		dma->idi_paddr = IF_BAD_DMA;
1435 	}
1436 	if (dma->idi_vaddr != NULL) {
1437 		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1438 		dma->idi_vaddr = NULL;
1439 	}
1440 	bus_dma_tag_destroy(dma->idi_tag);
1441 	dma->idi_tag = NULL;
1442 }
1443 
1444 void
1445 iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
1446 {
1447 	int i;
1448 	iflib_dma_info_t *dmaiter = dmalist;
1449 
1450 	for (i = 0; i < count; i++, dmaiter++)
1451 		iflib_dma_free(*dmaiter);
1452 }
1453 
1454 #ifdef EARLY_AP_STARTUP
1455 static const int iflib_started = 1;
1456 #else
1457 /*
1458  * We used to abuse the smp_started flag to decide if the queues have been
1459  * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
1460  * That gave bad races, since the SYSINIT() runs strictly after smp_started
1461  * is set.  Run a SYSINIT() strictly after that to just set a usable
1462  * completion flag.
1463  */
1464 
1465 static int iflib_started;
1466 
1467 static void
1468 iflib_record_started(void *arg)
1469 {
1470 	iflib_started = 1;
1471 }
1472 
1473 SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
1474 	iflib_record_started, NULL);
1475 #endif
1476 
1477 static int
1478 iflib_fast_intr(void *arg)
1479 {
1480 	iflib_filter_info_t info = arg;
1481 	struct grouptask *gtask = info->ifi_task;
1482 	if (!iflib_started)
1483 		return (FILTER_HANDLED);
1484 
1485 	DBG_COUNTER_INC(fast_intrs);
1486 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
1487 		return (FILTER_HANDLED);
1488 
1489 	GROUPTASK_ENQUEUE(gtask);
1490 	return (FILTER_HANDLED);
1491 }
1492 
1493 static int
1494 iflib_fast_intr_rxtx(void *arg)
1495 {
1496 	iflib_filter_info_t info = arg;
1497 	struct grouptask *gtask = info->ifi_task;
1498 	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
1499 	if_ctx_t ctx = NULL;
1500 	int i, cidx;
1501 
1502 	if (!iflib_started)
1503 		return (FILTER_HANDLED);
1504 
1505 	DBG_COUNTER_INC(fast_intrs);
1506 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
1507 		return (FILTER_HANDLED);
1508 
1509 	MPASS(rxq->ifr_ntxqirq);
1510 	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
1511 		qidx_t txqid = rxq->ifr_txqid[i];
1512 
1513 		ctx = rxq->ifr_ctx;
1514 
1515 		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
1516 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
1517 			continue;
1518 		}
1519 		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
1520 	}
1521 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
1522 		cidx = rxq->ifr_cq_cidx;
1523 	else
1524 		cidx = rxq->ifr_fl[0].ifl_cidx;
1525 	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
1526 		GROUPTASK_ENQUEUE(gtask);
1527 	else
1528 		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
1529 	return (FILTER_HANDLED);
1530 }
1531 
1532 
1533 static int
1534 iflib_fast_intr_ctx(void *arg)
1535 {
1536 	iflib_filter_info_t info = arg;
1537 	struct grouptask *gtask = info->ifi_task;
1538 
1539 	if (!iflib_started)
1540 		return (FILTER_HANDLED);
1541 
1542 	DBG_COUNTER_INC(fast_intrs);
1543 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
1544 		return (FILTER_HANDLED);
1545 
1546 	GROUPTASK_ENQUEUE(gtask);
1547 	return (FILTER_HANDLED);
1548 }
1549 
1550 static int
1551 _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
1552 		 driver_filter_t filter, driver_intr_t handler, void *arg,
1553 		 const char *name)
1554 {
1555 	int rc, flags;
1556 	struct resource *res;
1557 	void *tag = NULL;
1558 	device_t dev = ctx->ifc_dev;
1559 
1560 	flags = RF_ACTIVE;
1561 	if (ctx->ifc_flags & IFC_LEGACY)
1562 		flags |= RF_SHAREABLE;
1563 	MPASS(rid < 512);
1564 	irq->ii_rid = rid;
1565 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
1566 	if (res == NULL) {
1567 		device_printf(dev,
1568 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
1569 		return (ENOMEM);
1570 	}
1571 	irq->ii_res = res;
1572 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
1573 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
1574 						filter, handler, arg, &tag);
1575 	if (rc != 0) {
1576 		device_printf(dev,
1577 		    "failed to setup interrupt for rid %d, name %s: %d\n",
1578 					  rid, name ? name : "unknown", rc);
1579 		return (rc);
1580 	} else if (name)
1581 		bus_describe_intr(dev, res, tag, "%s", name);
1582 
1583 	irq->ii_tag = tag;
1584 	return (0);
1585 }
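/*
 * A minimal caller sketch (the rid and name are illustrative): allocate
 * an interrupt with a fast filter and no ithread handler, as the MSI-X
 * setup path does.  Per the KASSERT above, filter and handler must not
 * both be non-NULL:
 *
 *	err = _iflib_irq_alloc(ctx, irq, 1, iflib_fast_intr, NULL,
 *	    &ctx->ifc_filter_info, "ctx");
 */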
1586 
1587 
1588 /*********************************************************************
1589  *
1590  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1591  *  the information needed to transmit a packet on the wire. This is
1592  *  called only once at attach, setup is done every reset.
1593  *
1594  **********************************************************************/
1595 
1596 static int
1597 iflib_txsd_alloc(iflib_txq_t txq)
1598 {
1599 	if_ctx_t ctx = txq->ift_ctx;
1600 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1601 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1602 	device_t dev = ctx->ifc_dev;
1603 	bus_size_t tsomaxsize;
1604 	int err, nsegments, ntsosegments;
1605 
1606 	nsegments = scctx->isc_tx_nsegments;
1607 	ntsosegments = scctx->isc_tx_tso_segments_max;
1608 	tsomaxsize = scctx->isc_tx_tso_size_max;
1609 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
1610 		tsomaxsize += sizeof(struct ether_vlan_header);
1611 	MPASS(scctx->isc_ntxd[0] > 0);
1612 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
1613 	MPASS(nsegments > 0);
1614 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
1615 		MPASS(ntsosegments > 0);
1616 		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
1617 	}
1618 
1619 	/*
1620 	 * Setup DMA descriptor areas.
1621 	 */
1622 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1623 			       1, 0,			/* alignment, bounds */
1624 			       BUS_SPACE_MAXADDR,	/* lowaddr */
1625 			       BUS_SPACE_MAXADDR,	/* highaddr */
1626 			       NULL, NULL,		/* filter, filterarg */
1627 			       sctx->isc_tx_maxsize,		/* maxsize */
1628 			       nsegments,	/* nsegments */
1629 			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
1630 			       0,			/* flags */
1631 			       NULL,			/* lockfunc */
1632 			       NULL,			/* lockfuncarg */
1633 			       &txq->ift_desc_tag))) {
1634 		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
1635 		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
1636 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
1637 		goto fail;
1638 	}
1639 	if ((if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) &&
1640 	    (err = bus_dma_tag_create(bus_get_dma_tag(dev),
1641 			       1, 0,			/* alignment, bounds */
1642 			       BUS_SPACE_MAXADDR,	/* lowaddr */
1643 			       BUS_SPACE_MAXADDR,	/* highaddr */
1644 			       NULL, NULL,		/* filter, filterarg */
1645 			       tsomaxsize,		/* maxsize */
1646 			       ntsosegments,	/* nsegments */
1647 			       sctx->isc_tso_maxsegsize,/* maxsegsize */
1648 			       0,			/* flags */
1649 			       NULL,			/* lockfunc */
1650 			       NULL,			/* lockfuncarg */
1651 			       &txq->ift_tso_desc_tag))) {
1652 		device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);
1653 
1654 		goto fail;
1655 	}
1656 	if (!(txq->ift_sds.ifsd_flags =
1657 	    (uint8_t *) malloc(sizeof(uint8_t) *
1658 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1659 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
1660 		err = ENOMEM;
1661 		goto fail;
1662 	}
1663 	if (!(txq->ift_sds.ifsd_m =
1664 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1665 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1666 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
1667 		err = ENOMEM;
1668 		goto fail;
1669 	}
1670 
1671 	/* Create the descriptor buffer dma maps */
1672 #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
1673 	if ((ctx->ifc_flags & IFC_DMAR) == 0)
1674 		return (0);
1675 
1676 	if (!(txq->ift_sds.ifsd_map =
1677 	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1678 		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
1679 		err = ENOMEM;
1680 		goto fail;
1681 	}
1682 
1683 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1684 		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
1685 		if (err != 0) {
1686 			device_printf(dev, "Unable to create TX DMA map\n");
1687 			goto fail;
1688 		}
1689 	}
1690 #endif
1691 	return (0);
1692 fail:
1693 	/* We free everything; this handles the case where we failed partway. */
1694 	iflib_tx_structures_free(ctx);
1695 	return (err);
1696 }
1697 
1698 static void
1699 iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
1700 {
1701 	bus_dmamap_t map;
1702 
1703 	map = NULL;
1704 	if (txq->ift_sds.ifsd_map != NULL)
1705 		map = txq->ift_sds.ifsd_map[i];
1706 	if (map != NULL) {
1707 		bus_dmamap_unload(txq->ift_desc_tag, map);
1708 		bus_dmamap_destroy(txq->ift_desc_tag, map);
1709 		txq->ift_sds.ifsd_map[i] = NULL;
1710 	}
1711 }
1712 
1713 static void
1714 iflib_txq_destroy(iflib_txq_t txq)
1715 {
1716 	if_ctx_t ctx = txq->ift_ctx;
1717 
1718 	for (int i = 0; i < txq->ift_size; i++)
1719 		iflib_txsd_destroy(ctx, txq, i);
1720 	if (txq->ift_sds.ifsd_map != NULL) {
1721 		free(txq->ift_sds.ifsd_map, M_IFLIB);
1722 		txq->ift_sds.ifsd_map = NULL;
1723 	}
1724 	if (txq->ift_sds.ifsd_m != NULL) {
1725 		free(txq->ift_sds.ifsd_m, M_IFLIB);
1726 		txq->ift_sds.ifsd_m = NULL;
1727 	}
1728 	if (txq->ift_sds.ifsd_flags != NULL) {
1729 		free(txq->ift_sds.ifsd_flags, M_IFLIB);
1730 		txq->ift_sds.ifsd_flags = NULL;
1731 	}
1732 	if (txq->ift_desc_tag != NULL) {
1733 		bus_dma_tag_destroy(txq->ift_desc_tag);
1734 		txq->ift_desc_tag = NULL;
1735 	}
1736 	if (txq->ift_tso_desc_tag != NULL) {
1737 		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
1738 		txq->ift_tso_desc_tag = NULL;
1739 	}
1740 }
1741 
1742 static void
1743 iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
1744 {
1745 	struct mbuf **mp;
1746 
1747 	mp = &txq->ift_sds.ifsd_m[i];
1748 	if (*mp == NULL)
1749 		return;
1750 
1751 	if (txq->ift_sds.ifsd_map != NULL) {
1752 		bus_dmamap_sync(txq->ift_desc_tag,
1753 				txq->ift_sds.ifsd_map[i],
1754 				BUS_DMASYNC_POSTWRITE);
1755 		bus_dmamap_unload(txq->ift_desc_tag,
1756 				  txq->ift_sds.ifsd_map[i]);
1757 	}
1758 	m_free(*mp);
1759 	DBG_COUNTER_INC(tx_frees);
1760 	*mp = NULL;
1761 }
1762 
1763 static int
1764 iflib_txq_setup(iflib_txq_t txq)
1765 {
1766 	if_ctx_t ctx = txq->ift_ctx;
1767 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1768 	iflib_dma_info_t di;
1769 	int i;
1770 
1771 	/* Set number of descriptors available */
1772 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
1773 	/* XXX make configurable */
1774 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
1775 
1776 	/* Reset indices */
1777 	txq->ift_cidx_processed = 0;
1778 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
1779 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
1780 
1781 	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
1782 		bzero((void *)di->idi_vaddr, di->idi_size);
1783 
1784 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
1785 	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
1786 		bus_dmamap_sync(di->idi_tag, di->idi_map,
1787 						BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1788 	return (0);
1789 }
1790 
1791 /*********************************************************************
1792  *
1793  *  Allocate memory for rx_buffer structures. Since we use one
1794  *  rx_buffer per received packet, the maximum number of rx_buffer's
1795  *  that we'll need is equal to the number of receive descriptors
1796  *  that we've allocated.
1797  *
1798  **********************************************************************/
1799 static int
1800 iflib_rxsd_alloc(iflib_rxq_t rxq)
1801 {
1802 	if_ctx_t ctx = rxq->ifr_ctx;
1803 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1804 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1805 	device_t dev = ctx->ifc_dev;
1806 	iflib_fl_t fl;
1807 	int			err;
1808 
1809 	MPASS(scctx->isc_nrxd[0] > 0);
1810 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
1811 
1812 	fl = rxq->ifr_fl;
1813 	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
1814 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
1815 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1816 					 1, 0,			/* alignment, bounds */
1817 					 BUS_SPACE_MAXADDR,	/* lowaddr */
1818 					 BUS_SPACE_MAXADDR,	/* highaddr */
1819 					 NULL, NULL,		/* filter, filterarg */
1820 					 sctx->isc_rx_maxsize,	/* maxsize */
1821 					 sctx->isc_rx_nsegments,	/* nsegments */
1822 					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
1823 					 0,			/* flags */
1824 					 NULL,			/* lockfunc */
1825 					 NULL,			/* lockarg */
1826 					 &fl->ifl_desc_tag);
1827 		if (err) {
1828 			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
1829 				__func__, err);
1830 			goto fail;
1831 		}
1832 		if (!(fl->ifl_sds.ifsd_flags =
1833 		      (uint8_t *) malloc(sizeof(uint8_t) *
1834 					 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1835 			device_printf(dev, "Unable to allocate rx_buffer memory\n");
1836 			err = ENOMEM;
1837 			goto fail;
1838 		}
1839 		if (!(fl->ifl_sds.ifsd_m =
1840 		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
1841 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1842 			device_printf(dev, "Unable to allocate rx_buffer memory\n");
1843 			err = ENOMEM;
1844 			goto fail;
1845 		}
1846 		if (!(fl->ifl_sds.ifsd_cl =
1847 		      (caddr_t *) malloc(sizeof(caddr_t) *
1848 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1849 			device_printf(dev, "Unable to allocate rx_buffer memory\n");
1850 			err = ENOMEM;
1851 			goto fail;
1852 		}
1853 
1854 		/* Create the descriptor buffer dma maps */
1855 #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
1856 		if ((ctx->ifc_flags & IFC_DMAR) == 0)
1857 			continue;
1858 
1859 		if (!(fl->ifl_sds.ifsd_map =
1860 		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1861 			device_printf(dev, "Unable to allocate rx_buffer map memory\n");
1862 			err = ENOMEM;
1863 			goto fail;
1864 		}
1865 
1866 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
1867 			err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
1868 			if (err != 0) {
1869 				device_printf(dev, "Unable to create RX buffer DMA map\n");
1870 				goto fail;
1871 			}
1872 		}
1873 #endif
1874 	}
1875 	return (0);
1876 
1877 fail:
1878 	iflib_rx_structures_free(ctx);
1879 	return (err);
1880 }
1881 
1882 
1883 /*
1884  * Internal service routines
1885  */
1886 
1887 struct rxq_refill_cb_arg {
1888 	int               error;
1889 	bus_dma_segment_t seg;
1890 	int               nseg;
1891 };
1892 
1893 static void
1894 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1895 {
1896 	struct rxq_refill_cb_arg *cb_arg = arg;
1897 
1898 	cb_arg->error = error;
1899 	cb_arg->seg = segs[0];
1900 	cb_arg->nseg = nseg;
1901 }
1902 
1903 
1904 #ifdef ACPI_DMAR
1905 #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR)
1906 #else
1907 #define IS_DMAR(ctx) (0)
1908 #endif
1909 
1910 /**
1911  *	_iflib_fl_refill - refill a free-buffer list
1912  *	@ctx: the iflib context
1913  *	@fl: the free list to refill
1914  *	@count: the number of new buffers to allocate
1915  *
1916  *	(Re)populate a free-buffer list with up to @count new packet buffers.
1917  *	The caller must ensure that @count does not exceed the list's capacity.
1918  */
1919 static void
1920 _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
1921 {
1922 	struct mbuf *m;
1923 	int idx, frag_idx = fl->ifl_fragidx;
1924 	int pidx = fl->ifl_pidx;
1925 	caddr_t cl, *sd_cl;
1926 	struct mbuf **sd_m;
1927 	uint8_t *sd_flags;
1928 	struct if_rxd_update iru;
1929 	bus_dmamap_t *sd_map;
1930 	int n, i = 0;
1931 	uint64_t bus_addr;
1932 	int err;
1933 	qidx_t credits;
1934 
1935 	sd_m = fl->ifl_sds.ifsd_m;
1936 	sd_map = fl->ifl_sds.ifsd_map;
1937 	sd_cl = fl->ifl_sds.ifsd_cl;
1938 	sd_flags = fl->ifl_sds.ifsd_flags;
1939 	idx = pidx;
1940 	credits = fl->ifl_credits;
1941 
1942 	n  = count;
1943 	MPASS(n > 0);
1944 	MPASS(credits + n <= fl->ifl_size);
1945 
1946 	if (pidx < fl->ifl_cidx)
1947 		MPASS(pidx + n <= fl->ifl_cidx);
1948 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
1949 		MPASS(fl->ifl_gen == 0);
1950 	if (pidx > fl->ifl_cidx)
1951 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
1952 
1953 	DBG_COUNTER_INC(fl_refills);
1954 	if (n > 8)
1955 		DBG_COUNTER_INC(fl_refills_large);
1956 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
1957 	while (n--) {
1958 		/*
1959 		 * We allocate an uninitialized mbuf + cluster; the mbuf is
1960 		 * initialized after rx.
1961 		 *
1962 		 * If the cluster is still set, a minimum-sized packet was received
1963 		 */
1964 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx);
1965 		if ((frag_idx < 0) || (frag_idx >= fl->ifl_size))
1966 			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
1967 		if ((cl = sd_cl[frag_idx]) == NULL) {
1968 			if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
1969 				break;
1970 #if MEMORY_LOGGING
1971 			fl->ifl_cl_enqueued++;
1972 #endif
1973 		}
1974 		if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
1975 			break;
1976 		}
1977 #if MEMORY_LOGGING
1978 		fl->ifl_m_enqueued++;
1979 #endif
1980 
1981 		DBG_COUNTER_INC(rx_allocs);
1982 #if defined(__i386__) || defined(__amd64__)
1983 		if (!IS_DMAR(ctx)) {
1984 			bus_addr = pmap_kextract((vm_offset_t)cl);
1985 		} else
1986 #endif
1987 		{
1988 			struct rxq_refill_cb_arg cb_arg;
1989 
1990 			cb_arg.error = 0;
1991 			MPASS(sd_map != NULL);
1992 			MPASS(sd_map[frag_idx] != NULL);
1993 			err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
1994 		         cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
1995 			bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
1996 					BUS_DMASYNC_PREREAD);
1997 
1998 			if (err != 0 || cb_arg.error) {
1999 				/*
2000 				 * XXX only zone_pack clusters are freed here; other zones?
2001 				 */
2002 				if (fl->ifl_zone == zone_pack)
2003 					uma_zfree(fl->ifl_zone, cl);
2004 				m_free(m);
2005 				n = 0;
2006 				goto done;
2007 			}
2008 			bus_addr = cb_arg.seg.ds_addr;
2009 		}
2010 		bit_set(fl->ifl_rx_bitmap, frag_idx);
2011 		sd_flags[frag_idx] |= RX_SW_DESC_INUSE;
2012 
2013 		MPASS(sd_m[frag_idx] == NULL);
2014 		sd_cl[frag_idx] = cl;
2015 		sd_m[frag_idx] = m;
2016 		fl->ifl_rxd_idxs[i] = frag_idx;
2017 		fl->ifl_bus_addrs[i] = bus_addr;
2018 		fl->ifl_vm_addrs[i] = cl;
2019 		credits++;
2020 		i++;
2021 		MPASS(credits <= fl->ifl_size);
2022 		if (++idx == fl->ifl_size) {
2023 			fl->ifl_gen = 1;
2024 			idx = 0;
2025 		}
2026 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
2027 			iru.iru_pidx = pidx;
2028 			iru.iru_count = i;
2029 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2030 			i = 0;
2031 			pidx = idx;
2032 			fl->ifl_pidx = idx;
2033 			fl->ifl_credits = credits;
2034 		}
2036 	}
2037 done:
2038 	if (i) {
2039 		iru.iru_pidx = pidx;
2040 		iru.iru_count = i;
2041 		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2042 		fl->ifl_pidx = idx;
2043 		fl->ifl_credits = credits;
2044 	}
2045 	DBG_COUNTER_INC(rxd_flush);
2046 	if (fl->ifl_pidx == 0)
2047 		pidx = fl->ifl_size - 1;
2048 	else
2049 		pidx = fl->ifl_pidx - 1;
2050 
2051 	if (sd_map)
2052 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2053 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2054 	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
2055 	fl->ifl_fragidx = frag_idx;
2056 }
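
/*
 * Worked example of the refill arithmetic above (illustrative sizes):
 * with ifl_size = 8, ifl_pidx = 6 and count = 4, descriptors are posted
 * at ring positions 6, 7, 0 and 1 (the wrap through position 0 sets
 * ifl_gen), ifl_credits grows by 4 and ifl_pidx finishes at 2.  The
 * final isc_rxd_flush() is handed pidx - 1 (here 1), so the producer
 * index written to hardware never catches up with ifl_cidx.
 */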
2057 
2058 static __inline void
2059 __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
2060 {
2061 	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
2062 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
2063 #ifdef INVARIANTS
2064 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
2065 #endif
2066 
2067 	MPASS(fl->ifl_credits <= fl->ifl_size);
2068 	MPASS(reclaimable == delta);
2069 
2070 	if (reclaimable > 0)
2071 		_iflib_fl_refill(ctx, fl, min(max, reclaimable));
2072 }
2073 
2074 static void
2075 iflib_fl_bufs_free(iflib_fl_t fl)
2076 {
2077 	iflib_dma_info_t idi = fl->ifl_ifdi;
2078 	uint32_t i;
2079 
2080 	for (i = 0; i < fl->ifl_size; i++) {
2081 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2082 		uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i];
2083 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
2084 
2085 		if (*sd_flags & RX_SW_DESC_INUSE) {
2086 			if (fl->ifl_sds.ifsd_map != NULL) {
2087 				bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i];
2088 				bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
2089 				if (fl->ifl_rxq->ifr_ctx->ifc_in_detach)
2090 					bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
2091 			}
2092 			if (*sd_m != NULL) {
2093 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2094 				uma_zfree(zone_mbuf, *sd_m);
2095 			}
2096 			if (*sd_cl != NULL)
2097 				uma_zfree(fl->ifl_zone, *sd_cl);
2098 			*sd_flags = 0;
2099 		} else {
2100 			MPASS(*sd_cl == NULL);
2101 			MPASS(*sd_m == NULL);
2102 		}
2103 #if MEMORY_LOGGING
2104 		fl->ifl_m_dequeued++;
2105 		fl->ifl_cl_dequeued++;
2106 #endif
2107 		*sd_cl = NULL;
2108 		*sd_m = NULL;
2109 	}
2110 #ifdef INVARIANTS
2111 	for (i = 0; i < fl->ifl_size; i++) {
2112 		MPASS(fl->ifl_sds.ifsd_flags[i] == 0);
2113 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
2114 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
2115 	}
2116 #endif
2117 	/*
2118 	 * Reset free list values
2119 	 */
2120 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
2121 	bzero(idi->idi_vaddr, idi->idi_size);
2122 }
2123 
2124 /*********************************************************************
2125  *
2126  *  Initialize a receive ring and its buffers.
2127  *
2128  **********************************************************************/
2129 static int
2130 iflib_fl_setup(iflib_fl_t fl)
2131 {
2132 	iflib_rxq_t rxq = fl->ifl_rxq;
2133 	if_ctx_t ctx = rxq->ifr_ctx;
2134 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2135 
2136 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
2137 	/*
2138 	** Free current RX buffer structs and their mbufs
2139 	*/
2140 	iflib_fl_bufs_free(fl);
2141 	/* Now replenish the mbufs */
2142 	MPASS(fl->ifl_credits == 0);
2143 	/*
2144 	 * XXX don't set the max_frame_size to larger
2145 	 * than the hardware can handle
2146 	 */
2147 	if (sctx->isc_max_frame_size <= 2048)
2148 		fl->ifl_buf_size = MCLBYTES;
2149 #ifndef CONTIGMALLOC_WORKS
2150 	else
2151 		fl->ifl_buf_size = MJUMPAGESIZE;
2152 #else
2153 	else if (sctx->isc_max_frame_size <= 4096)
2154 		fl->ifl_buf_size = MJUMPAGESIZE;
2155 	else if (sctx->isc_max_frame_size <= 9216)
2156 		fl->ifl_buf_size = MJUM9BYTES;
2157 	else
2158 		fl->ifl_buf_size = MJUM16BYTES;
2159 #endif
2160 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
2161 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
2162 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
2163 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
2164 
2165 	/*
2166 	 * Avoid pre-allocating zillions of clusters to an idle card,
2167 	 * potentially speeding up attach.
2168 	 */
2169 	_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
2170 	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
2171 	/* handle refill failure */
2172 	if (min(128, fl->ifl_size) != fl->ifl_credits)
2173 		return (ENOBUFS);
2176 	MPASS(rxq != NULL);
2177 	MPASS(fl->ifl_ifdi != NULL);
2178 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2179 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2180 	return (0);
2181 }
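
/*
 * Cluster-size selection example for iflib_fl_setup() above: a standard
 * 1500-byte MTU (isc_max_frame_size = 1518) selects MCLBYTES (2KB)
 * clusters; a 9000-byte jumbo MTU selects MJUM9BYTES when
 * CONTIGMALLOC_WORKS is defined and otherwise falls back to page-sized
 * MJUMPAGESIZE clusters.
 */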
2182 
2183 /*********************************************************************
2184  *
2185  *  Free receive ring data structures
2186  *
2187  **********************************************************************/
2188 static void
2189 iflib_rx_sds_free(iflib_rxq_t rxq)
2190 {
2191 	iflib_fl_t fl;
2192 	int i;
2193 
2194 	if (rxq->ifr_fl != NULL) {
2195 		for (i = 0; i < rxq->ifr_nfl; i++) {
2196 			fl = &rxq->ifr_fl[i];
2197 			if (fl->ifl_desc_tag != NULL) {
2198 				bus_dma_tag_destroy(fl->ifl_desc_tag);
2199 				fl->ifl_desc_tag = NULL;
2200 			}
2201 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2202 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2203 			/* XXX destroy maps first */
2204 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2205 			fl->ifl_sds.ifsd_m = NULL;
2206 			fl->ifl_sds.ifsd_cl = NULL;
2207 			fl->ifl_sds.ifsd_map = NULL;
2208 		}
2209 		free(rxq->ifr_fl, M_IFLIB);
2210 		rxq->ifr_fl = NULL;
2211 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
2212 	}
2213 }
2214 
2215 /*
2216  * Machine-independent logic
2218  */
2219 static void
2220 iflib_timer(void *arg)
2221 {
2222 	iflib_txq_t txq = arg;
2223 	if_ctx_t ctx = txq->ift_ctx;
2224 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2225 	uint64_t this_tick = ticks;
2226 	uint32_t reset_on = hz / 2;
2227 
2228 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
2229 		return;
2230 	/*
2231 	** Check on the state of the TX queue(s); this can be done
2232 	** without the lock because it is read-only and the HUNG
2233 	** state will be static if set.
2234 	*/
2235 	if (this_tick - txq->ift_last_timer_tick >= hz / 2) {
2236 		txq->ift_last_timer_tick = this_tick;
2237 		IFDI_TIMER(ctx, txq->ift_id);
2238 		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2239 		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2240 		     (sctx->isc_pause_frames == 0)))
2241 			goto hung;
2242 
2243 		if (ifmp_ring_is_stalled(txq->ift_br))
2244 			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2245 		txq->ift_cleaned_prev = txq->ift_cleaned;
2246 	}
2247 #ifdef DEV_NETMAP
2248 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
2249 		iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
2250 #endif
2251 	/* handle any laggards */
2252 	if (txq->ift_db_pending)
2253 		GROUPTASK_ENQUEUE(&txq->ift_task);
2254 
2255 	sctx->isc_pause_frames = 0;
2256 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2257 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
2258 	return;
2259  hung:
2260 	device_printf(ctx->ifc_dev,  "TX(%d) desc avail = %d, pidx = %d\n",
2261 				  txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
2262 	STATE_LOCK(ctx);
2263 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2264 	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
2265 	iflib_admin_intr_deferred(ctx);
2266 	STATE_UNLOCK(ctx);
2267 }
2268 
2269 static void
2270 iflib_init_locked(if_ctx_t ctx)
2271 {
2272 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2274 	if_t ifp = ctx->ifc_ifp;
2275 	iflib_fl_t fl;
2276 	iflib_txq_t txq;
2277 	iflib_rxq_t rxq;
2278 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
2279 
2281 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2282 	IFDI_INTR_DISABLE(ctx);
2283 
2284 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
2285 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
2286 	/* Set hardware offload abilities */
2287 	if_clearhwassist(ifp);
2288 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
2289 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
2290 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
2291 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
2292 	if (if_getcapenable(ifp) & IFCAP_TSO4)
2293 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
2294 	if (if_getcapenable(ifp) & IFCAP_TSO6)
2295 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
2296 
2297 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
2298 		CALLOUT_LOCK(txq);
2299 		callout_stop(&txq->ift_timer);
2300 		CALLOUT_UNLOCK(txq);
2301 		iflib_netmap_txq_init(ctx, txq);
2302 	}
2303 #ifdef INVARIANTS
2304 	i = if_getdrvflags(ifp);
2305 #endif
2306 	IFDI_INIT(ctx);
2307 	MPASS(if_getdrvflags(ifp) == i);
2308 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
2309 		/* XXX this should really be done on a per-queue basis */
2310 		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
2311 			MPASS(rxq->ifr_id == i);
2312 			iflib_netmap_rxq_init(ctx, rxq);
2313 			continue;
2314 		}
2315 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
2316 			if (iflib_fl_setup(fl)) {
2317 				device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n");
2318 				goto done;
2319 			}
2320 		}
2321 	}
2322 done:
2323 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2324 	IFDI_INTR_ENABLE(ctx);
2325 	txq = ctx->ifc_txqs;
2326 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++)
2327 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
2328 			txq->ift_timer.c_cpu);
2329 }
2330 
2331 static int
2332 iflib_media_change(if_t ifp)
2333 {
2334 	if_ctx_t ctx = if_getsoftc(ifp);
2335 	int err;
2336 
2337 	CTX_LOCK(ctx);
2338 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
2339 		iflib_init_locked(ctx);
2340 	CTX_UNLOCK(ctx);
2341 	return (err);
2342 }
2343 
2344 static void
2345 iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
2346 {
2347 	if_ctx_t ctx = if_getsoftc(ifp);
2348 
2349 	CTX_LOCK(ctx);
2350 	IFDI_UPDATE_ADMIN_STATUS(ctx);
2351 	IFDI_MEDIA_STATUS(ctx, ifmr);
2352 	CTX_UNLOCK(ctx);
2353 }
2354 
2355 void
2356 iflib_stop(if_ctx_t ctx)
2357 {
2358 	iflib_txq_t txq = ctx->ifc_txqs;
2359 	iflib_rxq_t rxq = ctx->ifc_rxqs;
2360 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2361 	iflib_dma_info_t di;
2362 	iflib_fl_t fl;
2363 	int i, j;
2364 
2365 	/* Tell the stack that the interface is no longer active */
2366 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2367 
2368 	IFDI_INTR_DISABLE(ctx);
2369 	DELAY(1000);
2370 	IFDI_STOP(ctx);
2371 	DELAY(1000);
2372 
2373 	iflib_debug_reset();
2374 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
2375 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
2376 		/* make sure all transmitters have completed before proceeding XXX */
2377 
2378 		CALLOUT_LOCK(txq);
2379 		callout_stop(&txq->ift_timer);
2380 		CALLOUT_UNLOCK(txq);
2381 
2382 		/* clean any enqueued buffers */
2383 		iflib_ifmp_purge(txq);
2384 		/* Free any existing tx buffers. */
2385 		for (j = 0; j < txq->ift_size; j++) {
2386 			iflib_txsd_free(ctx, txq, j);
2387 		}
2388 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2389 		txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
2390 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
2391 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2392 		txq->ift_pullups = 0;
2393 		ifmp_ring_reset_stats(txq->ift_br);
2394 		for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++)
2395 			bzero((void *)di->idi_vaddr, di->idi_size);
2396 	}
2397 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
2398 		/* make sure all transmitters have completed before proceeding XXX */
2399 
2400 		for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++)
2401 			bzero((void *)di->idi_vaddr, di->idi_size);
2402 		/* also resets the free lists pidx/cidx */
2403 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
2404 			iflib_fl_bufs_free(fl);
2405 	}
2406 }
2407 
2408 static inline caddr_t
2409 calc_next_rxd(iflib_fl_t fl, int cidx)
2410 {
2411 	qidx_t size;
2412 	int nrxd;
2413 	caddr_t start, end, cur, next;
2414 
2415 	nrxd = fl->ifl_size;
2416 	size = fl->ifl_rxd_size;
2417 	start = fl->ifl_ifdi->idi_vaddr;
2418 
2419 	if (__predict_false(size == 0))
2420 		return (start);
2421 	cur = start + size*cidx;
2422 	end = start + size*nrxd;
2423 	next = CACHE_PTR_NEXT(cur);
2424 	return (next < end ? next : start);
2425 }
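
/*
 * Illustrative arithmetic for calc_next_rxd(), assuming CACHE_PTR_NEXT()
 * rounds a pointer up to the next cache-line boundary: with 16-byte
 * descriptors and 64-byte cache lines, cidx = 5 points at byte offset 80,
 * so the prefetch target is offset 128 (descriptor 8); if that target
 * would run past the last descriptor it wraps to the start of the ring.
 */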
2426 
2427 static inline void
2428 prefetch_pkts(iflib_fl_t fl, int cidx)
2429 {
2430 	int nextptr;
2431 	int nrxd = fl->ifl_size;
2432 	caddr_t next_rxd;
2433 
2435 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2436 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2437 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
2438 	next_rxd = calc_next_rxd(fl, cidx);
2439 	prefetch(next_rxd);
2440 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2441 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2442 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2443 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2444 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2445 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2446 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2447 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
2448 }
2449 
2450 static void
2451 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
2452 {
2453 	int flid, cidx;
2454 	bus_dmamap_t map;
2455 	iflib_fl_t fl;
2456 	iflib_dma_info_t di;
2457 	int next;
2458 
2459 	map = NULL;
2460 	flid = irf->irf_flid;
2461 	cidx = irf->irf_idx;
2462 	fl = &rxq->ifr_fl[flid];
2463 	sd->ifsd_fl = fl;
2464 	sd->ifsd_cidx = cidx;
2465 	sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
2466 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
2467 	fl->ifl_credits--;
2468 #if MEMORY_LOGGING
2469 	fl->ifl_m_dequeued++;
2470 #endif
2471 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2472 		prefetch_pkts(fl, cidx);
2473 	if (fl->ifl_sds.ifsd_map != NULL) {
2474 		next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2475 		prefetch(&fl->ifl_sds.ifsd_map[next]);
2476 		map = fl->ifl_sds.ifsd_map[cidx];
2477 		di = fl->ifl_ifdi;
2478 		next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
2479 		prefetch(&fl->ifl_sds.ifsd_flags[next]);
2480 		bus_dmamap_sync(di->idi_tag, di->idi_map,
2481 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2482 
2483 		/* this is not a valid assert if bxe really does SGE from non-contiguous elements */
2484 		MPASS(fl->ifl_cidx == cidx);
2485 		if (unload)
2486 			bus_dmamap_unload(fl->ifl_desc_tag, map);
2487 	}
2488 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
2489 	if (__predict_false(fl->ifl_cidx == 0))
2490 		fl->ifl_gen = 0;
2491 	if (map != NULL)
2492 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2493 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2494 	bit_clear(fl->ifl_rx_bitmap, cidx);
2495 }
2496 
2497 static struct mbuf *
2498 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
2499 {
2500 	int i, padlen, flags;
2501 	struct mbuf *m, *mh, *mt;
2502 	caddr_t cl;
2503 
2504 	i = 0;
2505 	mh = NULL;
2506 	do {
2507 		rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd);
2508 
2509 		MPASS(*sd->ifsd_cl != NULL);
2510 		MPASS(*sd->ifsd_m != NULL);
2511 
2512 		/* Don't include zero-length frags */
2513 		if (ri->iri_frags[i].irf_len == 0) {
2514 			/* XXX we can save the cluster here, but not the mbuf */
2515 			m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
2516 			m_free(*sd->ifsd_m);
2517 			*sd->ifsd_m = NULL;
2518 			continue;
2519 		}
2520 		m = *sd->ifsd_m;
2521 		*sd->ifsd_m = NULL;
2522 		if (mh == NULL) {
2523 			flags = M_PKTHDR|M_EXT;
2524 			mh = mt = m;
2525 			padlen = ri->iri_pad;
2526 		} else {
2527 			flags = M_EXT;
2528 			mt->m_next = m;
2529 			mt = m;
2530 			/* assuming padding is only on the first fragment */
2531 			padlen = 0;
2532 		}
2533 		cl = *sd->ifsd_cl;
2534 		*sd->ifsd_cl = NULL;
2535 
2536 		/* Can these two be made one? */
2537 		m_init(m, M_NOWAIT, MT_DATA, flags);
2538 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
2539 		/*
2540 		 * These must follow m_init and m_cljset
2541 		 */
2542 		m->m_data += padlen;
2543 		ri->iri_len -= padlen;
2544 		m->m_len = ri->iri_frags[i].irf_len;
2545 	} while (++i < ri->iri_nfrags);
2546 
2547 	return (mh);
2548 }
2549 
2550 /*
2551  * Process one software descriptor
2552  */
2553 static struct mbuf *
2554 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
2555 {
2556 	struct if_rxsd sd;
2557 	struct mbuf *m;
2558 
2559 	/* should I merge this back in now that the two paths are basically duplicated? */
2560 	if (ri->iri_nfrags == 1 &&
2561 	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
2562 		rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
2563 		m = *sd.ifsd_m;
2564 		*sd.ifsd_m = NULL;
2565 		m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
2566 #ifndef __NO_STRICT_ALIGNMENT
2567 		if (!IP_ALIGNED(m))
2568 			m->m_data += 2;
2569 #endif
2570 		memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
2571 		m->m_len = ri->iri_frags[0].irf_len;
2572 	} else {
2573 		m = assemble_segments(rxq, ri, &sd);
2574 	}
2575 	m->m_pkthdr.len = ri->iri_len;
2576 	m->m_pkthdr.rcvif = ri->iri_ifp;
2577 	m->m_flags |= ri->iri_flags;
2578 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
2579 	m->m_pkthdr.flowid = ri->iri_flowid;
2580 	M_HASHTYPE_SET(m, ri->iri_rsstype);
2581 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
2582 	m->m_pkthdr.csum_data = ri->iri_csum_data;
2583 	return (m);
2584 }
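
/*
 * Example of the two paths above: a 60-byte ARP reply arriving as a
 * single fragment falls under MIN(IFLIB_RX_COPY_THRESH, MHLEN) and is
 * copied into the mbuf's internal data area, leaving the cluster in
 * place on the free list (rxd_frag_to_sd() is called with unload ==
 * FALSE); a full-sized TCP segment instead goes through
 * assemble_segments(), which attaches the cluster itself with
 * m_cljset() and chains any additional fragments onto the M_PKTHDR
 * mbuf.
 */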
2585 
2586 #if defined(INET6) || defined(INET)
2587 static void
2588 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
2589 {
2590 	CURVNET_SET(lc->ifp->if_vnet);
2591 #if defined(INET6)
2592 	*v6 = VNET(ip6_forwarding);
2593 #endif
2594 #if defined(INET)
2595 	*v4 = VNET(ipforwarding);
2596 #endif
2597 	CURVNET_RESTORE();
2598 }
2599 
2600 /*
2601  * Returns true if it's possible this packet could be LROed.
2602  * If it returns false, it is guaranteed that tcp_lro_rx()
2603  * would not return zero.
2604  */
2605 static bool
2606 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
2607 {
2608 	struct ether_header *eh;
2609 	uint16_t eh_type;
2610 
2611 	eh = mtod(m, struct ether_header *);
2612 	eh_type = ntohs(eh->ether_type);
2613 	switch (eh_type) {
2614 #if defined(INET6)
2615 		case ETHERTYPE_IPV6:
2616 			return !v6_forwarding;
2617 #endif
2618 #if defined (INET)
2619 		case ETHERTYPE_IP:
2620 			return !v4_forwarding;
2621 #endif
2622 	}
2623 
2624 	return (false);
2625 }
2626 #else
2627 static void
2628 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
2629 {
2630 }
2631 #endif
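
/*
 * The forwarding checks above matter because LRO merges several TCP
 * segments into one large frame; a host that merely forwards the
 * packets would retransmit them with different framing than it
 * received, so LRO is only attempted when forwarding is disabled for
 * the packet's address family.
 */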
2632 
2633 static bool
2634 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
2635 {
2636 	if_ctx_t ctx = rxq->ifr_ctx;
2637 	if_shared_ctx_t sctx = ctx->ifc_sctx;
2638 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2639 	int avail, i;
2640 	qidx_t *cidxp;
2641 	struct if_rxd_info ri;
2642 	int err, budget_left, rx_bytes, rx_pkts;
2643 	iflib_fl_t fl;
2644 	struct ifnet *ifp;
2645 	int lro_enabled;
2646 	bool v4_forwarding, v6_forwarding, lro_possible;
2647 
2648 	/*
2649 	 * XXX early demux data packets so that if_input processing only handles
2650 	 * acks in interrupt context
2651 	 */
2652 	struct mbuf *m, *mh, *mt, *mf;
2653 
2654 	lro_possible = v4_forwarding = v6_forwarding = false;
2655 	ifp = ctx->ifc_ifp;
2656 	mh = mt = NULL;
2657 	MPASS(budget > 0);
2658 	rx_pkts	= rx_bytes = 0;
2659 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
2660 		cidxp = &rxq->ifr_cq_cidx;
2661 	else
2662 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
2663 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
2664 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2665 			__iflib_fl_refill_lt(ctx, fl, budget + 8);
2666 		DBG_COUNTER_INC(rx_unavail);
2667 		return (false);
2668 	}
2669 
2670 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
2671 		if (__predict_false(!CTX_ACTIVE(ctx))) {
2672 			DBG_COUNTER_INC(rx_ctx_inactive);
2673 			break;
2674 		}
2675 		/*
2676 		 * Reset client set fields to their default values
2677 		 */
2678 		rxd_info_zero(&ri);
2679 		ri.iri_qsidx = rxq->ifr_id;
2680 		ri.iri_cidx = *cidxp;
2681 		ri.iri_ifp = ifp;
2682 		ri.iri_frags = rxq->ifr_frags;
2683 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
2684 
2685 		if (err)
2686 			goto err;
2687 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
2688 			*cidxp = ri.iri_cidx;
2689 			/* Update our consumer index */
2690 			/* XXX NB: shurd - check if this is still safe */
2691 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
2692 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
2693 				rxq->ifr_cq_gen = 0;
2694 			}
2695 			/* was this only a completion queue message? */
2696 			if (__predict_false(ri.iri_nfrags == 0))
2697 				continue;
2698 		}
2699 		MPASS(ri.iri_nfrags != 0);
2700 		MPASS(ri.iri_len != 0);
2701 
2702 		/* will advance the cidx on the corresponding free lists */
2703 		m = iflib_rxd_pkt_get(rxq, &ri);
2704 		avail--;
2705 		budget_left--;
2706 		if (avail == 0 && budget_left)
2707 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
2708 
2709 		if (__predict_false(m == NULL)) {
2710 			DBG_COUNTER_INC(rx_mbuf_null);
2711 			continue;
2712 		}
2713 		/* imm_pkt: -- cxgb */
2714 		if (mh == NULL)
2715 			mh = mt = m;
2716 		else {
2717 			mt->m_nextpkt = m;
2718 			mt = m;
2719 		}
2720 	}
2721 	/* make sure that we can refill faster than drain */
2722 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2723 		__iflib_fl_refill_lt(ctx, fl, budget + 8);
2724 
2725 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2726 	if (lro_enabled)
2727 		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
2728 	mt = mf = NULL;
2729 	while (mh != NULL) {
2730 		m = mh;
2731 		mh = mh->m_nextpkt;
2732 		m->m_nextpkt = NULL;
2733 #ifndef __NO_STRICT_ALIGNMENT
2734 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
2735 			continue;
2736 #endif
2737 		rx_bytes += m->m_pkthdr.len;
2738 		rx_pkts++;
2739 #if defined(INET6) || defined(INET)
2740 		if (lro_enabled) {
2741 			if (!lro_possible) {
2742 				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
2743 				if (lro_possible && mf != NULL) {
2744 					ifp->if_input(ifp, mf);
2745 					DBG_COUNTER_INC(rx_if_input);
2746 					mt = mf = NULL;
2747 				}
2748 			}
2749 			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
2750 			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
2751 				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
2752 					continue;
2753 			}
2754 		}
2755 #endif
2756 		if (lro_possible) {
2757 			ifp->if_input(ifp, m);
2758 			DBG_COUNTER_INC(rx_if_input);
2759 			continue;
2760 		}
2761 
2762 		if (mf == NULL)
2763 			mf = m;
2764 		if (mt != NULL)
2765 			mt->m_nextpkt = m;
2766 		mt = m;
2767 	}
2768 	if (mf != NULL) {
2769 		ifp->if_input(ifp, mf);
2770 		DBG_COUNTER_INC(rx_if_input);
2771 	}
2772 
2773 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
2774 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
2775 
2776 	/*
2777 	 * Flush any outstanding LRO work
2778 	 */
2779 #if defined(INET6) || defined(INET)
2780 	tcp_lro_flush_all(&rxq->ifr_lc);
2781 #endif
2782 	if (avail)
2783 		return (true);
2784 	return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
2785 err:
2786 	STATE_LOCK(ctx);
2787 	ctx->ifc_flags |= IFC_DO_RESET;
2788 	iflib_admin_intr_deferred(ctx);
2789 	STATE_UNLOCK(ctx);
2790 	return (false);
2791 }
2792 
2793 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
2794 static inline qidx_t
2795 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
2796 {
2797 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2798 	qidx_t minthresh = txq->ift_size / 8;
2799 	if (in_use > 4*minthresh)
2800 		return (notify_count);
2801 	if (in_use > 2*minthresh)
2802 		return (notify_count >> 1);
2803 	if (in_use > minthresh)
2804 		return (notify_count >> 3);
2805 	return (0);
2806 }
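
/*
 * Worked example, assuming the default ift_update_freq of 16: with
 * ift_size = 1024, TXD_NOTIFY_COUNT() is 1024 / 16 - 1 = 63 and
 * minthresh is 128, so the doorbell may be deferred behind up to
 *
 *	63 packets when in_use > 512	(ring more than half full)
 *	31 packets when in_use > 256
 *	 7 packets when in_use > 128
 *	 0 packets otherwise		(idle ring: ring immediately)
 */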
2807 
2808 static inline qidx_t
2809 txq_max_rs_deferred(iflib_txq_t txq)
2810 {
2811 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2812 	qidx_t minthresh = txq->ift_size / 8;
2813 	if (txq->ift_in_use > 4*minthresh)
2814 		return (notify_count);
2815 	if (txq->ift_in_use > 2*minthresh)
2816 		return (notify_count >> 1);
2817 	if (txq->ift_in_use > minthresh)
2818 		return (notify_count >> 2);
2819 	return (2);
2820 }
2821 
2822 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
2823 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
2824 
2825 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
2826 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
2827 #define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
2828 
2829 /* forward compatibility for cxgb */
2830 #define FIRST_QSET(ctx) 0
2831 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
2832 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
2833 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
2834 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
2835 
2836 /* XXX we should be setting this to something other than zero */
2837 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
2838 #define	MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
2839     (ctx)->ifc_softc_ctx.isc_tx_nsegments)
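
/*
 * Illustrative numbers for DESC_RECLAIMABLE(): with ift_processed = 1000,
 * ift_cleaned = 900 and isc_tx_nsegments = 8, the macro reports 92
 * reclaimable descriptors; subtracting isc_tx_nsegments leaves roughly one
 * maximum-sized packet of headroom between the cleaner and the producer.
 */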
2840 
2841 static inline bool
2842 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
2843 {
2844 	qidx_t dbval, max;
2845 	bool rang;
2846 
2847 	rang = false;
2848 	max = TXQ_MAX_DB_DEFERRED(txq, in_use);
2849 	if (ring || txq->ift_db_pending >= max) {
2850 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
2851 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
2852 		txq->ift_db_pending = txq->ift_npending = 0;
2853 		rang = true;
2854 	}
2855 	return (rang);
2856 }
2857 
2858 #ifdef PKT_DEBUG
2859 static void
2860 print_pkt(if_pkt_info_t pi)
2861 {
2862 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
2863 	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
2864 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
2865 	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
2866 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
2867 	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
2868 }
2869 #endif
2870 
2871 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
2872 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
2873 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
2874 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
2875 
2876 static int
2877 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
2878 {
2879 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
2880 	struct ether_vlan_header *eh;
2881 	struct mbuf *m;
2882 
2883 	m = *mp;
2884 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
2885 	    M_WRITABLE(m) == 0) {
2886 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
2887 			return (ENOMEM);
2888 		} else {
2889 			m_freem(*mp);
2890 			*mp = m;
2891 		}
2892 	}
2893 
2894 	/*
2895 	 * Determine where frame payload starts.
2896 	 * Jump over vlan headers if already present,
2897 	 * helpful for QinQ too.
2898 	 */
2899 	if (__predict_false(m->m_len < sizeof(*eh))) {
2900 		txq->ift_pullups++;
2901 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
2902 			return (ENOMEM);
2903 	}
2904 	eh = mtod(m, struct ether_vlan_header *);
2905 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2906 		pi->ipi_etype = ntohs(eh->evl_proto);
2907 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2908 	} else {
2909 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
2910 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
2911 	}
2912 
2913 	switch (pi->ipi_etype) {
2914 #ifdef INET
2915 	case ETHERTYPE_IP:
2916 	{
2917 		struct mbuf *n;
2918 		struct ip *ip = NULL;
2919 		struct tcphdr *th = NULL;
2920 		int minthlen;
2921 
2922 		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
2923 		if (__predict_false(m->m_len < minthlen)) {
2924 			/*
2925 			 * if this code bloat is causing too much of a hit
2926 			 * move it to a separate function and mark it noinline
2927 			 */
2928 			if (m->m_len == pi->ipi_ehdrlen) {
2929 				n = m->m_next;
2930 				MPASS(n);
2931 				if (n->m_len >= sizeof(*ip))  {
2932 					ip = (struct ip *)n->m_data;
2933 					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2934 						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2935 				} else {
2936 					txq->ift_pullups++;
2937 					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
2938 						return (ENOMEM);
2939 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2940 				}
2941 			} else {
2942 				txq->ift_pullups++;
2943 				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
2944 					return (ENOMEM);
2945 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2946 				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2947 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2948 			}
2949 		} else {
2950 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2951 			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2952 				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2953 		}
2954 		pi->ipi_ip_hlen = ip->ip_hl << 2;
2955 		pi->ipi_ipproto = ip->ip_p;
2956 		pi->ipi_flags |= IPI_TX_IPV4;
2957 
2958 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
2959 			ip->ip_sum = 0;
2960 
2961 		/* TCP checksum offload may require TCP header length */
2962 		if (IS_TX_OFFLOAD4(pi)) {
2963 			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
2964 				if (__predict_false(th == NULL)) {
2965 					txq->ift_pullups++;
2966 					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
2967 						return (ENOMEM);
2968 					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
2969 				}
2970 				pi->ipi_tcp_hflags = th->th_flags;
2971 				pi->ipi_tcp_hlen = th->th_off << 2;
2972 				pi->ipi_tcp_seq = th->th_seq;
2973 			}
2974 			if (IS_TSO4(pi)) {
2975 				if (__predict_false(ip->ip_p != IPPROTO_TCP))
2976 					return (ENXIO);
2977 				th->th_sum = in_pseudo(ip->ip_src.s_addr,
2978 						       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2979 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
2980 				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
2981 					ip->ip_sum = 0;
2982 					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
2983 				}
2984 			}
2985 		}
2986 		break;
2987 	}
2988 #endif
2989 #ifdef INET6
2990 	case ETHERTYPE_IPV6:
2991 	{
2992 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
2993 		struct ip6_hdr *ip6;
2994 		struct tcphdr *th;
2995 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
2996 
2997 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
2998 			txq->ift_pullups++;
2999 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
3000 				return (ENOMEM);
3001 		}
3002 		/* compute these only after m_pullup() may have replaced the data area */
3003 		ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3004 		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3002 		/* XXX-BZ this will go badly in case of ext hdrs. */
3003 		pi->ipi_ipproto = ip6->ip6_nxt;
3004 		pi->ipi_flags |= IPI_TX_IPV6;
3005 
3006 		/* TCP checksum offload may require TCP header length */
3007 		if (IS_TX_OFFLOAD6(pi)) {
3008 			if (pi->ipi_ipproto == IPPROTO_TCP) {
3009 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3010 					txq->ift_pullups++;
3011 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
3012 						return (ENOMEM);
3013 				}
3014 					/* the pullup may have moved the data area */
3015 					ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3016 					th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3017 				}
3015 				pi->ipi_tcp_hlen = th->th_off << 2;
3016 				pi->ipi_tcp_seq = th->th_seq;
3017 			}
3018 			if (IS_TSO6(pi)) {
3019 				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
3020 					return (ENXIO);
3021 				/*
3022 				 * The corresponding flag is set by the stack in the IPv4
3023 				 * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
3024 				 * So, set it here because the rest of the flow requires it.
3025 				 */
3026 				pi->ipi_csum_flags |= CSUM_IP6_TCP;
3027 				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3028 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3029 			}
3030 		}
3031 		break;
3032 	}
3033 #endif
3034 	default:
3035 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3036 		pi->ipi_ip_hlen = 0;
3037 		break;
3038 	}
3039 	*mp = m;
3040 
3041 	return (0);
3042 }
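
/*
 * Example of the parser's output: for an untagged TCP/IPv4 segment with
 * checksum offload requested, iflib_parse_header() leaves ipi_etype =
 * ETHERTYPE_IP, ipi_ehdrlen = ETHER_HDR_LEN (14), ipi_ip_hlen = 20 for
 * an option-less header, ipi_ipproto = IPPROTO_TCP and the ipi_tcp_*
 * fields filled in, so the driver's isc_txd_encap callback can program
 * its offload context without walking the mbuf chain again.
 */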
3043 
3044 static  __noinline  struct mbuf *
3045 collapse_pkthdr(struct mbuf *m0)
3046 {
3047 	struct mbuf *m, *m_next, *tmp;
3048 
3049 	m = m0;
3050 	m_next = m->m_next;
3051 	while (m_next != NULL && m_next->m_len == 0) {
3052 		/* unlink before freeing so a freed mbuf is never dereferenced */
3053 		tmp = m_next->m_next;
3054 		m_next->m_next = NULL;
3055 		m_free(m_next);
3056 		m_next = tmp;
3056 	}
3057 	m = m0;
3058 	m->m_next = m_next;
3059 	if (m_next == NULL)
3060 		return (m);
3061 	if ((m_next->m_flags & M_EXT) == 0) {
3062 		m = m_defrag(m, M_NOWAIT);
3063 	} else {
3064 		tmp = m_next->m_next;
3065 		memcpy(m_next, m, MPKTHSIZE);
3066 		m = m_next;
3067 		m->m_next = tmp;
3068 	}
3069 	return (m);
3070 }
3071 
3072 /*
3073  * If dodgy hardware rejects the scatter-gather chain we've handed it
3074  * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
3075  * m_defrag'd mbufs
3076  */
3077 static __noinline struct mbuf *
3078 iflib_remove_mbuf(iflib_txq_t txq)
3079 {
3080 	int ntxd, i, pidx;
3081 	struct mbuf *m, *mh, **ifsd_m;
3082 
3083 	pidx = txq->ift_pidx;
3084 	ifsd_m = txq->ift_sds.ifsd_m;
3085 	ntxd = txq->ift_size;
3086 	mh = m = ifsd_m[pidx];
3087 	ifsd_m[pidx] = NULL;
3088 #if MEMORY_LOGGING
3089 	txq->ift_dequeued++;
3090 #endif
3091 	i = 1;
3092 
3093 	while (m) {
3094 		ifsd_m[(pidx + i) & (ntxd - 1)] = NULL;
3095 #if MEMORY_LOGGING
3096 		txq->ift_dequeued++;
3097 #endif
3098 		m = m->m_next;
3099 		i++;
3100 	}
3101 	return (mh);
3102 }
3103 
3104 static int
3105 iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
3106 			  struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
3107 			  int max_segs, int flags)
3108 {
3109 	if_ctx_t ctx;
3110 	if_shared_ctx_t		sctx;
3111 	if_softc_ctx_t		scctx;
3112 	int i, next, pidx, err, ntxd, count;
3113 	struct mbuf *m, *tmp, **ifsd_m;
3114 
3115 	m = *m0;
3116 
3117 	/*
3118 	 * Please don't ever do this
3119 	 */
3120 	if (__predict_false(m->m_len == 0))
3121 		*m0 = collapse_pkthdr(m);
3122 
3123 	ctx = txq->ift_ctx;
3124 	sctx = ctx->ifc_sctx;
3125 	scctx = &ctx->ifc_softc_ctx;
3126 	ifsd_m = txq->ift_sds.ifsd_m;
3127 	ntxd = txq->ift_size;
3128 	pidx = txq->ift_pidx;
3129 	if (map != NULL) {
3130 		uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
3131 
3132 		err = bus_dmamap_load_mbuf_sg(tag, map,
3133 					      *m0, segs, nsegs, BUS_DMA_NOWAIT);
3134 		if (err)
3135 			return (err);
3136 		ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
3137 		count = 0;
3138 		m = *m0;
3139 		do {
3140 			if (__predict_false(m->m_len <= 0)) {
3141 				tmp = m;
3142 				m = m->m_next;
3143 				tmp->m_next = NULL;
3144 				m_free(tmp);
3145 				continue;
3146 			}
3147 			m = m->m_next;
3148 			count++;
3149 		} while (m != NULL);
3150 		if (count > *nsegs) {
3151 			ifsd_m[pidx] = *m0;
3152 			ifsd_m[pidx]->m_flags |= M_TOOBIG;
3153 			return (0);
3154 		}
3155 		m = *m0;
3156 		count = 0;
3157 		do {
3158 			next = (pidx + count) & (ntxd-1);
3159 			MPASS(ifsd_m[next] == NULL);
3160 			ifsd_m[next] = m;
3161 			count++;
3162 			tmp = m;
3163 			m = m->m_next;
3164 		} while (m != NULL);
3165 	} else {
3166 		int buflen, sgsize, maxsegsz, max_sgsize;
3167 		vm_offset_t vaddr;
3168 		vm_paddr_t curaddr;
3169 
3170 		count = i = 0;
3171 		m = *m0;
3172 		if (m->m_pkthdr.csum_flags & CSUM_TSO)
3173 			maxsegsz = scctx->isc_tx_tso_segsize_max;
3174 		else
3175 			maxsegsz = sctx->isc_tx_maxsegsize;
3176 
3177 		do {
3178 			if (__predict_false(m->m_len <= 0)) {
3179 				tmp = m;
3180 				m = m->m_next;
3181 				tmp->m_next = NULL;
3182 				m_free(tmp);
3183 				continue;
3184 			}
3185 			buflen = m->m_len;
3186 			vaddr = (vm_offset_t)m->m_data;
3187 			/*
3188 			 * see if we can't be smarter about physically
3189 			 * contiguous mappings
3190 			 */
3191 			next = (pidx + count) & (ntxd-1);
3192 			MPASS(ifsd_m[next] == NULL);
3193 #if MEMORY_LOGGING
3194 			txq->ift_enqueued++;
3195 #endif
3196 			ifsd_m[next] = m;
3197 			while (buflen > 0) {
3198 				if (i >= max_segs)
3199 					goto err;
3200 				max_sgsize = MIN(buflen, maxsegsz);
3201 				curaddr = pmap_kextract(vaddr);
3202 				sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
3203 				sgsize = MIN(sgsize, max_sgsize);
3204 				segs[i].ds_addr = curaddr;
3205 				segs[i].ds_len = sgsize;
3206 				vaddr += sgsize;
3207 				buflen -= sgsize;
3208 				i++;
3209 			}
3210 			count++;
3211 			tmp = m;
3212 			m = m->m_next;
3213 		} while (m != NULL);
3214 		*nsegs = i;
3215 	}
3216 	return (0);
3217 err:
3218 	*m0 = iflib_remove_mbuf(txq);
3219 	return (EFBIG);
3220 }
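
/*
 * Worked example for the map == NULL path above (direct pmap_kextract(),
 * illustrative addresses): a 3000-byte mbuf whose data starts 96 bytes
 * before a 4KB page boundary is emitted as
 *
 *	segs[0].ds_len = 96;	(runs to the end of the first page)
 *	segs[1].ds_len = 2904;	(remainder, assuming maxsegsz allows it)
 *
 * even when the two pages happen to be physically contiguous -- which is
 * the missed optimization the "physically contiguous mappings" comment
 * above alludes to.
 */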
3221 
3222 static inline caddr_t
3223 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
3224 {
3225 	qidx_t size;
3226 	int ntxd;
3227 	caddr_t start, end, cur, next;
3228 
3229 	ntxd = txq->ift_size;
3230 	size = txq->ift_txd_size[qid];
3231 	start = txq->ift_ifdi[qid].idi_vaddr;
3232 
3233 	if (__predict_false(size == 0))
3234 		return (start);
3235 	cur = start + size*cidx;
3236 	end = start + size*ntxd;
3237 	next = CACHE_PTR_NEXT(cur);
3238 	return (next < end ? next : start);
3239 }
3240 
3241 /*
3242  * Pad an mbuf to ensure a minimum ethernet frame size.
3243  * min_frame_size is the frame size (less CRC) to pad the mbuf to
3244  */
3245 static __noinline int
3246 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3247 {
3248 	/*
3249 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3250 	 * an ARP message is the smallest common payload I can think of
3251 	 */
3252 	static char pad[18];	/* just zeros */
3253 	int n;
3254 	struct mbuf *new_head;
3255 
3256 	if (!M_WRITABLE(*m_head)) {
3257 		new_head = m_dup(*m_head, M_NOWAIT);
3258 		if (new_head == NULL) {
3259 			m_freem(*m_head);
3260 			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
3261 			DBG_COUNTER_INC(encap_pad_mbuf_fail);
3262 			return (ENOMEM);
3263 		}
3264 		m_freem(*m_head);
3265 		*m_head = new_head;
3266 	}
3267 
3268 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3269 	     n > 0; n -= sizeof(pad))
3270 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3271 			break;
3272 
3273 	if (n > 0) {
3274 		m_freem(*m_head);
3275 		device_printf(dev, "cannot pad short frame\n");
3276 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
3277 		return (ENOBUFS);
3278 	}
3279 
3280 	return (0);
3281 }
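
/*
 * Example: a 42-byte ARP request on hardware with isc_min_frame_size = 60
 * is short by 18 bytes, which the loop above supplies with a single
 * m_append() of the zeroed pad[] buffer; larger deficits would be filled
 * in sizeof(pad)-sized chunks until satisfied.
 */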
3282 
3283 static int
3284 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
3285 {
3286 	if_ctx_t		ctx;
3287 	if_shared_ctx_t		sctx;
3288 	if_softc_ctx_t		scctx;
3289 	bus_dma_segment_t	*segs;
3290 	struct mbuf		*m_head;
3291 	void			*next_txd;
3292 	bus_dmamap_t		map;
3293 	struct if_pkt_info	pi;
3294 	int remap = 0;
3295 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
3296 	bus_dma_tag_t desc_tag;
3297 
3298 	ctx = txq->ift_ctx;
3299 	sctx = ctx->ifc_sctx;
3300 	scctx = &ctx->ifc_softc_ctx;
3301 	segs = txq->ift_segs;
3302 	ntxd = txq->ift_size;
3303 	m_head = *m_headp;
3304 	map = NULL;
3305 
3306 	/*
3307 	 * If we're doing TSO the next descriptor to clean may be quite far ahead
3308 	 */
3309 	cidx = txq->ift_cidx;
3310 	pidx = txq->ift_pidx;
3311 	if (ctx->ifc_flags & IFC_PREFETCH) {
3312 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
3313 		if (!(sctx->isc_flags & IFLIB_HAS_TXCQ)) {
3314 			next_txd = calc_next_txd(txq, cidx, 0);
3315 			prefetch(next_txd);
3316 		}
3317 
3318 		/* prefetch the next cache line of mbuf pointers and flags */
3319 		prefetch(&txq->ift_sds.ifsd_m[next]);
3320 		if (txq->ift_sds.ifsd_map != NULL) {
3321 			prefetch(&txq->ift_sds.ifsd_map[next]);
3322 			next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
3323 			prefetch(&txq->ift_sds.ifsd_flags[next]);
3324 		}
3325 	} else if (txq->ift_sds.ifsd_map != NULL)
3326 		map = txq->ift_sds.ifsd_map[pidx];
3327 
3328 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3329 		desc_tag = txq->ift_tso_desc_tag;
3330 		max_segs = scctx->isc_tx_tso_segments_max;
3331 		MPASS(desc_tag != NULL);
3332 		MPASS(max_segs > 0);
3333 	} else {
3334 		desc_tag = txq->ift_desc_tag;
3335 		max_segs = scctx->isc_tx_nsegments;
3336 	}
3337 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3338 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3339 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
3340 		if (err)
3341 			return err;
3342 	}
3343 	m_head = *m_headp;
3344 
3345 	pkt_info_zero(&pi);
3346 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
3347 	pi.ipi_pidx = pidx;
3348 	pi.ipi_qsidx = txq->ift_id;
3349 	pi.ipi_len = m_head->m_pkthdr.len;
3350 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
3351 	pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3352 
3353 	/* deliberate bitwise OR to make one condition */
3354 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
3355 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0))
3356 			return (err);
3357 		m_head = *m_headp;
3358 	}
3359 
3360 retry:
3361 	err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT);
3362 defrag:
3363 	if (__predict_false(err)) {
3364 		switch (err) {
3365 		case EFBIG:
3366 			/* try collapse once and defrag once */
3367 			if (remap == 0) {
3368 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3369 				/* try defrag if collapsing fails */
3370 				if (m_head == NULL)
3371 					remap++;
3372 			}
3373 			if (remap == 1)
3374 				m_head = m_defrag(*m_headp, M_NOWAIT);
3375 			remap++;
3376 			if (__predict_false(m_head == NULL))
3377 				goto defrag_failed;
3378 			txq->ift_mbuf_defrag++;
3379 			*m_headp = m_head;
3380 			goto retry;
3382 		case ENOMEM:
3383 			txq->ift_no_tx_dma_setup++;
3384 			break;
3385 		default:
3386 			txq->ift_no_tx_dma_setup++;
3387 			m_freem(*m_headp);
3388 			DBG_COUNTER_INC(tx_frees);
3389 			*m_headp = NULL;
3390 			break;
3391 		}
3392 		txq->ift_map_failed++;
3393 		DBG_COUNTER_INC(encap_load_mbuf_fail);
3394 		return (err);
3395 	}
3396 
3397 	/*
3398 	 * XXX assumes a 1 to 1 relationship between segments and
3399 	 *        descriptors - this does not hold true on all drivers, e.g.
3400 	 *        cxgb
3401 	 */
3402 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3403 		txq->ift_no_desc_avail++;
3404 		if (map != NULL)
3405 			bus_dmamap_unload(desc_tag, map);
3406 		DBG_COUNTER_INC(encap_txq_avail_fail);
3407 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
3408 			GROUPTASK_ENQUEUE(&txq->ift_task);
3409 		return (ENOBUFS);
3410 	}
3411 	/*
3412 	 * On Intel cards we can greatly reduce the number of TX interrupts
3413 	 * we see by only setting report status on every Nth descriptor.
3414 	 * However, this also means that the driver will need to keep track
3415 	 * of the descriptors that RS was set on to check them for the DD bit.
3416 	 */
3417 	txq->ift_rs_pending += nsegs + 1;
3418 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
3419 	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
3420 		pi.ipi_flags |= IPI_TX_INTR;
3421 		txq->ift_rs_pending = 0;
3422 	}
3423 
3424 	pi.ipi_segs = segs;
3425 	pi.ipi_nsegs = nsegs;
3426 
3427 	MPASS(pidx >= 0 && pidx < txq->ift_size);
3428 #ifdef PKT_DEBUG
3429 	print_pkt(&pi);
3430 #endif
3431 	if (map != NULL)
3432 		bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
3433 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
3434 		if (map != NULL)
3435 			bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3436 					BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3437 		DBG_COUNTER_INC(tx_encap);
3438 		MPASS(pi.ipi_new_pidx < txq->ift_size);
3439 
3440 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
3441 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
3442 			ndesc += txq->ift_size;
3443 			txq->ift_gen = 1;
3444 		}
3445 		/*
3446 		 * drivers can need as many as
3447 		 * two sentinels
3448 		 */
3449 		MPASS(ndesc <= pi.ipi_nsegs + 2);
3450 		MPASS(pi.ipi_new_pidx != pidx);
3451 		MPASS(ndesc > 0);
3452 		txq->ift_in_use += ndesc;
3453 
3454 		/*
3455 		 * We update the last software descriptor again here because there may
3456 		 * be a sentinel and/or there may be more mbufs than segments
3457 		 */
3458 		txq->ift_pidx = pi.ipi_new_pidx;
3459 		txq->ift_npending += pi.ipi_ndescs;
3460 	} else {
3461 		*m_headp = m_head = iflib_remove_mbuf(txq);
3462 		if (err == EFBIG) {
3463 			txq->ift_txd_encap_efbig++;
3464 			if (remap < 2) {
3465 				remap = 1;
3466 				goto defrag;
3467 			}
3468 		}
3469 		DBG_COUNTER_INC(encap_txd_encap_fail);
3470 		goto defrag_failed;
3471 	}
3472 	return (err);
3473 
3474 defrag_failed:
3475 	txq->ift_mbuf_defrag_failed++;
3476 	txq->ift_map_failed++;
3477 	m_freem(*m_headp);
3478 	DBG_COUNTER_INC(tx_frees);
3479 	*m_headp = NULL;
3480 	return (ENOMEM);
3481 }
3482 
3483 static void
3484 iflib_tx_desc_free(iflib_txq_t txq, int n)
3485 {
3486 	int hasmap;
3487 	uint32_t qsize, cidx, mask, gen;
3488 	struct mbuf *m, **ifsd_m;
3489 	uint8_t *ifsd_flags;
3490 	bus_dmamap_t *ifsd_map;
3491 	bool do_prefetch;
3492 
3493 	cidx = txq->ift_cidx;
3494 	gen = txq->ift_gen;
3495 	qsize = txq->ift_size;
3496 	mask = qsize-1;
3497 	hasmap = txq->ift_sds.ifsd_map != NULL;
3498 	ifsd_flags = txq->ift_sds.ifsd_flags;
3499 	ifsd_m = txq->ift_sds.ifsd_m;
3500 	ifsd_map = txq->ift_sds.ifsd_map;
3501 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
3502 
3503 	while (n-- > 0) {
3504 		if (do_prefetch) {
3505 			prefetch(ifsd_m[(cidx + 3) & mask]);
3506 			prefetch(ifsd_m[(cidx + 4) & mask]);
3507 		}
3508 		if (ifsd_m[cidx] != NULL) {
3509 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
3510 			prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]);
3511 			if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) {
3512 				/*
3513 				 * does it matter if it's not the TSO tag? If so we'll
3514 				 * have to add the type to flags
3515 				 */
3516 				bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]);
3517 				ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED;
3518 			}
3519 			if ((m = ifsd_m[cidx]) != NULL) {
3520 				/* XXX we don't support any drivers that batch packets yet */
3521 				MPASS(m->m_nextpkt == NULL);
3522 				/* if the number of clusters exceeds the number of segments
3523 				 * there won't be space on the ring to save a pointer to each
3524 				 * cluster so we simply free the list here
3525 				 */
3526 				if (m->m_flags & M_TOOBIG) {
3527 					m_freem(m);
3528 				} else {
3529 					m_free(m);
3530 				}
3531 				ifsd_m[cidx] = NULL;
3532 #if MEMORY_LOGGING
3533 				txq->ift_dequeued++;
3534 #endif
3535 				DBG_COUNTER_INC(tx_frees);
3536 			}
3537 		}
3538 		if (__predict_false(++cidx == qsize)) {
3539 			cidx = 0;
3540 			gen = 0;
3541 		}
3542 	}
3543 	txq->ift_cidx = cidx;
3544 	txq->ift_gen = gen;
3545 }
3546 
3547 static __inline int
3548 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
3549 {
3550 	int reclaim;
3551 	if_ctx_t ctx = txq->ift_ctx;
3552 
3553 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
3554 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
3555 
3556 	/*
3557 	 * Need a rate-limiting check so that this isn't called every time
3558 	 */
3559 	iflib_tx_credits_update(ctx, txq);
3560 	reclaim = DESC_RECLAIMABLE(txq);
3561 
3562 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
3563 #ifdef INVARIANTS
3564 		if (iflib_verbose_debug) {
3565 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
3566 			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
3567 			       reclaim, thresh);
3568 
3569 		}
3570 #endif
3571 		return (0);
3572 	}
3573 	iflib_tx_desc_free(txq, reclaim);
3574 	txq->ift_cleaned += reclaim;
3575 	txq->ift_in_use -= reclaim;
3576 
3577 	return (reclaim);
3578 }
3579 
3580 static struct mbuf **
3581 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
3582 {
3583 	int next, size;
3584 	struct mbuf **items;
3585 
3586 	size = r->size;
3587 	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
3588 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
3589 
3590 	prefetch(items[(cidx + offset) & (size-1)]);
3591 	if (remaining > 1) {
3592 		prefetch2cachelines(&items[next]);
3593 		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
3594 		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
3595 		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
3596 	}
3597 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
3598 }
3599 
3600 static void
3601 iflib_txq_check_drain(iflib_txq_t txq, int budget)
3602 {
3603 
3604 	ifmp_ring_check_drainage(txq->ift_br, budget);
3605 }
3606 
3607 static uint32_t
3608 iflib_txq_can_drain(struct ifmp_ring *r)
3609 {
3610 	iflib_txq_t txq = r->cookie;
3611 	if_ctx_t ctx = txq->ift_ctx;
3612 
3613 	return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
3614 		ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
3615 }
3616 
3617 static uint32_t
3618 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3619 {
3620 	iflib_txq_t txq = r->cookie;
3621 	if_ctx_t ctx = txq->ift_ctx;
3622 	struct ifnet *ifp = ctx->ifc_ifp;
3623 	struct mbuf **mp, *m;
3624 	int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
3625 	int reclaimed, err, in_use_prev, desc_used;
3626 	bool do_prefetch, ring, rang;
3627 
3628 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
3629 			    !LINK_ACTIVE(ctx))) {
3630 		DBG_COUNTER_INC(txq_drain_notready);
3631 		return (0);
3632 	}
3633 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
3634 	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
3635 	avail = IDXDIFF(pidx, cidx, r->size);
3636 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
3637 		DBG_COUNTER_INC(txq_drain_flushing);
3638 		for (i = 0; i < avail; i++) {
3639 			if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq))
3640 				m_free(r->items[(cidx + i) & (r->size-1)]);
3641 			r->items[(cidx + i) & (r->size-1)] = NULL;
3642 		}
3643 		return (avail);
3644 	}
3645 
3646 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3647 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3648 		CALLOUT_LOCK(txq);
3649 		callout_stop(&txq->ift_timer);
3650 		CALLOUT_UNLOCK(txq);
3651 		DBG_COUNTER_INC(txq_drain_oactive);
3652 		return (0);
3653 	}
3654 	if (reclaimed)
3655 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3656 	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
3657 	count = MIN(avail, TX_BATCH_SIZE);
3658 #ifdef INVARIANTS
3659 	if (iflib_verbose_debug)
3660 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3661 		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3662 #endif
3663 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3664 	avail = TXQ_AVAIL(txq);
3665 	err = 0;
3666 	for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
3667 		int rem = do_prefetch ? count - i : 0;
3668 
3669 		mp = _ring_peek_one(r, cidx, i, rem);
3670 		MPASS(mp != NULL && *mp != NULL);
3671 		if (__predict_false(*mp == (struct mbuf *)txq)) {
3672 			consumed++;
3673 			reclaimed++;
3674 			continue;
3675 		}
3676 		in_use_prev = txq->ift_in_use;
3677 		err = iflib_encap(txq, mp);
		if (__predict_false(err)) {
			DBG_COUNTER_INC(txq_drain_encapfail);
			/* no room - bail out */
			if (err == ENOBUFS)
				break;
			consumed++;
			/* we can't send this packet - skip it */
			continue;
		}
3688 		consumed++;
3689 		pkt_sent++;
3690 		m = *mp;
3691 		DBG_COUNTER_INC(tx_sent);
3692 		bytes_sent += m->m_pkthdr.len;
3693 		mcast_sent += !!(m->m_flags & M_MCAST);
3694 		avail = TXQ_AVAIL(txq);
3695 
3696 		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
3697 		desc_used += (txq->ift_in_use - in_use_prev);
3698 		ETHER_BPF_MTAP(ifp, m);
3699 		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
3700 			break;
3701 		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
3702 	}
3703 
3704 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
	ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
3706 	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
3707 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
3708 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
3709 	if (mcast_sent)
3710 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3711 #ifdef INVARIANTS
3712 	if (iflib_verbose_debug)
3713 		printf("consumed=%d\n", consumed);
3714 #endif
3715 	return (consumed);
3716 }
3717 
3718 static uint32_t
3719 iflib_txq_drain_always(struct ifmp_ring *r)
3720 {
3721 	return (1);
3722 }
3723 
3724 static uint32_t
3725 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3726 {
3727 	int i, avail;
3728 	struct mbuf **mp;
3729 	iflib_txq_t txq;
3730 
3731 	txq = r->cookie;
3732 
3733 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3734 	CALLOUT_LOCK(txq);
3735 	callout_stop(&txq->ift_timer);
3736 	CALLOUT_UNLOCK(txq);
3737 
3738 	avail = IDXDIFF(pidx, cidx, r->size);
3739 	for (i = 0; i < avail; i++) {
3740 		mp = _ring_peek_one(r, cidx, i, avail - i);
3741 		if (__predict_false(*mp == (struct mbuf *)txq))
3742 			continue;
3743 		m_freem(*mp);
3744 	}
3745 	MPASS(ifmp_ring_is_stalled(r) == 0);
3746 	return (avail);
3747 }
3748 
3749 static void
3750 iflib_ifmp_purge(iflib_txq_t txq)
3751 {
3752 	struct ifmp_ring *r;
3753 
3754 	r = txq->ift_br;
3755 	r->drain = iflib_txq_drain_free;
3756 	r->can_drain = iflib_txq_drain_always;
3757 
3758 	ifmp_ring_check_drainage(r, r->size);
3759 
3760 	r->drain = iflib_txq_drain;
3761 	r->can_drain = iflib_txq_can_drain;
3762 }
3763 
3764 static void
3765 _task_fn_tx(void *context)
3766 {
3767 	iflib_txq_t txq = context;
3768 	if_ctx_t ctx = txq->ift_ctx;
3769 	struct ifnet *ifp = ctx->ifc_ifp;
3770 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
3771 
3772 #ifdef IFLIB_DIAGNOSTICS
3773 	txq->ift_cpu_exec_count[curcpu]++;
3774 #endif
3775 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3776 		return;
3777 	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
3778 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
3779 			netmap_tx_irq(ifp, txq->ift_id);
3780 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3781 		return;
3782 	}
3783 #ifdef ALTQ
3784 	if (ALTQ_IS_ENABLED(&ifp->if_snd))
3785 		iflib_altq_if_start(ifp);
3786 #endif
3787 	if (txq->ift_db_pending)
3788 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
3789 	else if (!abdicate)
3790 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3791 	/*
3792 	 * When abdicating, we always need to check drainage, not just when we don't enqueue
3793 	 */
3794 	if (abdicate)
3795 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3797 	if (ctx->ifc_flags & IFC_LEGACY)
3798 		IFDI_INTR_ENABLE(ctx);
3799 	else {
3800 #ifdef INVARIANTS
3801 		int rc =
3802 #endif
3803 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3804 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3805 	}
3806 }
3807 
3808 static void
3809 _task_fn_rx(void *context)
3810 {
3811 	iflib_rxq_t rxq = context;
3812 	if_ctx_t ctx = rxq->ifr_ctx;
3813 	bool more;
3814 	uint16_t budget;
3815 
3816 #ifdef IFLIB_DIAGNOSTICS
3817 	rxq->ifr_cpu_exec_count[curcpu]++;
3818 #endif
3819 	DBG_COUNTER_INC(task_fn_rxs);
3820 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3821 		return;
3822 	more = true;
3823 #ifdef DEV_NETMAP
3824 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
3825 		u_int work = 0;
3826 		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
3827 			more = false;
3828 		}
3829 	}
3830 #endif
3831 	budget = ctx->ifc_sysctl_rx_budget;
3832 	if (budget == 0)
3833 		budget = 16;	/* XXX */
3834 	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
3835 		if (ctx->ifc_flags & IFC_LEGACY)
3836 			IFDI_INTR_ENABLE(ctx);
3837 		else {
3838 #ifdef INVARIANTS
3839 			int rc =
3840 #endif
3841 				IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
3842 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3843 			DBG_COUNTER_INC(rx_intr_enables);
3844 		}
3845 	}
3846 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3847 		return;
3848 	if (more)
3849 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
3850 }
3851 
3852 static void
3853 _task_fn_admin(void *context)
3854 {
3855 	if_ctx_t ctx = context;
3856 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
3857 	iflib_txq_t txq;
3858 	int i;
3859 	bool oactive, running, do_reset, do_watchdog;
3860 	uint32_t reset_on = hz / 2;
3861 
3862 	STATE_LOCK(ctx);
3863 	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
3864 	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
3865 	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
3866 	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
3867 	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
3868 	STATE_UNLOCK(ctx);
3869 
	if ((!running && !oactive) &&
3871 	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
3872 		return;
3873 
3874 	CTX_LOCK(ctx);
3875 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3876 		CALLOUT_LOCK(txq);
3877 		callout_stop(&txq->ift_timer);
3878 		CALLOUT_UNLOCK(txq);
3879 	}
3880 	if (do_watchdog) {
3881 		ctx->ifc_watchdog_events++;
3882 		IFDI_WATCHDOG_RESET(ctx);
3883 	}
3884 	IFDI_UPDATE_ADMIN_STATUS(ctx);
3885 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3886 #ifdef DEV_NETMAP
3887 		reset_on = hz / 2;
3888 		if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
3889 			iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
3890 #endif
3891 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
3892 	}
3893 	IFDI_LINK_INTR_ENABLE(ctx);
3894 	if (do_reset)
3895 		iflib_if_init_locked(ctx);
3896 	CTX_UNLOCK(ctx);
3897 
3898 	if (LINK_ACTIVE(ctx) == 0)
3899 		return;
3900 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
3901 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
3902 }
3903 
3904 
3905 static void
3906 _task_fn_iov(void *context)
3907 {
3908 	if_ctx_t ctx = context;
3909 
3910 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3911 		return;
3912 
3913 	CTX_LOCK(ctx);
3914 	IFDI_VFLR_HANDLE(ctx);
3915 	CTX_UNLOCK(ctx);
3916 }
3917 
3918 static int
3919 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3920 {
3921 	int err;
3922 	if_int_delay_info_t info;
3923 	if_ctx_t ctx;
3924 
3925 	info = (if_int_delay_info_t)arg1;
3926 	ctx = info->iidi_ctx;
3927 	info->iidi_req = req;
3928 	info->iidi_oidp = oidp;
3929 	CTX_LOCK(ctx);
3930 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
3931 	CTX_UNLOCK(ctx);
3932 	return (err);
3933 }
3934 
3935 /*********************************************************************
3936  *
3937  *  IFNET FUNCTIONS
3938  *
3939  **********************************************************************/
3940 
3941 static void
3942 iflib_if_init_locked(if_ctx_t ctx)
3943 {
3944 	iflib_stop(ctx);
3945 	iflib_init_locked(ctx);
3946 }
3947 
3948 
3949 static void
3950 iflib_if_init(void *arg)
3951 {
3952 	if_ctx_t ctx = arg;
3953 
3954 	CTX_LOCK(ctx);
3955 	iflib_if_init_locked(ctx);
3956 	CTX_UNLOCK(ctx);
3957 }
3958 
3959 static int
3960 iflib_if_transmit(if_t ifp, struct mbuf *m)
3961 {
3962 	if_ctx_t	ctx = if_getsoftc(ifp);
3963 
3964 	iflib_txq_t txq;
3965 	int err, qidx;
3966 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
3967 
3968 	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
3969 		DBG_COUNTER_INC(tx_frees);
3970 		m_freem(m);
3971 		return (ENOBUFS);
3972 	}
3973 
3974 	MPASS(m->m_nextpkt == NULL);
3975 	/* ALTQ-enabled interfaces always use queue 0. */
3976 	qidx = 0;
3977 	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd))
3978 		qidx = QIDX(ctx, m);
3979 	/*
3980 	 * XXX calculate buf_ring based on flowid (divvy up bits?)
3981 	 */
3982 	txq = &ctx->ifc_txqs[qidx];
3983 
3984 #ifdef DRIVER_BACKPRESSURE
3985 	if (txq->ift_closed) {
3986 		while (m != NULL) {
3987 			next = m->m_nextpkt;
3988 			m->m_nextpkt = NULL;
3989 			m_freem(m);
3990 			m = next;
3991 		}
3992 		return (ENOBUFS);
3993 	}
3994 #endif
3995 #ifdef notyet
3996 	qidx = count = 0;
3997 	mp = marr;
3998 	next = m;
3999 	do {
4000 		count++;
4001 		next = next->m_nextpkt;
4002 	} while (next != NULL);
4003 
4004 	if (count > nitems(marr))
4005 		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
4006 			/* XXX check nextpkt */
4007 			m_freem(m);
4008 			/* XXX simplify for now */
4009 			DBG_COUNTER_INC(tx_frees);
4010 			return (ENOBUFS);
4011 		}
4012 	for (next = m, i = 0; next != NULL; i++) {
4013 		mp[i] = next;
4014 		next = next->m_nextpkt;
4015 		mp[i]->m_nextpkt = NULL;
4016 	}
4017 #endif
4018 	DBG_COUNTER_INC(tx_seen);
4019 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
4020 
4021 	if (abdicate)
4022 		GROUPTASK_ENQUEUE(&txq->ift_task);
	if (err) {
4024 		if (!abdicate)
4025 			GROUPTASK_ENQUEUE(&txq->ift_task);
4026 		/* support forthcoming later */
4027 #ifdef DRIVER_BACKPRESSURE
4028 		txq->ift_closed = TRUE;
4029 #endif
4030 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4031 		m_freem(m);
4032 	}
4033 
4034 	return (err);
4035 }
4036 
4037 #ifdef ALTQ
4038 /*
4039  * The overall approach to integrating iflib with ALTQ is to continue to use
4040  * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
4041  * ring.  Technically, when using ALTQ, queueing to an intermediate mp_ring
4042  * is redundant/unnecessary, but doing so minimizes the amount of
4043  * ALTQ-specific code required in iflib.  It is assumed that the overhead of
4044  * redundantly queueing to an intermediate mp_ring is swamped by the
4045  * performance limitations inherent in using ALTQ.
4046  *
4047  * When ALTQ support is compiled in, all iflib drivers will use a transmit
4048  * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
4049  * given interface.  If ALTQ is enabled for an interface, then all
4050  * transmitted packets for that interface will be submitted to the ALTQ
4051  * subsystem via IFQ_ENQUEUE().  We don't use the legacy if_transmit()
4052  * implementation because it uses IFQ_HANDOFF(), which will duplicatively
4053  * update stats that the iflib machinery handles, and which is sensitve to
4054  * the disused IFF_DRV_OACTIVE flag.  Additionally, iflib_altq_if_start()
4055  * will be installed as the start routine for use by ALTQ facilities that
4056  * need to trigger queue drains on a scheduled basis.
4057  *
4058  */
4059 static void
4060 iflib_altq_if_start(if_t ifp)
4061 {
4062 	struct ifaltq *ifq = &ifp->if_snd;
4063 	struct mbuf *m;
4064 
4065 	IFQ_LOCK(ifq);
4066 	IFQ_DEQUEUE_NOLOCK(ifq, m);
4067 	while (m != NULL) {
4068 		iflib_if_transmit(ifp, m);
4069 		IFQ_DEQUEUE_NOLOCK(ifq, m);
4070 	}
4071 	IFQ_UNLOCK(ifq);
4072 }
4073 
4074 static int
4075 iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
4076 {
4077 	int err;
4078 
4079 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
4080 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
4081 		if (err == 0)
4082 			iflib_altq_if_start(ifp);
4083 	} else
4084 		err = iflib_if_transmit(ifp, m);
4085 
4086 	return (err);
4087 }
4088 #endif /* ALTQ */
4089 
4090 static void
4091 iflib_if_qflush(if_t ifp)
4092 {
4093 	if_ctx_t ctx = if_getsoftc(ifp);
4094 	iflib_txq_t txq = ctx->ifc_txqs;
4095 	int i;
4096 
4097 	STATE_LOCK(ctx);
4098 	ctx->ifc_flags |= IFC_QFLUSH;
4099 	STATE_UNLOCK(ctx);
4100 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
4101 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
4102 			iflib_txq_check_drain(txq, 0);
4103 	STATE_LOCK(ctx);
4104 	ctx->ifc_flags &= ~IFC_QFLUSH;
4105 	STATE_UNLOCK(ctx);
4106 
4107 	/*
4108 	 * When ALTQ is enabled, this will also take care of purging the
4109 	 * ALTQ queue(s).
4110 	 */
4111 	if_qflush(ifp);
4112 }
4113 
4114 
4115 #define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
4116 		     IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
4117 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)
4118 
4119 static int
4120 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
4121 {
4122 	if_ctx_t ctx = if_getsoftc(ifp);
4123 	struct ifreq	*ifr = (struct ifreq *)data;
4124 #if defined(INET) || defined(INET6)
4125 	struct ifaddr	*ifa = (struct ifaddr *)data;
4126 #endif
4127 	bool		avoid_reset = FALSE;
4128 	int		err = 0, reinit = 0, bits;
4129 
4130 	switch (command) {
4131 	case SIOCSIFADDR:
4132 #ifdef INET
4133 		if (ifa->ifa_addr->sa_family == AF_INET)
4134 			avoid_reset = TRUE;
4135 #endif
4136 #ifdef INET6
4137 		if (ifa->ifa_addr->sa_family == AF_INET6)
4138 			avoid_reset = TRUE;
4139 #endif
4140 		/*
4141 		** Calling init results in link renegotiation,
4142 		** so we avoid doing it when possible.
4143 		*/
4144 		if (avoid_reset) {
			if_setflagbits(ifp, IFF_UP, 0);
4146 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
4147 				reinit = 1;
4148 #ifdef INET
4149 			if (!(if_getflags(ifp) & IFF_NOARP))
4150 				arp_ifinit(ifp, ifa);
4151 #endif
4152 		} else
4153 			err = ether_ioctl(ifp, command, data);
4154 		break;
4155 	case SIOCSIFMTU:
4156 		CTX_LOCK(ctx);
4157 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
4158 			CTX_UNLOCK(ctx);
4159 			break;
4160 		}
4161 		bits = if_getdrvflags(ifp);
4162 		/* stop the driver and free any clusters before proceeding */
4163 		iflib_stop(ctx);
4164 
4165 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
4166 			STATE_LOCK(ctx);
4167 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
4168 				ctx->ifc_flags |= IFC_MULTISEG;
4169 			else
4170 				ctx->ifc_flags &= ~IFC_MULTISEG;
4171 			STATE_UNLOCK(ctx);
4172 			err = if_setmtu(ifp, ifr->ifr_mtu);
4173 		}
4174 		iflib_init_locked(ctx);
4175 		STATE_LOCK(ctx);
4176 		if_setdrvflags(ifp, bits);
4177 		STATE_UNLOCK(ctx);
4178 		CTX_UNLOCK(ctx);
4179 		break;
4180 	case SIOCSIFFLAGS:
4181 		CTX_LOCK(ctx);
4182 		if (if_getflags(ifp) & IFF_UP) {
4183 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4184 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4185 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4186 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4187 				}
4188 			} else
4189 				reinit = 1;
4190 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4191 			iflib_stop(ctx);
4192 		}
4193 		ctx->ifc_if_flags = if_getflags(ifp);
4194 		CTX_UNLOCK(ctx);
4195 		break;
4196 	case SIOCADDMULTI:
4197 	case SIOCDELMULTI:
4198 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4199 			CTX_LOCK(ctx);
4200 			IFDI_INTR_DISABLE(ctx);
4201 			IFDI_MULTI_SET(ctx);
4202 			IFDI_INTR_ENABLE(ctx);
4203 			CTX_UNLOCK(ctx);
4204 		}
4205 		break;
4206 	case SIOCSIFMEDIA:
4207 		CTX_LOCK(ctx);
4208 		IFDI_MEDIA_SET(ctx);
4209 		CTX_UNLOCK(ctx);
4210 		/* falls thru */
4211 	case SIOCGIFMEDIA:
4212 	case SIOCGIFXMEDIA:
4213 		err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
4214 		break;
4215 	case SIOCGI2C:
4216 	{
4217 		struct ifi2creq i2c;
4218 
4219 		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
4220 		if (err != 0)
4221 			break;
4222 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4223 			err = EINVAL;
4224 			break;
4225 		}
4226 		if (i2c.len > sizeof(i2c.data)) {
4227 			err = EINVAL;
4228 			break;
4229 		}
4230 
4231 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4232 			err = copyout(&i2c, ifr_data_get_ptr(ifr),
4233 			    sizeof(i2c));
4234 		break;
4235 	}
4236 	case SIOCSIFCAP:
4237 	{
4238 		int mask, setmask;
4239 
4240 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
4241 		setmask = 0;
4242 #ifdef TCP_OFFLOAD
4243 		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
4244 #endif
4245 		setmask |= (mask & IFCAP_FLAGS);
4246 
4247 		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
4248 			setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4249 		if ((mask & IFCAP_WOL) &&
4250 		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0)
4251 			setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC));
4252 		if_vlancap(ifp);
4253 		/*
4254 		 * want to ensure that traffic has stopped before we change any of the flags
4255 		 */
4256 		if (setmask) {
4257 			CTX_LOCK(ctx);
4258 			bits = if_getdrvflags(ifp);
4259 			if (bits & IFF_DRV_RUNNING)
4260 				iflib_stop(ctx);
4261 			STATE_LOCK(ctx);
4262 			if_togglecapenable(ifp, setmask);
4263 			STATE_UNLOCK(ctx);
4264 			if (bits & IFF_DRV_RUNNING)
4265 				iflib_init_locked(ctx);
4266 			STATE_LOCK(ctx);
4267 			if_setdrvflags(ifp, bits);
4268 			STATE_UNLOCK(ctx);
4269 			CTX_UNLOCK(ctx);
4270 		}
4271 		break;
4272 	}
4273 	case SIOCGPRIVATE_0:
4274 	case SIOCSDRVSPEC:
4275 	case SIOCGDRVSPEC:
4276 		CTX_LOCK(ctx);
4277 		err = IFDI_PRIV_IOCTL(ctx, command, data);
4278 		CTX_UNLOCK(ctx);
4279 		break;
4280 	default:
4281 		err = ether_ioctl(ifp, command, data);
4282 		break;
4283 	}
4284 	if (reinit)
4285 		iflib_if_init(ctx);
4286 	return (err);
4287 }
4288 
4289 static uint64_t
4290 iflib_if_get_counter(if_t ifp, ift_counter cnt)
4291 {
4292 	if_ctx_t ctx = if_getsoftc(ifp);
4293 
4294 	return (IFDI_GET_COUNTER(ctx, cnt));
4295 }
4296 
4297 /*********************************************************************
4298  *
4299  *  OTHER FUNCTIONS EXPORTED TO THE STACK
4300  *
4301  **********************************************************************/
4302 
4303 static void
4304 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
4305 {
4306 	if_ctx_t ctx = if_getsoftc(ifp);
4307 
4308 	if ((void *)ctx != arg)
4309 		return;
4310 
4311 	if ((vtag == 0) || (vtag > 4095))
4312 		return;
4313 
4314 	CTX_LOCK(ctx);
4315 	IFDI_VLAN_REGISTER(ctx, vtag);
4316 	/* Re-init to load the changes */
4317 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4318 		iflib_if_init_locked(ctx);
4319 	CTX_UNLOCK(ctx);
4320 }
4321 
4322 static void
4323 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
4324 {
4325 	if_ctx_t ctx = if_getsoftc(ifp);
4326 
4327 	if ((void *)ctx != arg)
4328 		return;
4329 
4330 	if ((vtag == 0) || (vtag > 4095))
4331 		return;
4332 
4333 	CTX_LOCK(ctx);
4334 	IFDI_VLAN_UNREGISTER(ctx, vtag);
4335 	/* Re-init to load the changes */
4336 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4337 		iflib_if_init_locked(ctx);
4338 	CTX_UNLOCK(ctx);
4339 }
4340 
4341 static void
4342 iflib_led_func(void *arg, int onoff)
4343 {
4344 	if_ctx_t ctx = arg;
4345 
4346 	CTX_LOCK(ctx);
4347 	IFDI_LED_FUNC(ctx, onoff);
4348 	CTX_UNLOCK(ctx);
4349 }
4350 
4351 /*********************************************************************
4352  *
4353  *  BUS FUNCTION DEFINITIONS
4354  *
4355  **********************************************************************/
4356 
4357 int
4358 iflib_device_probe(device_t dev)
4359 {
4360 	pci_vendor_info_t *ent;
4361 
4362 	uint16_t	pci_vendor_id, pci_device_id;
4363 	uint16_t	pci_subvendor_id, pci_subdevice_id;
4364 	uint16_t	pci_rev_id;
4365 	if_shared_ctx_t sctx;
4366 
4367 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4368 		return (ENOTSUP);
4369 
4370 	pci_vendor_id = pci_get_vendor(dev);
4371 	pci_device_id = pci_get_device(dev);
4372 	pci_subvendor_id = pci_get_subvendor(dev);
4373 	pci_subdevice_id = pci_get_subdevice(dev);
4374 	pci_rev_id = pci_get_revid(dev);
4375 	if (sctx->isc_parse_devinfo != NULL)
4376 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4377 
4378 	ent = sctx->isc_vendor_info;
4379 	while (ent->pvi_vendor_id != 0) {
4380 		if (pci_vendor_id != ent->pvi_vendor_id) {
4381 			ent++;
4382 			continue;
4383 		}
4384 		if ((pci_device_id == ent->pvi_device_id) &&
4385 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4386 		     (ent->pvi_subvendor_id == 0)) &&
4387 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4388 		     (ent->pvi_subdevice_id == 0)) &&
4389 		    ((pci_rev_id == ent->pvi_rev_id) ||
4390 		     (ent->pvi_rev_id == 0))) {
4391 
4392 			device_set_desc_copy(dev, ent->pvi_name);
4393 			/* this needs to be changed to zero if the bus probing code
4394 			 * ever stops re-probing on best match because the sctx
			 * may have its values overwritten by register calls
4396 			 * in subsequent probes
4397 			 */
4398 			return (BUS_PROBE_DEFAULT);
4399 		}
4400 		ent++;
4401 	}
4402 	return (ENXIO);
4403 }
4404 
4405 static void
4406 iflib_reset_qvalues(if_ctx_t ctx)
4407 {
4408 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4409 	if_shared_ctx_t sctx = ctx->ifc_sctx;
4410 	device_t dev = ctx->ifc_dev;
4411 	int i;
4412 
4413 	scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES;
4414 	scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH;
4415 	/*
4416 	 * XXX sanity check that ntxd & nrxd are a power of 2
4417 	 */
4418 	if (ctx->ifc_sysctl_ntxqs != 0)
4419 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4420 	if (ctx->ifc_sysctl_nrxqs != 0)
4421 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4422 
4423 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4424 		if (ctx->ifc_sysctl_ntxds[i] != 0)
4425 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4426 		else
4427 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4428 	}
4429 
4430 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4431 		if (ctx->ifc_sysctl_nrxds[i] != 0)
4432 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4433 		else
4434 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4435 	}
4436 
4437 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4438 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4439 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4440 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4441 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4442 		}
4443 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4444 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4445 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4446 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4447 		}
4448 	}
4449 
4450 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4451 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4452 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4453 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4454 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4455 		}
4456 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4457 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4458 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4459 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4460 		}
4461 	}
4462 }
4463 
4464 int
4465 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
4466 {
4467 	int err, rid, msix;
4468 	if_ctx_t ctx;
4469 	if_t ifp;
4470 	if_softc_ctx_t scctx;
4471 	int i;
4472 	uint16_t main_txq;
4473 	uint16_t main_rxq;
4474 
4475 
4476 	ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
4477 
4478 	if (sc == NULL) {
4479 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
4480 		device_set_softc(dev, ctx);
4481 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
4482 	}
4483 
4484 	ctx->ifc_sctx = sctx;
4485 	ctx->ifc_dev = dev;
4486 	ctx->ifc_softc = sc;
4487 
4488 	if ((err = iflib_register(ctx)) != 0) {
4489 		if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4490 			free(sc, M_IFLIB);
4491 		free(ctx, M_IFLIB);
4492 		device_printf(dev, "iflib_register failed %d\n", err);
4493 		return (err);
4494 	}
4495 	iflib_add_device_sysctl_pre(ctx);
4496 
4497 	scctx = &ctx->ifc_softc_ctx;
4498 	ifp = ctx->ifc_ifp;
4499 
4500 	iflib_reset_qvalues(ctx);
4501 	CTX_LOCK(ctx);
4502 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
4503 		CTX_UNLOCK(ctx);
4504 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4505 		return (err);
4506 	}
4507 	_iflib_pre_assert(scctx);
4508 	ctx->ifc_txrx = *scctx->isc_txrx;
4509 
4510 #ifdef INVARIANTS
4511 	MPASS(scctx->isc_capabilities);
4512 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
4513 		MPASS(scctx->isc_tx_csum_flags);
4514 #endif
4515 
4516 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS);
4517 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
4518 
4519 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4520 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4521 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4522 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4523 
4524 #ifdef ACPI_DMAR
4525 	if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
4526 		ctx->ifc_flags |= IFC_DMAR;
4527 #elif !(defined(__i386__) || defined(__amd64__))
4528 	/* set unconditionally for !x86 */
4529 	ctx->ifc_flags |= IFC_DMAR;
4530 #endif
4531 
4532 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4533 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4534 
4535 	/* XXX change for per-queue sizes */
4536 	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
4537 		      scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4538 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4539 		if (!powerof2(scctx->isc_nrxd[i])) {
4540 			/* round down instead? */
4541 			device_printf(dev, "# rx descriptors must be a power of 2\n");
4542 			err = EINVAL;
4543 			goto fail;
4544 		}
4545 	}
4546 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4547 		if (!powerof2(scctx->isc_ntxd[i])) {
			device_printf(dev,
			    "# tx descriptors must be a power of 2\n");
4550 			err = EINVAL;
4551 			goto fail;
4552 		}
4553 	}
4554 
4555 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4556 	    MAX_SINGLE_PACKET_FRACTION)
4557 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4558 		    MAX_SINGLE_PACKET_FRACTION);
4559 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4560 	    MAX_SINGLE_PACKET_FRACTION)
4561 		scctx->isc_tx_tso_segments_max = max(1,
4562 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4563 
4564 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4565 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
4566 		/*
4567 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4568 		 * but some MACs do.
4569 		 */
4570 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4571 		    IP_MAXPACKET));
4572 		/*
4573 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4574 		 * into account.  In the worst case, each of these calls will
4575 		 * add another mbuf and, thus, the requirement for another DMA
4576 		 * segment.  So for best performance, it doesn't make sense to
		 * advertise a maximum of TSO segments that typically will
4578 		 * require defragmentation in iflib_encap().
4579 		 */
4580 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4581 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4582 	}
4583 	if (scctx->isc_rss_table_size == 0)
4584 		scctx->isc_rss_table_size = 64;
4585 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4586 
4587 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4588 	/* XXX format name */
4589 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
4590 
4591 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
4592 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4593 		device_printf(dev, "Unable to fetch CPU list\n");
4594 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4595 	}
4596 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4597 
4598 	/*
4599 	** Now setup MSI or MSI/X, should
4600 	** return us the number of supported
4601 	** vectors. (Will be 1 for MSI)
4602 	*/
4603 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
4604 		msix = scctx->isc_vectors;
4605 	} else if (scctx->isc_msix_bar != 0)
4606 	       /*
		* The simple fact that isc_msix_bar is not 0 does not mean
		* we have a good value there that is known to work.
4609 		*/
4610 		msix = iflib_msix_init(ctx);
4611 	else {
4612 		scctx->isc_vectors = 1;
4613 		scctx->isc_ntxqsets = 1;
4614 		scctx->isc_nrxqsets = 1;
4615 		scctx->isc_intr = IFLIB_INTR_LEGACY;
4616 		msix = 0;
4617 	}
4618 	/* Get memory for the station queues */
4619 	if ((err = iflib_queues_alloc(ctx))) {
4620 		device_printf(dev, "Unable to allocate queue memory\n");
4621 		goto fail;
4622 	}
4623 
4624 	if ((err = iflib_qset_structures_setup(ctx)))
4625 		goto fail_queues;
4626 
4627 	/*
4628 	 * Group taskqueues aren't properly set up until SMP is started,
4629 	 * so we disable interrupts until we can handle them post
4630 	 * SI_SUB_SMP.
4631 	 *
4632 	 * XXX: disabling interrupts doesn't actually work, at least for
4633 	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
4634 	 * we do null handling and depend on this not causing too large an
4635 	 * interrupt storm.
4636 	 */
4637 	IFDI_INTR_DISABLE(ctx);
4638 	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
4639 		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
4640 		goto fail_intr_free;
4641 	}
4642 	if (msix <= 1) {
4643 		rid = 0;
4644 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
4645 			MPASS(msix == 1);
4646 			rid = 1;
4647 		}
4648 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
4649 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
4650 			goto fail_intr_free;
4651 		}
4652 	}
4653 
4654 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4655 
4656 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4657 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4658 		goto fail_detach;
4659 	}
4660 
4661 	/*
4662 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4663 	 * This must appear after the call to ether_ifattach() because
4664 	 * ether_ifattach() sets if_hdrlen to the default value.
4665 	 */
4666 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4667 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
4668 
4669 	if ((err = iflib_netmap_attach(ctx))) {
4670 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
4671 		goto fail_detach;
4672 	}
4673 	*ctxp = ctx;
4674 
4675 	NETDUMP_SET(ctx->ifc_ifp, iflib);
4676 
4677 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4678 	iflib_add_device_sysctl_post(ctx);
4679 	ctx->ifc_flags |= IFC_INIT_DONE;
4680 	CTX_UNLOCK(ctx);
4681 	return (0);
4682 fail_detach:
4683 	ether_ifdetach(ctx->ifc_ifp);
4684 fail_intr_free:
4685 	if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
4686 		pci_release_msi(ctx->ifc_dev);
4687 fail_queues:
4688 	iflib_tx_structures_free(ctx);
4689 	iflib_rx_structures_free(ctx);
4690 fail:
4691 	IFDI_DETACH(ctx);
4692 	CTX_UNLOCK(ctx);
4693 	return (err);
4694 }
4695 
4696 int
4697 iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
4698 					  struct iflib_cloneattach_ctx *clctx)
4699 {
4700 	int err;
4701 	if_ctx_t ctx;
4702 	if_t ifp;
4703 	if_softc_ctx_t scctx;
4704 	int i;
4705 	void *sc;
4706 	uint16_t main_txq;
4707 	uint16_t main_rxq;
4708 
4709 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
4710 	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
4711 	ctx->ifc_flags |= IFC_SC_ALLOCATED;
4712 	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
4713 		ctx->ifc_flags |= IFC_PSEUDO;
4714 
4715 	ctx->ifc_sctx = sctx;
4716 	ctx->ifc_softc = sc;
4717 	ctx->ifc_dev = dev;
4718 
4719 	if ((err = iflib_register(ctx)) != 0) {
4720 		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
4721 		free(sc, M_IFLIB);
4722 		free(ctx, M_IFLIB);
4723 		return (err);
4724 	}
4725 	iflib_add_device_sysctl_pre(ctx);
4726 
4727 	scctx = &ctx->ifc_softc_ctx;
4728 	ifp = ctx->ifc_ifp;
4729 
4730 	/*
4731 	 * XXX sanity check that ntxd & nrxd are a power of 2
4732 	 */
4733 	iflib_reset_qvalues(ctx);
4734 
4735 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
4736 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4737 		return (err);
4738 	}
4739 	if (sctx->isc_flags & IFLIB_GEN_MAC)
4740 		iflib_gen_mac(ctx);
4741 	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
4742 								clctx->cc_params)) != 0) {
4743 		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
4744 		return (err);
4745 	}
4746 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
4747 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
4748 	ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO);
4749 
4750 #ifdef INVARIANTS
4751 	MPASS(scctx->isc_capabilities);
4752 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
4753 		MPASS(scctx->isc_tx_csum_flags);
4754 #endif
4755 
4756 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
4757 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
4758 
4759 	ifp->if_flags |= IFF_NOGROUP;
4760 	if (sctx->isc_flags & IFLIB_PSEUDO) {
4761 		ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4762 
4763 		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4764 			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4765 			goto fail_detach;
4766 		}
4767 		*ctxp = ctx;
4768 
4769 		/*
4770 		 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4771 		 * This must appear after the call to ether_ifattach() because
4772 		 * ether_ifattach() sets if_hdrlen to the default value.
4773 		 */
4774 		if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4775 			if_setifheaderlen(ifp,
4776 			    sizeof(struct ether_vlan_header));
4777 
4778 		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4779 		iflib_add_device_sysctl_post(ctx);
4780 		ctx->ifc_flags |= IFC_INIT_DONE;
4781 		return (0);
4782 	}
4783 	_iflib_pre_assert(scctx);
4784 	ctx->ifc_txrx = *scctx->isc_txrx;
4785 
4786 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4787 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4788 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4789 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4790 
4791 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4792 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4793 
4794 	/* XXX change for per-queue sizes */
4795 	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
4796 		      scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4797 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4798 		if (!powerof2(scctx->isc_nrxd[i])) {
4799 			/* round down instead? */
4800 			device_printf(dev, "# rx descriptors must be a power of 2\n");
4801 			err = EINVAL;
4802 			goto fail;
4803 		}
4804 	}
4805 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4806 		if (!powerof2(scctx->isc_ntxd[i])) {
			device_printf(dev,
			    "# tx descriptors must be a power of 2\n");
4809 			err = EINVAL;
4810 			goto fail;
4811 		}
4812 	}
4813 
4814 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4815 	    MAX_SINGLE_PACKET_FRACTION)
4816 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4817 		    MAX_SINGLE_PACKET_FRACTION);
4818 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4819 	    MAX_SINGLE_PACKET_FRACTION)
4820 		scctx->isc_tx_tso_segments_max = max(1,
4821 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4822 
4823 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4824 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
4825 		/*
4826 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4827 		 * but some MACs do.
4828 		 */
4829 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4830 		    IP_MAXPACKET));
4831 		/*
4832 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4833 		 * into account.  In the worst case, each of these calls will
4834 		 * add another mbuf and, thus, the requirement for another DMA
4835 		 * segment.  So for best performance, it doesn't make sense to
		 * advertise a maximum of TSO segments that typically will
4837 		 * require defragmentation in iflib_encap().
4838 		 */
4839 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4840 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4841 	}
4842 	if (scctx->isc_rss_table_size == 0)
4843 		scctx->isc_rss_table_size = 64;
4844 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4845 
4846 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4847 	/* XXX format name */
4848 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
4849 
4850 	/* XXX --- can support > 1 -- but keep it simple for now */
4851 	scctx->isc_intr = IFLIB_INTR_LEGACY;
4852 
4853 	/* Get memory for the station queues */
4854 	if ((err = iflib_queues_alloc(ctx))) {
4855 		device_printf(dev, "Unable to allocate queue memory\n");
4856 		goto fail;
4857 	}
4858 
4859 	if ((err = iflib_qset_structures_setup(ctx))) {
4860 		device_printf(dev, "qset structure setup failed %d\n", err);
4861 		goto fail_queues;
4862 	}
4863 
4864 	/*
4865 	 * XXX What if anything do we want to do about interrupts?
4866 	 */
4867 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4868 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4869 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4870 		goto fail_detach;
4871 	}
4872 
4873 	/*
4874 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4875 	 * This must appear after the call to ether_ifattach() because
4876 	 * ether_ifattach() sets if_hdrlen to the default value.
4877 	 */
4878 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4879 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
4880 
4881 	/* XXX handle more than one queue */
4882 	for (i = 0; i < scctx->isc_nrxqsets; i++)
4883 		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
4884 
4885 	*ctxp = ctx;
4886 
4887 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4888 	iflib_add_device_sysctl_post(ctx);
4889 	ctx->ifc_flags |= IFC_INIT_DONE;
4890 	return (0);
4891 fail_detach:
4892 	ether_ifdetach(ctx->ifc_ifp);
4893 fail_queues:
4894 	iflib_tx_structures_free(ctx);
4895 	iflib_rx_structures_free(ctx);
4896 fail:
4897 	IFDI_DETACH(ctx);
4898 	return (err);
4899 }
4900 
4901 int
4902 iflib_pseudo_deregister(if_ctx_t ctx)
4903 {
4904 	if_t ifp = ctx->ifc_ifp;
4905 	iflib_txq_t txq;
4906 	iflib_rxq_t rxq;
4907 	int i, j;
4908 	struct taskqgroup *tqg;
4909 	iflib_fl_t fl;
4910 
4911 	/* Unregister VLAN events */
4912 	if (ctx->ifc_vlan_attach_event != NULL)
4913 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4914 	if (ctx->ifc_vlan_detach_event != NULL)
4915 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4916 
4917 	ether_ifdetach(ifp);
	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
4919 	CTX_LOCK_DESTROY(ctx);
4920 	/* XXX drain any dependent tasks */
4921 	tqg = qgroup_if_io_tqg;
4922 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4923 		callout_drain(&txq->ift_timer);
4924 		if (txq->ift_task.gt_uniq != NULL)
4925 			taskqgroup_detach(tqg, &txq->ift_task);
4926 	}
4927 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4928 		if (rxq->ifr_task.gt_uniq != NULL)
4929 			taskqgroup_detach(tqg, &rxq->ifr_task);
4930 
4931 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
4932 			free(fl->ifl_rx_bitmap, M_IFLIB);
4933 	}
4934 	tqg = qgroup_if_config_tqg;
4935 	if (ctx->ifc_admin_task.gt_uniq != NULL)
4936 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
4937 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
4938 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
4939 
4940 	if_free(ifp);
4941 
4942 	iflib_tx_structures_free(ctx);
4943 	iflib_rx_structures_free(ctx);
4944 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4945 		free(ctx->ifc_softc, M_IFLIB);
4946 	free(ctx, M_IFLIB);
4947 	return (0);
4948 }
4949 
4950 int
4951 iflib_device_attach(device_t dev)
4952 {
4953 	if_ctx_t ctx;
4954 	if_shared_ctx_t sctx;
4955 
4956 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4957 		return (ENOTSUP);
4958 
4959 	pci_enable_busmaster(dev);
4960 
4961 	return (iflib_device_register(dev, NULL, sctx, &ctx));
4962 }
4963 
4964 int
4965 iflib_device_deregister(if_ctx_t ctx)
4966 {
4967 	if_t ifp = ctx->ifc_ifp;
4968 	iflib_txq_t txq;
4969 	iflib_rxq_t rxq;
4970 	device_t dev = ctx->ifc_dev;
4971 	int i, j;
4972 	struct taskqgroup *tqg;
4973 	iflib_fl_t fl;
4974 
4975 	/* Make sure VLANS are not using driver */
4976 	if (if_vlantrunkinuse(ifp)) {
4977 		device_printf(dev,"Vlan in use, detach first\n");
4978 		return (EBUSY);
4979 	}
4980 
4981 	CTX_LOCK(ctx);
4982 	ctx->ifc_in_detach = 1;
4983 	iflib_stop(ctx);
4984 	CTX_UNLOCK(ctx);
4985 
4986 	/* Unregister VLAN events */
4987 	if (ctx->ifc_vlan_attach_event != NULL)
4988 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4989 	if (ctx->ifc_vlan_detach_event != NULL)
4990 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4991 
4992 	iflib_netmap_detach(ifp);
4993 	ether_ifdetach(ifp);
4994 	if (ctx->ifc_led_dev != NULL)
4995 		led_destroy(ctx->ifc_led_dev);
4996 	/* XXX drain any dependent tasks */
4997 	tqg = qgroup_if_io_tqg;
4998 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4999 		callout_drain(&txq->ift_timer);
5000 		if (txq->ift_task.gt_uniq != NULL)
5001 			taskqgroup_detach(tqg, &txq->ift_task);
5002 	}
5003 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
5004 		if (rxq->ifr_task.gt_uniq != NULL)
5005 			taskqgroup_detach(tqg, &rxq->ifr_task);
5006 
5007 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
5008 			free(fl->ifl_rx_bitmap, M_IFLIB);
5009 
5010 	}
5011 	tqg = qgroup_if_config_tqg;
5012 	if (ctx->ifc_admin_task.gt_uniq != NULL)
5013 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
5014 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
5015 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
5016 	CTX_LOCK(ctx);
5017 	IFDI_DETACH(ctx);
5018 	CTX_UNLOCK(ctx);
5019 
	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
5021 	CTX_LOCK_DESTROY(ctx);
5022 	device_set_softc(ctx->ifc_dev, NULL);
5023 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
5024 		pci_release_msi(dev);
5025 	}
5026 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
5027 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
5028 	}
5029 	if (ctx->ifc_msix_mem != NULL) {
5030 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
5031 			ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
5032 		ctx->ifc_msix_mem = NULL;
5033 	}
5034 
5035 	bus_generic_detach(dev);
5036 	if_free(ifp);
5037 
5038 	iflib_tx_structures_free(ctx);
5039 	iflib_rx_structures_free(ctx);
5040 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5041 		free(ctx->ifc_softc, M_IFLIB);
5042 	free(ctx, M_IFLIB);
5043 	return (0);
5044 }
5045 
5046 
5047 int
5048 iflib_device_detach(device_t dev)
5049 {
5050 	if_ctx_t ctx = device_get_softc(dev);
5051 
5052 	return (iflib_device_deregister(ctx));
5053 }
5054 
5055 int
5056 iflib_device_suspend(device_t dev)
5057 {
5058 	if_ctx_t ctx = device_get_softc(dev);
5059 
5060 	CTX_LOCK(ctx);
5061 	IFDI_SUSPEND(ctx);
5062 	CTX_UNLOCK(ctx);
5063 
	return (bus_generic_suspend(dev));
}

int
5067 iflib_device_shutdown(device_t dev)
5068 {
5069 	if_ctx_t ctx = device_get_softc(dev);
5070 
5071 	CTX_LOCK(ctx);
5072 	IFDI_SHUTDOWN(ctx);
5073 	CTX_UNLOCK(ctx);
5074 
	return (bus_generic_suspend(dev));
5076 }
5077 
5078 
5079 int
5080 iflib_device_resume(device_t dev)
5081 {
5082 	if_ctx_t ctx = device_get_softc(dev);
5083 	iflib_txq_t txq = ctx->ifc_txqs;
5084 
5085 	CTX_LOCK(ctx);
5086 	IFDI_RESUME(ctx);
5087 	iflib_init_locked(ctx);
5088 	CTX_UNLOCK(ctx);
5089 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
5090 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
5091 
5092 	return (bus_generic_resume(dev));
5093 }
5094 
5095 int
5096 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
5097 {
5098 	int error;
5099 	if_ctx_t ctx = device_get_softc(dev);
5100 
5101 	CTX_LOCK(ctx);
5102 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
5103 	CTX_UNLOCK(ctx);
5104 
5105 	return (error);
5106 }
5107 
5108 void
5109 iflib_device_iov_uninit(device_t dev)
5110 {
5111 	if_ctx_t ctx = device_get_softc(dev);
5112 
5113 	CTX_LOCK(ctx);
5114 	IFDI_IOV_UNINIT(ctx);
5115 	CTX_UNLOCK(ctx);
5116 }
5117 
5118 int
5119 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
5120 {
5121 	int error;
5122 	if_ctx_t ctx = device_get_softc(dev);
5123 
5124 	CTX_LOCK(ctx);
5125 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
5126 	CTX_UNLOCK(ctx);
5127 
5128 	return (error);
5129 }
5130 
5131 /*********************************************************************
5132  *
5133  *  MODULE FUNCTION DEFINITIONS
5134  *
5135  **********************************************************************/
5136 
5137 /*
5138  * - Start a fast taskqueue thread for each core
5139  * - Start a taskqueue for control operations
5140  */
5141 static int
5142 iflib_module_init(void)
5143 {
5144 	return (0);
5145 }
5146 
5147 static int
5148 iflib_module_event_handler(module_t mod, int what, void *arg)
5149 {
5150 	int err;
5151 
5152 	switch (what) {
5153 	case MOD_LOAD:
5154 		if ((err = iflib_module_init()) != 0)
5155 			return (err);
5156 		break;
5157 	case MOD_UNLOAD:
5158 		return (EBUSY);
5159 	default:
5160 		return (EOPNOTSUPP);
5161 	}
5162 
5163 	return (0);
5164 }
5165 
5166 /*********************************************************************
5167  *
5168  *  PUBLIC FUNCTION DEFINITIONS
5169  *     ordered as in iflib.h
5170  *
5171  **********************************************************************/
5172 
5173 
5174 static void
5175 _iflib_assert(if_shared_ctx_t sctx)
5176 {
5177 	MPASS(sctx->isc_tx_maxsize);
5178 	MPASS(sctx->isc_tx_maxsegsize);
5179 
5180 	MPASS(sctx->isc_rx_maxsize);
5181 	MPASS(sctx->isc_rx_nsegments);
5182 	MPASS(sctx->isc_rx_maxsegsize);
5183 
5184 	MPASS(sctx->isc_nrxd_min[0]);
5185 	MPASS(sctx->isc_nrxd_max[0]);
5186 	MPASS(sctx->isc_nrxd_default[0]);
5187 	MPASS(sctx->isc_ntxd_min[0]);
5188 	MPASS(sctx->isc_ntxd_max[0]);
5189 	MPASS(sctx->isc_ntxd_default[0]);
5190 }
5191 
5192 static void
5193 _iflib_pre_assert(if_softc_ctx_t scctx)
5194 {
5195 
5196 	MPASS(scctx->isc_txrx->ift_txd_encap);
5197 	MPASS(scctx->isc_txrx->ift_txd_flush);
5198 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
5199 	MPASS(scctx->isc_txrx->ift_rxd_available);
5200 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
5201 	MPASS(scctx->isc_txrx->ift_rxd_refill);
5202 	MPASS(scctx->isc_txrx->ift_rxd_flush);
5203 }
5204 
5205 static int
5206 iflib_register(if_ctx_t ctx)
5207 {
5208 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5209 	driver_t *driver = sctx->isc_driver;
5210 	device_t dev = ctx->ifc_dev;
5211 	if_t ifp;
5212 
5213 	_iflib_assert(sctx);
5214 
5215 	CTX_LOCK_INIT(ctx);
5216 	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
5217 	ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
5218 	if (ifp == NULL) {
5219 		device_printf(dev, "can not allocate ifnet structure\n");
5220 		return (ENOMEM);
5221 	}
5222 
5223 	/*
5224 	 * Initialize our context's device specific methods
5225 	 */
5226 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
5227 	kobj_class_compile((kobj_class_t) driver);
5228 	driver->refs++;
5229 
5230 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
5231 	if_setsoftc(ifp, ctx);
5232 	if_setdev(ifp, dev);
5233 	if_setinitfn(ifp, iflib_if_init);
5234 	if_setioctlfn(ifp, iflib_if_ioctl);
5235 #ifdef ALTQ
5236 	if_setstartfn(ifp, iflib_altq_if_start);
5237 	if_settransmitfn(ifp, iflib_altq_if_transmit);
5238 	if_setsendqready(ifp);
5239 #else
5240 	if_settransmitfn(ifp, iflib_if_transmit);
5241 #endif
5242 	if_setqflushfn(ifp, iflib_if_qflush);
5243 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
5244 
5245 	ctx->ifc_vlan_attach_event =
5246 		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
5247 							  EVENTHANDLER_PRI_FIRST);
5248 	ctx->ifc_vlan_detach_event =
5249 		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
5250 							  EVENTHANDLER_PRI_FIRST);
5251 
5252 	ifmedia_init(&ctx->ifc_media, IFM_IMASK,
5253 					 iflib_media_change, iflib_media_status);
5254 
5255 	return (0);
5256 }
5257 
5258 
5259 static int
5260 iflib_queues_alloc(if_ctx_t ctx)
5261 {
5262 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5263 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5264 	device_t dev = ctx->ifc_dev;
5265 	int nrxqsets = scctx->isc_nrxqsets;
5266 	int ntxqsets = scctx->isc_ntxqsets;
5267 	iflib_txq_t txq;
5268 	iflib_rxq_t rxq;
5269 	iflib_fl_t fl = NULL;
5270 	int i, j, cpu, err, txconf, rxconf;
5271 	iflib_dma_info_t ifdip;
5272 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
5273 	uint32_t *txqsizes = scctx->isc_txqsizes;
5274 	uint8_t nrxqs = sctx->isc_nrxqs;
5275 	uint8_t ntxqs = sctx->isc_ntxqs;
5276 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
5277 	caddr_t *vaddrs;
5278 	uint64_t *paddrs;
5279 
5280 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
5281 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
5282 
5283 	/* Allocate the TX ring struct memory */
5284 	if (!(ctx->ifc_txqs =
5285 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5286 	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5287 		device_printf(dev, "Unable to allocate TX ring memory\n");
5288 		err = ENOMEM;
5289 		goto fail;
5290 	}
5291 
5292 	/* Now allocate the RX */
5293 	if (!(ctx->ifc_rxqs =
5294 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5295 	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5296 		device_printf(dev, "Unable to allocate RX ring memory\n");
5297 		err = ENOMEM;
5298 		goto rx_fail;
5299 	}
5300 
5301 	txq = ctx->ifc_txqs;
5302 	rxq = ctx->ifc_rxqs;
5303 
5304 	/*
5305 	 * XXX handle allocation failure
5306 	 */
5307 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
5308 		/* Set up some basics */
5309 
5310 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
5311 			device_printf(dev, "failed to allocate iflib_dma_info\n");
5312 			err = ENOMEM;
5313 			goto err_tx_desc;
5314 		}
5315 		txq->ift_ifdi = ifdip;
5316 		for (j = 0; j < ntxqs; j++, ifdip++) {
5317 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
5318 				device_printf(dev, "Unable to allocate Descriptor memory\n");
5319 				err = ENOMEM;
5320 				goto err_tx_desc;
5321 			}
5322 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
5323 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
5324 		}
5325 		txq->ift_ctx = ctx;
5326 		txq->ift_id = i;
5327 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
5328 			txq->ift_br_offset = 1;
5329 		} else {
5330 			txq->ift_br_offset = 0;
5331 		}
5332 		/* XXX fix this */
5333 		txq->ift_timer.c_cpu = cpu;
5334 
5335 		if (iflib_txsd_alloc(txq)) {
5336 			device_printf(dev, "Critical Failure setting up TX buffers\n");
5337 			err = ENOMEM;
5338 			goto err_tx_desc;
5339 		}
5340 
5341 		/* Initialize the TX lock */
5342 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
5343 		    device_get_nameunit(dev), txq->ift_id);
5344 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
5345 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
5346 
5347 		snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
5348 			 device_get_nameunit(dev), txq->ift_id);
5349 
5350 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
5351 				      iflib_txq_can_drain, M_IFLIB, M_WAITOK);
5352 		if (err) {
5353 			/* XXX free any allocated rings */
5354 			device_printf(dev, "Unable to allocate buf_ring\n");
5355 			goto err_tx_desc;
5356 		}
5357 	}
5358 
5359 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
5360 		/* Set up some basics */
5361 
5362 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
5363 			device_printf(dev, "failed to allocate iflib_dma_info\n");
5364 			err = ENOMEM;
5365 			goto err_tx_desc;
5366 		}
5367 
5368 		rxq->ifr_ifdi = ifdip;
5369 		/* XXX this needs to be changed if #rx queues != #tx queues */
5370 		rxq->ifr_ntxqirq = 1;
5371 		rxq->ifr_txqid[0] = i;
5372 		for (j = 0; j < nrxqs; j++, ifdip++) {
5373 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
5374 				device_printf(dev, "Unable to allocate Descriptor memory\n");
5375 				err = ENOMEM;
5376 				goto err_tx_desc;
5377 			}
5378 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
5379 		}
5380 		rxq->ifr_ctx = ctx;
5381 		rxq->ifr_id = i;
5382 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
5383 			rxq->ifr_fl_offset = 1;
5384 		} else {
5385 			rxq->ifr_fl_offset = 0;
5386 		}
5387 		rxq->ifr_nfl = nfree_lists;
5388 		if (!(fl =
5389 			  (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
5390 			device_printf(dev, "Unable to allocate free list memory\n");
5391 			err = ENOMEM;
5392 			goto err_tx_desc;
5393 		}
5394 		rxq->ifr_fl = fl;
5395 		for (j = 0; j < nfree_lists; j++) {
5396 			fl[j].ifl_rxq = rxq;
5397 			fl[j].ifl_id = j;
5398 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
5399 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
5400 		}
5401 		/* Allocate receive buffers for the ring */
5402 		if (iflib_rxsd_alloc(rxq)) {
5403 			device_printf(dev,
5404 			    "Critical Failure setting up receive buffers\n");
5405 			err = ENOMEM;
5406 			goto err_rx_desc;
5407 		}
5408 
5409 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
5410 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO);
5411 	}
5412 
5413 	/* TXQs */
5414 	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
5415 	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
5416 	for (i = 0; i < ntxqsets; i++) {
5417 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
5418 
5419 		for (j = 0; j < ntxqs; j++, di++) {
5420 			vaddrs[i*ntxqs + j] = di->idi_vaddr;
5421 			paddrs[i*ntxqs + j] = di->idi_paddr;
5422 		}
5423 	}
5424 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
5425 		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
5426 		iflib_tx_structures_free(ctx);
5427 		free(vaddrs, M_IFLIB);
5428 		free(paddrs, M_IFLIB);
5429 		goto err_rx_desc;
5430 	}
5431 	free(vaddrs, M_IFLIB);
5432 	free(paddrs, M_IFLIB);
5433 
5434 	/* RXQs */
5435 	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
5436 	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
5437 	for (i = 0; i < nrxqsets; i++) {
5438 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
5439 
5440 		for (j = 0; j < nrxqs; j++, di++) {
5441 			vaddrs[i*nrxqs + j] = di->idi_vaddr;
5442 			paddrs[i*nrxqs + j] = di->idi_paddr;
5443 		}
5444 	}
5445 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
5446 		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
5447 		iflib_tx_structures_free(ctx);
5448 		free(vaddrs, M_IFLIB);
5449 		free(paddrs, M_IFLIB);
5450 		goto err_rx_desc;
5451 	}
5452 	free(vaddrs, M_IFLIB);
5453 	free(paddrs, M_IFLIB);
5454 
5455 	return (0);
5456 
5457 /* XXX handle allocation failure changes */
5458 err_rx_desc:
5459 err_tx_desc:
5460 rx_fail:
5461 	if (ctx->ifc_rxqs != NULL)
5462 		free(ctx->ifc_rxqs, M_IFLIB);
5463 	ctx->ifc_rxqs = NULL;
5464 	if (ctx->ifc_txqs != NULL)
5465 		free(ctx->ifc_txqs, M_IFLIB);
5466 	ctx->ifc_txqs = NULL;
5467 fail:
5468 	return (err);
5469 }
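
/*
 * Note on the layout passed to IFDI_TX_QUEUES_ALLOC() and
 * IFDI_RX_QUEUES_ALLOC() above: the vaddrs/paddrs arrays are qset-major,
 * so the j'th ring of qset i lives at index i * nqs + j.  For example,
 * with ntxqsets = 4 and ntxqs = 2 (e.g. a descriptor ring plus a
 * completion ring per set), the completion ring of qset 3 is at index
 * 3 * 2 + 1 = 7.
 */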
5470 
5471 static int
5472 iflib_tx_structures_setup(if_ctx_t ctx)
5473 {
5474 	iflib_txq_t txq = ctx->ifc_txqs;
5475 	int i;
5476 
5477 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
5478 		iflib_txq_setup(txq);
5479 
5480 	return (0);
5481 }
5482 
5483 static void
5484 iflib_tx_structures_free(if_ctx_t ctx)
5485 {
5486 	iflib_txq_t txq = ctx->ifc_txqs;
5487 	int i, j;
5488 
5489 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
5490 		iflib_txq_destroy(txq);
5491 		for (j = 0; j < ctx->ifc_nhwtxqs; j++)
5492 			iflib_dma_free(&txq->ift_ifdi[j]);
5493 	}
5494 	free(ctx->ifc_txqs, M_IFLIB);
5495 	ctx->ifc_txqs = NULL;
5496 	IFDI_QUEUES_FREE(ctx);
5497 }
5498 
5499 /*********************************************************************
5500  *
5501  *  Initialize all receive rings.
5502  *
5503  **********************************************************************/
5504 static int
5505 iflib_rx_structures_setup(if_ctx_t ctx)
5506 {
5507 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5508 	int q;
5509 #if defined(INET6) || defined(INET)
5510 	int i, err;
5511 #endif
5512 
5513 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5514 #if defined(INET6) || defined(INET)
5515 		tcp_lro_free(&rxq->ifr_lc);
5516 		if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
5517 		    TCP_LRO_ENTRIES, min(1024,
5518 		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
5519 			device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
5520 			goto fail;
5521 		}
5522 		rxq->ifr_lro_enabled = TRUE;
5523 #endif
5524 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
5525 	}
5526 	return (0);
5527 #if defined(INET6) || defined(INET)
5528 fail:
5529 	/*
5530 	 * Free the RX software descriptors allocated so far.  We only handle
5531 	 * the rings that completed setup; the failing ring will have cleaned
5532 	 * up after itself.  'q' failed, so it is the terminus.
5533 	 */
5534 	rxq = ctx->ifc_rxqs;
5535 	for (i = 0; i < q; ++i, rxq++) {
5536 		iflib_rx_sds_free(rxq);
5537 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
5538 	}
5539 	return (err);
5540 #endif
5541 }
5542 
5543 /*********************************************************************
5544  *
5545  *  Free all receive rings.
5546  *
5547  **********************************************************************/
5548 static void
5549 iflib_rx_structures_free(if_ctx_t ctx)
5550 {
5551 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5552 
5553 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
5554 		iflib_rx_sds_free(rxq);
5555 	}
5556 }
5557 
5558 static int
5559 iflib_qset_structures_setup(if_ctx_t ctx)
5560 {
5561 	int err;
5562 
5563 	/*
5564 	 * It is expected that the caller takes care of freeing queues if this
5565 	 * fails.
5566 	 */
5567 	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
5568 		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
5569 		return (err);
5570 	}
5571 
5572 	if ((err = iflib_rx_structures_setup(ctx)) != 0)
5573 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
5574 
5575 	return (err);
5576 }
5577 
5578 int
5579 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
5580 		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
5581 {
5582 
5583 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
5584 }
5585 
5586 #ifdef SMP
5587 static int
5588 find_nth(if_ctx_t ctx, int qid)
5589 {
5590 	cpuset_t cpus;
5591 	int i, cpuid, eqid, count;
5592 
5593 	CPU_COPY(&ctx->ifc_cpus, &cpus);
5594 	count = CPU_COUNT(&cpus);
5595 	eqid = qid % count;
5596 	/* Clear the eqid lowest set bits so CPU_FFS() lands on the eqid'th CPU. */
5597 	for (i = 0; i < eqid; i++) {
5598 		cpuid = CPU_FFS(&cpus);
5599 		MPASS(cpuid != 0);
5600 		CPU_CLR(cpuid-1, &cpus);
5601 	}
5602 	cpuid = CPU_FFS(&cpus);
5603 	MPASS(cpuid != 0);
5604 	return (cpuid-1);
5605 }
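
/*
 * Worked example: with ifc_cpus = {0, 2, 4, 6} (count 4) and qid = 5,
 * eqid = 5 % 4 = 1, so one set bit (CPU 0) is cleared and find_nth()
 * returns the second CPU in the set, 2.  Queue ids beyond the CPU count
 * simply wrap around the set.
 */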
5606 
5607 #ifdef SCHED_ULE
5608 extern struct cpu_group *cpu_top;              /* CPU topology */
5609 
5610 static int
5611 find_child_with_core(int cpu, struct cpu_group *grp)
5612 {
5613 	int i;
5614 
5615 	if (grp->cg_children == 0)
5616 		return -1;
5617 
5618 	MPASS(grp->cg_child);
5619 	for (i = 0; i < grp->cg_children; i++) {
5620 		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
5621 			return i;
5622 	}
5623 
5624 	return -1;
5625 }
5626 
5627 /*
5628  * Find the nth "close" core to the specified core.
5629  * "Close" is defined as the deepest level that shares
5630  * at least an L2 cache.  With threads, this will be
5631  * threads on the same core.  If the shared cache is L3
5632  * or higher, simply returns the same core.
5633  */
5634 static int
5635 find_close_core(int cpu, int core_offset)
5636 {
5637 	struct cpu_group *grp;
5638 	int i;
5639 	int fcpu;
5640 	cpuset_t cs;
5641 
5642 	grp = cpu_top;
5643 	if (grp == NULL)
5644 		return cpu;
5645 	i = 0;
5646 	while ((i = find_child_with_core(cpu, grp)) != -1) {
5647 		/* If the child only has one cpu, don't descend */
5648 		if (grp->cg_child[i].cg_count <= 1)
5649 			break;
5650 		grp = &grp->cg_child[i];
5651 	}
5652 
5653 	/* If they don't share at least an L2 cache, use the same CPU */
5654 	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
5655 		return cpu;
5656 
5657 	/* Now pick one */
5658 	CPU_COPY(&grp->cg_mask, &cs);
5659 
5660 	/* Find the index of the specified CPU in the mask and add it to the core offset. */
5661 	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
5662 		if (fcpu - 1 == cpu)
5663 			break;
5664 		CPU_CLR(fcpu - 1, &cs);
5665 	}
5666 	MPASS(fcpu);
5667 
5668 	core_offset += i;
5669 
5670 	CPU_COPY(&grp->cg_mask, &cs);
5671 	for (i = core_offset % grp->cg_count; i > 0; i--) {
5672 		MPASS(CPU_FFS(&cs));
5673 		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5674 	}
5675 	MPASS(CPU_FFS(&cs));
5676 	return CPU_FFS(&cs) - 1;
5677 }
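
/*
 * Worked example, assuming a hypothetical topology where CPUs 0 and 4
 * are SMT siblings sharing an L2 cache: find_close_core(0, 1) descends
 * to the two-CPU group {0, 4} and returns 4, while core_offset = 2
 * wraps around and returns 0 again.  On a topology whose deepest shared
 * level is L3 or flat, the original cpu is returned unchanged.
 */
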
5678 #else
5679 static int
5680 find_close_core(int cpu, int core_offset __unused)
5681 {
5682 	return cpu;
5683 }
5684 #endif
5685 
5686 static int
5687 get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5688 {
5689 	switch (type) {
5690 	case IFLIB_INTR_TX:
5691 		/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
5692 		/* XXX handle multiple RX threads per core and more than two cores per L2 group */
5693 		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5694 	case IFLIB_INTR_RX:
5695 	case IFLIB_INTR_RXTX:
5696 		/* RX queues get the specified core */
5697 		return qid / CPU_COUNT(&ctx->ifc_cpus);
5698 	default:
5699 		return -1;
5700 	}
5701 }
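
/*
 * Example: with 8 CPUs in ifc_cpus, RX queue 3 gets offset 3 / 8 = 0
 * (i.e. the CPU picked by find_nth() itself), while TX queue 3 gets
 * offset 3 / 8 + 1 = 1, the next "close" core, so a TX queue and its
 * paired RX queue share a cache without competing for the same CPU.
 */
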
5702 #else
5703 #define get_core_offset(ctx, type, qid)	CPU_FIRST()
5704 #define find_close_core(cpuid, tid)	CPU_FIRST()
5705 #define find_nth(ctx, gid)		CPU_FIRST()
5706 #endif
5707 
5708 /* Just to avoid copy/paste */
5709 static inline int
5710 iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
5711     struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name)
5712 {
5713 	int cpuid;
5714 	int err, tid;
5715 
5716 	cpuid = find_nth(ctx, qid);
5717 	tid = get_core_offset(ctx, type, qid);
5718 	MPASS(tid >= 0);
5719 	cpuid = find_close_core(cpuid, tid);
5720 	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name);
5721 	if (err) {
5722 		device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err);
5723 		return (err);
5724 	}
5725 #ifdef notyet
5726 	if (cpuid > ctx->ifc_cpuid_highest)
5727 		ctx->ifc_cpuid_highest = cpuid;
5728 #endif
5729 	return 0;
5730 }
5731 
5732 int
5733 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
5734 			iflib_intr_type_t type, driver_filter_t *filter,
5735 			void *filter_arg, int qid, const char *name)
5736 {
5737 	struct grouptask *gtask;
5738 	struct taskqgroup *tqg;
5739 	iflib_filter_info_t info;
5740 	gtask_fn_t *fn;
5741 	int tqrid, err;
5742 	driver_filter_t *intr_fast;
5743 	void *q;
5744 
5745 	info = &ctx->ifc_filter_info;
5746 	tqrid = rid;
5747 
5748 	switch (type) {
5749 	/* XXX merge tx/rx for netmap? */
5750 	case IFLIB_INTR_TX:
5751 		q = &ctx->ifc_txqs[qid];
5752 		info = &ctx->ifc_txqs[qid].ift_filter_info;
5753 		gtask = &ctx->ifc_txqs[qid].ift_task;
5754 		tqg = qgroup_if_io_tqg;
5755 		fn = _task_fn_tx;
5756 		intr_fast = iflib_fast_intr;
5757 		GROUPTASK_INIT(gtask, 0, fn, q);
5758 		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
5759 		break;
5760 	case IFLIB_INTR_RX:
5761 		q = &ctx->ifc_rxqs[qid];
5762 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
5763 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5764 		tqg = qgroup_if_io_tqg;
5765 		fn = _task_fn_rx;
5766 		intr_fast = iflib_fast_intr;
5767 		GROUPTASK_INIT(gtask, 0, fn, q);
5768 		break;
5769 	case IFLIB_INTR_RXTX:
5770 		q = &ctx->ifc_rxqs[qid];
5771 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
5772 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5773 		tqg = qgroup_if_io_tqg;
5774 		fn = _task_fn_rx;
5775 		intr_fast = iflib_fast_intr_rxtx;
5776 		GROUPTASK_INIT(gtask, 0, fn, q);
5777 		break;
5778 	case IFLIB_INTR_ADMIN:
5779 		q = ctx;
5780 		tqrid = -1;
5781 		info = &ctx->ifc_filter_info;
5782 		gtask = &ctx->ifc_admin_task;
5783 		tqg = qgroup_if_config_tqg;
5784 		fn = _task_fn_admin;
5785 		intr_fast = iflib_fast_intr_ctx;
5786 		break;
5787 	default:
5788 		panic("unknown net intr type");
5789 	}
5790 
5791 	info->ifi_filter = filter;
5792 	info->ifi_filter_arg = filter_arg;
5793 	info->ifi_task = gtask;
5794 	info->ifi_ctx = q;
5795 
5796 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info,  name);
5797 	if (err != 0) {
5798 		device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
5799 		return (err);
5800 	}
5801 	if (type == IFLIB_INTR_ADMIN)
5802 		return (0);
5803 
5804 	if (tqrid != -1) {
5805 		err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name);
5806 		if (err)
5807 			return (err);
5808 	} else {
5809 		taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
5810 	}
5811 
5812 	return (0);
5813 }
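
/*
 * Hypothetical MSI-X attach sketch (driver and variable names are
 * illustrative, not from any particular driver): for each RX queue a
 * driver typically does something like
 *
 *	snprintf(buf, sizeof(buf), "rxq%d", i);
 *	err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, i + 1,
 *	    IFLIB_INTR_RX, foo_msix_que, rx_que, i, buf);
 *
 * then binds the matching TX queue with iflib_softirq_alloc_generic()
 * and allocates one IFLIB_INTR_ADMIN vector for link and mailbox events.
 */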
5814 
5815 void
5816 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,  void *arg, int qid, const char *name)
5817 {
5818 	struct grouptask *gtask;
5819 	struct taskqgroup *tqg;
5820 	gtask_fn_t *fn;
5821 	void *q;
5822 	int irq_num = -1;
5823 	int err;
5824 
5825 	switch (type) {
5826 	case IFLIB_INTR_TX:
5827 		q = &ctx->ifc_txqs[qid];
5828 		gtask = &ctx->ifc_txqs[qid].ift_task;
5829 		tqg = qgroup_if_io_tqg;
5830 		fn = _task_fn_tx;
5831 		if (irq != NULL)
5832 			irq_num = rman_get_start(irq->ii_res);
5833 		break;
5834 	case IFLIB_INTR_RX:
5835 		q = &ctx->ifc_rxqs[qid];
5836 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5837 		tqg = qgroup_if_io_tqg;
5838 		fn = _task_fn_rx;
5839 		if (irq != NULL)
5840 			irq_num = rman_get_start(irq->ii_res);
5841 		break;
5842 	case IFLIB_INTR_IOV:
5843 		q = ctx;
5844 		gtask = &ctx->ifc_vflr_task;
5845 		tqg = qgroup_if_config_tqg;
5846 		fn = _task_fn_iov;
5847 		break;
5848 	default:
5849 		panic("unknown net intr type");
5850 	}
5851 	GROUPTASK_INIT(gtask, 0, fn, q);
5852 	if (irq_num != -1) {
5853 		err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name);
5854 		if (err)
5855 			taskqgroup_attach(tqg, gtask, q, irq_num, name);
5856 	} else {
5858 		taskqgroup_attach(tqg, gtask, q, irq_num, name);
5859 	}
5860 }
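
/*
 * Note that, unlike iflib_irq_alloc_generic(), no interrupt resource is
 * allocated here: the grouptask is only initialized and attached, bound
 * near the supplied IRQ (if any) so that, e.g., a TX task runs close to
 * its paired RX queue's vector.
 */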
5861 
5862 void
5863 iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
5864 {
5865 	if (irq->ii_tag)
5866 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
5867 
5868 	if (irq->ii_res)
5869 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res);
5870 }
5871 
5872 static int
5873 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
5874 {
5875 	iflib_txq_t txq = ctx->ifc_txqs;
5876 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5877 	if_irq_t irq = &ctx->ifc_legacy_irq;
5878 	iflib_filter_info_t info;
5879 	struct grouptask *gtask;
5880 	struct taskqgroup *tqg;
5881 	gtask_fn_t *fn;
5882 	int tqrid;
5883 	void *q;
5884 	int err;
5885 
5886 	q = &ctx->ifc_rxqs[0];
5887 	info = &rxq[0].ifr_filter_info;
5888 	gtask = &rxq[0].ifr_task;
5889 	tqg = qgroup_if_io_tqg;
5890 	tqrid = irq->ii_rid = *rid;
5891 	fn = _task_fn_rx;
5892 
5893 	ctx->ifc_flags |= IFC_LEGACY;
5894 	info->ifi_filter = filter;
5895 	info->ifi_filter_arg = filter_arg;
5896 	info->ifi_task = gtask;
5897 	info->ifi_ctx = ctx;
5898 
5899 	/* We allocate a single interrupt resource */
5900 	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
5901 		return (err);
5902 	GROUPTASK_INIT(gtask, 0, fn, q);
5903 	taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
5904 
5905 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
5906 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx");
5907 	return (0);
5908 }
5909 
5910 void
5911 iflib_led_create(if_ctx_t ctx)
5912 {
5913 
5914 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
5915 	    device_get_nameunit(ctx->ifc_dev));
5916 }
5917 
5918 void
5919 iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
5920 {
5921 
5922 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
5923 }
5924 
5925 void
5926 iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
5927 {
5928 
5929 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
5930 }
5931 
5932 void
5933 iflib_admin_intr_deferred(if_ctx_t ctx)
5934 {
5935 #ifdef INVARIANTS
5936 	struct grouptask *gtask;
5937 
5938 	gtask = &ctx->ifc_admin_task;
5939 	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
5940 #endif
5941 
5942 	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
5943 }
5944 
5945 void
5946 iflib_iov_intr_deferred(if_ctx_t ctx)
5947 {
5948 
5949 	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
5950 }
5951 
5952 void
5953 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
5954 {
5955 
5956 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
5957 }
5958 
5959 void
5960 iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
5961 	const char *name)
5962 {
5963 
5964 	GROUPTASK_INIT(gtask, 0, fn, ctx);
5965 	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
5966 }
5967 
5968 void
5969 iflib_config_gtask_deinit(struct grouptask *gtask)
5970 {
5971 
5972 	taskqgroup_detach(qgroup_if_config_tqg, gtask);
5973 }
5974 
5975 void
5976 iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
5977 {
5978 	if_t ifp = ctx->ifc_ifp;
5979 	iflib_txq_t txq = ctx->ifc_txqs;
5980 
5981 	if_setbaudrate(ifp, baudrate);
5982 	if (baudrate >= IF_Gbps(10)) {
5983 		STATE_LOCK(ctx);
5984 		ctx->ifc_flags |= IFC_PREFETCH;
5985 		STATE_UNLOCK(ctx);
5986 	}
5987 	/* If link down, disable watchdog */
5988 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
5989 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
5990 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
5991 	}
5992 	ctx->ifc_link_state = link_state;
5993 	if_link_state_change(ifp, link_state);
5994 }
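
/*
 * Drivers report link transitions by calling iflib_link_state_change()
 * from their IFDI_UPDATE_ADMIN_STATUS() method, e.g. (hypothetical softc
 * fields):
 *
 *	if (sc->link_up)
 *		iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(25));
 *	else
 *		iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
 */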
5995 
5996 static int
5997 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
5998 {
5999 	int credits;
6000 #ifdef INVARIANTS
6001 	int credits_pre = txq->ift_cidx_processed;
6002 #endif
6003 
6004 	if (ctx->isc_txd_credits_update == NULL)
6005 		return (0);
6006 
6007 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
6008 		return (0);
6009 
6010 	txq->ift_processed += credits;
6011 	txq->ift_cidx_processed += credits;
6012 
6013 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
6014 	if (txq->ift_cidx_processed >= txq->ift_size)
6015 		txq->ift_cidx_processed -= txq->ift_size;
6016 	return (credits);
6017 }
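
/*
 * Example: with ift_size = 1024 and ift_cidx_processed = 1020, a credit
 * update of 8 advances ift_cidx_processed to 1028, which then wraps to
 * 4; ift_processed keeps the unwrapped running total.
 */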
6018 
6019 static int
6020 iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
6021 {
6022 
6023 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
6024 	    budget));
6025 }
6026 
6027 void
6028 iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
6029 	const char *description, if_int_delay_info_t info,
6030 	int offset, int value)
6031 {
6032 	info->iidi_ctx = ctx;
6033 	info->iidi_offset = offset;
6034 	info->iidi_value = value;
6035 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
6036 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
6037 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
6038 	    info, 0, iflib_sysctl_int_delay, "I", description);
6039 }
6040 
6041 struct sx *
6042 iflib_ctx_lock_get(if_ctx_t ctx)
6043 {
6044 
6045 	return (&ctx->ifc_ctx_sx);
6046 }
6047 
6048 static int
6049 iflib_msix_init(if_ctx_t ctx)
6050 {
6051 	device_t dev = ctx->ifc_dev;
6052 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6053 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6054 	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
6055 	int iflib_num_tx_queues, iflib_num_rx_queues;
6056 	int err, admincnt, bar;
6057 
6058 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
6059 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
6060 
6061 	device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
6062 
6063 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
6064 	admincnt = sctx->isc_admin_intrcnt;
6065 	/* Override by tunable */
6066 	if (scctx->isc_disable_msix)
6067 		goto msi;
6068 
6069 	/*
6070 	 * bar == -1 => "trust me, I know what I'm doing"
6071 	 * Some drivers are for hardware that is so shoddily
6072 	 * documented that no one knows which BARs are which,
6073 	 * so the developer has to map all of them.  This hack
6074 	 * allows such shoddy hardware to use MSI-X in this framework.
6075 	 */
6076 	if (bar != -1) {
6077 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
6078 	            SYS_RES_MEMORY, &bar, RF_ACTIVE);
6079 		if (ctx->ifc_msix_mem == NULL) {
6080 			/* May not be enabled */
6081 			device_printf(dev, "Unable to map MSIX table\n");
6082 			goto msi;
6083 		}
6084 	}
6085 	/* First try MSI/X */
6086 	if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */
6087 		device_printf(dev, "System has MSIX disabled\n");
6088 		bus_release_resource(dev, SYS_RES_MEMORY,
6089 		    bar, ctx->ifc_msix_mem);
6090 		ctx->ifc_msix_mem = NULL;
6091 		goto msi;
6092 	}
6093 #if IFLIB_DEBUG
6094 	/* use only 1 qset in debug mode */
6095 	queuemsgs = min(msgs - admincnt, 1);
6096 #else
6097 	queuemsgs = msgs - admincnt;
6098 #endif
6099 #ifdef RSS
6100 	queues = imin(queuemsgs, rss_getnumbuckets());
6101 #else
6102 	queues = queuemsgs;
6103 #endif
6104 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6105 	device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n",
6106 				  CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
6107 #ifdef  RSS
6108 	/* If we're doing RSS, clamp at the number of RSS buckets */
6109 	if (queues > rss_getnumbuckets())
6110 		queues = rss_getnumbuckets();
6111 #endif
6112 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
6113 		rx_queues = iflib_num_rx_queues;
6114 	else
6115 		rx_queues = queues;
6116 
6117 	if (rx_queues > scctx->isc_nrxqsets)
6118 		rx_queues = scctx->isc_nrxqsets;
6119 
6120 	/*
6121 	 * We want this to be all logical CPUs by default
6122 	 */
6123 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
6124 		tx_queues = iflib_num_tx_queues;
6125 	else
6126 		tx_queues = mp_ncpus;
6127 
6128 	if (tx_queues > scctx->isc_ntxqsets)
6129 		tx_queues = scctx->isc_ntxqsets;
6130 
6131 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
6132 #ifdef INVARIANTS
6133 		if (tx_queues != rx_queues)
6134 			device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
6135 				      min(rx_queues, tx_queues), min(rx_queues, tx_queues));
6136 #endif
6137 		tx_queues = min(rx_queues, tx_queues);
6138 		rx_queues = min(rx_queues, tx_queues);
6139 	}
6140 
6141 	device_printf(dev, "using %d rx queues %d tx queues\n", rx_queues, tx_queues);
6142 
6143 	vectors = rx_queues + admincnt;
6144 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
6145 		device_printf(dev,
6146 					  "Using MSIX interrupts with %d vectors\n", vectors);
6147 		scctx->isc_vectors = vectors;
6148 		scctx->isc_nrxqsets = rx_queues;
6149 		scctx->isc_ntxqsets = tx_queues;
6150 		scctx->isc_intr = IFLIB_INTR_MSIX;
6151 
6152 		return (vectors);
6153 	} else {
6154 		device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err);
6155 		bus_release_resource(dev, SYS_RES_MEMORY, bar,
6156 		    ctx->ifc_msix_mem);
6157 		ctx->ifc_msix_mem = NULL;
6158 	}
6159 msi:
6160 	vectors = pci_msi_count(dev);
6161 	scctx->isc_nrxqsets = 1;
6162 	scctx->isc_ntxqsets = 1;
6163 	scctx->isc_vectors = vectors;
6164 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
6165 		device_printf(dev,"Using an MSI interrupt\n");
6166 		scctx->isc_intr = IFLIB_INTR_MSI;
6167 	} else {
6168 		scctx->isc_vectors = 1;
6169 		device_printf(dev,"Using a Legacy interrupt\n");
6170 		scctx->isc_intr = IFLIB_INTR_LEGACY;
6171 	}
6172 
6173 	return (vectors);
6174 }
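
/*
 * Worked example (assuming no tunable overrides and generous qset limits
 * from the driver): with pci_msix_count() = 16, admincnt = 1 and 8 CPUs,
 * queuemsgs = 15 and queues is clamped to 8, so 8 + 1 = 9 vectors are
 * requested.  If MSI-X allocation fails, the code falls back to a single
 * MSI vector and, failing that, to a legacy INTx interrupt, in both
 * cases with a single queue set.
 */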
6175 
6176 static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
6177 
6178 static int
6179 mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
6180 {
6181 	int rc;
6182 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
6183 	struct sbuf *sb;
6184 	const char *ring_state = "UNKNOWN";
6185 
6186 	/* XXX needed? */
6187 	rc = sysctl_wire_old_buffer(req, 0);
6188 	MPASS(rc == 0);
6189 	if (rc != 0)
6190 		return (rc);
6191 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
6192 	MPASS(sb != NULL);
6193 	if (sb == NULL)
6194 		return (ENOMEM);
6195 	if (state[3] <= 3)
6196 		ring_state = ring_states[state[3]];
6197 
6198 	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
6199 		    state[0], state[1], state[2], ring_state);
6200 	rc = sbuf_finish(sb);
6201 	sbuf_delete(sb);
6202 	return (rc);
6203 }
6204 
6205 enum iflib_ndesc_handler {
6206 	IFLIB_NTXD_HANDLER,
6207 	IFLIB_NRXD_HANDLER,
6208 };
6209 
6210 static int
6211 mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
6212 {
6213 	if_ctx_t ctx = (void *)arg1;
6214 	enum iflib_ndesc_handler type = arg2;
6215 	char buf[256] = {0};
6216 	qidx_t *ndesc;
6217 	char *p, *next;
6218 	int nqs, rc, i;
6219 
6220 	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
6221 
6222 	nqs = 8;
6223 	switch (type) {
6224 	case IFLIB_NTXD_HANDLER:
6225 		ndesc = ctx->ifc_sysctl_ntxds;
6226 		if (ctx->ifc_sctx)
6227 			nqs = ctx->ifc_sctx->isc_ntxqs;
6228 		break;
6229 	case IFLIB_NRXD_HANDLER:
6230 		ndesc = ctx->ifc_sysctl_nrxds;
6231 		if (ctx->ifc_sctx)
6232 			nqs = ctx->ifc_sctx->isc_nrxqs;
6233 		break;
6234 	default:
6235 		panic("unhandled type");
6236 	}
6237 	if (nqs == 0)
6238 		nqs = 8;
6239 
6240 	for (i = 0; i < 8; i++) {
6241 		if (i >= nqs)
6242 			break;
6243 		if (i)
6244 			strcat(buf, ",");
6245 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
6246 	}
6247 
6248 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
6249 	if (rc || req->newptr == NULL)
6250 		return rc;
6251 
6252 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
6253 	    i++, p = strsep(&next, " ,")) {
6254 		ndesc[i] = strtoul(p, NULL, 10);
6255 	}
6256 
6257 	return (rc);
6258 }
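
/*
 * This handler backs the override_ntxds/override_nrxds sysctls added
 * below; the value is a comma- or space-separated list with one entry
 * per hardware queue in the set, e.g. (hypothetical device name):
 *
 *	# sysctl dev.foo.0.iflib.override_ntxds=4096
 *	# sysctl dev.foo.0.iflib.override_nrxds="2048,2048"
 *
 * Since the values are consumed at attach time, they are typically set
 * as loader tunables (the OIDs are CTLFLAG_RWTUN).
 */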
6259 
6260 #define NAME_BUFLEN 32
6261 static void
6262 iflib_add_device_sysctl_pre(if_ctx_t ctx)
6263 {
6264 	device_t dev = iflib_get_dev(ctx);
6265 	struct sysctl_oid_list *child, *oid_list;
6266 	struct sysctl_ctx_list *ctx_list;
6267 	struct sysctl_oid *node;
6268 
6269 	ctx_list = device_get_sysctl_ctx(dev);
6270 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
6271 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
6272 						      CTLFLAG_RD, NULL, "IFLIB fields");
6273 	oid_list = SYSCTL_CHILDREN(node);
6274 
6275 	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
6276 		       CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
6277 		       "driver version");
6278 
6279 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
6280 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
6281 			"# of txqs to use, 0 => use default #");
6282 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
6283 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
6284 			"# of rxqs to use, 0 => use default #");
6285 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
6286 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
6287                        "permit #txq != #rxq");
6288 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6289                       CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6290                       "disable MSIX (default 0)");
6291 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6292 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
6293                        "set the rx budget");
6294 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6295 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
6296 		       "cause tx to abdicate instead of running to completion");
6297 
6298 	/* XXX change for per-queue sizes */
6299 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
6300 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
6301                        mp_ndesc_handler, "A",
6302                        "list of # of tx descriptors to use, 0 = use default #");
6303 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
6304 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
6305                        mp_ndesc_handler, "A",
6306                        "list of # of rx descriptors to use, 0 = use default #");
6307 }
6308 
6309 static void
6310 iflib_add_device_sysctl_post(if_ctx_t ctx)
6311 {
6312 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6313 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6314 	device_t dev = iflib_get_dev(ctx);
6315 	struct sysctl_oid_list *child;
6316 	struct sysctl_ctx_list *ctx_list;
6317 	iflib_fl_t fl;
6318 	iflib_txq_t txq;
6319 	iflib_rxq_t rxq;
6320 	int i, j;
6321 	char namebuf[NAME_BUFLEN];
6322 	char *qfmt;
6323 	struct sysctl_oid *queue_node, *fl_node, *node;
6324 	struct sysctl_oid_list *queue_list, *fl_list;
6325 	ctx_list = device_get_sysctl_ctx(dev);
6326 
6327 	node = ctx->ifc_sysctl_node;
6328 	child = SYSCTL_CHILDREN(node);
6329 
6330 	if (scctx->isc_ntxqsets > 100)
6331 		qfmt = "txq%03d";
6332 	else if (scctx->isc_ntxqsets > 10)
6333 		qfmt = "txq%02d";
6334 	else
6335 		qfmt = "txq%d";
6336 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
6337 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6338 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6339 					     CTLFLAG_RD, NULL, "Queue Name");
6340 		queue_list = SYSCTL_CHILDREN(queue_node);
6341 #if MEMORY_LOGGING
6342 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
6343 				CTLFLAG_RD,
6344 				&txq->ift_dequeued, "total mbufs freed");
6345 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
6346 				CTLFLAG_RD,
6347 				&txq->ift_enqueued, "total mbufs enqueued");
6348 #endif
6349 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
6350 				   CTLFLAG_RD,
6351 				   &txq->ift_mbuf_defrag, "# of times m_defrag was called");
6352 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
6353 				   CTLFLAG_RD,
6354 				   &txq->ift_pullups, "# of times m_pullup was called");
6355 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
6356 				   CTLFLAG_RD,
6357 				   &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
6358 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
6359 				   CTLFLAG_RD,
6360 				   &txq->ift_no_desc_avail, "# of times no descriptors were available");
6361 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
6362 				   CTLFLAG_RD,
6363 				   &txq->ift_map_failed, "# of times dma map failed");
6364 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
6365 				   CTLFLAG_RD,
6366 				   &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
6367 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
6368 				   CTLFLAG_RD,
6369 				   &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
6370 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
6371 				   CTLFLAG_RD,
6372 				   &txq->ift_pidx, 1, "Producer Index");
6373 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
6374 				   CTLFLAG_RD,
6375 				   &txq->ift_cidx, 1, "Consumer Index");
6376 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
6377 				   CTLFLAG_RD,
6378 				   &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
6379 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
6380 				   CTLFLAG_RD,
6381 				   &txq->ift_in_use, 1, "descriptors in use");
6382 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
6383 				   CTLFLAG_RD,
6384 				   &txq->ift_processed, "descriptors processed for clean");
6385 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
6386 				   CTLFLAG_RD,
6387 				   &txq->ift_cleaned, "total cleaned");
6388 		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
6389 				CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
6390 				0, mp_ring_state_handler, "A", "soft ring state");
6391 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
6392 				       CTLFLAG_RD, &txq->ift_br->enqueues,
6393 				       "# of enqueues to the mp_ring for this queue");
6394 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
6395 				       CTLFLAG_RD, &txq->ift_br->drops,
6396 				       "# of drops in the mp_ring for this queue");
6397 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
6398 				       CTLFLAG_RD, &txq->ift_br->starts,
6399 				       "# of normal consumer starts in the mp_ring for this queue");
6400 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
6401 				       CTLFLAG_RD, &txq->ift_br->stalls,
6402 					       "# of consumer stalls in the mp_ring for this queue");
6403 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
6404 			       CTLFLAG_RD, &txq->ift_br->restarts,
6405 				       "# of consumer restarts in the mp_ring for this queue");
6406 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
6407 				       CTLFLAG_RD, &txq->ift_br->abdications,
6408 				       "# of consumer abdications in the mp_ring for this queue");
6409 	}
6410 
6411 	if (scctx->isc_nrxqsets > 100)
6412 		qfmt = "rxq%03d";
6413 	else if (scctx->isc_nrxqsets > 10)
6414 		qfmt = "rxq%02d";
6415 	else
6416 		qfmt = "rxq%d";
6417 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
6418 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6419 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6420 					     CTLFLAG_RD, NULL, "Queue Name");
6421 		queue_list = SYSCTL_CHILDREN(queue_node);
6422 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
6423 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
6424 				       CTLFLAG_RD,
6425 				       &rxq->ifr_cq_pidx, 1, "Producer Index");
6426 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
6427 				       CTLFLAG_RD,
6428 				       &rxq->ifr_cq_cidx, 1, "Consumer Index");
6429 		}
6430 
6431 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
6432 			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
6433 			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
6434 						     CTLFLAG_RD, NULL, "freelist Name");
6435 			fl_list = SYSCTL_CHILDREN(fl_node);
6436 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
6437 				       CTLFLAG_RD,
6438 				       &fl->ifl_pidx, 1, "Producer Index");
6439 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
6440 				       CTLFLAG_RD,
6441 				       &fl->ifl_cidx, 1, "Consumer Index");
6442 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
6443 				       CTLFLAG_RD,
6444 				       &fl->ifl_credits, 1, "credits available");
6445 #if MEMORY_LOGGING
6446 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
6447 					CTLFLAG_RD,
6448 					&fl->ifl_m_enqueued, "mbufs allocated");
6449 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
6450 					CTLFLAG_RD,
6451 					&fl->ifl_m_dequeued, "mbufs freed");
6452 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
6453 					CTLFLAG_RD,
6454 					&fl->ifl_cl_enqueued, "clusters allocated");
6455 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
6456 					CTLFLAG_RD,
6457 					&fl->ifl_cl_dequeued, "clusters freed");
6458 #endif
6460 		}
6461 	}
6463 }
6464 
6465 #ifndef __NO_STRICT_ALIGNMENT
6466 static struct mbuf *
6467 iflib_fixup_rx(struct mbuf *m)
6468 {
6469 	struct mbuf *n;
6470 
6471 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
6472 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
6473 		m->m_data += ETHER_HDR_LEN;
6474 		n = m;
6475 	} else {
6476 		MGETHDR(n, M_NOWAIT, MT_DATA);
6477 		if (n == NULL) {
6478 			m_freem(m);
6479 			return (NULL);
6480 		}
6481 		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
6482 		m->m_data += ETHER_HDR_LEN;
6483 		m->m_len -= ETHER_HDR_LEN;
6484 		n->m_len = ETHER_HDR_LEN;
6485 		M_MOVE_PKTHDR(n, m);
6486 		n->m_next = m;
6487 	}
6488 	return (n);
6489 }
6490 #endif
6491 
6492 #ifdef NETDUMP
6493 static void
6494 iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
6495 {
6496 	if_ctx_t ctx;
6497 
6498 	ctx = if_getsoftc(ifp);
6499 	CTX_LOCK(ctx);
6500 	*nrxr = NRXQSETS(ctx);
6501 	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
6502 	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
6503 	CTX_UNLOCK(ctx);
6504 }
6505 
6506 static void
6507 iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
6508 {
6509 	if_ctx_t ctx;
6510 	if_softc_ctx_t scctx;
6511 	iflib_fl_t fl;
6512 	iflib_rxq_t rxq;
6513 	int i, j;
6514 
6515 	ctx = if_getsoftc(ifp);
6516 	scctx = &ctx->ifc_softc_ctx;
6517 
6518 	switch (event) {
6519 	case NETDUMP_START:
6520 		for (i = 0; i < scctx->isc_nrxqsets; i++) {
6521 			rxq = &ctx->ifc_rxqs[i];
6522 			for (j = 0; j < rxq->ifr_nfl; j++) {
6523 				fl = rxq->ifr_fl;
6524 				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
6525 			}
6526 		}
6527 		iflib_no_tx_batch = 1;
6528 		break;
6529 	default:
6530 		break;
6531 	}
6532 }
6533 
6534 static int
6535 iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
6536 {
6537 	if_ctx_t ctx;
6538 	iflib_txq_t txq;
6539 	int error;
6540 
6541 	ctx = if_getsoftc(ifp);
6542 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6543 	    IFF_DRV_RUNNING)
6544 		return (EBUSY);
6545 
6546 	txq = &ctx->ifc_txqs[0];
6547 	error = iflib_encap(txq, &m);
6548 	if (error == 0)
6549 		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
6550 	return (error);
6551 }
6552 
6553 static int
6554 iflib_netdump_poll(struct ifnet *ifp, int count)
6555 {
6556 	if_ctx_t ctx;
6557 	if_softc_ctx_t scctx;
6558 	iflib_txq_t txq;
6559 	int i;
6560 
6561 	ctx = if_getsoftc(ifp);
6562 	scctx = &ctx->ifc_softc_ctx;
6563 
6564 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6565 	    IFF_DRV_RUNNING)
6566 		return (EBUSY);
6567 
6568 	txq = &ctx->ifc_txqs[0];
6569 	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
6570 
6571 	for (i = 0; i < scctx->isc_nrxqsets; i++)
6572 		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
6573 	return (0);
6574 }
6575 #endif /* NETDUMP */
6576