1 /*-
2  * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  *  1. Redistributions of source code must retain the above copyright notice,
9  *     this list of conditions and the following disclaimer.
10  *
11  *  2. Neither the name of Matthew Macy nor the names of its
12  *     contributors may be used to endorse or promote products derived from
13  *     this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_acpi.h"
34 #include "opt_sched.h"
35 
36 #include <sys/param.h>
37 #include <sys/types.h>
38 #include <sys/bus.h>
39 #include <sys/eventhandler.h>
40 #include <sys/jail.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/md5.h>
44 #include <sys/mutex.h>
45 #include <sys/module.h>
46 #include <sys/kobj.h>
47 #include <sys/rman.h>
48 #include <sys/proc.h>
49 #include <sys/sbuf.h>
50 #include <sys/smp.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/taskqueue.h>
56 #include <sys/limits.h>
57 
58 #include <net/if.h>
59 #include <net/if_var.h>
60 #include <net/if_types.h>
61 #include <net/if_media.h>
62 #include <net/bpf.h>
63 #include <net/ethernet.h>
64 #include <net/mp_ring.h>
65 #include <net/vnet.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet/tcp_lro.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip6.h>
74 #include <netinet/tcp.h>
75 #include <netinet/ip_var.h>
76 #include <netinet/netdump/netdump.h>
77 #include <netinet6/ip6_var.h>
78 
79 #include <machine/bus.h>
80 #include <machine/in_cksum.h>
81 
82 #include <vm/vm.h>
83 #include <vm/pmap.h>
84 
85 #include <dev/led/led.h>
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/pci_private.h>
89 
90 #include <net/iflib.h>
91 #include <net/iflib_private.h>
92 
93 #include "ifdi_if.h"
94 
95 #ifdef PCI_IOV
96 #include <dev/pci/pci_iov.h>
97 #endif
98 
99 #include <sys/bitstring.h>
100 /*
101  * Enable accounting of every mbuf as it comes into and goes out of
102  * iflib's software descriptor references.
103  */
104 #define MEMORY_LOGGING 0
105 /*
106  * Enable mbuf vectors for compressing long mbuf chains
107  */
108 
109 /*
110  * NB:
111  * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
112  *   we prefetch needs to be determined by the time spent in m_free vis-à-vis
113  *   the cost of a prefetch. This will of course vary based on the workload:
114  *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
115  *        is quite expensive, thus suggesting very little prefetch.
116  *      - small packet forwarding which is just returning a single mbuf to
117  *        UMA will typically be very fast vis-à-vis the cost of a memory
118  *        access.
119  */
120 
121 
122 /*
123  * File organization:
124  *  - private structures
125  *  - iflib private utility functions
126  *  - ifnet functions
127  *  - vlan registry and other exported functions
128  *  - iflib public core functions
129  *
130  *
131  */
132 MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
133 
134 struct iflib_txq;
135 typedef struct iflib_txq *iflib_txq_t;
136 struct iflib_rxq;
137 typedef struct iflib_rxq *iflib_rxq_t;
138 struct iflib_fl;
139 typedef struct iflib_fl *iflib_fl_t;
140 
141 struct iflib_ctx;
142 
143 static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
144 static void iflib_timer(void *arg);
145 
146 typedef struct iflib_filter_info {
147 	driver_filter_t *ifi_filter;
148 	void *ifi_filter_arg;
149 	struct grouptask *ifi_task;
150 	void *ifi_ctx;
151 } *iflib_filter_info_t;
152 
153 struct iflib_ctx {
154 	KOBJ_FIELDS;
155 	/*
156 	 * Pointer to hardware driver's softc
157 	 */
158 	void *ifc_softc;
159 	device_t ifc_dev;
160 	if_t ifc_ifp;
161 
162 	cpuset_t ifc_cpus;
163 	if_shared_ctx_t ifc_sctx;
164 	struct if_softc_ctx ifc_softc_ctx;
165 
166 	struct sx ifc_ctx_sx;
167 	struct mtx ifc_state_mtx;
168 
169 	iflib_txq_t ifc_txqs;
170 	iflib_rxq_t ifc_rxqs;
171 	uint32_t ifc_if_flags;
172 	uint32_t ifc_flags;
173 	uint32_t ifc_max_fl_buf_size;
174 
175 	int ifc_link_state;
176 	int ifc_link_irq;
177 	int ifc_watchdog_events;
178 	struct cdev *ifc_led_dev;
179 	struct resource *ifc_msix_mem;
180 
181 	struct if_irq ifc_legacy_irq;
182 	struct grouptask ifc_admin_task;
183 	struct grouptask ifc_vflr_task;
184 	struct iflib_filter_info ifc_filter_info;
185 	struct ifmedia	ifc_media;
186 
187 	struct sysctl_oid *ifc_sysctl_node;
188 	uint16_t ifc_sysctl_ntxqs;
189 	uint16_t ifc_sysctl_nrxqs;
190 	uint16_t ifc_sysctl_qs_eq_override;
191 	uint16_t ifc_sysctl_rx_budget;
192 	uint16_t ifc_sysctl_tx_abdicate;
193 
194 	qidx_t ifc_sysctl_ntxds[8];
195 	qidx_t ifc_sysctl_nrxds[8];
196 	struct if_txrx ifc_txrx;
197 #define isc_txd_encap  ifc_txrx.ift_txd_encap
198 #define isc_txd_flush  ifc_txrx.ift_txd_flush
199 #define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
200 #define isc_rxd_available ifc_txrx.ift_rxd_available
201 #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
202 #define isc_rxd_refill ifc_txrx.ift_rxd_refill
203 #define isc_rxd_flush ifc_txrx.ift_rxd_flush
206 #define isc_legacy_intr ifc_txrx.ift_legacy_intr
207 	eventhandler_tag ifc_vlan_attach_event;
208 	eventhandler_tag ifc_vlan_detach_event;
209 	uint8_t ifc_mac[ETHER_ADDR_LEN];
210 	char ifc_mtx_name[16];
211 };
212 
213 
214 void *
215 iflib_get_softc(if_ctx_t ctx)
216 {
217 
218 	return (ctx->ifc_softc);
219 }
220 
221 device_t
222 iflib_get_dev(if_ctx_t ctx)
223 {
224 
225 	return (ctx->ifc_dev);
226 }
227 
228 if_t
229 iflib_get_ifp(if_ctx_t ctx)
230 {
231 
232 	return (ctx->ifc_ifp);
233 }
234 
235 struct ifmedia *
236 iflib_get_media(if_ctx_t ctx)
237 {
238 
239 	return (&ctx->ifc_media);
240 }
241 
242 uint32_t
243 iflib_get_flags(if_ctx_t ctx)
244 {
245 	return (ctx->ifc_flags);
246 }
247 
248 void
249 iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
250 {
251 
252 	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
253 }
254 
255 if_softc_ctx_t
256 iflib_get_softc_ctx(if_ctx_t ctx)
257 {
258 
259 	return (&ctx->ifc_softc_ctx);
260 }
261 
262 if_shared_ctx_t
263 iflib_get_sctx(if_ctx_t ctx)
264 {
265 
266 	return (ctx->ifc_sctx);
267 }
268 
269 #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
270 #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
271 #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
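/*
 * A worked example of the two alignment macros above (illustrative
 * addresses, assuming CACHE_LINE_SIZE == 64):
 *  - IP_ALIGNED() is true when m_data ends in 2 (mod 4), e.g. 0x100e,
 *    so that the IP header following a 14-byte Ethernet header lands
 *    on a 4-byte boundary.
 *  - CACHE_PTR_NEXT(0x1008) rounds up to 0x1040; an already-aligned
 *    pointer such as 0x1040 is returned unchanged.
 */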
272 
273 #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
274 #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
275 
276 typedef struct iflib_sw_rx_desc_array {
277 	bus_dmamap_t	*ifsd_map;         /* bus_dma maps for packet */
278 	struct mbuf	**ifsd_m;           /* pkthdr mbufs */
279 	caddr_t		*ifsd_cl;          /* direct cluster pointer for rx */
280 	bus_addr_t	*ifsd_ba;          /* bus addr of cluster for rx */
281 } iflib_rxsd_array_t;
282 
283 typedef struct iflib_sw_tx_desc_array {
284 	bus_dmamap_t    *ifsd_map;         /* bus_dma maps for packet */
285 	bus_dmamap_t	*ifsd_tso_map;     /* bus_dma maps for TSO packet */
286 	struct mbuf    **ifsd_m;           /* pkthdr mbufs */
287 } if_txsd_vec_t;
288 
289 
290 /* magic number that should be high enough for any hardware */
291 #define IFLIB_MAX_TX_SEGS		128
292 #define IFLIB_RX_COPY_THRESH		128
293 #define IFLIB_MAX_RX_REFRESH		32
294 /* The minimum descriptors per second before we start coalescing */
295 #define IFLIB_MIN_DESC_SEC		16384
296 #define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
297 #define IFLIB_QUEUE_IDLE		0
298 #define IFLIB_QUEUE_HUNG		1
299 #define IFLIB_QUEUE_WORKING		2
300 /* maximum number of txqs that can share an rx interrupt */
301 #define IFLIB_MAX_TX_SHARED_INTR	4
302 
303 /* this should really scale with ring size - this is a fairly arbitrary value */
304 #define TX_BATCH_SIZE			32
305 
306 #define IFLIB_RESTART_BUDGET		8
307 
308 
309 #define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
310 				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
311 				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
312 struct iflib_txq {
313 	qidx_t		ift_in_use;
314 	qidx_t		ift_cidx;
315 	qidx_t		ift_cidx_processed;
316 	qidx_t		ift_pidx;
317 	uint8_t		ift_gen;
318 	uint8_t		ift_br_offset;
319 	uint16_t	ift_npending;
320 	uint16_t	ift_db_pending;
321 	uint16_t	ift_rs_pending;
322 	/* implicit pad */
323 	uint8_t		ift_txd_size[8];
324 	uint64_t	ift_processed;
325 	uint64_t	ift_cleaned;
326 	uint64_t	ift_cleaned_prev;
327 #if MEMORY_LOGGING
328 	uint64_t	ift_enqueued;
329 	uint64_t	ift_dequeued;
330 #endif
331 	uint64_t	ift_no_tx_dma_setup;
332 	uint64_t	ift_no_desc_avail;
333 	uint64_t	ift_mbuf_defrag_failed;
334 	uint64_t	ift_mbuf_defrag;
335 	uint64_t	ift_map_failed;
336 	uint64_t	ift_txd_encap_efbig;
337 	uint64_t	ift_pullups;
338 	uint64_t	ift_last_timer_tick;
339 
340 	struct mtx	ift_mtx;
341 	struct mtx	ift_db_mtx;
342 
343 	/* constant values */
344 	if_ctx_t	ift_ctx;
345 	struct ifmp_ring        *ift_br;
346 	struct grouptask	ift_task;
347 	qidx_t		ift_size;
348 	uint16_t	ift_id;
349 	struct callout	ift_timer;
350 
351 	if_txsd_vec_t	ift_sds;
352 	uint8_t		ift_qstatus;
353 	uint8_t		ift_closed;
354 	uint8_t		ift_update_freq;
355 	struct iflib_filter_info ift_filter_info;
356 	bus_dma_tag_t	ift_buf_tag;
357 	bus_dma_tag_t	ift_tso_buf_tag;
358 	iflib_dma_info_t	ift_ifdi;
359 #define MTX_NAME_LEN 16
360 	char                    ift_mtx_name[MTX_NAME_LEN];
361 	char                    ift_db_mtx_name[MTX_NAME_LEN];
362 	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
363 #ifdef IFLIB_DIAGNOSTICS
364 	uint64_t ift_cpu_exec_count[256];
365 #endif
366 } __aligned(CACHE_LINE_SIZE);
367 
368 struct iflib_fl {
369 	qidx_t		ifl_cidx;
370 	qidx_t		ifl_pidx;
371 	qidx_t		ifl_credits;
372 	uint8_t		ifl_gen;
373 	uint8_t		ifl_rxd_size;
374 #if MEMORY_LOGGING
375 	uint64_t	ifl_m_enqueued;
376 	uint64_t	ifl_m_dequeued;
377 	uint64_t	ifl_cl_enqueued;
378 	uint64_t	ifl_cl_dequeued;
379 #endif
380 	/* implicit pad */
381 
382 	bitstr_t 	*ifl_rx_bitmap;
383 	qidx_t		ifl_fragidx;
384 	/* constant */
385 	qidx_t		ifl_size;
386 	uint16_t	ifl_buf_size;
387 	uint16_t	ifl_cltype;
388 	uma_zone_t	ifl_zone;
389 	iflib_rxsd_array_t	ifl_sds;
390 	iflib_rxq_t	ifl_rxq;
391 	uint8_t		ifl_id;
392 	bus_dma_tag_t	ifl_buf_tag;
393 	iflib_dma_info_t	ifl_ifdi;
394 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
395 	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
396 	qidx_t	ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
397 }  __aligned(CACHE_LINE_SIZE);
398 
399 static inline qidx_t
400 get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
401 {
402 	qidx_t used;
403 
404 	if (pidx > cidx)
405 		used = pidx - cidx;
406 	else if (pidx < cidx)
407 		used = size - cidx + pidx;
408 	else if (gen == 0 && pidx == cidx)
409 		used = 0;
410 	else if (gen == 1 && pidx == cidx)
411 		used = size;
412 	else
413 		panic("bad state");
414 
415 	return (used);
416 }
417 
418 #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
419 
420 #define IDXDIFF(head, tail, wrap) \
421 	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
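/*
 * A worked example of the accounting above (illustrative values for a
 * 1024-descriptor ring): with cidx = 1000 and pidx = 8 the producer has
 * wrapped, so get_inuse() returns 1024 - 1000 + 8 = 32 and TXQ_AVAIL()
 * returns 992.  When pidx == cidx, the generation bit disambiguates an
 * empty ring (gen == 0, 0 in use) from a full one (gen == 1, 1024 in
 * use).  IDXDIFF(8, 1000, 1024) yields the same wrapped distance, 32.
 */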
422 
423 struct iflib_rxq {
424 	/* If there is a separate completion queue -
425 	 * these are the cq cidx and pidx. Otherwise
426 	 * these are unused.
427 	 */
428 	qidx_t		ifr_size;
429 	qidx_t		ifr_cq_cidx;
430 	qidx_t		ifr_cq_pidx;
431 	uint8_t		ifr_cq_gen;
432 	uint8_t		ifr_fl_offset;
433 
434 	if_ctx_t	ifr_ctx;
435 	iflib_fl_t	ifr_fl;
436 	uint64_t	ifr_rx_irq;
437 	uint16_t	ifr_id;
438 	uint8_t		ifr_lro_enabled;
439 	uint8_t		ifr_nfl;
440 	uint8_t		ifr_ntxqirq;
441 	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
442 	struct lro_ctrl			ifr_lc;
443 	struct grouptask        ifr_task;
444 	struct iflib_filter_info ifr_filter_info;
445 	iflib_dma_info_t		ifr_ifdi;
446 
447 	/* dynamically allocate if any drivers need a value substantially larger than this */
448 	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
449 #ifdef IFLIB_DIAGNOSTICS
450 	uint64_t ifr_cpu_exec_count[256];
451 #endif
452 }  __aligned(CACHE_LINE_SIZE);
453 
454 typedef struct if_rxsd {
455 	caddr_t *ifsd_cl;
456 	struct mbuf **ifsd_m;
457 	iflib_fl_t ifsd_fl;
458 	qidx_t ifsd_cidx;
459 } *if_rxsd_t;
460 
461 /* multiple of word size */
462 #ifdef __LP64__
463 #define PKT_INFO_SIZE	6
464 #define RXD_INFO_SIZE	5
465 #define PKT_TYPE uint64_t
466 #else
467 #define PKT_INFO_SIZE	11
468 #define RXD_INFO_SIZE	8
469 #define PKT_TYPE uint32_t
470 #endif
471 #define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
472 #define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
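/*
 * Worked values for the sizes above: on __LP64__ the pad structures are
 * 6 and 5 64-bit words, giving PKT_LOOP_BOUND = (6/3)*3 = 6 and
 * RXD_LOOP_BOUND = (5/4)*4 = 4, so rxd_info_zero() below clears words
 * 0..3 in its unrolled loop and the fifth word separately.  On 32-bit
 * platforms the same structures are 11 and 8 words and the bounds are
 * 9 and 8.
 */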
473 
474 typedef struct if_pkt_info_pad {
475 	PKT_TYPE pkt_val[PKT_INFO_SIZE];
476 } *if_pkt_info_pad_t;
477 typedef struct if_rxd_info_pad {
478 	PKT_TYPE rxd_val[RXD_INFO_SIZE];
479 } *if_rxd_info_pad_t;
480 
481 CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
482 CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
483 
484 
485 static inline void
486 pkt_info_zero(if_pkt_info_t pi)
487 {
488 	if_pkt_info_pad_t pi_pad;
489 
490 	pi_pad = (if_pkt_info_pad_t)pi;
491 	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
492 	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
493 #ifndef __LP64__
494 	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
495 	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
496 #endif
497 }
498 
499 static device_method_t iflib_pseudo_methods[] = {
500 	DEVMETHOD(device_attach, noop_attach),
501 	DEVMETHOD(device_detach, iflib_pseudo_detach),
502 	DEVMETHOD_END
503 };
504 
505 driver_t iflib_pseudodriver = {
506 	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
507 };
508 
509 static inline void
510 rxd_info_zero(if_rxd_info_t ri)
511 {
512 	if_rxd_info_pad_t ri_pad;
513 	int i;
514 
515 	ri_pad = (if_rxd_info_pad_t)ri;
516 	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
517 		ri_pad->rxd_val[i] = 0;
518 		ri_pad->rxd_val[i+1] = 0;
519 		ri_pad->rxd_val[i+2] = 0;
520 		ri_pad->rxd_val[i+3] = 0;
521 	}
522 #ifdef __LP64__
523 	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
524 #endif
525 }
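/*
 * Both helpers are equivalent to bzero(pi, sizeof(struct if_pkt_info))
 * and bzero(ri, sizeof(struct if_rxd_info)); the CTASSERTs above
 * guarantee that the pad overlays match those sizes exactly.  The
 * unrolled word-sized stores merely let the compiler emit straight-line
 * code on these hot paths instead of a memset() call.
 */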
526 
527 /*
528  * Only allow a single packet to take up at most 1/nth of the tx ring
529  */
530 #define MAX_SINGLE_PACKET_FRACTION 12
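/*
 * For example, with the value above a packet on a 1024-descriptor tx
 * ring is meant to occupy at most 1024/12 (about 85) descriptors.
 */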
531 #define IF_BAD_DMA (bus_addr_t)-1
532 
533 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
534 
535 #define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
536 #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
537 #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
538 #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
539 
540 
541 #define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
542 #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
543 #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
544 #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
545 
546 
547 
548 #define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
549 #define CALLOUT_UNLOCK(txq) 	mtx_unlock(&txq->ift_mtx)
550 
551 void
552 iflib_set_detach(if_ctx_t ctx)
553 {
554 	STATE_LOCK(ctx);
555 	ctx->ifc_flags |= IFC_IN_DETACH;
556 	STATE_UNLOCK(ctx);
557 }
558 
559 /* Our boot-time initialization hook */
560 static int	iflib_module_event_handler(module_t, int, void *);
561 
562 static moduledata_t iflib_moduledata = {
563 	"iflib",
564 	iflib_module_event_handler,
565 	NULL
566 };
567 
568 DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
569 MODULE_VERSION(iflib, 1);
570 
571 MODULE_DEPEND(iflib, pci, 1, 1, 1);
572 MODULE_DEPEND(iflib, ether, 1, 1, 1);
573 
574 TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
575 TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
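/*
 * Two task queue groups are used: if_io_tqg spreads the per-queue rx/tx
 * group tasks across all CPUs (mp_ncpus queues with a stride of 1),
 * while if_config_tqg is a single queue for serialized control-path
 * work such as the admin and vflr tasks.
 */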
576 
577 #ifndef IFLIB_DEBUG_COUNTERS
578 #ifdef INVARIANTS
579 #define IFLIB_DEBUG_COUNTERS 1
580 #else
581 #define IFLIB_DEBUG_COUNTERS 0
582 #endif /* !INVARIANTS */
583 #endif
584 
585 static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
586                    "iflib driver parameters");
587 
588 /*
589  * XXX need to ensure that this can't accidentally cause the head to be moved backwards
590  */
591 static int iflib_min_tx_latency = 0;
592 SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
593 		   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
594 static int iflib_no_tx_batch = 0;
595 SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
596 		   &iflib_no_tx_batch, 0, "disable transmit batching to minimize latency at the possible expense of throughput");
597 
598 
599 #if IFLIB_DEBUG_COUNTERS
600 
601 static int iflib_tx_seen;
602 static int iflib_tx_sent;
603 static int iflib_tx_encap;
604 static int iflib_rx_allocs;
605 static int iflib_fl_refills;
606 static int iflib_fl_refills_large;
607 static int iflib_tx_frees;
608 
609 SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
610 		   &iflib_tx_seen, 0, "# tx mbufs seen");
611 SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
612 		   &iflib_tx_sent, 0, "# tx mbufs sent");
613 SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
614 		   &iflib_tx_encap, 0, "# tx mbufs encapped");
615 SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
616 		   &iflib_tx_frees, 0, "# tx frees");
617 SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
618 		   &iflib_rx_allocs, 0, "# rx allocations");
619 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
620 		   &iflib_fl_refills, 0, "# refills");
621 SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
622 		   &iflib_fl_refills_large, 0, "# large refills");
623 
624 
625 static int iflib_txq_drain_flushing;
626 static int iflib_txq_drain_oactive;
627 static int iflib_txq_drain_notready;
628 
629 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
630 		   &iflib_txq_drain_flushing, 0, "# drain flushes");
631 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
632 		   &iflib_txq_drain_oactive, 0, "# drain oactives");
633 SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
634 		   &iflib_txq_drain_notready, 0, "# drain notready");
635 
636 
637 static int iflib_encap_load_mbuf_fail;
638 static int iflib_encap_pad_mbuf_fail;
639 static int iflib_encap_txq_avail_fail;
640 static int iflib_encap_txd_encap_fail;
641 
642 SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
643 		   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
644 SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
645 		   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
646 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
647 		   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
648 SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
649 		   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
650 
651 static int iflib_task_fn_rxs;
652 static int iflib_rx_intr_enables;
653 static int iflib_fast_intrs;
654 static int iflib_rx_unavail;
655 static int iflib_rx_ctx_inactive;
656 static int iflib_rx_if_input;
657 static int iflib_rx_mbuf_null;
658 static int iflib_rxd_flush;
659 
660 static int iflib_verbose_debug;
661 
662 SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
663 		   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
664 SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
665 		   &iflib_rx_intr_enables, 0, "# rx intr enables");
666 SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
667 		   &iflib_fast_intrs, 0, "# fast_intr calls");
668 SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
669 		   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
670 SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
671 		   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
672 SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
673 		   &iflib_rx_if_input, 0, "# times rxeof called if_input");
674 SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
675 		   &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
676 SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
677 	         &iflib_rxd_flush, 0, "# times rxd_flush called");
678 SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
679 		   &iflib_verbose_debug, 0, "enable verbose debugging");
680 
681 #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
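/*
 * For example, DBG_COUNTER_INC(fast_intrs) token-pastes to
 * atomic_add_int(&iflib_fast_intrs, 1).  When IFLIB_DEBUG_COUNTERS is
 * 0, the alternate definition below expands to nothing, so the counters
 * cost nothing in production builds.
 */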
682 static void
683 iflib_debug_reset(void)
684 {
685 	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
686 		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
687 		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
688 		iflib_txq_drain_notready =
689 		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
690 		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
691 		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
692 		iflib_rx_unavail =
693 		iflib_rx_ctx_inactive = iflib_rx_if_input =
694 		iflib_rx_mbuf_null = iflib_rxd_flush = 0;
695 }
696 
697 #else
698 #define DBG_COUNTER_INC(name)
699 static void iflib_debug_reset(void) {}
700 #endif
701 
702 #define IFLIB_DEBUG 0
703 
704 static void iflib_tx_structures_free(if_ctx_t ctx);
705 static void iflib_rx_structures_free(if_ctx_t ctx);
706 static int iflib_queues_alloc(if_ctx_t ctx);
707 static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
708 static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
709 static int iflib_qset_structures_setup(if_ctx_t ctx);
710 static int iflib_msix_init(if_ctx_t ctx);
711 static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
712 static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
713 static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
714 #ifdef ALTQ
715 static void iflib_altq_if_start(if_t ifp);
716 static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
717 #endif
718 static int iflib_register(if_ctx_t);
719 static void iflib_init_locked(if_ctx_t ctx);
720 static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
721 static void iflib_add_device_sysctl_post(if_ctx_t ctx);
722 static void iflib_ifmp_purge(iflib_txq_t txq);
723 static void _iflib_pre_assert(if_softc_ctx_t scctx);
724 static void iflib_if_init_locked(if_ctx_t ctx);
725 static void iflib_free_intr_mem(if_ctx_t ctx);
726 #ifndef __NO_STRICT_ALIGNMENT
727 static struct mbuf * iflib_fixup_rx(struct mbuf *m);
728 #endif
729 
730 NETDUMP_DEFINE(iflib);
731 
732 #ifdef DEV_NETMAP
733 #include <sys/selinfo.h>
734 #include <net/netmap.h>
735 #include <dev/netmap/netmap_kern.h>
736 
737 MODULE_DEPEND(iflib, netmap, 1, 1, 1);
738 
739 static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);
740 
741 /*
742  * device-specific sysctl variables:
743  *
744  * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
745  *	During regular operation the CRC is stripped, but on some
746  *	hardware reception of frames whose length is not a multiple of
747  *	64 bytes is slower, so using crcstrip=0 helps in benchmarks.
748  *
749  * iflib_rx_miss, iflib_rx_miss_bufs:
750  *	count packets that might be missed due to lost interrupts.
751  */
752 SYSCTL_DECL(_dev_netmap);
753 /*
754  * The xl driver by default strips CRCs and we do not override it.
755  */
756 
757 int iflib_crcstrip = 1;
758 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
759     CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");
760 
761 int iflib_rx_miss, iflib_rx_miss_bufs;
762 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
763     CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
764 SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
765     CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");
766 
767 /*
768  * Register/unregister. We are already under netmap lock.
769  * Only called on the first register or the last unregister.
770  */
771 static int
772 iflib_netmap_register(struct netmap_adapter *na, int onoff)
773 {
774 	struct ifnet *ifp = na->ifp;
775 	if_ctx_t ctx = ifp->if_softc;
776 	int status;
777 
778 	CTX_LOCK(ctx);
779 	IFDI_INTR_DISABLE(ctx);
780 
781 	/* Tell the stack that the interface is no longer active */
782 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
783 
784 	if (!CTX_IS_VF(ctx))
785 		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
786 
787 	/* enable or disable flags and callbacks in na and ifp */
788 	if (onoff) {
789 		nm_set_native_flags(na);
790 	} else {
791 		nm_clear_native_flags(na);
792 	}
793 	iflib_stop(ctx);
794 	iflib_init_locked(ctx);
795 	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);	/* XXX why twice? */
796 	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
797 	if (status)
798 		nm_clear_native_flags(na);
799 	CTX_UNLOCK(ctx);
800 	return (status);
801 }
802 
803 static int
804 netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
805 {
806 	struct netmap_adapter *na = kring->na;
807 	u_int const lim = kring->nkr_num_slots - 1;
808 	u_int head = kring->rhead;
809 	struct netmap_ring *ring = kring->ring;
810 	bus_dmamap_t *map;
811 	struct if_rxd_update iru;
812 	if_ctx_t ctx = rxq->ifr_ctx;
813 	iflib_fl_t fl = &rxq->ifr_fl[0];
814 	uint32_t refill_pidx, nic_i;
815 #if IFLIB_DEBUG_COUNTERS
816 	int rf_count = 0;
817 #endif
818 
819 	if (nm_i == head && __predict_true(!init))
820 		return (0);
821 	iru_init(&iru, rxq, 0 /* flid */);
822 	map = fl->ifl_sds.ifsd_map;
823 	refill_pidx = netmap_idx_k2n(kring, nm_i);
824 	/*
825 	 * IMPORTANT: we must leave one free slot in the ring,
826 	 * so move head back by one unit
827 	 */
828 	head = nm_prev(head, lim);
829 	nic_i = UINT_MAX;
830 	DBG_COUNTER_INC(fl_refills);
831 	while (nm_i != head) {
832 #if IFLIB_DEBUG_COUNTERS
833 		if (++rf_count == 9)
834 			DBG_COUNTER_INC(fl_refills_large);
835 #endif
836 		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
837 			struct netmap_slot *slot = &ring->slot[nm_i];
838 			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
839 			uint32_t nic_i_dma = refill_pidx;
840 			nic_i = netmap_idx_k2n(kring, nm_i);
841 
842 			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
843 
844 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
845 				return (netmap_ring_reinit(kring));
846 
847 			fl->ifl_vm_addrs[tmp_pidx] = addr;
848 			if (__predict_false(init)) {
849 				netmap_load_map(na, fl->ifl_buf_tag,
850 				    map[nic_i], addr);
851 			} else if (slot->flags & NS_BUF_CHANGED) {
852 				/* buffer has changed, reload map */
853 				netmap_reload_map(na, fl->ifl_buf_tag,
854 				    map[nic_i], addr);
855 			}
856 			slot->flags &= ~NS_BUF_CHANGED;
857 
858 			nm_i = nm_next(nm_i, lim);
859 			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
860 			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
861 				continue;
862 
863 			iru.iru_pidx = refill_pidx;
864 			iru.iru_count = tmp_pidx+1;
865 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
866 			refill_pidx = nic_i;
867 			for (int n = 0; n < iru.iru_count; n++) {
868 				bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i_dma],
869 						BUS_DMASYNC_PREREAD);
870 				/* XXX - change this to not use the netmap func */
871 				nic_i_dma = nm_next(nic_i_dma, lim);
872 			}
873 		}
874 	}
875 	kring->nr_hwcur = head;
876 
877 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
878 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
879 	if (__predict_true(nic_i != UINT_MAX)) {
880 		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
881 		DBG_COUNTER_INC(rxd_flush);
882 	}
883 	return (0);
884 }
885 
886 /*
887  * Reconcile kernel and user view of the transmit ring.
888  *
889  * All information is in the kring.
890  * Userspace wants to send packets up to the one before kring->rhead,
891  * kernel knows kring->nr_hwcur is the first unsent packet.
892  *
893  * Here we push packets out (as many as possible), and possibly
894  * reclaim buffers from previously completed transmission.
895  *
896  * The caller (netmap) guarantees that there is only one instance
897  * running at any time. Any interference with other driver
898  * methods should be handled by the individual drivers.
899  */
900 static int
901 iflib_netmap_txsync(struct netmap_kring *kring, int flags)
902 {
903 	struct netmap_adapter *na = kring->na;
904 	struct ifnet *ifp = na->ifp;
905 	struct netmap_ring *ring = kring->ring;
906 	u_int nm_i;	/* index into the netmap kring */
907 	u_int nic_i;	/* index into the NIC ring */
908 	u_int n;
909 	u_int const lim = kring->nkr_num_slots - 1;
910 	u_int const head = kring->rhead;
911 	struct if_pkt_info pi;
912 
913 	/*
914 	 * interrupts on every tx packet are expensive so request
915 	 * them every half ring, or where NS_REPORT is set
916 	 */
917 	u_int report_frequency = kring->nkr_num_slots >> 1;
918 	/* device-specific */
919 	if_ctx_t ctx = ifp->if_softc;
920 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
921 
922 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
923 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
924 
925 	/*
926 	 * First part: process new packets to send.
927 	 * nm_i is the current index in the netmap kring,
928 	 * nic_i is the corresponding index in the NIC ring.
929 	 *
930 	 * If we have packets to send (nm_i != head)
931 	 * iterate over the netmap ring, fetch length and update
932 	 * the corresponding slot in the NIC ring. Some drivers also
933 	 * need to update the buffer's physical address in the NIC slot
934 	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
935 	 *
936 	 * The netmap_reload_map() call is especially expensive,
937 	 * even when (as in this case) the tag is 0, so do it only
938 	 * when the buffer has actually changed.
939 	 *
940 	 * If possible do not set the report/intr bit on all slots,
941 	 * but only a few times per ring or when NS_REPORT is set.
942 	 *
943 	 * Finally, on 10G and faster drivers, it might be useful
944 	 * to prefetch the next slot and txr entry.
945 	 */
946 
947 	nm_i = kring->nr_hwcur;
948 	if (nm_i != head) {	/* we have new packets to send */
949 		pkt_info_zero(&pi);
950 		pi.ipi_segs = txq->ift_segs;
951 		pi.ipi_qsidx = kring->ring_id;
952 		nic_i = netmap_idx_k2n(kring, nm_i);
953 
954 		__builtin_prefetch(&ring->slot[nm_i]);
955 		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
956 		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
957 
958 		for (n = 0; nm_i != head; n++) {
959 			struct netmap_slot *slot = &ring->slot[nm_i];
960 			u_int len = slot->len;
961 			uint64_t paddr;
962 			void *addr = PNMB(na, slot, &paddr);
963 			int flags = (slot->flags & NS_REPORT ||
964 				nic_i == 0 || nic_i == report_frequency) ?
965 				IPI_TX_INTR : 0;
966 
967 			/* device-specific */
968 			pi.ipi_len = len;
969 			pi.ipi_segs[0].ds_addr = paddr;
970 			pi.ipi_segs[0].ds_len = len;
971 			pi.ipi_nsegs = 1;
972 			pi.ipi_ndescs = 0;
973 			pi.ipi_pidx = nic_i;
974 			pi.ipi_flags = flags;
975 
976 			/* Fill the slot in the NIC ring. */
977 			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
978 			DBG_COUNTER_INC(tx_encap);
979 
980 			/* prefetch for next round */
981 			__builtin_prefetch(&ring->slot[nm_i + 1]);
982 			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
983 			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
984 
985 			NM_CHECK_ADDR_LEN(na, addr, len);
986 
987 			if (slot->flags & NS_BUF_CHANGED) {
988 				/* buffer has changed, reload map */
989 				netmap_reload_map(na, txq->ift_buf_tag,
990 				    txq->ift_sds.ifsd_map[nic_i], addr);
991 			}
992 			/* make sure changes to the buffer are synced */
993 			bus_dmamap_sync(txq->ift_buf_tag,
994 			    txq->ift_sds.ifsd_map[nic_i],
995 			    BUS_DMASYNC_PREWRITE);
996 
997 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
998 			nm_i = nm_next(nm_i, lim);
999 			nic_i = nm_next(nic_i, lim);
1000 		}
1001 		kring->nr_hwcur = nm_i;
1002 
1003 		/* synchronize the NIC ring */
1004 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
1005 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1006 
1007 		/* (re)start the tx unit up to slot nic_i (excluded) */
1008 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
1009 	}
1010 
1011 	/*
1012 	 * Second part: reclaim buffers for completed transmissions.
1013 	 *
1014 	 * If there are unclaimed buffers, attempt to reclaim them.
1015 	 * If none are reclaimed, and TX IRQs are not in use, do an initial
1016 	 * minimal delay, then trigger the tx handler which will spin in the
1017 	 * group task queue.
1018 	 */
1019 	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1020 		if (iflib_tx_credits_update(ctx, txq)) {
1021 			/* some tx completed, increment avail */
1022 			nic_i = txq->ift_cidx_processed;
1023 			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
1024 		}
1025 	}
1026 	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ) &&
1027 	    kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1028 		callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
1029 		    iflib_timer, txq, txq->ift_timer.c_cpu);
1030 	}
1031 	return (0);
1032 }
1033 
1034 /*
1035  * Reconcile kernel and user view of the receive ring.
1036  * Same as for the txsync, this routine must be efficient.
1037  * The caller guarantees a single invocation, but races against
1038  * the rest of the driver should be handled here.
1039  *
1040  * On call, kring->rhead is the first packet that userspace wants
1041  * to keep, and kring->rcur is the wakeup point.
1042  * The kernel has previously reported packets up to kring->rtail.
1043  *
1044  * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
1045  * of whether or not we received an interrupt.
1046  */
1047 static int
1048 iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
1049 {
1050 	struct netmap_adapter *na = kring->na;
1051 	struct netmap_ring *ring = kring->ring;
1052 	iflib_fl_t fl;
1053 	uint32_t nm_i;	/* index into the netmap ring */
1054 	uint32_t nic_i;	/* index into the NIC ring */
1055 	u_int i, n;
1056 	u_int const lim = kring->nkr_num_slots - 1;
1057 	u_int const head = kring->rhead;
1058 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1059 	struct if_rxd_info ri;
1060 
1061 	struct ifnet *ifp = na->ifp;
1062 	if_ctx_t ctx = ifp->if_softc;
1063 	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
1064 	if (head > lim)
1065 		return (netmap_ring_reinit(kring));
1066 
1067 	/*
1068 	 * XXX netmap_fl_refill() only ever (re)fills free list 0 so far.
1069 	 */
1070 
1071 	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
1072 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
1073 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1074 	}
1075 
1076 	/*
1077 	 * First part: import newly received packets.
1078 	 *
1079 	 * nm_i is the index of the next free slot in the netmap ring,
1080 	 * nic_i is the index of the next received packet in the NIC ring,
1081 	 * and they may differ in case if_init() has been called while
1082 	 * in netmap mode. For the receive ring we have
1083 	 *
1084 	 *	nic_i = rxr->next_check;
1085 	 *	nm_i = kring->nr_hwtail (previous)
1086 	 * and
1087 	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1088 	 *
1089 	 * rxr->next_check is set to 0 on a ring reinit
1090 	 */
1091 	if (netmap_no_pendintr || force_update) {
1092 		int crclen = iflib_crcstrip ? 0 : 4;
1093 		int error, avail;
1094 
1095 		for (i = 0; i < rxq->ifr_nfl; i++) {
1096 			fl = &rxq->ifr_fl[i];
1097 			nic_i = fl->ifl_cidx;
1098 			nm_i = netmap_idx_n2k(kring, nic_i);
1099 			avail = ctx->isc_rxd_available(ctx->ifc_softc,
1100 			    rxq->ifr_id, nic_i, USHRT_MAX);
1101 			for (n = 0; avail > 0; n++, avail--) {
1102 				rxd_info_zero(&ri);
1103 				ri.iri_frags = rxq->ifr_frags;
1104 				ri.iri_qsidx = kring->ring_id;
1105 				ri.iri_ifp = ctx->ifc_ifp;
1106 				ri.iri_cidx = nic_i;
1107 
1108 				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1109 				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
1110 				ring->slot[nm_i].flags = 0;
1111 				bus_dmamap_sync(fl->ifl_buf_tag,
1112 				    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
1113 				nm_i = nm_next(nm_i, lim);
1114 				nic_i = nm_next(nic_i, lim);
1115 			}
1116 			if (n) { /* update the state variables */
1117 				if (netmap_no_pendintr && !force_update) {
1118 					/* diagnostics */
1119 					iflib_rx_miss++;
1120 					iflib_rx_miss_bufs += n;
1121 				}
1122 				fl->ifl_cidx = nic_i;
1123 				kring->nr_hwtail = nm_i;
1124 			}
1125 			kring->nr_kflags &= ~NKR_PENDINTR;
1126 		}
1127 	}
1128 	/*
1129 	 * Second part: skip past packets that userspace has released.
1130 	 * (kring->nr_hwcur to head excluded),
1131 	 * and make the buffers available for reception.
1132 	 * As usual nm_i is the index in the netmap ring,
1133 	 * nic_i is the index in the NIC ring, and
1134 	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
1135 	 */
1136 	/* XXX not sure how this will work with multiple free lists */
1137 	nm_i = kring->nr_hwcur;
1138 
1139 	return (netmap_fl_refill(rxq, kring, nm_i, false));
1140 }
1141 
1142 static void
1143 iflib_netmap_intr(struct netmap_adapter *na, int onoff)
1144 {
1145 	struct ifnet *ifp = na->ifp;
1146 	if_ctx_t ctx = ifp->if_softc;
1147 
1148 	CTX_LOCK(ctx);
1149 	if (onoff) {
1150 		IFDI_INTR_ENABLE(ctx);
1151 	} else {
1152 		IFDI_INTR_DISABLE(ctx);
1153 	}
1154 	CTX_UNLOCK(ctx);
1155 }
1156 
1157 
1158 static int
1159 iflib_netmap_attach(if_ctx_t ctx)
1160 {
1161 	struct netmap_adapter na;
1162 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1163 
1164 	bzero(&na, sizeof(na));
1165 
1166 	na.ifp = ctx->ifc_ifp;
1167 	na.na_flags = NAF_BDG_MAYSLEEP;
1168 	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
1169 	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
1170 
1171 	na.num_tx_desc = scctx->isc_ntxd[0];
1172 	na.num_rx_desc = scctx->isc_nrxd[0];
1173 	na.nm_txsync = iflib_netmap_txsync;
1174 	na.nm_rxsync = iflib_netmap_rxsync;
1175 	na.nm_register = iflib_netmap_register;
1176 	na.nm_intr = iflib_netmap_intr;
1177 	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
1178 	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
1179 	return (netmap_attach(&na));
1180 }
1181 
1182 static void
1183 iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
1184 {
1185 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1186 	struct netmap_slot *slot;
1187 
1188 	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1189 	if (slot == NULL)
1190 		return;
1191 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
1192 
1193 		/*
1194 		 * In netmap mode, set the map for the packet buffer.
1195 		 * NOTE: Some drivers (not this one) also need to set
1196 		 * the physical buffer address in the NIC ring.
1197 		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
1198 		 * netmap slot index, si
1199 		 */
1200 		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
1201 		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
1202 		    NMB(na, slot + si));
1203 	}
1204 }
1205 
1206 static void
1207 iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
1208 {
1209 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
1210 	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
1211 	struct netmap_slot *slot;
1212 	uint32_t nm_i;
1213 
1214 	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1215 	if (slot == NULL)
1216 		return;
1217 	nm_i = netmap_idx_n2k(kring, 0);
1218 	netmap_fl_refill(rxq, kring, nm_i, true);
1219 }
1220 
1221 static void
1222 iflib_netmap_timer_adjust(if_ctx_t ctx, iflib_txq_t txq, uint32_t *reset_on)
1223 {
1224 	struct netmap_kring *kring;
1225 	uint16_t txqid;
1226 
1227 	txqid = txq->ift_id;
1228 	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];
1229 
1230 	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
1231 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
1232 		    BUS_DMASYNC_POSTREAD);
1233 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
1234 			netmap_tx_irq(ctx->ifc_ifp, txqid);
1235 		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
1236 			if (hz < 2000)
1237 				*reset_on = 1;
1238 			else
1239 				*reset_on = hz / 1000;
1240 		}
1241 	}
1242 }
1243 
1244 #define iflib_netmap_detach(ifp) netmap_detach(ifp)
1245 
1246 #else
1247 #define iflib_netmap_txq_init(ctx, txq)
1248 #define iflib_netmap_rxq_init(ctx, rxq)
1249 #define iflib_netmap_detach(ifp)
1250 
1251 #define iflib_netmap_attach(ctx) (0)
1252 #define netmap_rx_irq(ifp, qid, budget) (0)
1253 #define netmap_tx_irq(ifp, qid) do {} while (0)
1254 #define iflib_netmap_timer_adjust(ctx, txq, reset_on)
1255 
1256 #endif
1257 
1258 #if defined(__i386__) || defined(__amd64__)
1259 static __inline void
1260 prefetch(void *x)
1261 {
1262 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1263 }
1264 static __inline void
1265 prefetch2cachelines(void *x)
1266 {
1267 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
1268 #if (CACHE_LINE_SIZE < 128)
1269 	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
1270 #endif
1271 }
1272 #else
1273 #define prefetch(x)
1274 #define prefetch2cachelines(x)
1275 #endif
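/*
 * A typical (illustrative) use is to hide memory latency while walking
 * the software descriptor arrays, e.g.:
 *
 *	prefetch(&txq->ift_sds.ifsd_m[next]);
 *
 * prefetch2cachelines() additionally pulls in the following cache line,
 * for objects such as an mbuf header that may straddle two lines when
 * CACHE_LINE_SIZE is smaller than 128 bytes.
 */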
1276 
1277 static void
1278 iflib_gen_mac(if_ctx_t ctx)
1279 {
1280 	struct thread *td;
1281 	MD5_CTX mdctx;
1282 	char uuid[HOSTUUIDLEN+1];
1283 	char buf[HOSTUUIDLEN+16];
1284 	uint8_t *mac;
1285 	unsigned char digest[16];
1286 
1287 	td = curthread;
1288 	mac = ctx->ifc_mac;
1289 	uuid[HOSTUUIDLEN] = 0;
1290 	bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
1291 	snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
1292 	/*
1293 	 * Generate a pseudo-random, deterministic MAC
1294 	 * address based on the UUID and unit number.
1295 	 * The FreeBSD Foundation OUI of 58-9C-FC is used.
1296 	 */
1297 	MD5Init(&mdctx);
1298 	MD5Update(&mdctx, buf, strlen(buf));
1299 	MD5Final(digest, &mdctx);
1300 
1301 	mac[0] = 0x58;
1302 	mac[1] = 0x9C;
1303 	mac[2] = 0xFC;
1304 	mac[3] = digest[0];
1305 	mac[4] = digest[1];
1306 	mac[5] = digest[2];
1307 }
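/*
 * For instance (hypothetical digest), if MD5("<hostuuid>-<nameunit>")
 * begins with 0x12 0x34 0x56, the generated address is
 * 58:9c:fc:12:34:56.  The same host UUID and unit always hash to the
 * same address, so a pseudo interface keeps a stable MAC across
 * reboots.
 */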
1308 
1309 static void
1310 iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
1311 {
1312 	iflib_fl_t fl;
1313 
1314 	fl = &rxq->ifr_fl[flid];
1315 	iru->iru_paddrs = fl->ifl_bus_addrs;
1316 	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
1317 	iru->iru_idxs = fl->ifl_rxd_idxs;
1318 	iru->iru_qsidx = rxq->ifr_id;
1319 	iru->iru_buf_size = fl->ifl_buf_size;
1320 	iru->iru_flidx = fl->ifl_id;
1321 }
1322 
1323 static void
1324 _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
1325 {
1326 	if (err)
1327 		return;
1328 	*(bus_addr_t *) arg = segs[0].ds_addr;
1329 }
1330 
1331 int
1332 iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
1333 {
1334 	int err;
1335 	device_t dev = ctx->ifc_dev;
1336 
1337 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
1338 				align, 0,		/* alignment, bounds */
1339 				BUS_SPACE_MAXADDR,	/* lowaddr */
1340 				BUS_SPACE_MAXADDR,	/* highaddr */
1341 				NULL, NULL,		/* filter, filterarg */
1342 				size,			/* maxsize */
1343 				1,			/* nsegments */
1344 				size,			/* maxsegsize */
1345 				BUS_DMA_ALLOCNOW,	/* flags */
1346 				NULL,			/* lockfunc */
1347 				NULL,			/* lockarg */
1348 				&dma->idi_tag);
1349 	if (err) {
1350 		device_printf(dev,
1351 		    "%s: bus_dma_tag_create failed: %d\n",
1352 		    __func__, err);
1353 		goto fail_0;
1354 	}
1355 
1356 	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
1357 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
1358 	if (err) {
1359 		device_printf(dev,
1360 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
1361 		    __func__, (uintmax_t)size, err);
1362 		goto fail_1;
1363 	}
1364 
1365 	dma->idi_paddr = IF_BAD_DMA;
1366 	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
1367 	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
1368 	if (err || dma->idi_paddr == IF_BAD_DMA) {
1369 		device_printf(dev,
1370 		    "%s: bus_dmamap_load failed: %d\n",
1371 		    __func__, err);
1372 		goto fail_2;
1373 	}
1374 
1375 	dma->idi_size = size;
1376 	return (0);
1377 
1378 fail_2:
1379 	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1380 fail_1:
1381 	bus_dma_tag_destroy(dma->idi_tag);
1382 fail_0:
1383 	dma->idi_tag = NULL;
1384 
1385 	return (err);
1386 }
1387 
1388 int
1389 iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
1390 {
1391 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1392 
1393 	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
1394 
1395 	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
1396 }
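/*
 * A minimal usage sketch (hypothetical descriptor type and ring size)
 * for a driver allocating a DMA-able descriptor ring and releasing it
 * again:
 *
 *	struct iflib_dma_info di;
 *
 *	if (iflib_dma_alloc(ctx, 1024 * sizeof(struct my_tx_desc),
 *	    &di, 0) != 0)
 *		return (ENOMEM);
 *	... use di.idi_vaddr / di.idi_paddr to program the ring ...
 *	iflib_dma_free(&di);
 */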
1397 
1398 int
1399 iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
1400 {
1401 	int i, err = 0;
1402 	iflib_dma_info_t *dmaiter;
1403 
1404 	dmaiter = dmalist;
1405 	for (i = 0; i < count; i++, dmaiter++) {
1406 		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
1407 			break;
1408 	}
1409 	if (err)
1410 		iflib_dma_free_multi(dmalist, i);
1411 	return (err);
1412 }
1413 
1414 void
1415 iflib_dma_free(iflib_dma_info_t dma)
1416 {
1417 	if (dma->idi_tag == NULL)
1418 		return;
1419 	if (dma->idi_paddr != IF_BAD_DMA) {
1420 		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
1421 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1422 		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
1423 		dma->idi_paddr = IF_BAD_DMA;
1424 	}
1425 	if (dma->idi_vaddr != NULL) {
1426 		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
1427 		dma->idi_vaddr = NULL;
1428 	}
1429 	bus_dma_tag_destroy(dma->idi_tag);
1430 	dma->idi_tag = NULL;
1431 }
1432 
1433 void
1434 iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
1435 {
1436 	int i;
1437 	iflib_dma_info_t *dmaiter = dmalist;
1438 
1439 	for (i = 0; i < count; i++, dmaiter++)
1440 		iflib_dma_free(*dmaiter);
1441 }
1442 
1443 #ifdef EARLY_AP_STARTUP
1444 static const int iflib_started = 1;
1445 #else
1446 /*
1447  * We used to abuse the smp_started flag to decide if the queues have been
1448  * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
1449  * That gave bad races, since the SYSINIT() runs strictly after smp_started
1450  * is set.  Run a SYSINIT() strictly after that to just set a usable
1451  * completion flag.
1452  */
1453 
1454 static int iflib_started;
1455 
1456 static void
1457 iflib_record_started(void *arg)
1458 {
1459 	iflib_started = 1;
1460 }
1461 
1462 SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
1463 	iflib_record_started, NULL);
1464 #endif
1465 
1466 static int
1467 iflib_fast_intr(void *arg)
1468 {
1469 	iflib_filter_info_t info = arg;
1470 	struct grouptask *gtask = info->ifi_task;
1471 	int result;
1472 
1473 	if (!iflib_started)
1474 		return (FILTER_STRAY);
1475 
1476 	DBG_COUNTER_INC(fast_intrs);
1477 	if (info->ifi_filter != NULL) {
1478 		result = info->ifi_filter(info->ifi_filter_arg);
1479 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1480 			return (result);
1481 	}
1482 
1483 	GROUPTASK_ENQUEUE(gtask);
1484 	return (FILTER_HANDLED);
1485 }
1486 
1487 static int
1488 iflib_fast_intr_rxtx(void *arg)
1489 {
1490 	iflib_filter_info_t info = arg;
1491 	struct grouptask *gtask = info->ifi_task;
1492 	if_ctx_t ctx;
1493 	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
1494 	iflib_txq_t txq;
1495 	void *sc;
1496 	int i, cidx, result;
1497 	qidx_t txqid;
1498 
1499 	if (!iflib_started)
1500 		return (FILTER_STRAY);
1501 
1502 	DBG_COUNTER_INC(fast_intrs);
1503 	if (info->ifi_filter != NULL) {
1504 		result = info->ifi_filter(info->ifi_filter_arg);
1505 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1506 			return (result);
1507 	}
1508 
1509 	ctx = rxq->ifr_ctx;
1510 	sc = ctx->ifc_softc;
1511 	MPASS(rxq->ifr_ntxqirq);
1512 	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
1513 		txqid = rxq->ifr_txqid[i];
1514 		txq = &ctx->ifc_txqs[txqid];
1515 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
1516 		    BUS_DMASYNC_POSTREAD);
1517 		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
1518 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
1519 			continue;
1520 		}
1521 		GROUPTASK_ENQUEUE(&txq->ift_task);
1522 	}
1523 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
1524 		cidx = rxq->ifr_cq_cidx;
1525 	else
1526 		cidx = rxq->ifr_fl[0].ifl_cidx;
1527 	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
1528 		GROUPTASK_ENQUEUE(gtask);
1529 	else {
1530 		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
1531 		DBG_COUNTER_INC(rx_intr_enables);
1532 	}
1533 	return (FILTER_HANDLED);
1534 }
1535 
1536 
1537 static int
1538 iflib_fast_intr_ctx(void *arg)
1539 {
1540 	iflib_filter_info_t info = arg;
1541 	struct grouptask *gtask = info->ifi_task;
1542 	int result;
1543 
1544 	if (!iflib_started)
1545 		return (FILTER_STRAY);
1546 
1547 	DBG_COUNTER_INC(fast_intrs);
1548 	if (info->ifi_filter != NULL) {
1549 		result = info->ifi_filter(info->ifi_filter_arg);
1550 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1551 			return (result);
1552 	}
1553 
1554 	GROUPTASK_ENQUEUE(gtask);
1555 	return (FILTER_HANDLED);
1556 }
1557 
1558 static int
1559 _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
1560 		 driver_filter_t filter, driver_intr_t handler, void *arg,
1561 		 const char *name)
1562 {
1563 	int rc, flags;
1564 	struct resource *res;
1565 	void *tag = NULL;
1566 	device_t dev = ctx->ifc_dev;
1567 
1568 	flags = RF_ACTIVE;
1569 	if (ctx->ifc_flags & IFC_LEGACY)
1570 		flags |= RF_SHAREABLE;
1571 	MPASS(rid < 512);
1572 	irq->ii_rid = rid;
1573 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
1574 	if (res == NULL) {
1575 		device_printf(dev,
1576 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
1577 		return (ENOMEM);
1578 	}
1579 	irq->ii_res = res;
1580 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
1581 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
1582 						filter, handler, arg, &tag);
1583 	if (rc != 0) {
1584 		device_printf(dev,
1585 		    "failed to setup interrupt for rid %d, name %s: %d\n",
1586 					  rid, name ? name : "unknown", rc);
1587 		return (rc);
1588 	} else if (name)
1589 		bus_describe_intr(dev, res, tag, "%s", name);
1590 
1591 	irq->ii_tag = tag;
1592 	return (0);
1593 }
1594 
1595 
1596 /*********************************************************************
1597  *
1598  *  Allocate DMA resources for TX buffers as well as memory for the TX
1599  *  mbuf map.  TX DMA maps (non-TSO/TSO) and the TX mbuf map are kept in
1600  *  an iflib_sw_tx_desc_array structure, storing all the information that
1601  *  is needed to transmit a packet on the wire.  This is called only
1602  *  once at attach; setup is done on every reset.
1603  *
1604  **********************************************************************/
1605 static int
1606 iflib_txsd_alloc(iflib_txq_t txq)
1607 {
1608 	if_ctx_t ctx = txq->ift_ctx;
1609 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1610 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1611 	device_t dev = ctx->ifc_dev;
1612 	bus_size_t tsomaxsize;
1613 	int err, nsegments, ntsosegments;
1614 	bool tso;
1615 
1616 	nsegments = scctx->isc_tx_nsegments;
1617 	ntsosegments = scctx->isc_tx_tso_segments_max;
1618 	tsomaxsize = scctx->isc_tx_tso_size_max;
1619 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
1620 		tsomaxsize += sizeof(struct ether_vlan_header);
1621 	MPASS(scctx->isc_ntxd[0] > 0);
1622 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
1623 	MPASS(nsegments > 0);
1624 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
1625 		MPASS(ntsosegments > 0);
1626 		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
1627 	}
1628 
1629 	/*
1630 	 * Set up DMA tags for TX buffers.
1631 	 */
1632 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1633 			       1, 0,			/* alignment, bounds */
1634 			       BUS_SPACE_MAXADDR,	/* lowaddr */
1635 			       BUS_SPACE_MAXADDR,	/* highaddr */
1636 			       NULL, NULL,		/* filter, filterarg */
1637 			       sctx->isc_tx_maxsize,		/* maxsize */
1638 			       nsegments,	/* nsegments */
1639 			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
1640 			       0,			/* flags */
1641 			       NULL,			/* lockfunc */
1642 			       NULL,			/* lockfuncarg */
1643 			       &txq->ift_buf_tag))) {
1644 		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
1645 		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
1646 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
1647 		goto fail;
1648 	}
1649 	tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
1650 	if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
1651 			       1, 0,			/* alignment, bounds */
1652 			       BUS_SPACE_MAXADDR,	/* lowaddr */
1653 			       BUS_SPACE_MAXADDR,	/* highaddr */
1654 			       NULL, NULL,		/* filter, filterarg */
1655 			       tsomaxsize,		/* maxsize */
1656 			       ntsosegments,	/* nsegments */
1657 			       sctx->isc_tso_maxsegsize,/* maxsegsize */
1658 			       0,			/* flags */
1659 			       NULL,			/* lockfunc */
1660 			       NULL,			/* lockfuncarg */
1661 			       &txq->ift_tso_buf_tag))) {
1662 		device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
1663 		    err);
1664 		goto fail;
1665 	}
1666 
1667 	/* Allocate memory for the TX mbuf map. */
1668 	if (!(txq->ift_sds.ifsd_m =
1669 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1670 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1671 		device_printf(dev, "Unable to allocate TX mbuf map memory\n");
1672 		err = ENOMEM;
1673 		goto fail;
1674 	}
1675 
1676 	/*
1677 	 * Create the DMA maps for TX buffers.
1678 	 */
1679 	if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
1680 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1681 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1682 		device_printf(dev,
1683 		    "Unable to allocate TX buffer DMA map memory\n");
1684 		err = ENOMEM;
1685 		goto fail;
1686 	}
1687 	if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
1688 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1689 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1690 		device_printf(dev,
1691 		    "Unable to allocate TSO TX buffer map memory\n");
1692 		err = ENOMEM;
1693 		goto fail;
1694 	}
1695 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1696 		err = bus_dmamap_create(txq->ift_buf_tag, 0,
1697 		    &txq->ift_sds.ifsd_map[i]);
1698 		if (err != 0) {
1699 			device_printf(dev, "Unable to create TX DMA map\n");
1700 			goto fail;
1701 		}
1702 		if (!tso)
1703 			continue;
1704 		err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
1705 		    &txq->ift_sds.ifsd_tso_map[i]);
1706 		if (err != 0) {
1707 			device_printf(dev, "Unable to create TSO TX DMA map\n");
1708 			goto fail;
1709 		}
1710 	}
1711 	return (0);
1712 fail:
1713 	/* We free everything; this handles the case where we failed partway. */
1714 	iflib_tx_structures_free(ctx);
1715 	return (err);
1716 }
1717 
1718 static void
1719 iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
1720 {
1721 	bus_dmamap_t map;
1722 
1723 	map = NULL;
1724 	if (txq->ift_sds.ifsd_map != NULL)
1725 		map = txq->ift_sds.ifsd_map[i];
1726 	if (map != NULL) {
1727 		bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
1728 		bus_dmamap_unload(txq->ift_buf_tag, map);
1729 		bus_dmamap_destroy(txq->ift_buf_tag, map);
1730 		txq->ift_sds.ifsd_map[i] = NULL;
1731 	}
1732 
1733 	map = NULL;
1734 	if (txq->ift_sds.ifsd_tso_map != NULL)
1735 		map = txq->ift_sds.ifsd_tso_map[i];
1736 	if (map != NULL) {
1737 		bus_dmamap_sync(txq->ift_tso_buf_tag, map,
1738 		    BUS_DMASYNC_POSTWRITE);
1739 		bus_dmamap_unload(txq->ift_tso_buf_tag, map);
1740 		bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
1741 		txq->ift_sds.ifsd_tso_map[i] = NULL;
1742 	}
1743 }
1744 
1745 static void
1746 iflib_txq_destroy(iflib_txq_t txq)
1747 {
1748 	if_ctx_t ctx = txq->ift_ctx;
1749 
1750 	for (int i = 0; i < txq->ift_size; i++)
1751 		iflib_txsd_destroy(ctx, txq, i);
1752 	if (txq->ift_sds.ifsd_map != NULL) {
1753 		free(txq->ift_sds.ifsd_map, M_IFLIB);
1754 		txq->ift_sds.ifsd_map = NULL;
1755 	}
1756 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1757 		free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
1758 		txq->ift_sds.ifsd_tso_map = NULL;
1759 	}
1760 	if (txq->ift_sds.ifsd_m != NULL) {
1761 		free(txq->ift_sds.ifsd_m, M_IFLIB);
1762 		txq->ift_sds.ifsd_m = NULL;
1763 	}
1764 	if (txq->ift_buf_tag != NULL) {
1765 		bus_dma_tag_destroy(txq->ift_buf_tag);
1766 		txq->ift_buf_tag = NULL;
1767 	}
1768 	if (txq->ift_tso_buf_tag != NULL) {
1769 		bus_dma_tag_destroy(txq->ift_tso_buf_tag);
1770 		txq->ift_tso_buf_tag = NULL;
1771 	}
1772 }
1773 
1774 static void
1775 iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
1776 {
1777 	struct mbuf **mp;
1778 
1779 	mp = &txq->ift_sds.ifsd_m[i];
1780 	if (*mp == NULL)
1781 		return;
1782 
1783 	if (txq->ift_sds.ifsd_map != NULL) {
1784 		bus_dmamap_sync(txq->ift_buf_tag,
1785 		    txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
1786 		bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
1787 	}
1788 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1789 		bus_dmamap_sync(txq->ift_tso_buf_tag,
1790 		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
1791 		bus_dmamap_unload(txq->ift_tso_buf_tag,
1792 		    txq->ift_sds.ifsd_tso_map[i]);
1793 	}
1794 	m_free(*mp);
1795 	DBG_COUNTER_INC(tx_frees);
1796 	*mp = NULL;
1797 }
1798 
1799 static int
1800 iflib_txq_setup(iflib_txq_t txq)
1801 {
1802 	if_ctx_t ctx = txq->ift_ctx;
1803 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1804 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1805 	iflib_dma_info_t di;
1806 	int i;
1807 
1808 	/* Set number of descriptors available */
1809 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
1810 	/* XXX make configurable */
1811 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
1812 
1813 	/* Reset indices */
1814 	txq->ift_cidx_processed = 0;
1815 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
1816 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
1817 
1818 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1819 		bzero((void *)di->idi_vaddr, di->idi_size);
1820 
1821 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
1822 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1823 		bus_dmamap_sync(di->idi_tag, di->idi_map,
1824 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1825 	return (0);
1826 }
1827 
1828 /*********************************************************************
1829  *
1830  *  Allocate DMA resources for RX buffers as well as memory for the RX
1831  *  mbuf map, direct RX cluster pointer map and RX cluster bus address
1832  *  map.  RX DMA map, RX mbuf map, direct RX cluster pointer map and
1833  *  RX cluster map are kept in a iflib_sw_rx_desc_array structure.
1834  *  RX cluster map are kept in an iflib_sw_rx_desc_array structure.
1835  *  Since we use one entry in iflib_sw_rx_desc_array per received
1836  *  number of hardware receive descriptors that we've allocated.
1837  *
1838  **********************************************************************/
1839 static int
1840 iflib_rxsd_alloc(iflib_rxq_t rxq)
1841 {
1842 	if_ctx_t ctx = rxq->ifr_ctx;
1843 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1844 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1845 	device_t dev = ctx->ifc_dev;
1846 	iflib_fl_t fl;
1847 	int			err;
1848 
1849 	MPASS(scctx->isc_nrxd[0] > 0);
1850 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
1851 
1852 	fl = rxq->ifr_fl;
1853 	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
1854 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* not necessarily isc_nrxd[0] */
1855 		/* Set up DMA tag for RX buffers. */
1856 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1857 					 1, 0,			/* alignment, bounds */
1858 					 BUS_SPACE_MAXADDR,	/* lowaddr */
1859 					 BUS_SPACE_MAXADDR,	/* highaddr */
1860 					 NULL, NULL,		/* filter, filterarg */
1861 					 sctx->isc_rx_maxsize,	/* maxsize */
1862 					 sctx->isc_rx_nsegments,	/* nsegments */
1863 					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
1864 					 0,			/* flags */
1865 					 NULL,			/* lockfunc */
1866 					 NULL,			/* lockarg */
1867 					 &fl->ifl_buf_tag);
1868 		if (err) {
1869 			device_printf(dev,
1870 			    "Unable to allocate RX DMA tag: %d\n", err);
1871 			goto fail;
1872 		}
1873 
1874 		/* Allocate memory for the RX mbuf map. */
1875 		if (!(fl->ifl_sds.ifsd_m =
1876 		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
1877 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1878 			device_printf(dev,
1879 			    "Unable to allocate RX mbuf map memory\n");
1880 			err = ENOMEM;
1881 			goto fail;
1882 		}
1883 
1884 		/* Allocate memory for the direct RX cluster pointer map. */
1885 		if (!(fl->ifl_sds.ifsd_cl =
1886 		      (caddr_t *) malloc(sizeof(caddr_t) *
1887 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1888 			device_printf(dev,
1889 			    "Unable to allocate RX cluster map memory\n");
1890 			err = ENOMEM;
1891 			goto fail;
1892 		}
1893 
1894 		/* Allocate memory for the RX cluster bus address map. */
1895 		if (!(fl->ifl_sds.ifsd_ba =
1896 		      (bus_addr_t *) malloc(sizeof(bus_addr_t) *
1897 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1898 			device_printf(dev,
1899 			    "Unable to allocate RX bus address map memory\n");
1900 			err = ENOMEM;
1901 			goto fail;
1902 		}
1903 
1904 		/*
1905 		 * Create the DMA maps for RX buffers.
1906 		 */
1907 		if (!(fl->ifl_sds.ifsd_map =
1908 		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) *
 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1909 			device_printf(dev,
1910 			    "Unable to allocate RX buffer DMA map memory\n");
1911 			err = ENOMEM;
1912 			goto fail;
1913 		}
1914 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
1915 			err = bus_dmamap_create(fl->ifl_buf_tag, 0,
1916 			    &fl->ifl_sds.ifsd_map[i]);
1917 			if (err != 0) {
1918 				device_printf(dev, "Unable to create RX buffer DMA map\n");
1919 				goto fail;
1920 			}
1921 		}
1922 	}
1923 	return (0);
1924 
1925 fail:
1926 	iflib_rx_structures_free(ctx);
1927 	return (err);
1928 }
1929 
1931 /*
1932  * Internal service routines
1933  */
1934 
1935 struct rxq_refill_cb_arg {
1936 	int               error;
1937 	bus_dma_segment_t seg;
1938 	int               nseg;
1939 };
1940 
1941 static void
1942 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1943 {
1944 	struct rxq_refill_cb_arg *cb_arg = arg;
1945 
1946 	cb_arg->error = error;
1947 	cb_arg->seg = segs[0];
1948 	cb_arg->nseg = nseg;
1949 }
1950 
1951 /**
1952  *	_iflib_fl_refill - refill an rxq free-buffer list
1953  *	@ctx: the iflib context
1954  *	@fl: the free list to refill
1955  *	@count: the number of new buffers to allocate
1956  *
1957  *	(Re)populate an rxq free-buffer list with up to @count new packet buffers.
1958  *	The caller must assure that @count does not exceed the queue's capacity.
1959  */
1960 static void
1961 _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
1962 {
1963 	struct if_rxd_update iru;
1964 	struct rxq_refill_cb_arg cb_arg;
1965 	struct mbuf *m;
1966 	caddr_t cl, *sd_cl;
1967 	struct mbuf **sd_m;
1968 	bus_dmamap_t *sd_map;
1969 	bus_addr_t bus_addr, *sd_ba;
1970 	int err, frag_idx, i, idx, n, pidx;
1971 	qidx_t credits;
1972 
1973 	sd_m = fl->ifl_sds.ifsd_m;
1974 	sd_map = fl->ifl_sds.ifsd_map;
1975 	sd_cl = fl->ifl_sds.ifsd_cl;
1976 	sd_ba = fl->ifl_sds.ifsd_ba;
1977 	pidx = fl->ifl_pidx;
1978 	idx = pidx;
1979 	frag_idx = fl->ifl_fragidx;
1980 	credits = fl->ifl_credits;
1981 
1982 	i = 0;
1983 	n = count;
1984 	MPASS(n > 0);
1985 	MPASS(credits + n <= fl->ifl_size);
1986 
1987 	if (pidx < fl->ifl_cidx)
1988 		MPASS(pidx + n <= fl->ifl_cidx);
1989 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
1990 		MPASS(fl->ifl_gen == 0);
1991 	if (pidx > fl->ifl_cidx)
1992 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
1993 
1994 	DBG_COUNTER_INC(fl_refills);
1995 	if (n > 8)
1996 		DBG_COUNTER_INC(fl_refills_large);
1997 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
1998 	while (n--) {
1999 		/*
2000 		 * We allocate an uninitialized mbuf + cluster; the mbuf
2001 		 * is initialized after rx.  If the cluster is still set
2002 		 * then we know a minimum-sized packet was received and
2003 		 * the cluster can be reused without a fresh allocation.
2004 		 */
2005 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
2006 		    &frag_idx);
2007 		if (frag_idx < 0)
2008 			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
2009 		MPASS(frag_idx >= 0);
2010 		if ((cl = sd_cl[frag_idx]) == NULL) {
2011 			if ((cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
2012 				break;
2013 
2014 			cb_arg.error = 0;
2015 			MPASS(sd_map != NULL);
2016 			err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
2017 			    cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
2018 			    BUS_DMA_NOWAIT);
2019 			if (err != 0 || cb_arg.error) {
2020 				/*
2021 				 * XXX: should this be !zone_pack?  As written
2022 				 * the cluster is leaked on a load failure.
 				 */
2023 				if (fl->ifl_zone == zone_pack)
2024 					uma_zfree(fl->ifl_zone, cl);
2025 				break;
2026 			}
2027 
2028 			sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
2029 			sd_cl[frag_idx] = cl;
2030 #if MEMORY_LOGGING
2031 			fl->ifl_cl_enqueued++;
2032 #endif
2033 		} else {
2034 			bus_addr = sd_ba[frag_idx];
2035 		}
2036 		bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
2037 		    BUS_DMASYNC_PREREAD);
2038 
2039 		MPASS(sd_m[frag_idx] == NULL);
2040 		if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL)
2041 			break;
2043 		sd_m[frag_idx] = m;
2044 		bit_set(fl->ifl_rx_bitmap, frag_idx);
2045 #if MEMORY_LOGGING
2046 		fl->ifl_m_enqueued++;
2047 #endif
2048 
2049 		DBG_COUNTER_INC(rx_allocs);
2050 		fl->ifl_rxd_idxs[i] = frag_idx;
2051 		fl->ifl_bus_addrs[i] = bus_addr;
2052 		fl->ifl_vm_addrs[i] = cl;
2053 		credits++;
2054 		i++;
2055 		MPASS(credits <= fl->ifl_size);
2056 		if (++idx == fl->ifl_size) {
2057 			fl->ifl_gen = 1;
2058 			idx = 0;
2059 		}
2060 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
2061 			iru.iru_pidx = pidx;
2062 			iru.iru_count = i;
2063 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2064 			i = 0;
2065 			pidx = idx;
2066 			fl->ifl_pidx = idx;
2067 			fl->ifl_credits = credits;
2068 		}
2069 	}
2070 
2071 	if (i) {
2072 		iru.iru_pidx = pidx;
2073 		iru.iru_count = i;
2074 		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2075 		fl->ifl_pidx = idx;
2076 		fl->ifl_credits = credits;
2077 	}
2078 	DBG_COUNTER_INC(rxd_flush);
2079 	if (fl->ifl_pidx == 0)
2080 		pidx = fl->ifl_size - 1;
2081 	else
2082 		pidx = fl->ifl_pidx - 1;
2083 
2084 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2085 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2086 	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
2087 	fl->ifl_fragidx = frag_idx;
2088 }
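
/*
 * Illustrative note on the refill batching above (a sketch; the exact
 * value of IFLIB_MAX_RX_REFRESH is an assumption here, commonly 32):
 * with count = 40, the loop would push two isc_rxd_refill() batches of
 * 32 and 8 descriptors, then issue a single isc_rxd_flush() doorbell
 * passing the index of the last posted descriptor (pidx - 1, with
 * wrap), so the hardware only sees fully-initialized descriptors.
 */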
2089 
2090 static __inline void
2091 __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
2092 {
2093 	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
2094 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
2095 #ifdef INVARIANTS
2096 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
2097 #endif
2098 
2099 	MPASS(fl->ifl_credits <= fl->ifl_size);
2100 	MPASS(reclaimable == delta);
2101 
2102 	if (reclaimable > 0)
2103 		_iflib_fl_refill(ctx, fl, min(max, reclaimable));
2104 }
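
/*
 * Worked example for the headroom computation above: with
 * ifl_size = 512 and ifl_credits = 200, reclaimable is
 * 512 - 200 - 1 = 311, so at most 311 buffers are refilled and at
 * least one descriptor always stays unused; credits can reach 511
 * but never 512, which is what keeps pidx from catching up with
 * cidx.  Under INVARIANTS the same quantity is re-derived from
 * get_inuse() as a cross-check on the credit accounting.
 */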
2105 
2106 uint8_t
2107 iflib_in_detach(if_ctx_t ctx)
2108 {
2109 	bool in_detach;

2110 	STATE_LOCK(ctx);
2111 	in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
2112 	STATE_UNLOCK(ctx);
2113 	return (in_detach);
2114 }
2115 
2116 static void
2117 iflib_fl_bufs_free(iflib_fl_t fl)
2118 {
2119 	iflib_dma_info_t idi = fl->ifl_ifdi;
2120 	bus_dmamap_t sd_map;
2121 	uint32_t i;
2122 
2123 	for (i = 0; i < fl->ifl_size; i++) {
2124 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2125 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
2126 
2127 		if (*sd_cl != NULL) {
2128 			sd_map = fl->ifl_sds.ifsd_map[i];
2129 			bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
2130 			    BUS_DMASYNC_POSTREAD);
2131 			bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
2132 			uma_zfree(fl->ifl_zone, *sd_cl);
2133 			/* XXX: Should this get moved out? */
2135 			if (iflib_in_detach(fl->ifl_rxq->ifr_ctx))
2136 				bus_dmamap_destroy(fl->ifl_buf_tag, sd_map);
2137 			if (*sd_m != NULL) {
2138 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2139 				uma_zfree(zone_mbuf, *sd_m);
2140 			}
2141 		} else {
2142 			MPASS(*sd_cl == NULL);
2143 			MPASS(*sd_m == NULL);
2144 		}
2145 #if MEMORY_LOGGING
2146 		fl->ifl_m_dequeued++;
2147 		fl->ifl_cl_dequeued++;
2148 #endif
2149 		*sd_cl = NULL;
2150 		*sd_m = NULL;
2151 	}
2152 #ifdef INVARIANTS
2153 	for (i = 0; i < fl->ifl_size; i++) {
2154 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
2155 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
2156 	}
2157 #endif
2158 	/*
2159 	 * Reset free list values
2160 	 */
2161 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen =
 	    fl->ifl_fragidx = 0;
2162 	bzero(idi->idi_vaddr, idi->idi_size);
2163 }
2164 
2165 /*********************************************************************
2166  *
2167  *  Initialize a receive ring and its buffers.
2168  *
2169  **********************************************************************/
2170 static int
2171 iflib_fl_setup(iflib_fl_t fl)
2172 {
2173 	iflib_rxq_t rxq = fl->ifl_rxq;
2174 	if_ctx_t ctx = rxq->ifr_ctx;
2175 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2176 
2177 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
2178 	/*
2179 	** Free current RX buffer structs and their mbufs
2180 	*/
2181 	iflib_fl_bufs_free(fl);
2182 	/* Now replenish the mbufs */
2183 	MPASS(fl->ifl_credits == 0);
2184 	/*
2185 	 * XXX don't set the max_frame_size to larger
2186 	 * than the hardware can handle
2187 	 */
2188 	if (scctx->isc_max_frame_size <= 2048)
2189 		fl->ifl_buf_size = MCLBYTES;
2190 	else
2191 		fl->ifl_buf_size = MJUMPAGESIZE;
2192 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
2193 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
2194 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
2195 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
2196 
2197 	/*
2198 	 * Avoid pre-allocating zillions of clusters for an idle card,
2199 	 * which also potentially speeds up attach.
2200 	 */
2201 	_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
2202 	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
2203 	/* Handle refill failure. */
2204 	if (min(128, fl->ifl_size) != fl->ifl_credits)
2205 		return (ENOBUFS);
2206 	MPASS(rxq != NULL);
2209 	MPASS(fl->ifl_ifdi != NULL);
2210 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2211 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2212 	return (0);
2213 }
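
/*
 * Buffer sizing example for iflib_fl_setup() above: a standard
 * 1500-byte MTU gives a max frame of 1518 bytes, which fits in an
 * MCLBYTES (2 KB) cluster; a 9000-byte jumbo MTU selects
 * MJUMPAGESIZE (page-sized, typically 4 KB) clusters, in which case
 * a single large frame is delivered as multiple RX fragments that
 * assemble_segments() chains back together.
 */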
2214 
2215 /*********************************************************************
2216  *
2217  *  Free receive ring data structures
2218  *
2219  **********************************************************************/
2220 static void
2221 iflib_rx_sds_free(iflib_rxq_t rxq)
2222 {
2223 	iflib_fl_t fl;
2224 	int i, j;
2225 
2226 	if (rxq->ifr_fl != NULL) {
2227 		for (i = 0; i < rxq->ifr_nfl; i++) {
2228 			fl = &rxq->ifr_fl[i];
2229 			if (fl->ifl_buf_tag != NULL) {
2230 				if (fl->ifl_sds.ifsd_map != NULL) {
2231 					for (j = 0; j < fl->ifl_size; j++) {
2232 						if (fl->ifl_sds.ifsd_map[j] ==
2233 						    NULL)
2234 							continue;
2235 						bus_dmamap_sync(
2236 						    fl->ifl_buf_tag,
2237 						    fl->ifl_sds.ifsd_map[j],
2238 						    BUS_DMASYNC_POSTREAD);
2239 						bus_dmamap_unload(
2240 						    fl->ifl_buf_tag,
2241 						    fl->ifl_sds.ifsd_map[j]);
2242 					}
2243 				}
2244 				bus_dma_tag_destroy(fl->ifl_buf_tag);
2245 				fl->ifl_buf_tag = NULL;
2246 			}
2247 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2248 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2249 			free(fl->ifl_sds.ifsd_ba, M_IFLIB);
2250 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2251 			fl->ifl_sds.ifsd_m = NULL;
2252 			fl->ifl_sds.ifsd_cl = NULL;
2253 			fl->ifl_sds.ifsd_ba = NULL;
2254 			fl->ifl_sds.ifsd_map = NULL;
2255 		}
2256 		free(rxq->ifr_fl, M_IFLIB);
2257 		rxq->ifr_fl = NULL;
2258 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
2259 	}
2260 }
2261 
2262 /*
2263  * MI (machine independent) logic
2264  */
2266 static void
2267 iflib_timer(void *arg)
2268 {
2269 	iflib_txq_t txq = arg;
2270 	if_ctx_t ctx = txq->ift_ctx;
2271 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2272 	uint64_t this_tick = ticks;
2273 	uint32_t reset_on = hz / 2;
2274 
2275 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
2276 		return;
2277 	/*
2278 	** Check on the state of the TX queue(s); this
2279 	** can be done without the lock because it's RO
2280 	** and the HUNG state will be static if set.
2281 	*/
2282 	if (this_tick - txq->ift_last_timer_tick >= hz / 2) {
2283 		txq->ift_last_timer_tick = this_tick;
2284 		IFDI_TIMER(ctx, txq->ift_id);
2285 		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2286 		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2287 		     (scctx->isc_pause_frames == 0)))
2288 			goto hung;
2289 
2290 		if (ifmp_ring_is_stalled(txq->ift_br))
2291 			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2292 		txq->ift_cleaned_prev = txq->ift_cleaned;
2293 	}
2294 #ifdef DEV_NETMAP
2295 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
2296 		iflib_netmap_timer_adjust(ctx, txq, &reset_on);
2297 #endif
2298 	/* handle any laggards */
2299 	if (txq->ift_db_pending)
2300 		GROUPTASK_ENQUEUE(&txq->ift_task);
2301 
2302 	scctx->isc_pause_frames = 0;
2303 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2304 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
2305 	return;
2306  hung:
2307 	device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
2308 	    txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
2309 	STATE_LOCK(ctx);
2310 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2311 	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
2312 	iflib_admin_intr_deferred(ctx);
2313 	STATE_UNLOCK(ctx);
2314 }
2315 
2316 static void
2317 iflib_init_locked(if_ctx_t ctx)
2318 {
2319 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2321 	if_t ifp = ctx->ifc_ifp;
2322 	iflib_fl_t fl;
2323 	iflib_txq_t txq;
2324 	iflib_rxq_t rxq;
2325 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
2326 
2328 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2329 	IFDI_INTR_DISABLE(ctx);
2330 
2331 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
2332 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
2333 	/* Set hardware offload abilities */
2334 	if_clearhwassist(ifp);
2335 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
2336 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
2337 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
2338 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
2339 	if (if_getcapenable(ifp) & IFCAP_TSO4)
2340 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
2341 	if (if_getcapenable(ifp) & IFCAP_TSO6)
2342 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
2343 
2344 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
2345 		CALLOUT_LOCK(txq);
2346 		callout_stop(&txq->ift_timer);
2347 		CALLOUT_UNLOCK(txq);
2348 		iflib_netmap_txq_init(ctx, txq);
2349 	}
2350 #ifdef INVARIANTS
2351 	i = if_getdrvflags(ifp);
2352 #endif
2353 	IFDI_INIT(ctx);
2354 	MPASS(if_getdrvflags(ifp) == i);
2355 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
2356 		/* XXX this should really be done on a per-queue basis */
2357 		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
2358 			MPASS(rxq->ifr_id == i);
2359 			iflib_netmap_rxq_init(ctx, rxq);
2360 			continue;
2361 		}
2362 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
2363 			if (iflib_fl_setup(fl)) {
2364 				device_printf(ctx->ifc_dev,
 				    "freelist setup failed - check cluster settings\n");
2365 				goto done;
2366 			}
2367 		}
2368 	}
2369 done:
2370 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2371 	IFDI_INTR_ENABLE(ctx);
2372 	txq = ctx->ifc_txqs;
2373 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++)
2374 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
2375 			txq->ift_timer.c_cpu);
2376 }
2377 
2378 static int
2379 iflib_media_change(if_t ifp)
2380 {
2381 	if_ctx_t ctx = if_getsoftc(ifp);
2382 	int err;
2383 
2384 	CTX_LOCK(ctx);
2385 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
2386 		iflib_init_locked(ctx);
2387 	CTX_UNLOCK(ctx);
2388 	return (err);
2389 }
2390 
2391 static void
2392 iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
2393 {
2394 	if_ctx_t ctx = if_getsoftc(ifp);
2395 
2396 	CTX_LOCK(ctx);
2397 	IFDI_UPDATE_ADMIN_STATUS(ctx);
2398 	IFDI_MEDIA_STATUS(ctx, ifmr);
2399 	CTX_UNLOCK(ctx);
2400 }
2401 
2402 void
2403 iflib_stop(if_ctx_t ctx)
2404 {
2405 	iflib_txq_t txq = ctx->ifc_txqs;
2406 	iflib_rxq_t rxq = ctx->ifc_rxqs;
2407 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2408 	if_shared_ctx_t sctx = ctx->ifc_sctx;
2409 	iflib_dma_info_t di;
2410 	iflib_fl_t fl;
2411 	int i, j;
2412 
2413 	/* Tell the stack that the interface is no longer active */
2414 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2415 
2416 	IFDI_INTR_DISABLE(ctx);
2417 	DELAY(1000);
2418 	IFDI_STOP(ctx);
2419 	DELAY(1000);
2420 
2421 	iflib_debug_reset();
2422 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
2423 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
2424 		/* make sure all transmitters have completed before proceeding XXX */
2425 
2426 		CALLOUT_LOCK(txq);
2427 		callout_stop(&txq->ift_timer);
2428 		CALLOUT_UNLOCK(txq);
2429 
2430 		/* clean any enqueued buffers */
2431 		iflib_ifmp_purge(txq);
2432 		/* Free any existing tx buffers. */
2433 		for (j = 0; j < txq->ift_size; j++) {
2434 			iflib_txsd_free(ctx, txq, j);
2435 		}
2436 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2437 		txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
2438 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
2439 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2440 		txq->ift_pullups = 0;
2441 		ifmp_ring_reset_stats(txq->ift_br);
2442 		for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
2443 			bzero((void *)di->idi_vaddr, di->idi_size);
2444 	}
2445 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
2446 		/* make sure all rx consumers have completed before proceeding XXX */
2447 
2448 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
2449 		for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
2450 			bzero((void *)di->idi_vaddr, di->idi_size);
2451 		/* also resets the free lists pidx/cidx */
2452 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
2453 			iflib_fl_bufs_free(fl);
2454 	}
2455 }
2456 
2457 static inline caddr_t
2458 calc_next_rxd(iflib_fl_t fl, int cidx)
2459 {
2460 	qidx_t size;
2461 	int nrxd;
2462 	caddr_t start, end, cur, next;
2463 
2464 	nrxd = fl->ifl_size;
2465 	size = fl->ifl_rxd_size;
2466 	start = fl->ifl_ifdi->idi_vaddr;
2467 
2468 	if (__predict_false(size == 0))
2469 		return (start);
2470 	cur = start + size*cidx;
2471 	end = start + size*nrxd;
2472 	next = CACHE_PTR_NEXT(cur);
2473 	return (next < end ? next : start);
2474 }
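
/*
 * Example of the prefetch arithmetic above, assuming CACHE_PTR_NEXT()
 * rounds a pointer up to the next cache-line boundary: with 16-byte
 * RX descriptors (an illustrative size) and 64-byte cache lines,
 * four descriptors share a line, so prefetching CACHE_PTR_NEXT(cur)
 * warms the line holding descriptors cidx + 4 through cidx + 7
 * before the RX processing loop reaches them.
 */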
2475 
2476 static inline void
2477 prefetch_pkts(iflib_fl_t fl, int cidx)
2478 {
2479 	int nextptr;
2480 	int nrxd = fl->ifl_size;
2481 	caddr_t next_rxd;
2482 
2484 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2485 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2486 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
2487 	next_rxd = calc_next_rxd(fl, cidx);
2488 	prefetch(next_rxd);
2489 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2490 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2491 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2492 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2493 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2494 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2495 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2496 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
2497 }
2498 
2499 static void
2500 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
2501 {
2502 	int flid, cidx;
2503 	bus_dmamap_t map;
2504 	iflib_fl_t fl;
2505 	int next;
2506 
2507 	map = NULL;
2508 	flid = irf->irf_flid;
2509 	cidx = irf->irf_idx;
2510 	fl = &rxq->ifr_fl[flid];
2511 	sd->ifsd_fl = fl;
2512 	sd->ifsd_cidx = cidx;
2513 	sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
2514 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
2515 	fl->ifl_credits--;
2516 #if MEMORY_LOGGING
2517 	fl->ifl_m_dequeued++;
2518 #endif
2519 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2520 		prefetch_pkts(fl, cidx);
2521 	next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2522 	prefetch(&fl->ifl_sds.ifsd_map[next]);
2523 	map = fl->ifl_sds.ifsd_map[cidx];
2525 
2526 	/* not a valid assert if bxe really does SGE from non-contiguous elements */
2527 	MPASS(fl->ifl_cidx == cidx);
2528 	bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
2529 	if (unload)
2530 		bus_dmamap_unload(fl->ifl_buf_tag, map);
2531 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
2532 	if (__predict_false(fl->ifl_cidx == 0))
2533 		fl->ifl_gen = 0;
2534 	bit_clear(fl->ifl_rx_bitmap, cidx);
2535 }
2536 
2537 static struct mbuf *
2538 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
2539 {
2540 	int i, padlen, flags;
2541 	struct mbuf *m, *mh, *mt;
2542 	caddr_t cl;
2543 
2544 	i = 0;
2545 	mh = NULL;
2546 	do {
2547 		rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd);
2548 
2549 		MPASS(*sd->ifsd_cl != NULL);
2550 		MPASS(*sd->ifsd_m != NULL);
2551 
2552 		/* Don't include zero-length frags */
2553 		if (ri->iri_frags[i].irf_len == 0) {
2554 			/* XXX we can save the cluster here, but not the mbuf */
2555 			m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
2556 			m_free(*sd->ifsd_m);
2557 			*sd->ifsd_m = NULL;
2558 			continue;
2559 		}
2560 		m = *sd->ifsd_m;
2561 		*sd->ifsd_m = NULL;
2562 		if (mh == NULL) {
2563 			flags = M_PKTHDR|M_EXT;
2564 			mh = mt = m;
2565 			padlen = ri->iri_pad;
2566 		} else {
2567 			flags = M_EXT;
2568 			mt->m_next = m;
2569 			mt = m;
2570 			/* assuming padding is only on the first fragment */
2571 			padlen = 0;
2572 		}
2573 		cl = *sd->ifsd_cl;
2574 		*sd->ifsd_cl = NULL;
2575 
2576 		/* Can these two be made one? */
2577 		m_init(m, M_NOWAIT, MT_DATA, flags);
2578 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
2579 		/*
2580 		 * These must follow m_init and m_cljset
2581 		 */
2582 		m->m_data += padlen;
2583 		ri->iri_len -= padlen;
2584 		m->m_len = ri->iri_frags[i].irf_len;
2585 	} while (++i < ri->iri_nfrags);
2586 
2587 	return (mh);
2588 }
2589 
2590 /*
2591  * Process one software descriptor
2592  */
2593 static struct mbuf *
2594 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
2595 {
2596 	struct if_rxsd sd;
2597 	struct mbuf *m;
2598 
2599 	/* should I merge this back in now that the two paths are basically duplicated? */
2600 	if (ri->iri_nfrags == 1 &&
2601 	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
2602 		rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
2603 		m = *sd.ifsd_m;
2604 		*sd.ifsd_m = NULL;
2605 		m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
2606 #ifndef __NO_STRICT_ALIGNMENT
2607 		if (!IP_ALIGNED(m))
2608 			m->m_data += 2;
2609 #endif
2610 		memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
2611 		m->m_len = ri->iri_frags[0].irf_len;
2612 	} else {
2613 		m = assemble_segments(rxq, ri, &sd);
2614 	}
2615 	m->m_pkthdr.len = ri->iri_len;
2616 	m->m_pkthdr.rcvif = ri->iri_ifp;
2617 	m->m_flags |= ri->iri_flags;
2618 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
2619 	m->m_pkthdr.flowid = ri->iri_flowid;
2620 	M_HASHTYPE_SET(m, ri->iri_rsstype);
2621 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
2622 	m->m_pkthdr.csum_data = ri->iri_csum_data;
2623 	return (m);
2624 }
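
/*
 * The copy threshold above trades a memcpy for cluster recycling; a
 * sketch of the behavior, assuming IFLIB_RX_COPY_THRESH is on the
 * order of 128 bytes: a 64-byte pure ACK arrives as a single
 * fragment, is copied into the mbuf's own storage, and the cluster
 * is left loaded in the free list (rxd_frag_to_sd() is called with
 * unload == FALSE and *sd.ifsd_cl is not cleared), so the next
 * refill reuses it without a new allocation or DMA load.  A
 * 1500-byte frame instead takes the zero-copy path through
 * assemble_segments(), which attaches the cluster via m_cljset().
 */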
2625 
2626 #if defined(INET6) || defined(INET)
2627 static void
2628 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
2629 {
2630 	CURVNET_SET(lc->ifp->if_vnet);
2631 #if defined(INET6)
2632 	*v6 = VNET(ip6_forwarding);
2633 #endif
2634 #if defined(INET)
2635 	*v4 = VNET(ipforwarding);
2636 #endif
2637 	CURVNET_RESTORE();
2638 }
2639 
2640 /*
2641  * Returns true if it's possible this packet could be LROed.
2642  * If it returns false, it is guaranteed that tcp_lro_rx()
2643  * would not return zero.  LRO is not attempted when the host
2644  * is forwarding, since coalescing would alter the packets
 * that are sent back out on the wire.
 */
2645 static bool
2646 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
2647 {
2648 	struct ether_header *eh;
2649 	uint16_t eh_type;
2650 
2651 	eh = mtod(m, struct ether_header *);
2652 	eh_type = ntohs(eh->ether_type);
2653 	switch (eh_type) {
2654 #if defined(INET6)
2655 	case ETHERTYPE_IPV6:
2656 		return (!v6_forwarding);
2657 #endif
2658 #if defined(INET)
2659 	case ETHERTYPE_IP:
2660 		return (!v4_forwarding);
2661 #endif
2662 	}
2663 
2664 	return (false);
2665 }
2666 #else
2667 static void
2668 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
2669 {
2670 }
2671 #endif
2672 
2673 static bool
2674 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
2675 {
2676 	if_ctx_t ctx = rxq->ifr_ctx;
2677 	if_shared_ctx_t sctx = ctx->ifc_sctx;
2678 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2679 	int avail, i;
2680 	qidx_t *cidxp;
2681 	struct if_rxd_info ri;
2682 	int err, budget_left, rx_bytes, rx_pkts;
2683 	iflib_fl_t fl;
2684 	struct ifnet *ifp;
2685 	int lro_enabled;
2686 	bool v4_forwarding, v6_forwarding, lro_possible;
2687 
2688 	/*
2689 	 * XXX early demux data packets so that if_input processing only handles
2690 	 * acks in interrupt context
2691 	 */
2692 	struct mbuf *m, *mh, *mt, *mf;
2693 
2694 	lro_possible = v4_forwarding = v6_forwarding = false;
2695 	ifp = ctx->ifc_ifp;
2696 	mh = mt = NULL;
2697 	MPASS(budget > 0);
2698 	rx_pkts	= rx_bytes = 0;
2699 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
2700 		cidxp = &rxq->ifr_cq_cidx;
2701 	else
2702 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
2703 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
2704 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2705 			__iflib_fl_refill_lt(ctx, fl, budget + 8);
2706 		DBG_COUNTER_INC(rx_unavail);
2707 		return (false);
2708 	}
2709 
2710 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
2711 		if (__predict_false(!CTX_ACTIVE(ctx))) {
2712 			DBG_COUNTER_INC(rx_ctx_inactive);
2713 			break;
2714 		}
2715 		/*
2716 		 * Reset client set fields to their default values
2717 		 */
2718 		rxd_info_zero(&ri);
2719 		ri.iri_qsidx = rxq->ifr_id;
2720 		ri.iri_cidx = *cidxp;
2721 		ri.iri_ifp = ifp;
2722 		ri.iri_frags = rxq->ifr_frags;
2723 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
2724 
2725 		if (err)
2726 			goto err;
2727 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
2728 			*cidxp = ri.iri_cidx;
2729 			/* Update our consumer index */
2730 			/* XXX NB: shurd - check if this is still safe */
2731 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
2732 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
2733 				rxq->ifr_cq_gen = 0;
2734 			}
2735 			/* was this only a completion queue message? */
2736 			if (__predict_false(ri.iri_nfrags == 0))
2737 				continue;
2738 		}
2739 		MPASS(ri.iri_nfrags != 0);
2740 		MPASS(ri.iri_len != 0);
2741 
2742 		/* will advance the cidx on the corresponding free lists */
2743 		m = iflib_rxd_pkt_get(rxq, &ri);
2744 		avail--;
2745 		budget_left--;
2746 		if (avail == 0 && budget_left)
2747 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
2748 
2749 		if (__predict_false(m == NULL)) {
2750 			DBG_COUNTER_INC(rx_mbuf_null);
2751 			continue;
2752 		}
2753 		/* imm_pkt: -- cxgb */
2754 		if (mh == NULL)
2755 			mh = mt = m;
2756 		else {
2757 			mt->m_nextpkt = m;
2758 			mt = m;
2759 		}
2760 	}
2761 	/* make sure that we can refill faster than drain */
2762 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2763 		__iflib_fl_refill_lt(ctx, fl, budget + 8);
2764 
2765 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2766 	if (lro_enabled)
2767 		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
2768 	mt = mf = NULL;
2769 	while (mh != NULL) {
2770 		m = mh;
2771 		mh = mh->m_nextpkt;
2772 		m->m_nextpkt = NULL;
2773 #ifndef __NO_STRICT_ALIGNMENT
2774 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
2775 			continue;
2776 #endif
2777 		rx_bytes += m->m_pkthdr.len;
2778 		rx_pkts++;
2779 #if defined(INET6) || defined(INET)
2780 		if (lro_enabled) {
2781 			if (!lro_possible) {
2782 				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
2783 				if (lro_possible && mf != NULL) {
2784 					ifp->if_input(ifp, mf);
2785 					DBG_COUNTER_INC(rx_if_input);
2786 					mt = mf = NULL;
2787 				}
2788 			}
2789 			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
2790 			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
2791 				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
2792 					continue;
2793 			}
2794 		}
2795 #endif
2796 		if (lro_possible) {
2797 			ifp->if_input(ifp, m);
2798 			DBG_COUNTER_INC(rx_if_input);
2799 			continue;
2800 		}
2801 
2802 		if (mf == NULL)
2803 			mf = m;
2804 		if (mt != NULL)
2805 			mt->m_nextpkt = m;
2806 		mt = m;
2807 	}
2808 	if (mf != NULL) {
2809 		ifp->if_input(ifp, mf);
2810 		DBG_COUNTER_INC(rx_if_input);
2811 	}
2812 
2813 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
2814 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
2815 
2816 	/*
2817 	 * Flush any outstanding LRO work
2818 	 */
2819 #if defined(INET6) || defined(INET)
2820 	tcp_lro_flush_all(&rxq->ifr_lc);
2821 #endif
2822 	if (avail)
2823 		return (true);
2824 	return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
2825 err:
2826 	STATE_LOCK(ctx);
2827 	ctx->ifc_flags |= IFC_DO_RESET;
2828 	iflib_admin_intr_deferred(ctx);
2829 	STATE_UNLOCK(ctx);
2830 	return (false);
2831 }
2832 
2833 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
2834 static inline qidx_t
2835 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
2836 {
2837 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2838 	qidx_t minthresh = txq->ift_size / 8;
2839 	if (in_use > 4*minthresh)
2840 		return (notify_count);
2841 	if (in_use > 2*minthresh)
2842 		return (notify_count >> 1);
2843 	if (in_use > minthresh)
2844 		return (notify_count >> 3);
2845 	return (0);
2846 }
2847 
2848 static inline qidx_t
2849 txq_max_rs_deferred(iflib_txq_t txq)
2850 {
2851 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2852 	qidx_t minthresh = txq->ift_size / 8;
2853 	if (txq->ift_in_use > 4*minthresh)
2854 		return (notify_count);
2855 	if (txq->ift_in_use > 2*minthresh)
2856 		return (notify_count >> 1);
2857 	if (txq->ift_in_use > minthresh)
2858 		return (notify_count >> 2);
2859 	return (2);
2860 }
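
/*
 * Worked example for the two deferral functions above, assuming a
 * 1024-descriptor ring and an update frequency of 16 (illustrative
 * values): TXD_NOTIFY_COUNT = 1024 / 16 - 1 = 63 and
 * minthresh = 1024 / 8 = 128, so
 *
 *	in_use > 512: defer up to 63 doorbell writes
 *	in_use > 256: defer up to 31
 *	in_use > 128: defer up to 7
 *	otherwise:    0, i.e. ring the doorbell immediately
 *
 * and the RS variant bottoms out at 63 >> 2 = 15 with a floor of 2,
 * so a mostly-idle ring still requests completion status regularly.
 */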
2861 
2862 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
2863 #define M_HAS_VLANTAG(m) ((m)->m_flags & M_VLANTAG)
2864 
2865 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
2866 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
2867 #define TXQ_MAX_DB_CONSUMED(size) ((size) >> 4)
2868 
2869 /* forward compatibility for cxgb */
2870 #define FIRST_QSET(ctx) 0
2871 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
2872 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
2873 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & (ctx)->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
2874 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
2875 
2876 /* XXX we should be setting this to something other than zero */
2877 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
2878 #define	MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
2879     (ctx)->ifc_softc_ctx.isc_tx_nsegments)
2880 
2881 static inline bool
2882 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
2883 {
2884 	qidx_t dbval, max;
2885 	bool rang;
2886 
2887 	rang = false;
2888 	max = TXQ_MAX_DB_DEFERRED(txq, in_use);
2889 	if (ring || txq->ift_db_pending >= max) {
2890 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
2891 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
2892 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2893 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
2894 		txq->ift_db_pending = txq->ift_npending = 0;
2895 		rang = true;
2896 	}
2897 	return (rang);
2898 }
2899 
2900 #ifdef PKT_DEBUG
2901 static void
2902 print_pkt(if_pkt_info_t pi)
2903 {
2904 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
2905 	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
2906 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
2907 	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
2908 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
2909 	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
2910 }
2911 #endif
2912 
2913 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
2914 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
2915 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
2916 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
2917 
2918 static int
2919 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
2920 {
2921 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
2922 	struct ether_vlan_header *eh;
2923 	struct mbuf *m;
2924 
2925 	m = *mp;
2926 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
2927 	    M_WRITABLE(m) == 0) {
2928 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
2929 			return (ENOMEM);
2930 		} else {
2931 			m_freem(*mp);
2932 			DBG_COUNTER_INC(tx_frees);
2933 			*mp = m;
2934 		}
2935 	}
2936 
2937 	/*
2938 	 * Determine where frame payload starts.
2939 	 * Jump over vlan headers if already present,
2940 	 * helpful for QinQ too.
2941 	 */
2942 	if (__predict_false(m->m_len < sizeof(*eh))) {
2943 		txq->ift_pullups++;
2944 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
2945 			return (ENOMEM);
2946 	}
2947 	eh = mtod(m, struct ether_vlan_header *);
2948 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2949 		pi->ipi_etype = ntohs(eh->evl_proto);
2950 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2951 	} else {
2952 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
2953 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
2954 	}
2955 
2956 	switch (pi->ipi_etype) {
2957 #ifdef INET
2958 	case ETHERTYPE_IP:
2959 	{
2960 		struct mbuf *n;
2961 		struct ip *ip = NULL;
2962 		struct tcphdr *th = NULL;
2963 		int minthlen;
2964 
2965 		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
2966 		if (__predict_false(m->m_len < minthlen)) {
2967 			/*
2968 			 * if this code bloat is causing too much of a hit
2969 			 * move it to a separate function and mark it noinline
2970 			 */
2971 			if (m->m_len == pi->ipi_ehdrlen) {
2972 				n = m->m_next;
2973 				MPASS(n);
2974 				if (n->m_len >= sizeof(*ip))  {
2975 					ip = (struct ip *)n->m_data;
2976 					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2977 						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2978 				} else {
2979 					txq->ift_pullups++;
2980 					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
2981 						return (ENOMEM);
2982 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2983 				}
2984 			} else {
2985 				txq->ift_pullups++;
2986 				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
2987 					return (ENOMEM);
2988 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2989 				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2990 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2991 			}
2992 		} else {
2993 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
2994 			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
2995 				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
2996 		}
2997 		pi->ipi_ip_hlen = ip->ip_hl << 2;
2998 		pi->ipi_ipproto = ip->ip_p;
2999 		pi->ipi_flags |= IPI_TX_IPV4;
3000 
3001 		/* TCP checksum offload may require TCP header length */
3002 		if (IS_TX_OFFLOAD4(pi)) {
3003 			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
3004 				if (__predict_false(th == NULL)) {
3005 					txq->ift_pullups++;
3006 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + (ip->ip_hl << 2) + sizeof(*th))) == NULL))
3007 						return (ENOMEM);
 					/* m_pullup() may have moved the data; recompute ip and th. */
 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3008 					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
3009 				}
3010 				pi->ipi_tcp_hflags = th->th_flags;
3011 				pi->ipi_tcp_hlen = th->th_off << 2;
3012 				pi->ipi_tcp_seq = th->th_seq;
3013 			}
3014 			if (IS_TSO4(pi)) {
3015 				if (__predict_false(ip->ip_p != IPPROTO_TCP))
3016 					return (ENXIO);
3017 				/*
3018 				 * TSO always requires hardware checksum offload.
3019 				 */
3020 				pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
3021 				th->th_sum = in_pseudo(ip->ip_src.s_addr,
3022 						       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3023 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3024 				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
3025 					ip->ip_sum = 0;
3026 					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
3027 				}
3028 			}
3029 		}
3030 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
3031 			ip->ip_sum = 0;
3032 
3033 		break;
3034 	}
3035 #endif
3036 #ifdef INET6
3037 	case ETHERTYPE_IPV6:
3038 	{
3039 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3040 		struct tcphdr *th;
3041 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
3042 
3043 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
3044 			txq->ift_pullups++;
3045 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
3046 				return (ENOMEM);
 			/* m_pullup() may have moved the data; recompute ip6. */
 			ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3047 		}
3048 		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3049 
3050 		/* XXX-BZ this will go badly in case of ext hdrs. */
3051 		pi->ipi_ipproto = ip6->ip6_nxt;
3052 		pi->ipi_flags |= IPI_TX_IPV6;
3053 
3054 		/* TCP checksum offload may require TCP header length */
3055 		if (IS_TX_OFFLOAD6(pi)) {
3056 			if (pi->ipi_ipproto == IPPROTO_TCP) {
3057 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3058 					txq->ift_pullups++;
3059 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
3060 						return (ENOMEM);
 					/* Recompute ip6 and th after m_pullup(). */
 					ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
 					th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3061 				}
3062 				pi->ipi_tcp_hflags = th->th_flags;
3063 				pi->ipi_tcp_hlen = th->th_off << 2;
3064 				pi->ipi_tcp_seq = th->th_seq;
3065 			}
3066 			if (IS_TSO6(pi)) {
3067 				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
3068 					return (ENXIO);
3069 				/*
3070 				 * TSO always requires hardware checksum offload.
3071 				 */
3072 				pi->ipi_csum_flags |= CSUM_IP6_TCP;
3073 				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3074 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3075 			}
3076 		}
3077 		break;
3078 	}
3079 #endif
3080 	default:
3081 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3082 		pi->ipi_ip_hlen = 0;
3083 		break;
3084 	}
3085 	*mp = m;
3086 
3087 	return (0);
3088 }
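
/*
 * Example of what iflib_parse_header() produces for a common case
 * (illustrative values): for an IPv4 TCP segment with CSUM_IP_TSO
 * set, ipi_etype = ETHERTYPE_IP, ipi_ehdrlen = 14 (18 if a VLAN tag
 * is present), ipi_ip_hlen = 20 for an option-less header,
 * ipi_ipproto = IPPROTO_TCP, and th_sum is seeded with the
 * pseudo-header checksum that TSO engines expect to fold the real
 * checksum into.
 */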
3089 
3090 /*
3091  * If dodgy hardware rejects the scatter gather chain we've handed it
3092  * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
3093  * m_defrag'd mbufs
3094  */
3095 static __noinline struct mbuf *
3096 iflib_remove_mbuf(iflib_txq_t txq)
3097 {
3098 	int ntxd, pidx;
3099 	struct mbuf *m, **ifsd_m;
3100 
3101 	ifsd_m = txq->ift_sds.ifsd_m;
3102 	ntxd = txq->ift_size;
3103 	pidx = txq->ift_pidx & (ntxd - 1);
3105 	m = ifsd_m[pidx];
3106 	ifsd_m[pidx] = NULL;
3107 	bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
3108 	if (txq->ift_sds.ifsd_tso_map != NULL)
3109 		bus_dmamap_unload(txq->ift_tso_buf_tag,
3110 		    txq->ift_sds.ifsd_tso_map[pidx]);
3111 #if MEMORY_LOGGING
3112 	txq->ift_dequeued++;
3113 #endif
3114 	return (m);
3115 }
3116 
3117 static inline caddr_t
3118 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
3119 {
3120 	qidx_t size;
3121 	int ntxd;
3122 	caddr_t start, end, cur, next;
3123 
3124 	ntxd = txq->ift_size;
3125 	size = txq->ift_txd_size[qid];
3126 	start = txq->ift_ifdi[qid].idi_vaddr;
3127 
3128 	if (__predict_false(size == 0))
3129 		return (start);
3130 	cur = start + size*cidx;
3131 	end = start + size*ntxd;
3132 	next = CACHE_PTR_NEXT(cur);
3133 	return (next < end ? next : start);
3134 }
3135 
3136 /*
3137  * Pad an mbuf to ensure a minimum ethernet frame size.
3138  * min_frame_size is the frame size (less CRC) to pad the mbuf to
3139  */
3140 static __noinline int
3141 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3142 {
3143 	/*
3144 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3145 	 * an ARP message is the smallest common payload I can think of
3146 	 */
3147 	static char pad[18];	/* just zeros */
3148 	int n;
3149 	struct mbuf *new_head;
3150 
3151 	if (!M_WRITABLE(*m_head)) {
3152 		new_head = m_dup(*m_head, M_NOWAIT);
3153 		if (new_head == NULL) {
3154 			m_freem(*m_head);
3155 			device_printf(dev,
 			    "cannot pad short frame, m_dup() failed\n");
3156 			DBG_COUNTER_INC(encap_pad_mbuf_fail);
3157 			DBG_COUNTER_INC(tx_frees);
3158 			return (ENOMEM);
3159 		}
3160 		m_freem(*m_head);
3161 		*m_head = new_head;
3162 	}
3163 
3164 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3165 	     n > 0; n -= sizeof(pad))
3166 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3167 			break;
3168 
3169 	if (n > 0) {
3170 		m_freem(*m_head);
3171 		device_printf(dev, "cannot pad short frame\n");
3172 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
3173 		DBG_COUNTER_INC(tx_frees);
3174 		return (ENOBUFS);
3175 	}
3176 
3177 	return (0);
3178 }
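
/*
 * Worked example for iflib_ether_pad(): the Ethernet minimum frame is
 * ETHER_MIN_LEN (64) bytes including the 4-byte FCS, so drivers that
 * need software padding pass a min_frame_size of 60.  An ARP request
 * is a 14-byte Ethernet header plus a 28-byte ARP payload, 42 bytes
 * total, so n starts at 60 - 42 = 18 and a single m_append() of the
 * 18 zero bytes in pad[] completes the frame.
 */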
3179 
3180 static int
3181 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
3182 {
3183 	if_ctx_t		ctx;
3184 	if_shared_ctx_t		sctx;
3185 	if_softc_ctx_t		scctx;
3186 	bus_dma_tag_t		buf_tag;
3187 	bus_dma_segment_t	*segs;
3188 	struct mbuf		*m_head, **ifsd_m;
3189 	void			*next_txd;
3190 	bus_dmamap_t		map;
3191 	struct if_pkt_info	pi;
3192 	int remap = 0;
3193 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
3194 
3195 	ctx = txq->ift_ctx;
3196 	sctx = ctx->ifc_sctx;
3197 	scctx = &ctx->ifc_softc_ctx;
3198 	segs = txq->ift_segs;
3199 	ntxd = txq->ift_size;
3200 	m_head = *m_headp;
3201 	map = NULL;
3202 
3203 	/*
3204 	 * If we're doing TSO the next descriptor to clean may be quite far ahead
3205 	 */
3206 	cidx = txq->ift_cidx;
3207 	pidx = txq->ift_pidx;
3208 	if (ctx->ifc_flags & IFC_PREFETCH) {
3209 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
3210 		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
3211 			next_txd = calc_next_txd(txq, cidx, 0);
3212 			prefetch(next_txd);
3213 		}
3214 
3215 		/* prefetch the next cache line of mbuf pointers and flags */
3216 		prefetch(&txq->ift_sds.ifsd_m[next]);
3217 		prefetch(&txq->ift_sds.ifsd_map[next]);
3219 	}
3221 	ifsd_m = txq->ift_sds.ifsd_m;
3222 
3223 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3224 		buf_tag = txq->ift_tso_buf_tag;
3225 		max_segs = scctx->isc_tx_tso_segments_max;
3226 		map = txq->ift_sds.ifsd_tso_map[pidx];
3227 		MPASS(buf_tag != NULL);
3228 		MPASS(max_segs > 0);
3229 	} else {
3230 		buf_tag = txq->ift_buf_tag;
3231 		max_segs = scctx->isc_tx_nsegments;
3232 		map = txq->ift_sds.ifsd_map[pidx];
3233 	}
3234 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3235 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3236 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
3237 		if (err) {
3238 			DBG_COUNTER_INC(encap_txd_encap_fail);
3239 			return err;
3240 		}
3241 	}
3242 	m_head = *m_headp;
3243 
3244 	pkt_info_zero(&pi);
3245 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
3246 	pi.ipi_pidx = pidx;
3247 	pi.ipi_qsidx = txq->ift_id;
3248 	pi.ipi_len = m_head->m_pkthdr.len;
3249 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
3250 	pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3251 
3252 	/* deliberate bitwise OR to make one condition */
3253 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
3254 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) {
3255 			DBG_COUNTER_INC(encap_txd_encap_fail);
3256 			return (err);
3257 		}
3258 		m_head = *m_headp;
3259 	}
3260 
3261 retry:
3262 	err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
3263 	    BUS_DMA_NOWAIT);
3264 defrag:
3265 	if (__predict_false(err)) {
3266 		switch (err) {
3267 		case EFBIG:
3268 			/* try collapse once and defrag once */
3269 			if (remap == 0) {
3270 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3271 				/* try defrag if collapsing fails */
3272 				if (m_head == NULL)
3273 					remap++;
3274 			}
3275 			if (remap == 1) {
3276 				txq->ift_mbuf_defrag++;
3277 				m_head = m_defrag(*m_headp, M_NOWAIT);
3278 			}
3279 			remap++;
3280 			if (__predict_false(m_head == NULL))
3281 				goto defrag_failed;
3282 			*m_headp = m_head;
3283 			goto retry;
3285 		case ENOMEM:
3286 			txq->ift_no_tx_dma_setup++;
3287 			break;
3288 		default:
3289 			txq->ift_no_tx_dma_setup++;
3290 			m_freem(*m_headp);
3291 			DBG_COUNTER_INC(tx_frees);
3292 			*m_headp = NULL;
3293 			break;
3294 		}
3295 		txq->ift_map_failed++;
3296 		DBG_COUNTER_INC(encap_load_mbuf_fail);
3297 		DBG_COUNTER_INC(encap_txd_encap_fail);
3298 		return (err);
3299 	}
3300 	ifsd_m[pidx] = m_head;
3301 	/*
3302 	 * XXX assumes a 1 to 1 relationship between segments and
3303 	 *        descriptors - this does not hold true on all drivers, e.g.
3304 	 *        cxgb
3305 	 */
3306 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3307 		txq->ift_no_desc_avail++;
3308 		bus_dmamap_unload(buf_tag, map);
3309 		DBG_COUNTER_INC(encap_txq_avail_fail);
3310 		DBG_COUNTER_INC(encap_txd_encap_fail);
3311 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
3312 			GROUPTASK_ENQUEUE(&txq->ift_task);
3313 		return (ENOBUFS);
3314 	}
3315 	/*
3316 	 * On Intel cards we can greatly reduce the number of TX interrupts
3317 	 * we see by only setting report status on every Nth descriptor.
3318 	 * However, this also means that the driver will need to keep track
3319 	 * of the descriptors that RS was set on to check them for the DD bit.
3320 	 */
3321 	txq->ift_rs_pending += nsegs + 1;
3322 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
3323 	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
3324 		pi.ipi_flags |= IPI_TX_INTR;
3325 		txq->ift_rs_pending = 0;
3326 	}
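
	/*
	 * Illustrative arithmetic (values assumed, see the deferral
	 * example near txq_max_rs_deferred()): with a deferral limit
	 * of 63 and a steady stream of 2-segment packets, each packet
	 * adds nsegs + 1 = 3, so RS is requested and ift_rs_pending
	 * reset roughly every 21 packets, i.e. about one TX
	 * completion interrupt per 21 packets instead of per packet.
	 */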
3327 
3328 	pi.ipi_segs = segs;
3329 	pi.ipi_nsegs = nsegs;
3330 
3331 	MPASS(pidx >= 0 && pidx < txq->ift_size);
3332 #ifdef PKT_DEBUG
3333 	print_pkt(&pi);
3334 #endif
3335 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
3336 		bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
3337 		DBG_COUNTER_INC(tx_encap);
3338 		MPASS(pi.ipi_new_pidx < txq->ift_size);
3339 
3340 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
3341 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
3342 			ndesc += txq->ift_size;
3343 			txq->ift_gen = 1;
3344 		}
3345 		/*
3346 		 * drivers can need as many as
3347 		 * two sentinels
3348 		 */
3349 		MPASS(ndesc <= pi.ipi_nsegs + 2);
3350 		MPASS(pi.ipi_new_pidx != pidx);
3351 		MPASS(ndesc > 0);
3352 		txq->ift_in_use += ndesc;
3353 
3354 		/*
3355 		 * We update the last software descriptor again here because there may
3356 		 * be a sentinel and/or there may be more mbufs than segments
3357 		 */
3358 		txq->ift_pidx = pi.ipi_new_pidx;
3359 		txq->ift_npending += pi.ipi_ndescs;
3360 	} else {
3361 		*m_headp = m_head = iflib_remove_mbuf(txq);
3362 		if (err == EFBIG) {
3363 			txq->ift_txd_encap_efbig++;
3364 			if (remap < 2) {
3365 				remap = 1;
3366 				goto defrag;
3367 			}
3368 		}
3369 		goto defrag_failed;
3370 	}
3371 	/*
3372 	 * err can't possibly be non-zero here, so we don't need to test it
3373 	 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
3374 	 */
3375 	return (err);
3376 
3377 defrag_failed:
3378 	txq->ift_mbuf_defrag_failed++;
3379 	txq->ift_map_failed++;
3380 	m_freem(*m_headp);
3381 	DBG_COUNTER_INC(tx_frees);
3382 	*m_headp = NULL;
3383 	DBG_COUNTER_INC(encap_txd_encap_fail);
3384 	return (ENOMEM);
3385 }
3386 
3387 static void
3388 iflib_tx_desc_free(iflib_txq_t txq, int n)
3389 {
3390 	uint32_t qsize, cidx, mask, gen;
3391 	struct mbuf *m, **ifsd_m;
3392 	bool do_prefetch;
3393 
3394 	cidx = txq->ift_cidx;
3395 	gen = txq->ift_gen;
3396 	qsize = txq->ift_size;
3397 	mask = qsize-1;
3398 	ifsd_m = txq->ift_sds.ifsd_m;
3399 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
3400 
3401 	while (n-- > 0) {
3402 		if (do_prefetch) {
3403 			prefetch(ifsd_m[(cidx + 3) & mask]);
3404 			prefetch(ifsd_m[(cidx + 4) & mask]);
3405 		}
3406 		if ((m = ifsd_m[cidx]) != NULL) {
3407 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
3408 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
3409 				bus_dmamap_sync(txq->ift_tso_buf_tag,
3410 				    txq->ift_sds.ifsd_tso_map[cidx],
3411 				    BUS_DMASYNC_POSTWRITE);
3412 				bus_dmamap_unload(txq->ift_tso_buf_tag,
3413 				    txq->ift_sds.ifsd_tso_map[cidx]);
3414 			} else {
3415 				bus_dmamap_sync(txq->ift_buf_tag,
3416 				    txq->ift_sds.ifsd_map[cidx],
3417 				    BUS_DMASYNC_POSTWRITE);
3418 				bus_dmamap_unload(txq->ift_buf_tag,
3419 				    txq->ift_sds.ifsd_map[cidx]);
3420 			}
3421 			/* XXX we don't support any drivers that batch packets yet */
3422 			MPASS(m->m_nextpkt == NULL);
3423 			m_freem(m);
3424 			ifsd_m[cidx] = NULL;
3425 #if MEMORY_LOGGING
3426 			txq->ift_dequeued++;
3427 #endif
3428 			DBG_COUNTER_INC(tx_frees);
3429 		}
3430 		if (__predict_false(++cidx == qsize)) {
3431 			cidx = 0;
3432 			gen = 0;
3433 		}
3434 	}
3435 	txq->ift_cidx = cidx;
3436 	txq->ift_gen = gen;
3437 }
3438 
3439 static __inline int
3440 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
3441 {
3442 	int reclaim;
3443 	if_ctx_t ctx = txq->ift_ctx;
3444 
3445 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
3446 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
3447 
3448 	/*
3449 	 * Need a rate-limiting check so that this isn't called every time
3450 	 */
3451 	iflib_tx_credits_update(ctx, txq);
3452 	reclaim = DESC_RECLAIMABLE(txq);
3453 
3454 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
3455 #ifdef INVARIANTS
3456 		if (iflib_verbose_debug) {
3457 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
3458 			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
3459 			       reclaim, thresh);
3460 		}
3462 #endif
3463 		return (0);
3464 	}
3465 	iflib_tx_desc_free(txq, reclaim);
3466 	txq->ift_cleaned += reclaim;
3467 	txq->ift_in_use -= reclaim;
3468 
3469 	return (reclaim);
3470 }
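
/*
 * Worked example for the reclaim path above (illustrative numbers):
 * with ift_processed = 500, ift_cleaned = 400 and isc_tx_nsegments = 8,
 * DESC_RECLAIMABLE() yields 500 - 400 - 8 = 92.  With the default
 * RECLAIM_THRESH of 0 (see the XXX near its definition) any positive
 * value triggers iflib_tx_desc_free(), after which ift_cleaned grows
 * by 92 and ift_in_use shrinks by 92, returning those descriptors to
 * the ring.
 */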
3471 
3472 static struct mbuf **
3473 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
3474 {
3475 	int next, size;
3476 	struct mbuf **items;
3477 
3478 	size = r->size;
3479 	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
3480 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
3481 
3482 	prefetch(items[(cidx + offset) & (size-1)]);
3483 	if (remaining > 1) {
3484 		prefetch2cachelines(&items[next]);
3485 		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
3486 		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
3487 		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
3488 	}
3489 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
3490 }
3491 
3492 static void
3493 iflib_txq_check_drain(iflib_txq_t txq, int budget)
3494 {
3495 
3496 	ifmp_ring_check_drainage(txq->ift_br, budget);
3497 }
3498 
3499 static uint32_t
3500 iflib_txq_can_drain(struct ifmp_ring *r)
3501 {
3502 	iflib_txq_t txq = r->cookie;
3503 	if_ctx_t ctx = txq->ift_ctx;
3504 
3505 	if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2)
3506 		return (1);
3507 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3508 	    BUS_DMASYNC_POSTREAD);
3509 	return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
3510 	    false));
3511 }
3512 
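/*
 * mp_ring drain callback: encapsulate and hand to the hardware as many
 * of the [cidx, pidx) entries as descriptor space allows, ringing the
 * doorbell as needed.  Returns the number of ring entries consumed.
 */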
3513 static uint32_t
3514 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3515 {
3516 	iflib_txq_t txq = r->cookie;
3517 	if_ctx_t ctx = txq->ift_ctx;
3518 	struct ifnet *ifp = ctx->ifc_ifp;
3519 	struct mbuf **mp, *m;
3520 	int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
3521 	int reclaimed, err, in_use_prev, desc_used;
3522 	bool do_prefetch, ring, rang;
3523 
3524 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
3525 			    !LINK_ACTIVE(ctx))) {
3526 		DBG_COUNTER_INC(txq_drain_notready);
3527 		return (0);
3528 	}
3529 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
3530 	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
3531 	avail = IDXDIFF(pidx, cidx, r->size);
3532 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
3533 		DBG_COUNTER_INC(txq_drain_flushing);
3534 		for (i = 0; i < avail; i++) {
3535 			if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq))
3536 				m_free(r->items[(cidx + i) & (r->size-1)]);
3537 			r->items[(cidx + i) & (r->size-1)] = NULL;
3538 		}
3539 		return (avail);
3540 	}
3541 
3542 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3543 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3544 		CALLOUT_LOCK(txq);
3545 		callout_stop(&txq->ift_timer);
3546 		CALLOUT_UNLOCK(txq);
3547 		DBG_COUNTER_INC(txq_drain_oactive);
3548 		return (0);
3549 	}
3550 	if (reclaimed)
3551 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3552 	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
3553 	count = MIN(avail, TX_BATCH_SIZE);
3554 #ifdef INVARIANTS
3555 	if (iflib_verbose_debug)
3556 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3557 		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3558 #endif
3559 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3560 	avail = TXQ_AVAIL(txq);
3561 	err = 0;
3562 	for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
3563 		int rem = do_prefetch ? count - i : 0;
3564 
3565 		mp = _ring_peek_one(r, cidx, i, rem);
3566 		MPASS(mp != NULL && *mp != NULL);
3567 		if (__predict_false(*mp == (struct mbuf *)txq)) {
3568 			consumed++;
3569 			reclaimed++;
3570 			continue;
3571 		}
3572 		in_use_prev = txq->ift_in_use;
3573 		err = iflib_encap(txq, mp);
3574 		if (__predict_false(err)) {
3575 			/* no room - bail out */
3576 			if (err == ENOBUFS)
3577 				break;
3578 			consumed++;
3579 			/* we can't send this packet - skip it */
3580 			continue;
3581 		}
3582 		consumed++;
3583 		pkt_sent++;
3584 		m = *mp;
3585 		DBG_COUNTER_INC(tx_sent);
3586 		bytes_sent += m->m_pkthdr.len;
3587 		mcast_sent += !!(m->m_flags & M_MCAST);
3588 		avail = TXQ_AVAIL(txq);
3589 
3590 		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
3591 		desc_used += (txq->ift_in_use - in_use_prev);
3592 		ETHER_BPF_MTAP(ifp, m);
3593 		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
3594 			break;
3595 		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
3596 	}
3597 
3598 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
3599 	ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
3600 	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
3601 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
3602 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
3603 	if (mcast_sent)
3604 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3605 #ifdef INVARIANTS
3606 	if (iflib_verbose_debug)
3607 		printf("consumed=%d\n", consumed);
3608 #endif
3609 	return (consumed);
3610 }
3611 
3612 static uint32_t
3613 iflib_txq_drain_always(struct ifmp_ring *r)
3614 {
3615 	return (1);
3616 }
3617 
3618 static uint32_t
3619 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3620 {
3621 	int i, avail;
3622 	struct mbuf **mp;
3623 	iflib_txq_t txq;
3624 
3625 	txq = r->cookie;
3626 
3627 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3628 	CALLOUT_LOCK(txq);
3629 	callout_stop(&txq->ift_timer);
3630 	CALLOUT_UNLOCK(txq);
3631 
3632 	avail = IDXDIFF(pidx, cidx, r->size);
3633 	for (i = 0; i < avail; i++) {
3634 		mp = _ring_peek_one(r, cidx, i, avail - i);
3635 		if (__predict_false(*mp == (struct mbuf *)txq))
3636 			continue;
3637 		m_freem(*mp);
3638 		DBG_COUNTER_INC(tx_frees);
3639 	}
3640 	MPASS(ifmp_ring_is_stalled(r) == 0);
3641 	return (avail);
3642 }
3643 
3644 static void
3645 iflib_ifmp_purge(iflib_txq_t txq)
3646 {
3647 	struct ifmp_ring *r;
3648 
3649 	r = txq->ift_br;
3650 	r->drain = iflib_txq_drain_free;
3651 	r->can_drain = iflib_txq_drain_always;
3652 
3653 	ifmp_ring_check_drainage(r, r->size);
3654 
3655 	r->drain = iflib_txq_drain;
3656 	r->can_drain = iflib_txq_can_drain;
3657 }
3658 
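/*
 * Deferred transmit task: kick the queue's mp_ring so pending packets
 * get drained, with special casing for netmap and ALTQ, and re-enable
 * the queue's interrupt on the way out.
 */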
3659 static void
3660 _task_fn_tx(void *context)
3661 {
3662 	iflib_txq_t txq = context;
3663 	if_ctx_t ctx = txq->ift_ctx;
3664 #if defined(ALTQ) || defined(DEV_NETMAP)
3665 	if_t ifp = ctx->ifc_ifp;
3666 #endif
3667 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
3668 
3669 #ifdef IFLIB_DIAGNOSTICS
3670 	txq->ift_cpu_exec_count[curcpu]++;
3671 #endif
3672 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3673 		return;
3674 #ifdef DEV_NETMAP
3675 	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
3676 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3677 		    BUS_DMASYNC_POSTREAD);
3678 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
3679 			netmap_tx_irq(ifp, txq->ift_id);
3680 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3681 		return;
3682 	}
3683 #endif
3684 #ifdef ALTQ
3685 	if (ALTQ_IS_ENABLED(&ifp->if_snd))
3686 		iflib_altq_if_start(ifp);
3687 #endif
3688 	if (txq->ift_db_pending)
3689 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
3690 	else if (!abdicate)
3691 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3692 	/*
3693 	 * When abdicating, we always need to check drainage, not just when we don't enqueue
3694 	 */
3695 	if (abdicate)
3696 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3697 	if (ctx->ifc_flags & IFC_LEGACY)
3698 		IFDI_INTR_ENABLE(ctx);
3699 	else {
3700 #ifdef INVARIANTS
3701 		int rc =
3702 #endif
3703 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3704 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3705 	}
3706 }
3707 
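/*
 * Deferred receive task: process up to the configured budget of packets
 * via iflib_rxeof(), re-enabling the interrupt when the ring is fully
 * serviced or rescheduling this task when more work remains.
 */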
3708 static void
3709 _task_fn_rx(void *context)
3710 {
3711 	iflib_rxq_t rxq = context;
3712 	if_ctx_t ctx = rxq->ifr_ctx;
3713 	bool more;
3714 	uint16_t budget;
3715 
3716 #ifdef IFLIB_DIAGNOSTICS
3717 	rxq->ifr_cpu_exec_count[curcpu]++;
3718 #endif
3719 	DBG_COUNTER_INC(task_fn_rxs);
3720 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3721 		return;
3722 	more = true;
3723 #ifdef DEV_NETMAP
3724 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
3725 		u_int work = 0;
3726 		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
3727 			more = false;
3728 		}
3729 	}
3730 #endif
3731 	budget = ctx->ifc_sysctl_rx_budget;
3732 	if (budget == 0)
3733 		budget = 16;	/* XXX */
3734 	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
3735 		if (ctx->ifc_flags & IFC_LEGACY)
3736 			IFDI_INTR_ENABLE(ctx);
3737 		else {
3738 #ifdef INVARIANTS
3739 			int rc =
3740 #endif
3741 				IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
3742 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3743 			DBG_COUNTER_INC(rx_intr_enables);
3744 		}
3745 	}
3746 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3747 		return;
3748 	if (more)
3749 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
3750 }
3751 
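/*
 * Admin task: handle deferred watchdog resets, refresh link and admin
 * status, restart the per-queue timers, and re-initialize the interface
 * when IFC_DO_RESET was requested.
 */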
3752 static void
3753 _task_fn_admin(void *context)
3754 {
3755 	if_ctx_t ctx = context;
3756 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
3757 	iflib_txq_t txq;
3758 	int i;
3759 	bool oactive, running, do_reset, do_watchdog, in_detach;
3760 	uint32_t reset_on = hz / 2;
3761 
3762 	STATE_LOCK(ctx);
3763 	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
3764 	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
3765 	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
3766 	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
3767 	in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
3768 	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
3769 	STATE_UNLOCK(ctx);
3770 
3771 	if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
3772 		return;
3773 	if (in_detach)
3774 		return;
3775 
3776 	CTX_LOCK(ctx);
3777 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3778 		CALLOUT_LOCK(txq);
3779 		callout_stop(&txq->ift_timer);
3780 		CALLOUT_UNLOCK(txq);
3781 	}
3782 	if (do_watchdog) {
3783 		ctx->ifc_watchdog_events++;
3784 		IFDI_WATCHDOG_RESET(ctx);
3785 	}
3786 	IFDI_UPDATE_ADMIN_STATUS(ctx);
3787 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3788 #ifdef DEV_NETMAP
3789 		reset_on = hz / 2;
3790 		if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
3791 			iflib_netmap_timer_adjust(ctx, txq, &reset_on);
3792 #endif
3793 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
3794 	}
3795 	IFDI_LINK_INTR_ENABLE(ctx);
3796 	if (do_reset)
3797 		iflib_if_init_locked(ctx);
3798 	CTX_UNLOCK(ctx);
3799 
3800 	if (LINK_ACTIVE(ctx) == 0)
3801 		return;
3802 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
3803 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
3804 }
3805 
3806 
3807 static void
3808 _task_fn_iov(void *context)
3809 {
3810 	if_ctx_t ctx = context;
3811 
3812 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
3813 	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
3814 		return;
3815 
3816 	CTX_LOCK(ctx);
3817 	IFDI_VFLR_HANDLE(ctx);
3818 	CTX_UNLOCK(ctx);
3819 }
3820 
3821 static int
3822 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3823 {
3824 	int err;
3825 	if_int_delay_info_t info;
3826 	if_ctx_t ctx;
3827 
3828 	info = (if_int_delay_info_t)arg1;
3829 	ctx = info->iidi_ctx;
3830 	info->iidi_req = req;
3831 	info->iidi_oidp = oidp;
3832 	CTX_LOCK(ctx);
3833 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
3834 	CTX_UNLOCK(ctx);
3835 	return (err);
3836 }
3837 
3838 /*********************************************************************
3839  *
3840  *  IFNET FUNCTIONS
3841  *
3842  **********************************************************************/
3843 
3844 static void
3845 iflib_if_init_locked(if_ctx_t ctx)
3846 {
3847 	iflib_stop(ctx);
3848 	iflib_init_locked(ctx);
3849 }
3850 
3851 
3852 static void
3853 iflib_if_init(void *arg)
3854 {
3855 	if_ctx_t ctx = arg;
3856 
3857 	CTX_LOCK(ctx);
3858 	iflib_if_init_locked(ctx);
3859 	CTX_UNLOCK(ctx);
3860 }
3861 
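/*
 * if_transmit entry point: pick a TX queue from the mbuf's flow hash
 * (ALTQ-enabled interfaces always use queue 0) and enqueue onto that
 * queue's mp_ring, deferring the drain to the queue task when the
 * tx_abdicate sysctl is set.
 */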
3862 static int
3863 iflib_if_transmit(if_t ifp, struct mbuf *m)
3864 {
3865 	if_ctx_t	ctx = if_getsoftc(ifp);
3866 
3867 	iflib_txq_t txq;
3868 	int err, qidx;
3869 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
3870 
3871 	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
3872 		DBG_COUNTER_INC(tx_frees);
3873 		m_freem(m);
3874 		return (ENOBUFS);
3875 	}
3876 
3877 	MPASS(m->m_nextpkt == NULL);
3878 	/* ALTQ-enabled interfaces always use queue 0. */
3879 	qidx = 0;
3880 	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd))
3881 		qidx = QIDX(ctx, m);
3882 	/*
3883 	 * XXX calculate buf_ring based on flowid (divvy up bits?)
3884 	 */
3885 	txq = &ctx->ifc_txqs[qidx];
3886 
3887 #ifdef DRIVER_BACKPRESSURE
3888 	if (txq->ift_closed) {
3889 		while (m != NULL) {
3890 			next = m->m_nextpkt;
3891 			m->m_nextpkt = NULL;
3892 			m_freem(m);
3893 			DBG_COUNTER_INC(tx_frees);
3894 			m = next;
3895 		}
3896 		return (ENOBUFS);
3897 	}
3898 #endif
3899 #ifdef notyet
3900 	qidx = count = 0;
3901 	mp = marr;
3902 	next = m;
3903 	do {
3904 		count++;
3905 		next = next->m_nextpkt;
3906 	} while (next != NULL);
3907 
3908 	if (count > nitems(marr))
3909 		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
3910 			/* XXX check nextpkt */
3911 			m_freem(m);
3912 			/* XXX simplify for now */
3913 			DBG_COUNTER_INC(tx_frees);
3914 			return (ENOBUFS);
3915 		}
3916 	for (next = m, i = 0; next != NULL; i++) {
3917 		mp[i] = next;
3918 		next = next->m_nextpkt;
3919 		mp[i]->m_nextpkt = NULL;
3920 	}
3921 #endif
3922 	DBG_COUNTER_INC(tx_seen);
3923 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
3924 
3925 	if (abdicate)
3926 		GROUPTASK_ENQUEUE(&txq->ift_task);
3927 	if (err) {
3928 		if (!abdicate)
3929 			GROUPTASK_ENQUEUE(&txq->ift_task);
3930 		/* backpressure support forthcoming */
3931 #ifdef DRIVER_BACKPRESSURE
3932 		txq->ift_closed = TRUE;
3933 #endif
3934 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3935 		m_freem(m);
3936 		DBG_COUNTER_INC(tx_frees);
3937 	}
3938 
3939 	return (err);
3940 }
3941 
3942 #ifdef ALTQ
3943 /*
3944  * The overall approach to integrating iflib with ALTQ is to continue to use
3945  * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
3946  * ring.  Technically, when using ALTQ, queueing to an intermediate mp_ring
3947  * is redundant/unnecessary, but doing so minimizes the amount of
3948  * ALTQ-specific code required in iflib.  It is assumed that the overhead of
3949  * redundantly queueing to an intermediate mp_ring is swamped by the
3950  * performance limitations inherent in using ALTQ.
3951  *
3952  * When ALTQ support is compiled in, all iflib drivers will use a transmit
3953  * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
3954  * given interface.  If ALTQ is enabled for an interface, then all
3955  * transmitted packets for that interface will be submitted to the ALTQ
3956  * subsystem via IFQ_ENQUEUE().  We don't use the legacy if_transmit()
3957  * implementation because it uses IFQ_HANDOFF(), which will duplicatively
3958  * update stats that the iflib machinery handles, and which is sensitive to
3959  * the disused IFF_DRV_OACTIVE flag.  Additionally, iflib_altq_if_start()
3960  * will be installed as the start routine for use by ALTQ facilities that
3961  * need to trigger queue drains on a scheduled basis.
3962  *
3963  */
3964 static void
3965 iflib_altq_if_start(if_t ifp)
3966 {
3967 	struct ifaltq *ifq = &ifp->if_snd;
3968 	struct mbuf *m;
3969 
3970 	IFQ_LOCK(ifq);
3971 	IFQ_DEQUEUE_NOLOCK(ifq, m);
3972 	while (m != NULL) {
3973 		iflib_if_transmit(ifp, m);
3974 		IFQ_DEQUEUE_NOLOCK(ifq, m);
3975 	}
3976 	IFQ_UNLOCK(ifq);
3977 }
3978 
3979 static int
3980 iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
3981 {
3982 	int err;
3983 
3984 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
3985 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
3986 		if (err == 0)
3987 			iflib_altq_if_start(ifp);
3988 	} else
3989 		err = iflib_if_transmit(ifp, m);
3990 
3991 	return (err);
3992 }
3993 #endif /* ALTQ */
3994 
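/*
 * Flush software-queued packets: set IFC_QFLUSH so the drain routine
 * frees entries instead of transmitting them, then drain each TX ring
 * until it is idle or stalled.
 */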
3995 static void
3996 iflib_if_qflush(if_t ifp)
3997 {
3998 	if_ctx_t ctx = if_getsoftc(ifp);
3999 	iflib_txq_t txq = ctx->ifc_txqs;
4000 	int i;
4001 
4002 	STATE_LOCK(ctx);
4003 	ctx->ifc_flags |= IFC_QFLUSH;
4004 	STATE_UNLOCK(ctx);
4005 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
4006 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
4007 			iflib_txq_check_drain(txq, 0);
4008 	STATE_LOCK(ctx);
4009 	ctx->ifc_flags &= ~IFC_QFLUSH;
4010 	STATE_UNLOCK(ctx);
4011 
4012 	/*
4013 	 * When ALTQ is enabled, this will also take care of purging the
4014 	 * ALTQ queue(s).
4015 	 */
4016 	if_qflush(ifp);
4017 }
4018 
4019 
4020 #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
4021 		     IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
4022 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
4023 		     IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM)
4024 
4025 static int
4026 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
4027 {
4028 	if_ctx_t ctx = if_getsoftc(ifp);
4029 	struct ifreq	*ifr = (struct ifreq *)data;
4030 #if defined(INET) || defined(INET6)
4031 	struct ifaddr	*ifa = (struct ifaddr *)data;
4032 #endif
4033 	bool		avoid_reset = FALSE;
4034 	int		err = 0, reinit = 0, bits;
4035 
4036 	switch (command) {
4037 	case SIOCSIFADDR:
4038 #ifdef INET
4039 		if (ifa->ifa_addr->sa_family == AF_INET)
4040 			avoid_reset = TRUE;
4041 #endif
4042 #ifdef INET6
4043 		if (ifa->ifa_addr->sa_family == AF_INET6)
4044 			avoid_reset = TRUE;
4045 #endif
4046 		/*
4047 		** Calling init results in link renegotiation,
4048 		** so we avoid doing it when possible.
4049 		*/
4050 		if (avoid_reset) {
4051 			if_setflagbits(ifp, IFF_UP, 0);
4052 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
4053 				reinit = 1;
4054 #ifdef INET
4055 			if (!(if_getflags(ifp) & IFF_NOARP))
4056 				arp_ifinit(ifp, ifa);
4057 #endif
4058 		} else
4059 			err = ether_ioctl(ifp, command, data);
4060 		break;
4061 	case SIOCSIFMTU:
4062 		CTX_LOCK(ctx);
4063 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
4064 			CTX_UNLOCK(ctx);
4065 			break;
4066 		}
4067 		bits = if_getdrvflags(ifp);
4068 		/* stop the driver and free any clusters before proceeding */
4069 		iflib_stop(ctx);
4070 
4071 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
4072 			STATE_LOCK(ctx);
4073 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
4074 				ctx->ifc_flags |= IFC_MULTISEG;
4075 			else
4076 				ctx->ifc_flags &= ~IFC_MULTISEG;
4077 			STATE_UNLOCK(ctx);
4078 			err = if_setmtu(ifp, ifr->ifr_mtu);
4079 		}
4080 		iflib_init_locked(ctx);
4081 		STATE_LOCK(ctx);
4082 		if_setdrvflags(ifp, bits);
4083 		STATE_UNLOCK(ctx);
4084 		CTX_UNLOCK(ctx);
4085 		break;
4086 	case SIOCSIFFLAGS:
4087 		CTX_LOCK(ctx);
4088 		if (if_getflags(ifp) & IFF_UP) {
4089 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4090 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4091 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4092 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4093 				}
4094 			} else
4095 				reinit = 1;
4096 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4097 			iflib_stop(ctx);
4098 		}
4099 		ctx->ifc_if_flags = if_getflags(ifp);
4100 		CTX_UNLOCK(ctx);
4101 		break;
4102 	case SIOCADDMULTI:
4103 	case SIOCDELMULTI:
4104 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4105 			CTX_LOCK(ctx);
4106 			IFDI_INTR_DISABLE(ctx);
4107 			IFDI_MULTI_SET(ctx);
4108 			IFDI_INTR_ENABLE(ctx);
4109 			CTX_UNLOCK(ctx);
4110 		}
4111 		break;
4112 	case SIOCSIFMEDIA:
4113 		CTX_LOCK(ctx);
4114 		IFDI_MEDIA_SET(ctx);
4115 		CTX_UNLOCK(ctx);
4116 		/* falls thru */
4117 	case SIOCGIFMEDIA:
4118 	case SIOCGIFXMEDIA:
4119 		err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
4120 		break;
4121 	case SIOCGI2C:
4122 	{
4123 		struct ifi2creq i2c;
4124 
4125 		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
4126 		if (err != 0)
4127 			break;
4128 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4129 			err = EINVAL;
4130 			break;
4131 		}
4132 		if (i2c.len > sizeof(i2c.data)) {
4133 			err = EINVAL;
4134 			break;
4135 		}
4136 
4137 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4138 			err = copyout(&i2c, ifr_data_get_ptr(ifr),
4139 			    sizeof(i2c));
4140 		break;
4141 	}
4142 	case SIOCSIFCAP:
4143 	{
4144 		int mask, setmask, oldmask;
4145 
4146 		oldmask = if_getcapenable(ifp);
4147 		mask = ifr->ifr_reqcap ^ oldmask;
4148 		mask &= ctx->ifc_softc_ctx.isc_capabilities;
4149 		setmask = 0;
4150 #ifdef TCP_OFFLOAD
4151 		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
4152 #endif
4153 		setmask |= (mask & IFCAP_FLAGS);
4154 		setmask |= (mask & IFCAP_WOL);
4155 
4156 		/*
4157 		 * If any RX csum has changed, change all the ones that
4158 		 * are supported by the driver.
4159 		 */
4160 		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4161 			setmask |= ctx->ifc_softc_ctx.isc_capabilities &
4162 			    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4163 		}
4164 
4165 		/*
4166 		 * Ensure that traffic has stopped before changing any flags.
4167 		 */
4168 		if (setmask) {
4169 			CTX_LOCK(ctx);
4170 			bits = if_getdrvflags(ifp);
4171 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
4172 				iflib_stop(ctx);
4173 			STATE_LOCK(ctx);
4174 			if_togglecapenable(ifp, setmask);
4175 			STATE_UNLOCK(ctx);
4176 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
4177 				iflib_init_locked(ctx);
4178 			STATE_LOCK(ctx);
4179 			if_setdrvflags(ifp, bits);
4180 			STATE_UNLOCK(ctx);
4181 			CTX_UNLOCK(ctx);
4182 		}
4183 		if_vlancap(ifp);
4184 		break;
4185 	}
4186 	case SIOCGPRIVATE_0:
4187 	case SIOCSDRVSPEC:
4188 	case SIOCGDRVSPEC:
4189 		CTX_LOCK(ctx);
4190 		err = IFDI_PRIV_IOCTL(ctx, command, data);
4191 		CTX_UNLOCK(ctx);
4192 		break;
4193 	default:
4194 		err = ether_ioctl(ifp, command, data);
4195 		break;
4196 	}
4197 	if (reinit)
4198 		iflib_if_init(ctx);
4199 	return (err);
4200 }
4201 
4202 static uint64_t
4203 iflib_if_get_counter(if_t ifp, ift_counter cnt)
4204 {
4205 	if_ctx_t ctx = if_getsoftc(ifp);
4206 
4207 	return (IFDI_GET_COUNTER(ctx, cnt));
4208 }
4209 
4210 /*********************************************************************
4211  *
4212  *  OTHER FUNCTIONS EXPORTED TO THE STACK
4213  *
4214  **********************************************************************/
4215 
4216 static void
4217 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
4218 {
4219 	if_ctx_t ctx = if_getsoftc(ifp);
4220 
4221 	if ((void *)ctx != arg)
4222 		return;
4223 
4224 	if ((vtag == 0) || (vtag > 4095))
4225 		return;
4226 
4227 	CTX_LOCK(ctx);
4228 	IFDI_VLAN_REGISTER(ctx, vtag);
4229 	/* Re-init to load the changes */
4230 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4231 		iflib_if_init_locked(ctx);
4232 	CTX_UNLOCK(ctx);
4233 }
4234 
4235 static void
4236 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
4237 {
4238 	if_ctx_t ctx = if_getsoftc(ifp);
4239 
4240 	if ((void *)ctx != arg)
4241 		return;
4242 
4243 	if ((vtag == 0) || (vtag > 4095))
4244 		return;
4245 
4246 	CTX_LOCK(ctx);
4247 	IFDI_VLAN_UNREGISTER(ctx, vtag);
4248 	/* Re-init to load the changes */
4249 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4250 		iflib_if_init_locked(ctx);
4251 	CTX_UNLOCK(ctx);
4252 }
4253 
4254 static void
4255 iflib_led_func(void *arg, int onoff)
4256 {
4257 	if_ctx_t ctx = arg;
4258 
4259 	CTX_LOCK(ctx);
4260 	IFDI_LED_FUNC(ctx, onoff);
4261 	CTX_UNLOCK(ctx);
4262 }
4263 
4264 /*********************************************************************
4265  *
4266  *  BUS FUNCTION DEFINITIONS
4267  *
4268  **********************************************************************/
4269 
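/*
 * Probe: match the device's PCI IDs against the driver's vendor info
 * table; a subvendor, subdevice, or revision ID of 0 in a table entry
 * acts as a wildcard.  A minimal table sketch, assuming a hypothetical
 * foo(4) driver:
 *
 *	static pci_vendor_info_t foo_vendor_info_array[] = {
 *		PVID(0x8086, 0x1234, "Foo Ethernet Adapter"),
 *		PVID_END
 *	};
 */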
4270 int
4271 iflib_device_probe(device_t dev)
4272 {
4273 	pci_vendor_info_t *ent;
4274 
4275 	uint16_t	pci_vendor_id, pci_device_id;
4276 	uint16_t	pci_subvendor_id, pci_subdevice_id;
4277 	uint16_t	pci_rev_id;
4278 	if_shared_ctx_t sctx;
4279 
4280 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4281 		return (ENOTSUP);
4282 
4283 	pci_vendor_id = pci_get_vendor(dev);
4284 	pci_device_id = pci_get_device(dev);
4285 	pci_subvendor_id = pci_get_subvendor(dev);
4286 	pci_subdevice_id = pci_get_subdevice(dev);
4287 	pci_rev_id = pci_get_revid(dev);
4288 	if (sctx->isc_parse_devinfo != NULL)
4289 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4290 
4291 	ent = sctx->isc_vendor_info;
4292 	while (ent->pvi_vendor_id != 0) {
4293 		if (pci_vendor_id != ent->pvi_vendor_id) {
4294 			ent++;
4295 			continue;
4296 		}
4297 		if ((pci_device_id == ent->pvi_device_id) &&
4298 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4299 		     (ent->pvi_subvendor_id == 0)) &&
4300 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4301 		     (ent->pvi_subdevice_id == 0)) &&
4302 		    ((pci_rev_id == ent->pvi_rev_id) ||
4303 		     (ent->pvi_rev_id == 0))) {
4304 
4305 			device_set_desc_copy(dev, ent->pvi_name);
4306 			/* This needs to be changed to zero if the bus probing code
4307 			 * ever stops re-probing on best match, because the sctx
4308 			 * may have its values overwritten by register calls
4309 			 * in subsequent probes.
4310 			 */
4311 			return (BUS_PROBE_DEFAULT);
4312 		}
4313 		ent++;
4314 	}
4315 	return (ENXIO);
4316 }
4317 
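/*
 * Seed the softc context's queue set and descriptor counts from the
 * sysctl overrides (when set) or the shared context defaults, clamping
 * each ring size to the driver's advertised min/max.
 */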
4318 static void
4319 iflib_reset_qvalues(if_ctx_t ctx)
4320 {
4321 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4322 	if_shared_ctx_t sctx = ctx->ifc_sctx;
4323 	device_t dev = ctx->ifc_dev;
4324 	int i;
4325 
4326 	scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES;
4327 	scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH;
4328 	/*
4329 	 * XXX sanity check that ntxd & nrxd are a power of 2
4330 	 */
4331 	if (ctx->ifc_sysctl_ntxqs != 0)
4332 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4333 	if (ctx->ifc_sysctl_nrxqs != 0)
4334 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4335 
4336 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4337 		if (ctx->ifc_sysctl_ntxds[i] != 0)
4338 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4339 		else
4340 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4341 	}
4342 
4343 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4344 		if (ctx->ifc_sysctl_nrxds[i] != 0)
4345 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4346 		else
4347 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4348 	}
4349 
4350 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4351 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4352 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4353 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4354 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4355 		}
4356 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4357 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4358 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4359 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4360 		}
4361 	}
4362 
4363 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4364 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4365 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4366 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4367 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4368 		}
4369 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4370 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4371 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4372 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4373 		}
4374 	}
4375 }
4376 
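/*
 * Core attach path: allocate the context, run IFDI_ATTACH_PRE, validate
 * and size the queues, set up MSI-X/MSI/legacy interrupts, allocate the
 * queue memory, and finish with ether_ifattach() and IFDI_ATTACH_POST.
 *
 * A driver's bus attach method is typically just a thin wrapper around
 * this routine (sketch, assuming a hypothetical foo(4) driver):
 *
 *	static int
 *	foo_attach(device_t dev)
 *	{
 *		if_ctx_t ctx;
 *
 *		return (iflib_device_register(dev, NULL, &foo_sctx, &ctx));
 *	}
 */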
4377 int
4378 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
4379 {
4380 	int err, rid, msix;
4381 	if_ctx_t ctx;
4382 	if_t ifp;
4383 	if_softc_ctx_t scctx;
4384 	int i;
4385 	uint16_t main_txq;
4386 	uint16_t main_rxq;
4387 
4388 
4389 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
4390 
4391 	if (sc == NULL) {
4392 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
4393 		device_set_softc(dev, ctx);
4394 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
4395 	}
4396 
4397 	ctx->ifc_sctx = sctx;
4398 	ctx->ifc_dev = dev;
4399 	ctx->ifc_softc = sc;
4400 
4401 	if ((err = iflib_register(ctx)) != 0) {
4402 		device_printf(dev, "iflib_register failed %d\n", err);
4403 		goto fail_ctx_free;
4404 	}
4405 	iflib_add_device_sysctl_pre(ctx);
4406 
4407 	scctx = &ctx->ifc_softc_ctx;
4408 	ifp = ctx->ifc_ifp;
4409 
4410 	iflib_reset_qvalues(ctx);
4411 	CTX_LOCK(ctx);
4412 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
4413 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4414 		goto fail_unlock;
4415 	}
4416 	_iflib_pre_assert(scctx);
4417 	ctx->ifc_txrx = *scctx->isc_txrx;
4418 
4419 #ifdef INVARIANTS
4420 	MPASS(scctx->isc_capabilities);
4421 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
4422 		MPASS(scctx->isc_tx_csum_flags);
4423 #endif
4424 
4425 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS);
4426 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
4427 
4428 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4429 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4430 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4431 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4432 
4433 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4434 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4435 
4436 	/* XXX change for per-queue sizes */
4437 	device_printf(dev, "Using %d tx descriptors and %d rx descriptors\n",
4438 	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4439 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4440 		if (!powerof2(scctx->isc_nrxd[i])) {
4441 			/* round down instead? */
4442 			device_printf(dev, "# rx descriptors must be a power of 2\n");
4443 			err = EINVAL;
4444 			goto fail_iflib_detach;
4445 		}
4446 	}
4447 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4448 		if (!powerof2(scctx->isc_ntxd[i])) {
4449 			device_printf(dev,
4450 			    "# tx descriptors must be a power of 2");
4451 			err = EINVAL;
4452 			goto fail_iflib_detach;
4453 		}
4454 	}
4455 
4456 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4457 	    MAX_SINGLE_PACKET_FRACTION)
4458 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4459 		    MAX_SINGLE_PACKET_FRACTION);
4460 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4461 	    MAX_SINGLE_PACKET_FRACTION)
4462 		scctx->isc_tx_tso_segments_max = max(1,
4463 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4464 
4465 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4466 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
4467 		/*
4468 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4469 		 * but some MACs do.
4470 		 */
4471 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4472 		    IP_MAXPACKET));
4473 		/*
4474 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4475 		 * into account.  In the worst case, each of these calls will
4476 		 * add another mbuf and, thus, the requirement for another DMA
4477 		 * segment.  So for best performance, it doesn't make sense to
4478 		 * advertise a maximum of TSO segments that typically will
4479 		 * require defragmentation in iflib_encap().
4480 		 */
4481 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4482 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4483 	}
4484 	if (scctx->isc_rss_table_size == 0)
4485 		scctx->isc_rss_table_size = 64;
4486 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4487 
4488 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4489 	/* XXX format name */
4490 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
4491 	    NULL, NULL, "admin");
4492 
4493 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
4494 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4495 		device_printf(dev, "Unable to fetch CPU list\n");
4496 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4497 	}
4498 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4499 
4500 	/*
4501 	** Now set up MSI or MSI-X, should return us the number of supported
4502 	** vectors (will be 1 for a legacy interrupt and MSI).
4503 	*/
4504 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
4505 		msix = scctx->isc_vectors;
4506 	} else if (scctx->isc_msix_bar != 0)
4507 	       /*
4508 		* The simple fact that isc_msix_bar is not 0 does not mean
4509 		* we have a good value there that is known to work.
4510 		*/
4511 		msix = iflib_msix_init(ctx);
4512 	else {
4513 		scctx->isc_vectors = 1;
4514 		scctx->isc_ntxqsets = 1;
4515 		scctx->isc_nrxqsets = 1;
4516 		scctx->isc_intr = IFLIB_INTR_LEGACY;
4517 		msix = 0;
4518 	}
4519 	/* Get memory for the station queues */
4520 	if ((err = iflib_queues_alloc(ctx))) {
4521 		device_printf(dev, "Unable to allocate queue memory\n");
4522 		goto fail_intr_free;
4523 	}
4524 
4525 	if ((err = iflib_qset_structures_setup(ctx)))
4526 		goto fail_queues;
4527 
4528 	/*
4529 	 * Group taskqueues aren't properly set up until SMP is started,
4530 	 * so we disable interrupts until we can handle them post
4531 	 * SI_SUB_SMP.
4532 	 *
4533 	 * XXX: disabling interrupts doesn't actually work, at least for
4534 	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
4535 	 * we do null handling and depend on this not causing too large an
4536 	 * interrupt storm.
4537 	 */
4538 	IFDI_INTR_DISABLE(ctx);
4539 	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
4540 		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
4541 		goto fail_queues;
4542 	}
4543 	if (msix <= 1) {
4544 		rid = 0;
4545 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
4546 			MPASS(msix == 1);
4547 			rid = 1;
4548 		}
4549 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
4550 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
4551 			goto fail_queues;
4552 		}
4553 	}
4554 
4555 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4556 
4557 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4558 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4559 		goto fail_detach;
4560 	}
4561 
4562 	/*
4563 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4564 	 * This must appear after the call to ether_ifattach() because
4565 	 * ether_ifattach() sets if_hdrlen to the default value.
4566 	 */
4567 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4568 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
4569 
4570 	if ((err = iflib_netmap_attach(ctx))) {
4571 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
4572 		goto fail_detach;
4573 	}
4574 	*ctxp = ctx;
4575 
4576 	NETDUMP_SET(ctx->ifc_ifp, iflib);
4577 
4578 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4579 	iflib_add_device_sysctl_post(ctx);
4580 	ctx->ifc_flags |= IFC_INIT_DONE;
4581 	CTX_UNLOCK(ctx);
4582 	return (0);
4583 
4584 fail_detach:
4585 	ether_ifdetach(ctx->ifc_ifp);
4586 fail_intr_free:
4587 	iflib_free_intr_mem(ctx);
4588 fail_queues:
4589 	iflib_tx_structures_free(ctx);
4590 	iflib_rx_structures_free(ctx);
4591 fail_iflib_detach:
4592 	IFDI_DETACH(ctx);
4593 fail_unlock:
4594 	CTX_UNLOCK(ctx);
4595 fail_ctx_free:
4596 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4597 		free(ctx->ifc_softc, M_IFLIB);
4598 	free(ctx, M_IFLIB);
4599 	return (err);
4600 }
4601 
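/*
 * Attach path for pseudo/cloned interfaces: mirrors
 * iflib_device_register() but skips the PCI/MSI-X setup and always runs
 * in legacy interrupt mode.
 */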
4602 int
4603 iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
4604 					  struct iflib_cloneattach_ctx *clctx)
4605 {
4606 	int err;
4607 	if_ctx_t ctx;
4608 	if_t ifp;
4609 	if_softc_ctx_t scctx;
4610 	int i;
4611 	void *sc;
4612 	uint16_t main_txq;
4613 	uint16_t main_rxq;
4614 
4615 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
4616 	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
4617 	ctx->ifc_flags |= IFC_SC_ALLOCATED;
4618 	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
4619 		ctx->ifc_flags |= IFC_PSEUDO;
4620 
4621 	ctx->ifc_sctx = sctx;
4622 	ctx->ifc_softc = sc;
4623 	ctx->ifc_dev = dev;
4624 
4625 	if ((err = iflib_register(ctx)) != 0) {
4626 		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
4627 		goto fail_ctx_free;
4628 	}
4629 	iflib_add_device_sysctl_pre(ctx);
4630 
4631 	scctx = &ctx->ifc_softc_ctx;
4632 	ifp = ctx->ifc_ifp;
4633 
4634 	/*
4635 	 * XXX sanity check that ntxd & nrxd are a power of 2
4636 	 */
4637 	iflib_reset_qvalues(ctx);
4638 
4639 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
4640 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4641 		goto fail_ctx_free;
4642 	}
4643 	if (sctx->isc_flags & IFLIB_GEN_MAC)
4644 		iflib_gen_mac(ctx);
4645 	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
4646 								clctx->cc_params)) != 0) {
4647 		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
4648 		goto fail_ctx_free;
4649 	}
4650 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
4651 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
4652 	ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO);
4653 
4654 #ifdef INVARIANTS
4655 	MPASS(scctx->isc_capabilities);
4656 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
4657 		MPASS(scctx->isc_tx_csum_flags);
4658 #endif
4659 
4660 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
4661 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
4662 
4663 	ifp->if_flags |= IFF_NOGROUP;
4664 	if (sctx->isc_flags & IFLIB_PSEUDO) {
4665 		ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4666 
4667 		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4668 			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4669 			goto fail_detach;
4670 		}
4671 		*ctxp = ctx;
4672 
4673 		/*
4674 		 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4675 		 * This must appear after the call to ether_ifattach() because
4676 		 * ether_ifattach() sets if_hdrlen to the default value.
4677 		 */
4678 		if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4679 			if_setifheaderlen(ifp,
4680 			    sizeof(struct ether_vlan_header));
4681 
4682 		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4683 		iflib_add_device_sysctl_post(ctx);
4684 		ctx->ifc_flags |= IFC_INIT_DONE;
4685 		return (0);
4686 	}
4687 	_iflib_pre_assert(scctx);
4688 	ctx->ifc_txrx = *scctx->isc_txrx;
4689 
4690 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4691 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4692 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4693 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4694 
4695 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4696 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4697 
4698 	/* XXX change for per-queue sizes */
4699 	device_printf(dev, "Using %d tx descriptors and %d rx descriptors\n",
4700 	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4701 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4702 		if (!powerof2(scctx->isc_nrxd[i])) {
4703 			/* round down instead? */
4704 			device_printf(dev, "# rx descriptors must be a power of 2\n");
4705 			err = EINVAL;
4706 			goto fail_iflib_detach;
4707 		}
4708 	}
4709 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4710 		if (!powerof2(scctx->isc_ntxd[i])) {
4711 			device_printf(dev,
4712 			    "# tx descriptors must be a power of 2");
4713 			err = EINVAL;
4714 			goto fail_iflib_detach;
4715 		}
4716 	}
4717 
4718 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4719 	    MAX_SINGLE_PACKET_FRACTION)
4720 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4721 		    MAX_SINGLE_PACKET_FRACTION);
4722 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4723 	    MAX_SINGLE_PACKET_FRACTION)
4724 		scctx->isc_tx_tso_segments_max = max(1,
4725 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4726 
4727 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4728 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
4729 		/*
4730 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4731 		 * but some MACs do.
4732 		 */
4733 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4734 		    IP_MAXPACKET));
4735 		/*
4736 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4737 		 * into account.  In the worst case, each of these calls will
4738 		 * add another mbuf and, thus, the requirement for another DMA
4739 		 * segment.  So for best performance, it doesn't make sense to
4740 		 * advertise a maximum of TSO segments that typically will
4741 		 * require defragmentation in iflib_encap().
4742 		 */
4743 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4744 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4745 	}
4746 	if (scctx->isc_rss_table_size == 0)
4747 		scctx->isc_rss_table_size = 64;
4748 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4749 
4750 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4751 	/* XXX format name */
4752 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
4753 	    NULL, NULL, "admin");
4754 
4755 	/* XXX --- can support > 1 -- but keep it simple for now */
4756 	scctx->isc_intr = IFLIB_INTR_LEGACY;
4757 
4758 	/* Get memory for the station queues */
4759 	if ((err = iflib_queues_alloc(ctx))) {
4760 		device_printf(dev, "Unable to allocate queue memory\n");
4761 		goto fail_iflib_detach;
4762 	}
4763 
4764 	if ((err = iflib_qset_structures_setup(ctx))) {
4765 		device_printf(dev, "qset structure setup failed %d\n", err);
4766 		goto fail_queues;
4767 	}
4768 
4769 	/*
4770 	 * XXX What if anything do we want to do about interrupts?
4771 	 */
4772 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
4773 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
4774 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
4775 		goto fail_detach;
4776 	}
4777 
4778 	/*
4779 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
4780 	 * This must appear after the call to ether_ifattach() because
4781 	 * ether_ifattach() sets if_hdrlen to the default value.
4782 	 */
4783 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4784 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
4785 
4786 	/* XXX handle more than one queue */
4787 	for (i = 0; i < scctx->isc_nrxqsets; i++)
4788 		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
4789 
4790 	*ctxp = ctx;
4791 
4792 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4793 	iflib_add_device_sysctl_post(ctx);
4794 	ctx->ifc_flags |= IFC_INIT_DONE;
4795 	return (0);
4796 fail_detach:
4797 	ether_ifdetach(ctx->ifc_ifp);
4798 fail_queues:
4799 	iflib_tx_structures_free(ctx);
4800 	iflib_rx_structures_free(ctx);
4801 fail_iflib_detach:
4802 	IFDI_DETACH(ctx);
4803 fail_ctx_free:
4804 	free(ctx->ifc_softc, M_IFLIB);
4805 	free(ctx, M_IFLIB);
4806 	return (err);
4807 }
4808 
4809 int
4810 iflib_pseudo_deregister(if_ctx_t ctx)
4811 {
4812 	if_t ifp = ctx->ifc_ifp;
4813 	iflib_txq_t txq;
4814 	iflib_rxq_t rxq;
4815 	int i, j;
4816 	struct taskqgroup *tqg;
4817 	iflib_fl_t fl;
4818 
4819 	/* Unregister VLAN events */
4820 	if (ctx->ifc_vlan_attach_event != NULL)
4821 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4822 	if (ctx->ifc_vlan_detach_event != NULL)
4823 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4824 
4825 	ether_ifdetach(ifp);
4826 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
4827 	CTX_LOCK_DESTROY(ctx);
4828 	/* XXX drain any dependent tasks */
4829 	tqg = qgroup_if_io_tqg;
4830 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4831 		callout_drain(&txq->ift_timer);
4832 		if (txq->ift_task.gt_uniq != NULL)
4833 			taskqgroup_detach(tqg, &txq->ift_task);
4834 	}
4835 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4836 		if (rxq->ifr_task.gt_uniq != NULL)
4837 			taskqgroup_detach(tqg, &rxq->ifr_task);
4838 
4839 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
4840 			free(fl->ifl_rx_bitmap, M_IFLIB);
4841 	}
4842 	tqg = qgroup_if_config_tqg;
4843 	if (ctx->ifc_admin_task.gt_uniq != NULL)
4844 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
4845 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
4846 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
4847 
4848 	if_free(ifp);
4849 
4850 	iflib_tx_structures_free(ctx);
4851 	iflib_rx_structures_free(ctx);
4852 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4853 		free(ctx->ifc_softc, M_IFLIB);
4854 	free(ctx, M_IFLIB);
4855 	return (0);
4856 }
4857 
4858 int
4859 iflib_device_attach(device_t dev)
4860 {
4861 	if_ctx_t ctx;
4862 	if_shared_ctx_t sctx;
4863 
4864 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4865 		return (ENOTSUP);
4866 
4867 	pci_enable_busmaster(dev);
4868 
4869 	return (iflib_device_register(dev, NULL, sctx, &ctx));
4870 }
4871 
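/*
 * Tear down everything iflib_device_register() set up, in roughly
 * reverse order: stop the interface, detach netmap and ether, drain the
 * queue tasks, run IFDI_DETACH, and free interrupt and queue memory.
 */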
4872 int
4873 iflib_device_deregister(if_ctx_t ctx)
4874 {
4875 	if_t ifp = ctx->ifc_ifp;
4876 	iflib_txq_t txq;
4877 	iflib_rxq_t rxq;
4878 	device_t dev = ctx->ifc_dev;
4879 	int i, j;
4880 	struct taskqgroup *tqg;
4881 	iflib_fl_t fl;
4882 
4883 	/* Make sure VLANS are not using driver */
4884 	if (if_vlantrunkinuse(ifp)) {
4885 		device_printf(dev, "Vlan in use, detach first\n");
4886 		return (EBUSY);
4887 	}
4888 #ifdef PCI_IOV
4889 	if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
4890 		device_printf(dev, "SR-IOV in use; detach first.\n");
4891 		return (EBUSY);
4892 	}
4893 #endif
4894 
4895 	STATE_LOCK(ctx);
4896 	ctx->ifc_flags |= IFC_IN_DETACH;
4897 	STATE_UNLOCK(ctx);
4898 
4899 	CTX_LOCK(ctx);
4900 	iflib_stop(ctx);
4901 	CTX_UNLOCK(ctx);
4902 
4903 	/* Unregister VLAN events */
4904 	if (ctx->ifc_vlan_attach_event != NULL)
4905 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4906 	if (ctx->ifc_vlan_detach_event != NULL)
4907 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4908 
4909 	iflib_netmap_detach(ifp);
4910 	ether_ifdetach(ifp);
4911 	if (ctx->ifc_led_dev != NULL)
4912 		led_destroy(ctx->ifc_led_dev);
4913 	/* XXX drain any dependent tasks */
4914 	tqg = qgroup_if_io_tqg;
4915 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4916 		callout_drain(&txq->ift_timer);
4917 		if (txq->ift_task.gt_uniq != NULL)
4918 			taskqgroup_detach(tqg, &txq->ift_task);
4919 	}
4920 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4921 		if (rxq->ifr_task.gt_uniq != NULL)
4922 			taskqgroup_detach(tqg, &rxq->ifr_task);
4923 
4924 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
4925 			free(fl->ifl_rx_bitmap, M_IFLIB);
4926 	}
4927 	tqg = qgroup_if_config_tqg;
4928 	if (ctx->ifc_admin_task.gt_uniq != NULL)
4929 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
4930 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
4931 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
4932 	CTX_LOCK(ctx);
4933 	IFDI_DETACH(ctx);
4934 	CTX_UNLOCK(ctx);
4935 
4936 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
4937 	CTX_LOCK_DESTROY(ctx);
4938 	device_set_softc(ctx->ifc_dev, NULL);
4939 	iflib_free_intr_mem(ctx);
4940 
4941 	bus_generic_detach(dev);
4942 	if_free(ifp);
4943 
4944 	iflib_tx_structures_free(ctx);
4945 	iflib_rx_structures_free(ctx);
4946 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4947 		free(ctx->ifc_softc, M_IFLIB);
4948 	STATE_LOCK_DESTROY(ctx);
4949 	free(ctx, M_IFLIB);
4950 	return (0);
4951 }
4952 
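/*
 * Release whatever interrupt resources were allocated at attach time:
 * the legacy IRQ, any MSI/MSI-X vectors, and the MSI-X table memory.
 */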
4953 static void
4954 iflib_free_intr_mem(if_ctx_t ctx)
4955 {
4956 
4957 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
4958 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
4959 	}
4960 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
4961 		pci_release_msi(ctx->ifc_dev);
4962 	}
4963 	if (ctx->ifc_msix_mem != NULL) {
4964 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
4965 		    rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
4966 		ctx->ifc_msix_mem = NULL;
4967 	}
4968 }
4969 
4970 int
4971 iflib_device_detach(device_t dev)
4972 {
4973 	if_ctx_t ctx = device_get_softc(dev);
4974 
4975 	return (iflib_device_deregister(ctx));
4976 }
4977 
4978 int
4979 iflib_device_suspend(device_t dev)
4980 {
4981 	if_ctx_t ctx = device_get_softc(dev);
4982 
4983 	CTX_LOCK(ctx);
4984 	IFDI_SUSPEND(ctx);
4985 	CTX_UNLOCK(ctx);
4986 
4987 	return (bus_generic_suspend(dev));
4988 }
4989 int
4990 iflib_device_shutdown(device_t dev)
4991 {
4992 	if_ctx_t ctx = device_get_softc(dev);
4993 
4994 	CTX_LOCK(ctx);
4995 	IFDI_SHUTDOWN(ctx);
4996 	CTX_UNLOCK(ctx);
4997 
4998 	return (bus_generic_suspend(dev));
4999 }
5000 
5001 
5002 int
5003 iflib_device_resume(device_t dev)
5004 {
5005 	if_ctx_t ctx = device_get_softc(dev);
5006 	iflib_txq_t txq = ctx->ifc_txqs;
5007 
5008 	CTX_LOCK(ctx);
5009 	IFDI_RESUME(ctx);
5010 	iflib_if_init_locked(ctx);
5011 	CTX_UNLOCK(ctx);
5012 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
5013 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
5014 
5015 	return (bus_generic_resume(dev));
5016 }
5017 
5018 int
5019 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
5020 {
5021 	int error;
5022 	if_ctx_t ctx = device_get_softc(dev);
5023 
5024 	CTX_LOCK(ctx);
5025 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
5026 	CTX_UNLOCK(ctx);
5027 
5028 	return (error);
5029 }
5030 
5031 void
5032 iflib_device_iov_uninit(device_t dev)
5033 {
5034 	if_ctx_t ctx = device_get_softc(dev);
5035 
5036 	CTX_LOCK(ctx);
5037 	IFDI_IOV_UNINIT(ctx);
5038 	CTX_UNLOCK(ctx);
5039 }
5040 
5041 int
5042 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
5043 {
5044 	int error;
5045 	if_ctx_t ctx = device_get_softc(dev);
5046 
5047 	CTX_LOCK(ctx);
5048 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
5049 	CTX_UNLOCK(ctx);
5050 
5051 	return (error);
5052 }
5053 
5054 /*********************************************************************
5055  *
5056  *  MODULE FUNCTION DEFINITIONS
5057  *
5058  **********************************************************************/
5059 
5060 /*
5061  * - Start a fast taskqueue thread for each core
5062  * - Start a taskqueue for control operations
5063  */
5064 static int
5065 iflib_module_init(void)
5066 {
5067 	return (0);
5068 }
5069 
5070 static int
5071 iflib_module_event_handler(module_t mod, int what, void *arg)
5072 {
5073 	int err;
5074 
5075 	switch (what) {
5076 	case MOD_LOAD:
5077 		if ((err = iflib_module_init()) != 0)
5078 			return (err);
5079 		break;
5080 	case MOD_UNLOAD:
5081 		return (EBUSY);
5082 	default:
5083 		return (EOPNOTSUPP);
5084 	}
5085 
5086 	return (0);
5087 }
5088 
5089 /*********************************************************************
5090  *
5091  *  PUBLIC FUNCTION DEFINITIONS
5092  *     ordered as in iflib.h
5093  *
5094  **********************************************************************/
5095 
5096 
5097 static void
5098 _iflib_assert(if_shared_ctx_t sctx)
5099 {
5100 	MPASS(sctx->isc_tx_maxsize);
5101 	MPASS(sctx->isc_tx_maxsegsize);
5102 
5103 	MPASS(sctx->isc_rx_maxsize);
5104 	MPASS(sctx->isc_rx_nsegments);
5105 	MPASS(sctx->isc_rx_maxsegsize);
5106 
5107 	MPASS(sctx->isc_nrxd_min[0]);
5108 	MPASS(sctx->isc_nrxd_max[0]);
5109 	MPASS(sctx->isc_nrxd_default[0]);
5110 	MPASS(sctx->isc_ntxd_min[0]);
5111 	MPASS(sctx->isc_ntxd_max[0]);
5112 	MPASS(sctx->isc_ntxd_default[0]);
5113 }
5114 
5115 static void
5116 _iflib_pre_assert(if_softc_ctx_t scctx)
5117 {
5118 
5119 	MPASS(scctx->isc_txrx->ift_txd_encap);
5120 	MPASS(scctx->isc_txrx->ift_txd_flush);
5121 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
5122 	MPASS(scctx->isc_txrx->ift_rxd_available);
5123 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
5124 	MPASS(scctx->isc_txrx->ift_rxd_refill);
5125 	MPASS(scctx->isc_txrx->ift_rxd_flush);
5126 }
5127 
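/*
 * Allocate and wire up the ifnet: compile the driver's kobj method
 * table, install the iflib if_* entry points, and register the VLAN
 * config/unconfig event handlers.
 */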
5128 static int
5129 iflib_register(if_ctx_t ctx)
5130 {
5131 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5132 	driver_t *driver = sctx->isc_driver;
5133 	device_t dev = ctx->ifc_dev;
5134 	if_t ifp;
5135 
5136 	_iflib_assert(sctx);
5137 
5138 	CTX_LOCK_INIT(ctx);
5139 	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
5140 	ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER);
5141 	if (ifp == NULL) {
5142 		device_printf(dev, "cannot allocate ifnet structure\n");
5143 		return (ENOMEM);
5144 	}
5145 
5146 	/*
5147 	 * Initialize our context's device specific methods
5148 	 */
5149 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
5150 	kobj_class_compile((kobj_class_t) driver);
5151 	driver->refs++;
5152 
5153 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
5154 	if_setsoftc(ifp, ctx);
5155 	if_setdev(ifp, dev);
5156 	if_setinitfn(ifp, iflib_if_init);
5157 	if_setioctlfn(ifp, iflib_if_ioctl);
5158 #ifdef ALTQ
5159 	if_setstartfn(ifp, iflib_altq_if_start);
5160 	if_settransmitfn(ifp, iflib_altq_if_transmit);
5161 	if_setsendqready(ifp);
5162 #else
5163 	if_settransmitfn(ifp, iflib_if_transmit);
5164 #endif
5165 	if_setqflushfn(ifp, iflib_if_qflush);
5166 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
5167 
5168 	ctx->ifc_vlan_attach_event =
5169 		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
5170 							  EVENTHANDLER_PRI_FIRST);
5171 	ctx->ifc_vlan_detach_event =
5172 		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
5173 							  EVENTHANDLER_PRI_FIRST);
5174 
5175 	ifmedia_init(&ctx->ifc_media, IFM_IMASK,
5176 					 iflib_media_change, iflib_media_status);
5177 
5178 	return (0);
5179 }
5180 
5181 
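/*
 * Allocate the per-queue-set software state and DMA descriptor memory
 * for all TX and RX queues, then hand the virtual/physical ring
 * addresses down to the driver via IFDI_TX/RX_QUEUES_ALLOC().
 */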
5182 static int
5183 iflib_queues_alloc(if_ctx_t ctx)
5184 {
5185 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5186 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5187 	device_t dev = ctx->ifc_dev;
5188 	int nrxqsets = scctx->isc_nrxqsets;
5189 	int ntxqsets = scctx->isc_ntxqsets;
5190 	iflib_txq_t txq;
5191 	iflib_rxq_t rxq;
5192 	iflib_fl_t fl = NULL;
5193 	int i, j, cpu, err, txconf, rxconf;
5194 	iflib_dma_info_t ifdip;
5195 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
5196 	uint32_t *txqsizes = scctx->isc_txqsizes;
5197 	uint8_t nrxqs = sctx->isc_nrxqs;
5198 	uint8_t ntxqs = sctx->isc_ntxqs;
5199 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
5200 	caddr_t *vaddrs;
5201 	uint64_t *paddrs;
5202 
5203 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
5204 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
5205 
5206 	/* Allocate the TX ring struct memory */
5207 	if (!(ctx->ifc_txqs =
5208 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5209 	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5210 		device_printf(dev, "Unable to allocate TX ring memory\n");
5211 		err = ENOMEM;
5212 		goto fail;
5213 	}
5214 
5215 	/* Now allocate the RX */
5216 	if (!(ctx->ifc_rxqs =
5217 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5218 	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5219 		device_printf(dev, "Unable to allocate RX ring memory\n");
5220 		err = ENOMEM;
5221 		goto rx_fail;
5222 	}
5223 
5224 	txq = ctx->ifc_txqs;
5225 	rxq = ctx->ifc_rxqs;
5226 
5227 	/*
5228 	 * XXX handle allocation failure
5229 	 */
5230 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
5231 		/* Set up some basics */
5232 
5233 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
5234 		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5235 			device_printf(dev,
5236 			    "Unable to allocate TX DMA info memory\n");
5237 			err = ENOMEM;
5238 			goto err_tx_desc;
5239 		}
5240 		txq->ift_ifdi = ifdip;
5241 		for (j = 0; j < ntxqs; j++, ifdip++) {
5242 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
5243 				device_printf(dev,
5244 				    "Unable to allocate TX descriptors\n");
5245 				err = ENOMEM;
5246 				goto err_tx_desc;
5247 			}
5248 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
5249 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
5250 		}
5251 		txq->ift_ctx = ctx;
5252 		txq->ift_id = i;
5253 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
5254 			txq->ift_br_offset = 1;
5255 		} else {
5256 			txq->ift_br_offset = 0;
5257 		}
5258 		/* XXX fix this */
5259 		txq->ift_timer.c_cpu = cpu;
5260 
5261 		if (iflib_txsd_alloc(txq)) {
5262 			device_printf(dev, "Critical Failure setting up TX buffers\n");
5263 			err = ENOMEM;
5264 			goto err_tx_desc;
5265 		}
5266 
5267 		/* Initialize the TX lock */
5268 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
5269 		    device_get_nameunit(dev), txq->ift_id);
5270 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
5271 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
5272 
5273 		snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
5274 			 device_get_nameunit(dev), txq->ift_id);
5275 
5276 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
5277 				      iflib_txq_can_drain, M_IFLIB, M_WAITOK);
5278 		if (err) {
5279 			/* XXX free any allocated rings */
5280 			device_printf(dev, "Unable to allocate buf_ring\n");
5281 			goto err_tx_desc;
5282 		}
5283 	}
5284 
5285 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
5286 		/* Set up some basics */
5287 
5288 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
5289 		   M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5290 			device_printf(dev,
5291 			    "Unable to allocate RX DMA info memory\n");
5292 			err = ENOMEM;
5293 			goto err_tx_desc;
5294 		}
5295 
5296 		rxq->ifr_ifdi = ifdip;
5297 		/* XXX this needs to be changed if #rx queues != #tx queues */
5298 		rxq->ifr_ntxqirq = 1;
5299 		rxq->ifr_txqid[0] = i;
5300 		for (j = 0; j < nrxqs; j++, ifdip++) {
5301 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
5302 				device_printf(dev,
5303 				    "Unable to allocate RX descriptors\n");
5304 				err = ENOMEM;
5305 				goto err_tx_desc;
5306 			}
5307 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
5308 		}
5309 		rxq->ifr_ctx = ctx;
5310 		rxq->ifr_id = i;
5311 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
5312 			rxq->ifr_fl_offset = 1;
5313 		} else {
5314 			rxq->ifr_fl_offset = 0;
5315 		}
5316 		rxq->ifr_nfl = nfree_lists;
5317 		if (!(fl =
5318 			  (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
5319 			device_printf(dev, "Unable to allocate free list memory\n");
5320 			err = ENOMEM;
5321 			goto err_tx_desc;
5322 		}
5323 		rxq->ifr_fl = fl;
5324 		for (j = 0; j < nfree_lists; j++) {
5325 			fl[j].ifl_rxq = rxq;
5326 			fl[j].ifl_id = j;
5327 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
5328 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
5329 		}
5330 		/* Allocate receive buffers for the ring */
5331 		if (iflib_rxsd_alloc(rxq)) {
5332 			device_printf(dev,
5333 			    "Critical Failure setting up receive buffers\n");
5334 			err = ENOMEM;
5335 			goto err_rx_desc;
5336 		}
5337 
5338 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
5339 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
5340 			    M_WAITOK);
5341 	}
5342 
5343 	/* TXQs */
5344 	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
5345 	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
5346 	for (i = 0; i < ntxqsets; i++) {
5347 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
5348 
5349 		for (j = 0; j < ntxqs; j++, di++) {
5350 			vaddrs[i*ntxqs + j] = di->idi_vaddr;
5351 			paddrs[i*ntxqs + j] = di->idi_paddr;
5352 		}
5353 	}
5354 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
5355 		device_printf(ctx->ifc_dev,
5356 		    "Unable to allocate device TX queue\n");
5357 		iflib_tx_structures_free(ctx);
5358 		free(vaddrs, M_IFLIB);
5359 		free(paddrs, M_IFLIB);
5360 		goto err_rx_desc;
5361 	}
5362 	free(vaddrs, M_IFLIB);
5363 	free(paddrs, M_IFLIB);
5364 
5365 	/* RXQs */
5366 	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
5367 	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
5368 	for (i = 0; i < nrxqsets; i++) {
5369 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
5370 
5371 		for (j = 0; j < nrxqs; j++, di++) {
5372 			vaddrs[i*nrxqs + j] = di->idi_vaddr;
5373 			paddrs[i*nrxqs + j] = di->idi_paddr;
5374 		}
5375 	}
5376 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
5377 		device_printf(ctx->ifc_dev,
5378 		    "Unable to allocate device RX queue\n");
5379 		iflib_tx_structures_free(ctx);
5380 		free(vaddrs, M_IFLIB);
5381 		free(paddrs, M_IFLIB);
5382 		goto err_rx_desc;
5383 	}
5384 	free(vaddrs, M_IFLIB);
5385 	free(paddrs, M_IFLIB);
5386 
5387 	return (0);
5388 
5389 /* XXX handle allocation failure changes */
5390 err_rx_desc:
5391 err_tx_desc:
5392 rx_fail:
5393 	if (ctx->ifc_rxqs != NULL)
5394 		free(ctx->ifc_rxqs, M_IFLIB);
5395 	ctx->ifc_rxqs = NULL;
5396 	if (ctx->ifc_txqs != NULL)
5397 		free(ctx->ifc_txqs, M_IFLIB);
5398 	ctx->ifc_txqs = NULL;
5399 fail:
5400 	return (err);
5401 }
5402 
5403 static int
5404 iflib_tx_structures_setup(if_ctx_t ctx)
5405 {
5406 	iflib_txq_t txq = ctx->ifc_txqs;
5407 	int i;
5408 
5409 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
5410 		iflib_txq_setup(txq);
5411 
5412 	return (0);
5413 }
5414 
5415 static void
5416 iflib_tx_structures_free(if_ctx_t ctx)
5417 {
5418 	iflib_txq_t txq = ctx->ifc_txqs;
5419 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5420 	int i, j;
5421 
5422 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
5423 		iflib_txq_destroy(txq);
5424 		for (j = 0; j < sctx->isc_ntxqs; j++)
5425 			iflib_dma_free(&txq->ift_ifdi[j]);
5426 	}
5427 	free(ctx->ifc_txqs, M_IFLIB);
5428 	ctx->ifc_txqs = NULL;
5429 	IFDI_QUEUES_FREE(ctx);
5430 }
5431 
5432 /*********************************************************************
5433  *
5434  *  Initialize all receive rings.
5435  *
5436  **********************************************************************/
5437 static int
5438 iflib_rx_structures_setup(if_ctx_t ctx)
5439 {
5440 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5441 	int q;
5442 #if defined(INET6) || defined(INET)
5443 	int i, err;
5444 #endif
5445 
5446 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5447 #if defined(INET6) || defined(INET)
5448 		tcp_lro_free(&rxq->ifr_lc);
5449 		if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
5450 		    TCP_LRO_ENTRIES, min(1024,
5451 		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
5452 			device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
5453 			goto fail;
5454 		}
5455 		rxq->ifr_lro_enabled = TRUE;
5456 #endif
5457 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
5458 	}
5459 	return (0);
5460 #if defined(INET6) || defined(INET)
5461 fail:
5462 	/*
5463 	 * Free RX software descriptors allocated so far; we only handle
5464 	 * the rings that completed, since the failing case will have
5465 	 * cleaned up after itself.  'q' failed, so it's the terminus.
5466 	 */
5467 	rxq = ctx->ifc_rxqs;
5468 	for (i = 0; i < q; ++i, rxq++) {
5469 		iflib_rx_sds_free(rxq);
5470 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
5471 	}
5472 	return (err);
5473 #endif
5474 }
5475 
5476 /*********************************************************************
5477  *
5478  *  Free all receive rings.
5479  *
5480  **********************************************************************/
5481 static void
5482 iflib_rx_structures_free(if_ctx_t ctx)
5483 {
5484 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5485 
5486 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
5487 		iflib_rx_sds_free(rxq);
5488 	}
5489 	free(ctx->ifc_rxqs, M_IFLIB);
5490 	ctx->ifc_rxqs = NULL;
5491 }
5492 
5493 static int
5494 iflib_qset_structures_setup(if_ctx_t ctx)
5495 {
5496 	int err;
5497 
5498 	/*
5499 	 * It is expected that the caller takes care of freeing queues if this
5500 	 * fails.
5501 	 */
5502 	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
5503 		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
5504 		return (err);
5505 	}
5506 
5507 	if ((err = iflib_rx_structures_setup(ctx)) != 0)
5508 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
5509 
5510 	return (err);
5511 }
5512 
5513 int
5514 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
5515 		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
5516 {
5517 
5518 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
5519 }
5520 
5521 #ifdef SMP
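/*
 * Return the CPU id of the qid'th set bit in the interface's CPU set,
 * wrapping around modulo the number of CPUs in the set.
 */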
5522 static int
5523 find_nth(if_ctx_t ctx, int qid)
5524 {
5525 	cpuset_t cpus;
5526 	int i, cpuid, eqid, count;
5527 
5528 	CPU_COPY(&ctx->ifc_cpus, &cpus);
5529 	count = CPU_COUNT(&cpus);
5530 	eqid = qid % count;
5531 	/* clear the first eqid set bits so CPU_FFS() yields the eqid'th CPU */
5532 	for (i = 0; i < eqid; i++) {
5533 		cpuid = CPU_FFS(&cpus);
5534 		MPASS(cpuid != 0);
5535 		CPU_CLR(cpuid-1, &cpus);
5536 	}
5537 	cpuid = CPU_FFS(&cpus);
5538 	MPASS(cpuid != 0);
5539 	return (cpuid-1);
5540 }
5541 
5542 #ifdef SCHED_ULE
5543 extern struct cpu_group *cpu_top;              /* CPU topology */
5544 
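/*
 * Return the index of the child group of grp whose mask contains cpu,
 * or -1 if grp is a leaf or no child matches.
 */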
5545 static int
5546 find_child_with_core(int cpu, struct cpu_group *grp)
5547 {
5548 	int i;
5549 
5550 	if (grp->cg_children == 0)
5551 		return -1;
5552 
5553 	MPASS(grp->cg_child);
5554 	for (i = 0; i < grp->cg_children; i++) {
5555 		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
5556 			return i;
5557 	}
5558 
5559 	return -1;
5560 }
5561 
5562 /*
5563  * Find the nth "close" core to the specified core.
5564  * "Close" is defined as the deepest level that shares
5565  * at least an L2 cache.  With threads, this will be
5566  * threads on the same core.  If the shared cache is L3
5567  * or higher, simply return the same core.
5568  */
5569 static int
5570 find_close_core(int cpu, int core_offset)
5571 {
5572 	struct cpu_group *grp;
5573 	int i;
5574 	int fcpu;
5575 	cpuset_t cs;
5576 
5577 	grp = cpu_top;
5578 	if (grp == NULL)
5579 		return cpu;
5580 	i = 0;
5581 	while ((i = find_child_with_core(cpu, grp)) != -1) {
5582 		/* If the child only has one cpu, don't descend */
5583 		if (grp->cg_child[i].cg_count <= 1)
5584 			break;
5585 		grp = &grp->cg_child[i];
5586 	}
5587 
5588 	/* If they don't share at least an L2 cache, use the same CPU */
5589 	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
5590 		return cpu;
5591 
5592 	/* Now pick one */
5593 	CPU_COPY(&grp->cg_mask, &cs);
5594 
5595 	/* Add the selected CPU offset to core offset. */
5596 	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
5597 		if (fcpu - 1 == cpu)
5598 			break;
5599 		CPU_CLR(fcpu - 1, &cs);
5600 	}
5601 	MPASS(fcpu);
5602 
5603 	core_offset += i;
5604 
5605 	CPU_COPY(&grp->cg_mask, &cs);
5606 	for (i = core_offset % grp->cg_count; i > 0; i--) {
5607 		MPASS(CPU_FFS(&cs));
5608 		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5609 	}
5610 	MPASS(CPU_FFS(&cs));
5611 	return CPU_FFS(&cs) - 1;
5612 }
5613 #else
5614 static int
5615 find_close_core(int cpu, int core_offset __unused)
5616 {
5617 	return cpu;
5618 }
5619 #endif
5620 
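/*
 * Compute the core_offset argument passed to find_close_core(): the
 * offset counts how many times qid has wrapped around the CPU set,
 * with TX queues shifted by one so that a TX queue is placed on a
 * core sharing a cache with its corresponding RX queue.
 */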
5621 static int
5622 get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5623 {
5624 	switch (type) {
5625 	case IFLIB_INTR_TX:
5626 		/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
5627 		/* XXX handle multiple RX threads per core and more than two core per L2 group */
5628 		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5629 	case IFLIB_INTR_RX:
5630 	case IFLIB_INTR_RXTX:
5631 		/* RX queues get the specified core */
5632 		return qid / CPU_COUNT(&ctx->ifc_cpus);
5633 	default:
5634 		return -1;
5635 	}
5636 }
5637 #else
5638 #define get_core_offset(ctx, type, qid)	CPU_FIRST()
5639 #define find_close_core(cpuid, tid)	CPU_FIRST()
5640 #define find_nth(ctx, gid)		CPU_FIRST()
5641 #endif
5642 
5643 /* Pick a CPU for a queue's grouptask and attach it; shared helper to avoid copy/paste */
5644 static inline int
5645 iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
5646     int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
5647     const char *name)
5648 {
5649 	device_t dev;
5650 	int err, cpuid, tid;
5651 
5652 	dev = ctx->ifc_dev;
5653 	cpuid = find_nth(ctx, qid);
5654 	tid = get_core_offset(ctx, type, qid);
5655 	MPASS(tid >= 0);
5656 	cpuid = find_close_core(cpuid, tid);
5657 	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev, irq->ii_res,
5658 	    name);
5659 	if (err) {
5660 		device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
5661 		return (err);
5662 	}
5663 #ifdef notyet
5664 	if (cpuid > ctx->ifc_cpuid_highest)
5665 		ctx->ifc_cpuid_highest = cpuid;
5666 #endif
5667 	return 0;
5668 }
5669 
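/*
 * Allocate a per-queue or admin interrupt, install its fast interrupt
 * handler, and attach the deferred-work grouptask.  A driver's
 * ifdi_msix_intr_assign method might use this roughly as follows
 * (an illustrative sketch only; the softc layout and handler name
 * are hypothetical):
 *
 *	for (i = 0; i < scctx->isc_nrxqsets; i++) {
 *		rid = i + 1;
 *		err = iflib_irq_alloc_generic(ctx, &sc->queues[i].irq,
 *		    rid, IFLIB_INTR_RXTX, my_msix_que_filter,
 *		    &sc->queues[i], i, "que");
 *		if (err != 0)
 *			return (err);
 *	}
 */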
5670 int
5671 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
5672 			iflib_intr_type_t type, driver_filter_t *filter,
5673 			void *filter_arg, int qid, const char *name)
5674 {
5675 	device_t dev;
5676 	struct grouptask *gtask;
5677 	struct taskqgroup *tqg;
5678 	iflib_filter_info_t info;
5679 	gtask_fn_t *fn;
5680 	int tqrid, err;
5681 	driver_filter_t *intr_fast;
5682 	void *q;
5683 
5684 	info = &ctx->ifc_filter_info;
5685 	tqrid = rid;
5686 
5687 	switch (type) {
5688 	/* XXX merge tx/rx for netmap? */
5689 	case IFLIB_INTR_TX:
5690 		q = &ctx->ifc_txqs[qid];
5691 		info = &ctx->ifc_txqs[qid].ift_filter_info;
5692 		gtask = &ctx->ifc_txqs[qid].ift_task;
5693 		tqg = qgroup_if_io_tqg;
5694 		fn = _task_fn_tx;
5695 		intr_fast = iflib_fast_intr;
5696 		GROUPTASK_INIT(gtask, 0, fn, q);
5697 		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
5698 		break;
5699 	case IFLIB_INTR_RX:
5700 		q = &ctx->ifc_rxqs[qid];
5701 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
5702 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5703 		tqg = qgroup_if_io_tqg;
5704 		fn = _task_fn_rx;
5705 		intr_fast = iflib_fast_intr;
5706 		GROUPTASK_INIT(gtask, 0, fn, q);
5707 		break;
5708 	case IFLIB_INTR_RXTX:
5709 		q = &ctx->ifc_rxqs[qid];
5710 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
5711 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5712 		tqg = qgroup_if_io_tqg;
5713 		fn = _task_fn_rx;
5714 		intr_fast = iflib_fast_intr_rxtx;
5715 		GROUPTASK_INIT(gtask, 0, fn, q);
5716 		break;
5717 	case IFLIB_INTR_ADMIN:
5718 		q = ctx;
5719 		tqrid = -1;
5720 		info = &ctx->ifc_filter_info;
5721 		gtask = &ctx->ifc_admin_task;
5722 		tqg = qgroup_if_config_tqg;
5723 		fn = _task_fn_admin;
5724 		intr_fast = iflib_fast_intr_ctx;
5725 		break;
5726 	default:
5727 		panic("unknown net intr type");
5728 	}
5729 
5730 	info->ifi_filter = filter;
5731 	info->ifi_filter_arg = filter_arg;
5732 	info->ifi_task = gtask;
5733 	info->ifi_ctx = q;
5734 
5735 	dev = ctx->ifc_dev;
5736 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info,  name);
5737 	if (err != 0) {
5738 		device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
5739 		return (err);
5740 	}
5741 	if (type == IFLIB_INTR_ADMIN)
5742 		return (0);
5743 
5744 	if (tqrid != -1) {
5745 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
5746 		    q, name);
5747 		if (err)
5748 			return (err);
5749 	} else {
5750 		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
5751 	}
5752 
5753 	return (0);
5754 }
5755 
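/*
 * Like iflib_irq_alloc_generic(), but for tasks driven by another
 * queue's hardware interrupt (or by polling): no interrupt resource
 * is allocated; only the grouptask is initialized and attached.
 */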
5756 void
5757 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
5758 {
5759 	struct grouptask *gtask;
5760 	struct taskqgroup *tqg;
5761 	gtask_fn_t *fn;
5762 	void *q;
5763 	int err;
5764 
5765 	switch (type) {
5766 	case IFLIB_INTR_TX:
5767 		q = &ctx->ifc_txqs[qid];
5768 		gtask = &ctx->ifc_txqs[qid].ift_task;
5769 		tqg = qgroup_if_io_tqg;
5770 		fn = _task_fn_tx;
5771 		break;
5772 	case IFLIB_INTR_RX:
5773 		q = &ctx->ifc_rxqs[qid];
5774 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5775 		tqg = qgroup_if_io_tqg;
5776 		fn = _task_fn_rx;
5777 		break;
5778 	case IFLIB_INTR_IOV:
5779 		q = ctx;
5780 		gtask = &ctx->ifc_vflr_task;
5781 		tqg = qgroup_if_config_tqg;
5782 		fn = _task_fn_iov;
5783 		break;
5784 	default:
5785 		panic("unknown net intr type");
5786 	}
5787 	GROUPTASK_INIT(gtask, 0, fn, q);
5788 	if (irq != NULL) {
5789 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
5790 		    q, name);
5791 		if (err)
5792 			taskqgroup_attach(tqg, gtask, q, ctx->ifc_dev,
5793 			    irq->ii_res, name);
5794 	} else {
5795 		taskqgroup_attach(tqg, gtask, q, NULL, NULL, name);
5796 	}
5797 }
5798 
5799 void
5800 iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
5801 {
5802 
5803 	if (irq->ii_tag)
5804 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
5805 
5806 	if (irq->ii_res)
5807 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
5808 		    rman_get_rid(irq->ii_res), irq->ii_res);
5809 }
5810 
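/*
 * Set up a single legacy (INTx) interrupt shared by all queues: RX
 * queue 0's grouptask runs from the interrupt and the TX grouptask is
 * attached to the same interrupt resource.
 */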
5811 static int
5812 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
5813 {
5814 	iflib_txq_t txq = ctx->ifc_txqs;
5815 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5816 	if_irq_t irq = &ctx->ifc_legacy_irq;
5817 	iflib_filter_info_t info;
5818 	device_t dev;
5819 	struct grouptask *gtask;
5820 	struct resource *res;
5821 	struct taskqgroup *tqg;
5822 	gtask_fn_t *fn;
5823 	int tqrid;
5824 	void *q;
5825 	int err;
5826 
5827 	q = &ctx->ifc_rxqs[0];
5828 	info = &rxq[0].ifr_filter_info;
5829 	gtask = &rxq[0].ifr_task;
5830 	tqg = qgroup_if_io_tqg;
5831 	tqrid = irq->ii_rid = *rid;
5832 	fn = _task_fn_rx;
5833 
5834 	ctx->ifc_flags |= IFC_LEGACY;
5835 	info->ifi_filter = filter;
5836 	info->ifi_filter_arg = filter_arg;
5837 	info->ifi_task = gtask;
5838 	info->ifi_ctx = ctx;
5839 
5840 	dev = ctx->ifc_dev;
5841 	/* We allocate a single interrupt resource */
5842 	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
5843 		return (err);
5844 	GROUPTASK_INIT(gtask, 0, fn, q);
5845 	res = irq->ii_res;
5846 	taskqgroup_attach(tqg, gtask, q, dev, res, name);
5847 
5848 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
5849 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
5850 	    "tx");
5851 	return (0);
5852 }
5853 
5854 void
5855 iflib_led_create(if_ctx_t ctx)
5856 {
5857 
5858 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
5859 	    device_get_nameunit(ctx->ifc_dev));
5860 }
5861 
5862 void
5863 iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
5864 {
5865 
5866 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
5867 }
5868 
5869 void
5870 iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
5871 {
5872 
5873 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
5874 }
5875 
5876 void
5877 iflib_admin_intr_deferred(if_ctx_t ctx)
5878 {
5879 #ifdef INVARIANTS
5880 	struct grouptask *gtask;
5881 
5882 	gtask = &ctx->ifc_admin_task;
5883 	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
5884 #endif
5885 
5886 	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
5887 }
5888 
5889 void
5890 iflib_iov_intr_deferred(if_ctx_t ctx)
5891 {
5892 
5893 	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
5894 }
5895 
5896 void
5897 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
5898 {
5899 
5900 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
5901 	    name);
5902 }
5903 
5904 void
5905 iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
5906 	const char *name)
5907 {
5908 
5909 	GROUPTASK_INIT(gtask, 0, fn, ctx);
5910 	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL,
5911 	    name);
5912 }
5913 
5914 void
5915 iflib_config_gtask_deinit(struct grouptask *gtask)
5916 {
5917 
5918 	taskqgroup_detach(qgroup_if_config_tqg, gtask);
5919 }
5920 
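/*
 * Called by drivers (typically from their ifdi_update_admin_status
 * method) to report a link state or baudrate change to the stack.
 */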
5921 void
5922 iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
5923 {
5924 	if_t ifp = ctx->ifc_ifp;
5925 	iflib_txq_t txq = ctx->ifc_txqs;
5926 
5927 	if_setbaudrate(ifp, baudrate);
5928 	if (baudrate >= IF_Gbps(10)) {
5929 		STATE_LOCK(ctx);
5930 		ctx->ifc_flags |= IFC_PREFETCH;
5931 		STATE_UNLOCK(ctx);
5932 	}
5933 	/* If link down, disable watchdog */
5934 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
5935 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
5936 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
5937 	}
5938 	ctx->ifc_link_state = link_state;
5939 	if_link_state_change(ifp, link_state);
5940 }
5941 
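/*
 * Ask the driver how many TX descriptors the hardware has completed,
 * then advance ift_processed and ift_cidx_processed accordingly,
 * wrapping the latter at the ring size.  Returns the credit count.
 */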
5942 static int
5943 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
5944 {
5945 	int credits;
5946 #ifdef INVARIANTS
5947 	int credits_pre = txq->ift_cidx_processed;
5948 #endif
5949 
5950 	if (ctx->isc_txd_credits_update == NULL)
5951 		return (0);
5952 
5953 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
5954 	    BUS_DMASYNC_POSTREAD);
5955 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
5956 		return (0);
5957 
5958 	txq->ift_processed += credits;
5959 	txq->ift_cidx_processed += credits;
5960 
5961 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
5962 	if (txq->ift_cidx_processed >= txq->ift_size)
5963 		txq->ift_cidx_processed -= txq->ift_size;
5964 	return (credits);
5965 }
5966 
5967 static int
5968 iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
5969 {
5970 	iflib_fl_t fl;
5971 	u_int i;
5972 
5973 	for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
5974 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
5975 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5976 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
5977 	    budget));
5978 }
5979 
5980 void
5981 iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
5982 	const char *description, if_int_delay_info_t info,
5983 	int offset, int value)
5984 {
5985 	info->iidi_ctx = ctx;
5986 	info->iidi_offset = offset;
5987 	info->iidi_value = value;
5988 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
5989 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
5990 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5991 	    info, 0, iflib_sysctl_int_delay, "I", description);
5992 }
5993 
5994 struct sx *
5995 iflib_ctx_lock_get(if_ctx_t ctx)
5996 {
5997 
5998 	return (&ctx->ifc_ctx_sx);
5999 }
6000 
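/*
 * Select the interrupt mode for the device: first try MSI-X, sizing
 * the request at one vector per RX queue plus the admin vectors and
 * clamping the queue counts to the usable CPUs (and RSS buckets, if
 * enabled) as well as any sysctl overrides; on failure fall back to
 * MSI and finally to a legacy INTx interrupt with a single queue pair.
 * Returns the number of vectors obtained.
 */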
6001 static int
6002 iflib_msix_init(if_ctx_t ctx)
6003 {
6004 	device_t dev = ctx->ifc_dev;
6005 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6006 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6007 	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
6008 	int iflib_num_tx_queues, iflib_num_rx_queues;
6009 	int err, admincnt, bar;
6010 
6011 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
6012 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
6013 
6014 	if (bootverbose)
6015 		device_printf(dev, "msix_init qsets capped at %d\n",
6016 		    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
6017 
6018 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
6019 	admincnt = sctx->isc_admin_intrcnt;
6020 	/* Override by tunable */
6021 	if (scctx->isc_disable_msix)
6022 		goto msi;
6023 
6024 	/* First try MSI-X */
6025 	if ((msgs = pci_msix_count(dev)) == 0) {
6026 		if (bootverbose)
6027 			device_printf(dev, "MSI-X not supported or disabled\n");
6028 		goto msi;
6029 	}
6030 	/*
6031 	 * bar == -1 => "trust me I know what I'm doing"
6032 	 * Some drivers are for hardware that is so shoddily
6033 	 * documented that no one knows which bars are which
6034 	 * so the developer has to map all bars. This hack
6035 	 * allows shoddy garbage to use MSI-X in this framework.
6036 	 */
6037 	if (bar != -1) {
6038 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
6039 	            SYS_RES_MEMORY, &bar, RF_ACTIVE);
6040 		if (ctx->ifc_msix_mem == NULL) {
6041 			device_printf(dev, "Unable to map MSI-X table\n");
6042 			goto msi;
6043 		}
6044 	}
6045 #if IFLIB_DEBUG
6046 	/* use only 1 qset in debug mode */
6047 	queuemsgs = min(msgs - admincnt, 1);
6048 #else
6049 	queuemsgs = msgs - admincnt;
6050 #endif
6051 #ifdef RSS
6052 	queues = imin(queuemsgs, rss_getnumbuckets());
6053 #else
6054 	queues = queuemsgs;
6055 #endif
6056 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6057 	if (bootverbose)
6058 		device_printf(dev,
6059 		    "intr CPUs: %d queue msgs: %d admincnt: %d\n",
6060 		    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
6061 #ifdef  RSS
6062 	/* If we're doing RSS, clamp at the number of RSS buckets */
6063 	if (queues > rss_getnumbuckets())
6064 		queues = rss_getnumbuckets();
6065 #endif
6066 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
6067 		rx_queues = iflib_num_rx_queues;
6068 	else
6069 		rx_queues = queues;
6070 
6071 	if (rx_queues > scctx->isc_nrxqsets)
6072 		rx_queues = scctx->isc_nrxqsets;
6073 
6074 	/*
6075 	 * We want this to be all logical CPUs by default.
6076 	 */
6077 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
6078 		tx_queues = iflib_num_tx_queues;
6079 	else
6080 		tx_queues = mp_ncpus;
6081 
6082 	if (tx_queues > scctx->isc_ntxqsets)
6083 		tx_queues = scctx->isc_ntxqsets;
6084 
6085 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
6086 #ifdef INVARIANTS
6087 		if (tx_queues != rx_queues)
6088 			device_printf(dev,
6089 			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
6090 			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
6091 #endif
6092 		tx_queues = min(rx_queues, tx_queues);
6093 		rx_queues = min(rx_queues, tx_queues);
6094 	}
6095 
6096 	device_printf(dev, "Using %d rx queues %d tx queues\n",
6097 	    rx_queues, tx_queues);
6098 
6099 	vectors = rx_queues + admincnt;
6100 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
6101 		device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
6102 		    vectors);
6103 		scctx->isc_vectors = vectors;
6104 		scctx->isc_nrxqsets = rx_queues;
6105 		scctx->isc_ntxqsets = tx_queues;
6106 		scctx->isc_intr = IFLIB_INTR_MSIX;
6107 
6108 		return (vectors);
6109 	} else {
6110 		device_printf(dev,
6111 		    "failed to allocate %d MSI-X vectors, err: %d - using MSI\n",
6112 		    vectors, err);
6113 		bus_release_resource(dev, SYS_RES_MEMORY, bar,
6114 		    ctx->ifc_msix_mem);
6115 		ctx->ifc_msix_mem = NULL;
6116 	}
6117 msi:
6118 	vectors = pci_msi_count(dev);
6119 	scctx->isc_nrxqsets = 1;
6120 	scctx->isc_ntxqsets = 1;
6121 	scctx->isc_vectors = vectors;
6122 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
6123 		device_printf(dev, "Using an MSI interrupt\n");
6124 		scctx->isc_intr = IFLIB_INTR_MSI;
6125 	} else {
6126 		scctx->isc_vectors = 1;
6127 		device_printf(dev, "Using a Legacy interrupt\n");
6128 		scctx->isc_intr = IFLIB_INTR_LEGACY;
6129 	}
6130 
6131 	return (vectors);
6132 }
6133 
6134 static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
6135 
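/*
 * Sysctl handler that renders an mp_ring's packed state words
 * (pidx_head, pidx_tail, cidx, consumer state) as a readable string.
 */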
6136 static int
6137 mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
6138 {
6139 	int rc;
6140 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
6141 	struct sbuf *sb;
6142 	const char *ring_state = "UNKNOWN";
6143 
6144 	/* XXX needed? */
6145 	rc = sysctl_wire_old_buffer(req, 0);
6146 	MPASS(rc == 0);
6147 	if (rc != 0)
6148 		return (rc);
6149 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
6150 	MPASS(sb != NULL);
6151 	if (sb == NULL)
6152 		return (ENOMEM);
6153 	if (state[3] <= 3)
6154 		ring_state = ring_states[state[3]];
6155 
6156 	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
6157 		    state[0], state[1], state[2], ring_state);
6158 	rc = sbuf_finish(sb);
6159 	sbuf_delete(sb);
6160 	return (rc);
6161 }
6162 
6163 enum iflib_ndesc_handler {
6164 	IFLIB_NTXD_HANDLER,
6165 	IFLIB_NRXD_HANDLER,
6166 };
6167 
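/*
 * Sysctl handler backing the override_ntxds/override_nrxds tunables:
 * the per-queue descriptor counts are exported and parsed as a
 * comma-separated list, e.g. (device name purely illustrative):
 *
 *	dev.foo.0.iflib.override_ntxds=1024,1024
 */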
6168 static int
6169 mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
6170 {
6171 	if_ctx_t ctx = (void *)arg1;
6172 	enum iflib_ndesc_handler type = arg2;
6173 	char buf[256] = {0};
6174 	qidx_t *ndesc;
6175 	char *p, *next;
6176 	int nqs, rc, i;
6177 
6178 	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
6179 
6180 	nqs = 8;
6181 	switch (type) {
6182 	case IFLIB_NTXD_HANDLER:
6183 		ndesc = ctx->ifc_sysctl_ntxds;
6184 		if (ctx->ifc_sctx)
6185 			nqs = ctx->ifc_sctx->isc_ntxqs;
6186 		break;
6187 	case IFLIB_NRXD_HANDLER:
6188 		ndesc = ctx->ifc_sysctl_nrxds;
6189 		if (ctx->ifc_sctx)
6190 			nqs = ctx->ifc_sctx->isc_nrxqs;
6191 		break;
6192 	default:
6193 		panic("unhandled type");
6194 	}
6195 	if (nqs == 0)
6196 		nqs = 8;
6197 
6198 	for (i = 0; i < 8; i++) {
6199 		if (i >= nqs)
6200 			break;
6201 		if (i)
6202 			strcat(buf, ",");
6203 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
6204 	}
6205 
6206 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
6207 	if (rc || req->newptr == NULL)
6208 		return (rc);
6209 
6210 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
6211 	    i++, p = strsep(&next, " ,")) {
6212 		ndesc[i] = strtoul(p, NULL, 10);
6213 	}
6214 
6215 	return (rc);
6216 }
6217 
6218 #define NAME_BUFLEN 32
6219 static void
6220 iflib_add_device_sysctl_pre(if_ctx_t ctx)
6221 {
6222 	device_t dev = iflib_get_dev(ctx);
6223 	struct sysctl_oid_list *child, *oid_list;
6224 	struct sysctl_ctx_list *ctx_list;
6225 	struct sysctl_oid *node;
6226 
6227 	ctx_list = device_get_sysctl_ctx(dev);
6228 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
6229 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
6230 						      CTLFLAG_RD, NULL, "IFLIB fields");
6231 	oid_list = SYSCTL_CHILDREN(node);
6232 
6233 	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
6234 		       CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
6235 		       "driver version");
6236 
6237 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
6238 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
6239 			"# of txqs to use, 0 => use default #");
6240 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
6241 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
6242 			"# of rxqs to use, 0 => use default #");
6243 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
6244 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
6245                        "permit #txq != #rxq");
6246 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6247                       CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6248                       "disable MSI-X (default 0)");
6249 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6250 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
6251                        "set the rx budget");
6252 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6253 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
6254 		       "cause tx to abdicate instead of running to completion");
6255 
6256 	/* XXX change for per-queue sizes */
6257 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
6258 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
6259                        mp_ndesc_handler, "A",
6260                        "list of # of tx descriptors to use, 0 = use default #");
6261 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
6262 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
6263                        mp_ndesc_handler, "A",
6264                        "list of # of rx descriptors to use, 0 = use default #");
6265 }
6266 
6267 static void
6268 iflib_add_device_sysctl_post(if_ctx_t ctx)
6269 {
6270 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6271 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6272 	device_t dev = iflib_get_dev(ctx);
6273 	struct sysctl_oid_list *child;
6274 	struct sysctl_ctx_list *ctx_list;
6275 	iflib_fl_t fl;
6276 	iflib_txq_t txq;
6277 	iflib_rxq_t rxq;
6278 	int i, j;
6279 	char namebuf[NAME_BUFLEN];
6280 	char *qfmt;
6281 	struct sysctl_oid *queue_node, *fl_node, *node;
6282 	struct sysctl_oid_list *queue_list, *fl_list;
6283 	ctx_list = device_get_sysctl_ctx(dev);
6284 
6285 	node = ctx->ifc_sysctl_node;
6286 	child = SYSCTL_CHILDREN(node);
6287 
6288 	if (scctx->isc_ntxqsets > 100)
6289 		qfmt = "txq%03d";
6290 	else if (scctx->isc_ntxqsets > 10)
6291 		qfmt = "txq%02d";
6292 	else
6293 		qfmt = "txq%d";
6294 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
6295 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6296 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6297 					     CTLFLAG_RD, NULL, "Queue Name");
6298 		queue_list = SYSCTL_CHILDREN(queue_node);
6299 #if MEMORY_LOGGING
6300 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
6301 				CTLFLAG_RD,
6302 				&txq->ift_dequeued, "total mbufs freed");
6303 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
6304 				CTLFLAG_RD,
6305 				&txq->ift_enqueued, "total mbufs enqueued");
6306 #endif
6307 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
6308 				   CTLFLAG_RD,
6309 				   &txq->ift_mbuf_defrag, "# of times m_defrag was called");
6310 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
6311 				   CTLFLAG_RD,
6312 				   &txq->ift_pullups, "# of times m_pullup was called");
6313 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
6314 				   CTLFLAG_RD,
6315 				   &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
6316 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
6317 				   CTLFLAG_RD,
6318 				   &txq->ift_no_desc_avail, "# of times no descriptors were available");
6319 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
6320 				   CTLFLAG_RD,
6321 				   &txq->ift_map_failed, "# of times dma map failed");
6322 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
6323 				   CTLFLAG_RD,
6324 				   &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
6325 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
6326 				   CTLFLAG_RD,
6327 				   &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
6328 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
6329 				   CTLFLAG_RD,
6330 				   &txq->ift_pidx, 1, "Producer Index");
6331 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
6332 				   CTLFLAG_RD,
6333 				   &txq->ift_cidx, 1, "Consumer Index");
6334 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
6335 				   CTLFLAG_RD,
6336 				   &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
6337 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
6338 				   CTLFLAG_RD,
6339 				   &txq->ift_in_use, 1, "descriptors in use");
6340 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
6341 				   CTLFLAG_RD,
6342 				   &txq->ift_processed, "descriptors processed for clean");
6343 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
6344 				   CTLFLAG_RD,
6345 				   &txq->ift_cleaned, "total cleaned");
6346 		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
6347 				CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
6348 				0, mp_ring_state_handler, "A", "soft ring state");
6349 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
6350 				       CTLFLAG_RD, &txq->ift_br->enqueues,
6351 				       "# of enqueues to the mp_ring for this queue");
6352 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
6353 				       CTLFLAG_RD, &txq->ift_br->drops,
6354 				       "# of drops in the mp_ring for this queue");
6355 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
6356 				       CTLFLAG_RD, &txq->ift_br->starts,
6357 				       "# of normal consumer starts in the mp_ring for this queue");
6358 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
6359 				       CTLFLAG_RD, &txq->ift_br->stalls,
6360 					       "# of consumer stalls in the mp_ring for this queue");
6361 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
6362 			       CTLFLAG_RD, &txq->ift_br->restarts,
6363 				       "# of consumer restarts in the mp_ring for this queue");
6364 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
6365 				       CTLFLAG_RD, &txq->ift_br->abdications,
6366 				       "# of consumer abdications in the mp_ring for this queue");
6367 	}
6368 
6369 	if (scctx->isc_nrxqsets > 100)
6370 		qfmt = "rxq%03d";
6371 	else if (scctx->isc_nrxqsets > 10)
6372 		qfmt = "rxq%02d";
6373 	else
6374 		qfmt = "rxq%d";
6375 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
6376 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6377 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6378 					     CTLFLAG_RD, NULL, "Queue Name");
6379 		queue_list = SYSCTL_CHILDREN(queue_node);
6380 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
6381 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
6382 				       CTLFLAG_RD,
6383 				       &rxq->ifr_cq_pidx, 1, "Producer Index");
6384 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
6385 				       CTLFLAG_RD,
6386 				       &rxq->ifr_cq_cidx, 1, "Consumer Index");
6387 		}
6388 
6389 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
6390 			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
6391 			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
6392 						     CTLFLAG_RD, NULL, "freelist Name");
6393 			fl_list = SYSCTL_CHILDREN(fl_node);
6394 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
6395 				       CTLFLAG_RD,
6396 				       &fl->ifl_pidx, 1, "Producer Index");
6397 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
6398 				       CTLFLAG_RD,
6399 				       &fl->ifl_cidx, 1, "Consumer Index");
6400 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
6401 				       CTLFLAG_RD,
6402 				       &fl->ifl_credits, 1, "credits available");
6403 #if MEMORY_LOGGING
6404 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
6405 					CTLFLAG_RD,
6406 					&fl->ifl_m_enqueued, "mbufs allocated");
6407 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
6408 					CTLFLAG_RD,
6409 					&fl->ifl_m_dequeued, "mbufs freed");
6410 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
6411 					CTLFLAG_RD,
6412 					&fl->ifl_cl_enqueued, "clusters allocated");
6413 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
6414 					CTLFLAG_RD,
6415 					&fl->ifl_cl_dequeued, "clusters freed");
6416 #endif
6417 
6418 		}
6419 	}
6421 }
6422 
6423 void
6424 iflib_request_reset(if_ctx_t ctx)
6425 {
6426 
6427 	STATE_LOCK(ctx);
6428 	ctx->ifc_flags |= IFC_DO_RESET;
6429 	STATE_UNLOCK(ctx);
6430 }
6431 
6432 #ifndef __NO_STRICT_ALIGNMENT
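/*
 * On strict-alignment architectures the 14-byte Ethernet header leaves
 * the IP header misaligned, so shift the received frame forward by
 * ETHER_HDR_LEN (splitting the Ethernet header into its own mbuf when
 * the data won't fit) to realign the payload.
 */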
6433 static struct mbuf *
6434 iflib_fixup_rx(struct mbuf *m)
6435 {
6436 	struct mbuf *n;
6437 
6438 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
6439 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
6440 		m->m_data += ETHER_HDR_LEN;
6441 		n = m;
6442 	} else {
6443 		MGETHDR(n, M_NOWAIT, MT_DATA);
6444 		if (n == NULL) {
6445 			m_freem(m);
6446 			return (NULL);
6447 		}
6448 		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
6449 		m->m_data += ETHER_HDR_LEN;
6450 		m->m_len -= ETHER_HDR_LEN;
6451 		n->m_len = ETHER_HDR_LEN;
6452 		M_MOVE_PKTHDR(n, m);
6453 		n->m_next = m;
6454 	}
6455 	return (n);
6456 }
6457 #endif
6458 
6459 #ifdef NETDUMP
6460 static void
6461 iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
6462 {
6463 	if_ctx_t ctx;
6464 
6465 	ctx = if_getsoftc(ifp);
6466 	CTX_LOCK(ctx);
6467 	*nrxr = NRXQSETS(ctx);
6468 	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
6469 	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
6470 	CTX_UNLOCK(ctx);
6471 }
6472 
6473 static void
6474 iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
6475 {
6476 	if_ctx_t ctx;
6477 	if_softc_ctx_t scctx;
6478 	iflib_fl_t fl;
6479 	iflib_rxq_t rxq;
6480 	int i, j;
6481 
6482 	ctx = if_getsoftc(ifp);
6483 	scctx = &ctx->ifc_softc_ctx;
6484 
6485 	switch (event) {
6486 	case NETDUMP_START:
6487 		for (i = 0; i < scctx->isc_nrxqsets; i++) {
6488 			rxq = &ctx->ifc_rxqs[i];
6489 			for (j = 0; j < rxq->ifr_nfl; j++) {
6490 				fl = rxq->ifr_fl;
6491 				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
6492 			}
6493 		}
6494 		iflib_no_tx_batch = 1;
6495 		break;
6496 	default:
6497 		break;
6498 	}
6499 }
6500 
6501 static int
6502 iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
6503 {
6504 	if_ctx_t ctx;
6505 	iflib_txq_t txq;
6506 	int error;
6507 
6508 	ctx = if_getsoftc(ifp);
6509 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6510 	    IFF_DRV_RUNNING)
6511 		return (EBUSY);
6512 
6513 	txq = &ctx->ifc_txqs[0];
6514 	error = iflib_encap(txq, &m);
6515 	if (error == 0)
6516 		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
6517 	return (error);
6518 }
6519 
6520 static int
6521 iflib_netdump_poll(struct ifnet *ifp, int count)
6522 {
6523 	if_ctx_t ctx;
6524 	if_softc_ctx_t scctx;
6525 	iflib_txq_t txq;
6526 	int i;
6527 
6528 	ctx = if_getsoftc(ifp);
6529 	scctx = &ctx->ifc_softc_ctx;
6530 
6531 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6532 	    IFF_DRV_RUNNING)
6533 		return (EBUSY);
6534 
6535 	txq = &ctx->ifc_txqs[0];
6536 	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
6537 
6538 	for (i = 0; i < scctx->isc_nrxqsets; i++)
6539 		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
6540 	return (0);
6541 }
6542 #endif /* NETDUMP */
6543