/*-
 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/debugnet.h>
#include <net/pfil.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>

#include "ifdi_if.h"

#ifdef PCI_IOV
#include <dev/pci/pci_iov.h>
#endif

#include <sys/bitstring.h>
/*
 * Enable accounting of every mbuf as it comes into and goes out of
 * iflib's software descriptor references.
 */
#define MEMORY_LOGGING 0
/*
 * Enable mbuf vectors for compressing long mbuf chains.
 */

/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
 *   we prefetch needs to be determined by the time spent in m_free vis a vis
 *   the cost of a prefetch. This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis a vis the cost of a memory
 *        access.
 */

/*
 * File organization:
 *  - private structures
 *  - iflib private utility functions
 *  - ifnet functions
 *  - vlan registry and other exported functions
 *  - iflib public core functions
 */
static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

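/*
 * Flags returned by the rxeof path: IFLIB_RXEOF_MORE roughly means the RX
 * budget ran out with packets still pending, while IFLIB_RXEOF_EMPTY means
 * a free list could not be fully refilled (e.g. out of mbuf clusters), so
 * a watchdog retry gets scheduled.
 */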
#define	IFLIB_RXEOF_MORE	(1U << 0)
#define	IFLIB_RXEOF_EMPTY	(2U << 0)

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);
static void iflib_tqg_detach(if_ctx_t ctx);
static int  iflib_simple_transmit(if_t ifp, struct mbuf *m);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct sx ifc_ctx_sx;
	struct mtx ifc_state_mtx;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	uint32_t ifc_rx_mbuf_sz;

	int ifc_link_state;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct task ifc_admin_task;
	struct task ifc_vflr_task;
	struct taskqueue *ifc_tq;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia	ifc_media;
	struct ifmedia	*ifc_mediap;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;
	uint16_t ifc_sysctl_tx_abdicate;
	uint16_t ifc_sysctl_core_offset;
#define	CORE_OFFSET_UNSPECIFIED	0xffff
	uint8_t  ifc_sysctl_separate_txrx;
	uint8_t  ifc_sysctl_use_logical_cores;
	uint16_t ifc_sysctl_extra_msix_vectors;
	bool     ifc_cpus_are_physical_cores;
	bool     ifc_sysctl_simple_tx;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap		ifc_txrx.ift_txd_encap
#define isc_txd_flush		ifc_txrx.ift_txd_flush
#define isc_txd_credits_update	ifc_txrx.ift_txd_credits_update
#define isc_rxd_available	ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get		ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill		ifc_txrx.ift_rxd_refill
#define isc_rxd_flush		ifc_txrx.ift_rxd_flush
#define isc_legacy_intr		ifc_txrx.ift_legacy_intr
#define isc_txq_select		ifc_txrx.ift_txq_select
#define isc_txq_select_v2	ifc_txrx.ift_txq_select_v2

	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	struct ether_addr ifc_mac;
};

void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (ctx->ifc_mediap);
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}

uint16_t
iflib_get_extra_msix_vectors_sysctl(if_ctx_t ctx)
{

	return (ctx->ifc_sysctl_extra_msix_vectors);
}

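/*
 * IP_ALIGNED() checks that m_data sits at offset 2 modulo 4: after the
 * 14-byte Ethernet header, that leaves the IP header on a 4-byte boundary
 * (e.g. an m_data address ending in 0x2 or 0x6 passes, 0x0 does not).
 */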
#define IP_ALIGNED(m)		((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT	(CACHE_LINE_SIZE / sizeof(void *))
#define CACHE_PTR_NEXT(ptr)	((void *)(roundup2(ptr, CACHE_LINE_SIZE)))

#define LINK_ACTIVE(ctx)	((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx)		((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	caddr_t		*ifsd_cl;	/* direct cluster pointer for rx */
	bus_addr_t	*ifsd_ba;	/* bus addr of cluster for rx */
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	bus_dmamap_t	*ifsd_tso_map;	/* bus_dma maps for TSO packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
} if_txsd_vec_t;

/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS		128
#define IFLIB_RX_COPY_THRESH		128
#define IFLIB_MAX_RX_REFRESH		32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC		16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE		0
#define IFLIB_QUEUE_HUNG		1
#define IFLIB_QUEUE_WORKING		2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE			32

#define IFLIB_RESTART_BUDGET		8

#define	IFC_LEGACY		0x001
#define	IFC_QFLUSH		0x002
#define	IFC_MULTISEG		0x004
#define	IFC_SPARE1		0x008
#define	IFC_SC_ALLOCATED	0x010
#define	IFC_INIT_DONE		0x020
#define	IFC_PREFETCH		0x040
#define	IFC_DO_RESET		0x080
#define	IFC_DO_WATCHDOG		0x100
#define	IFC_SPARE0		0x200
#define	IFC_SPARE2		0x400
#define	IFC_IN_DETACH		0x800

#define	IFC_NETMAP_TX_IRQ	0x80000000

#define CSUM_OFFLOAD		(CSUM_IP_TSO | CSUM_IP6_TSO | CSUM_IP | \
				 CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_SCTP | \
				 CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_SCTP)

struct iflib_txq {
	qidx_t		ift_in_use;
	qidx_t		ift_cidx;
	qidx_t		ift_cidx_processed;
	qidx_t		ift_pidx;
	uint8_t		ift_gen;
	uint8_t		ift_br_offset;
	uint16_t	ift_npending;
	uint16_t	ift_db_pending;
	uint16_t	ift_rs_pending;
	/* implicit pad */
	uint8_t		ift_txd_size[8];
	uint64_t	ift_processed;
	uint64_t	ift_cleaned;
	uint64_t	ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t	ift_enqueued;
	uint64_t	ift_dequeued;
#endif
	uint64_t	ift_no_tx_dma_setup;
	uint64_t	ift_no_desc_avail;
	uint64_t	ift_mbuf_defrag_failed;
	uint64_t	ift_mbuf_defrag;
	uint64_t	ift_map_failed;
	uint64_t	ift_txd_encap_efbig;
	uint64_t	ift_pullups;
	uint64_t	ift_last_timer_tick;

	struct mtx	ift_mtx;
	struct mtx	ift_db_mtx;

	/* constant values */
	if_ctx_t	ift_ctx;
	struct ifmp_ring	*ift_br;
	struct grouptask	ift_task;
	qidx_t		ift_size;
	uint16_t	ift_id;
	struct callout	ift_timer;
#ifdef DEV_NETMAP
	struct callout	ift_netmap_timer;
#endif /* DEV_NETMAP */

	if_txsd_vec_t	ift_sds;
	uint8_t		ift_qstatus;
	uint8_t		ift_closed;
	uint8_t		ift_update_freq;
	struct iflib_filter_info ift_filter_info;
	bus_dma_tag_t	ift_buf_tag;
	bus_dma_tag_t	ift_tso_buf_tag;
	iflib_dma_info_t	ift_ifdi;
#define	MTX_NAME_LEN	32
	char		ift_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t		ifl_cidx;
	qidx_t		ifl_pidx;
	qidx_t		ifl_credits;
	uint8_t		ifl_gen;
	uint8_t		ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t	ifl_m_enqueued;
	uint64_t	ifl_m_dequeued;
	uint64_t	ifl_cl_enqueued;
	uint64_t	ifl_cl_dequeued;
#endif
	/* implicit pad */
	bitstr_t	*ifl_rx_bitmap;
	qidx_t		ifl_fragidx;
	/* constant */
	qidx_t		ifl_size;
	uint16_t	ifl_buf_size;
	uint16_t	ifl_cltype;
	uma_zone_t	ifl_zone;
	iflib_rxsd_array_t	ifl_sds;
	iflib_rxq_t	ifl_rxq;
	uint8_t		ifl_id;
	bus_dma_tag_t	ifl_buf_tag;
	iflib_dma_info_t	ifl_ifdi;
	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	qidx_t		ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
}  __aligned(CACHE_LINE_SIZE);

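/*
 * get_inuse() computes how many descriptors sit between the consumer (cidx)
 * and producer (pidx) indices; the generation bit disambiguates the
 * pidx == cidx case.  E.g. with size = 4: cidx = 1, pidx = 3 gives 2 in
 * use; cidx = 3, pidx = 1 wraps and gives 4 - 3 + 1 = 2; pidx == cidx is
 * empty when gen == 0 and completely full when gen == 1.
 */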
static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
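/* e.g. IDXDIFF(1, 3, 4) = 4 - 3 + 1 = 2: head has wrapped past the ring end */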

struct iflib_rxq {
	if_ctx_t	ifr_ctx;
	iflib_fl_t	ifr_fl;
	uint64_t	ifr_rx_irq;
	struct pfil_head	*pfil;
	/*
	 * If there is a separate completion queue (IFLIB_HAS_RXCQ), this is
	 * the completion queue consumer index.  Otherwise it's unused.
	 */
	qidx_t		ifr_cq_cidx;
	uint16_t	ifr_id;
	uint8_t		ifr_nfl;
	uint8_t		ifr_ntxqirq;
	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	uint8_t		ifr_fl_offset;
	struct lro_ctrl		ifr_lc;
	struct grouptask	ifr_task;
	struct callout		ifr_watchdog;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t	ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
}  __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	iflib_fl_t ifsd_fl;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND	((PKT_INFO_SIZE / 3) * 3)
#define RXD_LOOP_BOUND	((RXD_INFO_SIZE / 4) * 4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
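/*
 * The *_pad overlays let pkt_info_zero() and rxd_info_zero() clear the real
 * structures with a few word-sized stores instead of a bzero() call; the
 * CTASSERTs above guarantee the overlays stay the same size.  The loop
 * bounds round down to a multiple of the unroll factor, e.g. on LP64
 * RXD_LOOP_BOUND = (5 / 4) * 4 = 4, with the last word cleared separately
 * under #ifdef __LP64__.
 */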

static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i + 1] = 0;
		ri_pad->rxd_val[i + 2] = 0;
		ri_pad->rxd_val[i + 3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE - 1] = 0;
#endif
}

/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
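/* e.g. a 1024-descriptor ring caps any one packet at 1024 / 12 = 85 descriptors */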
#define IF_BAD_DMA	((bus_addr_t)-1)

#define CTX_ACTIVE(ctx)	((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc)	sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx)		sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx)		sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx)	sx_destroy(&(ctx)->ifc_ctx_sx)

#define STATE_LOCK_INIT(_sc, _name)	mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx)		mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx)	mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx)	mtx_destroy(&(ctx)->ifc_state_mtx)

#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)

/* Our boot-time initialization hook */
static int	iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

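/*
 * Two taskqueue groups: if_io_tqg spreads the per-queue RX/TX grouptasks
 * across all CPUs (mp_ncpus queues, stride 1), while if_config_tqg
 * serializes configuration work on a single thread.
 */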
TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0,
    "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0,
    "disable transmit batching at the possible expense of throughput");
static int iflib_timer_default = 1000;
SYSCTL_INT(_net_iflib, OID_AUTO, timer_default, CTLFLAG_RW,
    &iflib_timer_default, 0, "number of ticks between iflib_timer calls");

#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD, &iflib_tx_seen, 0,
    "# TX mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD, &iflib_tx_sent, 0,
    "# TX mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD, &iflib_tx_encap, 0,
    "# TX mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD, &iflib_tx_frees, 0,
    "# TX frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD, &iflib_rx_allocs, 0,
    "# RX allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD, &iflib_fl_refills, 0,
    "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");

static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");

static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
    &iflib_encap_txd_encap_fail, 0, "# driver encap failures");

static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_if_input;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, &iflib_task_fn_rxs, 0,
    "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
    &iflib_rx_intr_enables, 0, "# RX intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, &iflib_fast_intrs, 0,
    "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, &iflib_rx_unavail, 0,
    "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
    &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, &iflib_rx_if_input,
    0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, &iflib_rxd_flush, 0,
    "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
    &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
		iflib_txq_drain_notready =
		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
		iflib_rx_unavail =
		iflib_rx_ctx_inactive = iflib_rx_if_input =
		iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif

#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
#ifdef ALTQ
static void iflib_altq_if_start(if_t ifp);
static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
#endif
static void iflib_register(if_ctx_t);
static void iflib_deregister(if_ctx_t);
static void iflib_unregister_vlan_handlers(if_ctx_t ctx);
static uint16_t iflib_get_mbuf_size_for(unsigned int size);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_stop(if_ctx_t ctx);
static void iflib_if_init_locked(if_ctx_t ctx);
static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *iflib_fixup_rx(struct mbuf *m);
#endif
static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh);

static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
    SLIST_HEAD_INITIALIZER(cpu_offsets);
struct cpu_offset {
	SLIST_ENTRY(cpu_offset) entries;
	cpuset_t	set;
	unsigned int	refcount;
	uint16_t	next_cpuid;
};
static struct mtx cpu_offset_mtx;
MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
    MTX_DEF);

DEBUGNET_DEFINE(iflib);

static int
iflib_num_rx_descs(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	uint16_t first_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;

	return (scctx->isc_nrxd[first_rxq]);
}

static int
iflib_num_tx_descs(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	uint16_t first_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;

	return (scctx->isc_ntxd[first_txq]);
}

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init);
static void iflib_netmap_timer(void *arg);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames, 1: strip it (default).
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames whose length is not a multiple of 64
 *	is slower, so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs");

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	if_t ifp = na->ifp;
	if_ctx_t ctx = if_getsoftc(ifp);
	int status;

	CTX_LOCK(ctx);
	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	iflib_stop(ctx);

	/*
	 * Enable (or disable) netmap flags, and intercept (or restore)
	 * ifp->if_transmit. This is done once the device has been stopped
	 * to prevent race conditions. Also, this must be done after
	 * calling netmap_disable_all_rings() and before calling
	 * netmap_enable_all_rings(), so that these two functions see the
	 * updated state of the NAF_NETMAP_ON bit.
	 */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}

	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* XXX why twice? */
	status = if_getdrvflags(ifp) & IFF_DRV_RUNNING ? 0 : 1;
	if (status)
		nm_clear_native_flags(na);
	CTX_UNLOCK(ctx);
	return (status);
}

static int
iflib_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	if_t ifp = na->ifp;
	if_ctx_t ctx = if_getsoftc(ifp);
	iflib_rxq_t rxq = &ctx->ifc_rxqs[0];
	iflib_fl_t fl = &rxq->ifr_fl[0];

	info->num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	info->num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	info->num_tx_descs = iflib_num_tx_descs(ctx);
	info->num_rx_descs = iflib_num_rx_descs(ctx);
	info->rx_buf_maxsize = fl->ifl_buf_size;
	nm_prinf("txr %u rxr %u txd %u rxd %u rbufsz %u",
		info->num_tx_rings, info->num_rx_rings, info->num_tx_descs,
		info->num_rx_descs, info->rx_buf_maxsize);

	return (0);
}

static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	struct netmap_ring *ring = kring->ring;
	bus_dmamap_t *map;
	struct if_rxd_update iru;
	if_ctx_t ctx = rxq->ifr_ctx;
	iflib_fl_t fl = &rxq->ifr_fl[0];
	u_int nic_i_first, nic_i;
	u_int nm_i;
	int i, n;
#if IFLIB_DEBUG_COUNTERS
	int rf_count = 0;
#endif

	/*
	 * This function is used both at initialization and in rxsync.
	 * At initialization we need to prepare (with isc_rxd_refill())
	 * all the netmap buffers currently owned by the kernel, in
	 * such a way to keep fl->ifl_pidx and kring->nr_hwcur in sync
	 * (except for kring->nkr_hwofs). These may be less than
	 * kring->nkr_num_slots if netmap_reset() was called while
	 * an application using the kring still owned some buffers.
	 * At rxsync time, both indexes point to the next buffer to be
	 * refilled.
	 * In any case we publish (with isc_rxd_flush()) up to
	 * (fl->ifl_pidx - 1) % N (included), to avoid the NIC tail/prod
	 * pointer overrunning the head/cons pointer, although this is
	 * not necessary for some NICs (e.g. vmx).
	 */
	if (__predict_false(init)) {
		n = kring->nkr_num_slots - nm_kr_rxspace(kring);
	} else {
		n = kring->rhead - kring->nr_hwcur;
		if (n == 0)
			return (0); /* Nothing to do. */
		if (n < 0)
			n += kring->nkr_num_slots;
	}

	iru_init(&iru, rxq, 0 /* flid */);
	map = fl->ifl_sds.ifsd_map;
	nic_i = fl->ifl_pidx;
	nm_i = netmap_idx_n2k(kring, nic_i);
	if (__predict_false(init)) {
		/*
		 * On init/reset, nic_i must be 0, and we must
		 * start to refill from hwtail (see netmap_reset()).
		 */
		MPASS(nic_i == 0);
		MPASS(nm_i == kring->nr_hwtail);
	} else
		MPASS(nm_i == kring->nr_hwcur);
	DBG_COUNTER_INC(fl_refills);
	while (n > 0) {
#if IFLIB_DEBUG_COUNTERS
		if (++rf_count == 9)
			DBG_COUNTER_INC(fl_refills_large);
#endif
		nic_i_first = nic_i;
		for (i = 0; n > 0 && i < IFLIB_MAX_RX_REFRESH; n--, i++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			MPASS(i < IFLIB_MAX_RX_REFRESH);

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				return (netmap_ring_reinit(kring));

			fl->ifl_bus_addrs[i] = paddr +
			    nm_get_offset(kring, slot);
			fl->ifl_rxd_idxs[i] = nic_i;

			if (__predict_false(init)) {
				netmap_load_map(na, fl->ifl_buf_tag,
				    map[nic_i], addr);
			} else if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, fl->ifl_buf_tag,
				    map[nic_i], addr);
			}
			bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i],
			    BUS_DMASYNC_PREREAD);
			slot->flags &= ~NS_BUF_CHANGED;

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}

		iru.iru_pidx = nic_i_first;
		iru.iru_count = i;
		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
	}
	fl->ifl_pidx = nic_i;
	/*
	 * At the end of the loop we must have refilled everything
	 * we could possibly refill.
	 */
	MPASS(nm_i == kring->rhead);
	kring->nr_hwcur = nm_i;

	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id,
	    nm_prev(nic_i, lim));
	DBG_COUNTER_INC(rxd_flush);

	return (0);
}

#define NETMAP_TX_TIMER_US	90

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	if_t ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap kring */
	u_int nic_i;	/* index into the NIC ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;
	int tx_pkts = 0, tx_bytes = 0;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = if_getsoftc(ifp);
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		uint32_t pkt_len = 0, seg_idx = 0;
		int nic_i_start = -1, flags = 0;
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t offset = nm_get_offset(kring, slot);
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			flags |= (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IPI_TX_INTR : 0;

			/*
			 * If this is the first packet fragment, save the
			 * index of the first NIC slot for later.
			 */
			if (nic_i_start < 0)
				nic_i_start = nic_i;

			pi.ipi_segs[seg_idx].ds_addr = paddr + offset;
			pi.ipi_segs[seg_idx].ds_len = len;
			if (len) {
				pkt_len += len;
				seg_idx++;
			}

			if (!(slot->flags & NS_MOREFRAG)) {
				pi.ipi_len = pkt_len;
				pi.ipi_nsegs = seg_idx;
				pi.ipi_pidx = nic_i_start;
				pi.ipi_ndescs = 0;
				pi.ipi_flags = flags;

				/* Prepare the NIC TX ring. */
				ctx->isc_txd_encap(ctx->ifc_softc, &pi);
				DBG_COUNTER_INC(tx_encap);

				/* Update transmit counters */
				tx_bytes += pi.ipi_len;
				tx_pkts++;

				/* Reinit per-packet info for the next one. */
				flags = seg_idx = pkt_len = 0;
				nic_i_start = -1;
			}

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

			NM_CHECK_ADDR_LEN_OFF(na, len, offset);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[nic_i], addr);
			}
			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txq->ift_buf_tag,
			    txq->ift_sds.ifsd_map[nic_i],
			    BUS_DMASYNC_PREWRITE);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED | NS_MOREFRAG);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If we don't manage to reclaim them all, and TX IRQs are not in use,
	 * trigger a per-tx-queue timer to try again later.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}

	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_sbt_on(&txq->ift_netmap_timer,
			    NETMAP_TX_TIMER_US * SBT_1US, SBT_1US,
			    iflib_netmap_timer, txq,
			    txq->ift_netmap_timer.c_cpu, 0);
		}

	if_inc_counter(ifp, IFCOUNTER_OBYTES, tx_bytes);
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, tx_pkts);

	return (0);
}

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	if_t ifp = na->ifp;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	int i = 0, rx_bytes = 0, rx_pkts = 0;

	if_ctx_t ctx = if_getsoftc(ifp);
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = &rxq->ifr_fl[0];
	struct if_rxd_info ri;
	qidx_t *cidxp;

	/*
	 * netmap only uses free list 0, to avoid out of order consumption
	 * of receive buffers
	 */

	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring
	 * (or in the free list 0 if IFLIB_HAS_RXCQ is set), and they may
	 * differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = fl->ifl_cidx;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * fl->ifl_cidx is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		bool have_rxcq = sctx->isc_flags & IFLIB_HAS_RXCQ;
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		/*
		 * For the free list consumer index, we use the same
		 * logic as in iflib_rxeof().
		 */
		if (have_rxcq)
			cidxp = &rxq->ifr_cq_cidx;
		else
			cidxp = &fl->ifl_cidx;
		avail = ctx->isc_rxd_available(ctx->ifc_softc,
		    rxq->ifr_id, *cidxp, USHRT_MAX);

		nic_i = fl->ifl_cidx;
		nm_i = netmap_idx_n2k(kring, nic_i);
		MPASS(nm_i == kring->nr_hwtail);
		for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) {
			rxd_info_zero(&ri);
			ri.iri_frags = rxq->ifr_frags;
			ri.iri_qsidx = kring->ring_id;
			ri.iri_ifp = ctx->ifc_ifp;
			ri.iri_cidx = *cidxp;

			error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
			for (i = 0; i < ri.iri_nfrags; i++) {
				if (error) {
					ring->slot[nm_i].len = 0;
					ring->slot[nm_i].flags = 0;
				} else {
					ring->slot[nm_i].len = ri.iri_frags[i].irf_len;
					if (i == (ri.iri_nfrags - 1)) {
						ring->slot[nm_i].len -= crclen;
						ring->slot[nm_i].flags = 0;

						/* Update receive counters */
						rx_bytes += ri.iri_len;
						rx_pkts++;
					} else
						ring->slot[nm_i].flags = NS_MOREFRAG;
				}

				bus_dmamap_sync(fl->ifl_buf_tag,
				    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				fl->ifl_cidx = nic_i = nm_next(nic_i, lim);
			}

			if (have_rxcq) {
				*cidxp = ri.iri_cidx;
				while (*cidxp >= scctx->isc_nrxd[0])
					*cidxp -= scctx->isc_nrxd[0];
			}
		}
		if (n) { /* update the state variables */
			if (netmap_no_pendintr && !force_update) {
				/* diagnostics */
				iflib_rx_miss++;
				iflib_rx_miss_bufs += n;
			}
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	/*
	 * Second part: skip past packets that userspace has released
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	netmap_fl_refill(rxq, kring, false);

	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);

	return (0);
}

static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	if_ctx_t ctx = if_getsoftc(na->ifp);

	CTX_LOCK(ctx);
	if (onoff) {
		IFDI_INTR_ENABLE(ctx);
	} else {
		IFDI_INTR_DISABLE(ctx);
	}
	CTX_UNLOCK(ctx);
}

static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP | NAF_MOREFRAG | NAF_OFFSETS;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = iflib_num_tx_descs(ctx);
	na.num_rx_desc = iflib_num_rx_descs(ctx);
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.nm_config = iflib_netmap_config;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}

static int
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return (0);
	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si.
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
		    NMB(na, slot + si));
	}
	return (1);
}

static int
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring;
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return (0);
	kring = na->rx_rings[rxq->ifr_id];
	netmap_fl_refill(rxq, kring, true);
	return (1);
}

static void
iflib_netmap_timer(void *arg)
{
	iflib_txq_t txq = arg;
	if_ctx_t ctx = txq->ift_ctx;

	/*
	 * Wake up the netmap application, to give it a chance to
	 * call txsync and reclaim more completed TX buffers.
	 */
	netmap_tx_irq(ctx->ifc_ifp, txq->ift_id);
}

#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq) (0)
#define iflib_netmap_rxq_init(ctx, rxq) (0)
#define iflib_netmap_detach(ifp)
#define netmap_enable_all_rings(ifp)
#define netmap_disable_all_rings(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#endif

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x) + CACHE_LINE_SIZE / (sizeof(unsigned long)))));
#endif
}
#else
static __inline void
prefetch(void *x)
{
}

static __inline void
prefetch2cachelines(void *x)
{
}
#endif

static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

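/*
 * bus_dmamap_load() callback for the single-segment queue allocations
 * below: record the segment's bus address, leaving *arg untouched on
 * error (callers pre-set it to IF_BAD_DMA and check it afterwards).
 */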
static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

#define	DMA_WIDTH_TO_BUS_LOWADDR(width)				\
	(((width) == 0) || (width) == flsll(BUS_SPACE_MAXADDR) ?	\
	    BUS_SPACE_MAXADDR : (1ULL << (width)) - 1ULL)
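/*
 * e.g. a 32-bit DMA width yields a lowaddr of 0xffffffff, while a width of
 * 0 (unset) or the full bus width falls back to BUS_SPACE_MAXADDR.
 */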

int
iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
{
	int err;
	device_t dev = ctx->ifc_dev;
	bus_addr_t lowaddr;

	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(ctx->ifc_softc_ctx.isc_dma_width);

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
		    align, 0,		/* alignment, bounds */
		    lowaddr,		/* lowaddr */
		    BUS_SPACE_MAXADDR,	/* highaddr */
		    NULL, NULL,		/* filter, filterarg */
		    size,		/* maxsize */
		    1,			/* nsegments */
		    size,		/* maxsegsize */
		    BUS_DMA_ALLOCNOW,	/* flags */
		    NULL,		/* lockfunc */
		    NULL,		/* lockarg */
		    &dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d (size=%d, align=%d)\n",
		    __func__, err, size, align);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void **)&dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
}

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err = 0;	/* initialize so that count == 0 does not test garbage */
	iflib_dma_info_t *dmaiter;

	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	int result;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

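/*
 * Shared RX/TX interrupt filter: after running the driver's own filter (if
 * any), update TX credits for every txq sharing this interrupt and schedule
 * each one's grouptask when some TX completed, then schedule the RX
 * grouptask if descriptors are available.  Queues with no work get their
 * interrupt re-enabled here (collectively via IFDI_INTR_ENABLE() for
 * legacy INTx).
 */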
static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	if_ctx_t ctx;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	iflib_txq_t txq;
	void *sc;
	int i, cidx, result;
	qidx_t txqid;
	bool intr_enable, intr_legacy;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	ctx = rxq->ifr_ctx;
	sc = ctx->ifc_softc;
	intr_enable = false;
	intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY);
	MPASS(rxq->ifr_ntxqirq);
	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		txqid = rxq->ifr_txqid[i];
		txq = &ctx->ifc_txqs[txqid];
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD);
		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
			if (intr_legacy)
				intr_enable = true;
			else
				IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&txq->ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else {
		if (intr_legacy)
			intr_enable = true;
		else
			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
		DBG_COUNTER_INC(rx_intr_enables);
	}
	if (intr_enable)
		IFDI_INTR_ENABLE(ctx);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	if_ctx_t ctx = info->ifi_ctx;
	int result;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_admin_task);
	return (FILTER_HANDLED);
}

1662 static int
_iflib_irq_alloc(if_ctx_t ctx,if_irq_t irq,int rid,driver_filter_t filter,driver_intr_t handler,void * arg,const char * name)1663 _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
1664 		 driver_filter_t filter, driver_intr_t handler, void *arg,
1665 		 const char *name)
1666 {
1667 	struct resource *res;
1668 	void *tag = NULL;
1669 	device_t dev = ctx->ifc_dev;
1670 	int flags, i, rc;
1671 
1672 	flags = RF_ACTIVE;
1673 	if (ctx->ifc_flags & IFC_LEGACY)
1674 		flags |= RF_SHAREABLE;
1675 	MPASS(rid < 512);
1676 	i = rid;
1677 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, flags);
1678 	if (res == NULL) {
1679 		device_printf(dev,
1680 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
1681 		return (ENOMEM);
1682 	}
1683 	irq->ii_res = res;
1684 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
1685 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
1686 		    filter, handler, arg, &tag);
1687 	if (rc != 0) {
1688 		device_printf(dev,
1689 		    "failed to setup interrupt for rid %d, name %s: %d\n",
1690 		    rid, name ? name : "unknown", rc);
1691 		return (rc);
1692 	} else if (name)
1693 		bus_describe_intr(dev, res, tag, "%s", name);
1694 
1695 	irq->ii_tag = tag;
1696 	return (0);
1697 }
1698 
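/*
 * Editor's note: drivers do not call _iflib_irq_alloc() directly; they go
 * through iflib_irq_alloc_generic(), which lands here.  A hedged sketch of
 * a typical per-queue MSI-X assignment follows; the "mydrv" softc layout
 * and the rid mapping (vector 0 reserved for the admin interrupt) are
 * assumptions.
 */
#if 0	/* illustrative only, not compiled */
	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &sc->rx_irqs[i], i + 1,
		    IFLIB_INTR_RXTX, mydrv_msix_que, &sc->rx_queues[i], i, buf);
		if (err != 0)
			return (err);
	}
#endif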
1699 /*********************************************************************
1700  *
1701  *  Allocate DMA resources for TX buffers as well as memory for the TX
1702  *  mbuf map.  TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in an
1703  *  iflib_sw_tx_desc_array structure, storing all the information that
1704  *  is needed to transmit a packet on the wire.  This is called only
1705  *  once at attach; setup is done on every reset.
1706  *
1707  **********************************************************************/
1708 static int
1709 iflib_txsd_alloc(iflib_txq_t txq)
1710 {
1711 	if_ctx_t ctx = txq->ift_ctx;
1712 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1713 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1714 	device_t dev = ctx->ifc_dev;
1715 	bus_size_t tsomaxsize;
1716 	bus_addr_t lowaddr;
1717 	int err, nsegments, ntsosegments;
1718 	bool tso;
1719 
1720 	nsegments = scctx->isc_tx_nsegments;
1721 	ntsosegments = scctx->isc_tx_tso_segments_max;
1722 	tsomaxsize = scctx->isc_tx_tso_size_max;
1723 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
1724 		tsomaxsize += sizeof(struct ether_vlan_header);
1725 	MPASS(scctx->isc_ntxd[0] > 0);
1726 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
1727 	MPASS(nsegments > 0);
1728 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
1729 		MPASS(ntsosegments > 0);
1730 		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
1731 	}
1732 
1733 	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);
1734 
1735 	/*
1736 	 * Set up DMA tags for TX buffers.
1737 	 */
1738 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1739 		    1, 0,			/* alignment, bounds */
1740 		    lowaddr,			/* lowaddr */
1741 		    BUS_SPACE_MAXADDR,		/* highaddr */
1742 		    NULL, NULL,			/* filter, filterarg */
1743 		    sctx->isc_tx_maxsize,	/* maxsize */
1744 		    nsegments,			/* nsegments */
1745 		    sctx->isc_tx_maxsegsize,	/* maxsegsize */
1746 		    0,				/* flags */
1747 		    NULL,			/* lockfunc */
1748 		    NULL,			/* lockfuncarg */
1749 		    &txq->ift_buf_tag))) {
1750 		device_printf(dev, "Unable to allocate TX DMA tag: %d\n", err);
1751 		device_printf(dev, "maxsize: %ju nsegments: %d maxsegsize: %ju\n",
1752 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
1753 		goto fail;
1754 	}
1755 	tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
1756 	if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
1757 		    1, 0,			/* alignment, bounds */
1758 		    lowaddr,			/* lowaddr */
1759 		    BUS_SPACE_MAXADDR,		/* highaddr */
1760 		    NULL, NULL,			/* filter, filterarg */
1761 		    tsomaxsize,			/* maxsize */
1762 		    ntsosegments,		/* nsegments */
1763 		    sctx->isc_tso_maxsegsize,	/* maxsegsize */
1764 		    0,				/* flags */
1765 		    NULL,			/* lockfunc */
1766 		    NULL,			/* lockfuncarg */
1767 		    &txq->ift_tso_buf_tag))) {
1768 		device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
1769 		    err);
1770 		goto fail;
1771 	}
1772 
1773 	/* Allocate memory for the TX mbuf map. */
1774 	if (!(txq->ift_sds.ifsd_m =
1775 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1776 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1777 		device_printf(dev, "Unable to allocate TX mbuf map memory\n");
1778 		err = ENOMEM;
1779 		goto fail;
1780 	}
1781 
1782 	/*
1783 	 * Create the DMA maps for TX buffers.
1784 	 */
1785 	if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
1786 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1787 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1788 		device_printf(dev,
1789 		    "Unable to allocate TX buffer DMA map memory\n");
1790 		err = ENOMEM;
1791 		goto fail;
1792 	}
1793 	if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
1794 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
1795 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1796 		device_printf(dev,
1797 		    "Unable to allocate TSO TX buffer map memory\n");
1798 		err = ENOMEM;
1799 		goto fail;
1800 	}
1801 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1802 		err = bus_dmamap_create(txq->ift_buf_tag, 0,
1803 		    &txq->ift_sds.ifsd_map[i]);
1804 		if (err != 0) {
1805 			device_printf(dev, "Unable to create TX DMA map\n");
1806 			goto fail;
1807 		}
1808 		if (!tso)
1809 			continue;
1810 		err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
1811 		    &txq->ift_sds.ifsd_tso_map[i]);
1812 		if (err != 0) {
1813 			device_printf(dev, "Unable to create TSO TX DMA map\n");
1814 			goto fail;
1815 		}
1816 	}
1817 	return (0);
1818 fail:
1819 	/* We free everything; this handles the case where we failed partway through */
1820 	iflib_tx_structures_free(ctx);
1821 	return (err);
1822 }
1823 
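/*
 * Editor's note: the tag parameters consumed by iflib_txsd_alloc() above
 * come from the driver's shared context.  A hedged sketch of the relevant
 * initializer fields follows; the values shown are plausible examples for
 * a hypothetical "mydrv", not requirements.
 */
#if 0	/* illustrative only, not compiled */
static struct if_shared_ctx mydrv_sctx = {
	.isc_magic = IFLIB_MAGIC,
	.isc_tx_maxsize = 65535,	/* bounds the TX buffer tag above */
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = 65535 + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM9BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM9BYTES,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	/* ... queue counts, descriptor limits, driver name, etc. ... */
};
#endif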
1824 static void
1825 iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
1826 {
1827 	bus_dmamap_t map;
1828 
1829 	if (txq->ift_sds.ifsd_map != NULL) {
1830 		map = txq->ift_sds.ifsd_map[i];
1831 		bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
1832 		bus_dmamap_unload(txq->ift_buf_tag, map);
1833 		bus_dmamap_destroy(txq->ift_buf_tag, map);
1834 		txq->ift_sds.ifsd_map[i] = NULL;
1835 	}
1836 
1837 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1838 		map = txq->ift_sds.ifsd_tso_map[i];
1839 		bus_dmamap_sync(txq->ift_tso_buf_tag, map,
1840 		    BUS_DMASYNC_POSTWRITE);
1841 		bus_dmamap_unload(txq->ift_tso_buf_tag, map);
1842 		bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
1843 		txq->ift_sds.ifsd_tso_map[i] = NULL;
1844 	}
1845 }
1846 
1847 static void
1848 iflib_txq_destroy(iflib_txq_t txq)
1849 {
1850 	if_ctx_t ctx = txq->ift_ctx;
1851 
1852 	for (int i = 0; i < txq->ift_size; i++)
1853 		iflib_txsd_destroy(ctx, txq, i);
1854 
1855 	if (txq->ift_br != NULL) {
1856 		ifmp_ring_free(txq->ift_br);
1857 		txq->ift_br = NULL;
1858 	}
1859 
1860 	mtx_destroy(&txq->ift_mtx);
1861 
1862 	if (txq->ift_sds.ifsd_map != NULL) {
1863 		free(txq->ift_sds.ifsd_map, M_IFLIB);
1864 		txq->ift_sds.ifsd_map = NULL;
1865 	}
1866 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1867 		free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
1868 		txq->ift_sds.ifsd_tso_map = NULL;
1869 	}
1870 	if (txq->ift_sds.ifsd_m != NULL) {
1871 		free(txq->ift_sds.ifsd_m, M_IFLIB);
1872 		txq->ift_sds.ifsd_m = NULL;
1873 	}
1874 	if (txq->ift_buf_tag != NULL) {
1875 		bus_dma_tag_destroy(txq->ift_buf_tag);
1876 		txq->ift_buf_tag = NULL;
1877 	}
1878 	if (txq->ift_tso_buf_tag != NULL) {
1879 		bus_dma_tag_destroy(txq->ift_tso_buf_tag);
1880 		txq->ift_tso_buf_tag = NULL;
1881 	}
1882 	if (txq->ift_ifdi != NULL) {
1883 		free(txq->ift_ifdi, M_IFLIB);
1884 	}
1885 }
1886 
1887 static void
1888 iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
1889 {
1890 	struct mbuf **mp;
1891 
1892 	mp = &txq->ift_sds.ifsd_m[i];
1893 	if (*mp == NULL)
1894 		return;
1895 
1896 	if (txq->ift_sds.ifsd_map != NULL) {
1897 		bus_dmamap_sync(txq->ift_buf_tag,
1898 		    txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
1899 		bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
1900 	}
1901 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1902 		bus_dmamap_sync(txq->ift_tso_buf_tag,
1903 		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
1904 		bus_dmamap_unload(txq->ift_tso_buf_tag,
1905 		    txq->ift_sds.ifsd_tso_map[i]);
1906 	}
1907 	m_freem(*mp);
1908 	DBG_COUNTER_INC(tx_frees);
1909 	*mp = NULL;
1910 }
1911 
1912 static int
1913 iflib_txq_setup(iflib_txq_t txq)
1914 {
1915 	if_ctx_t ctx = txq->ift_ctx;
1916 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1917 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1918 	iflib_dma_info_t di;
1919 	int i;
1920 
1921 	/* Set number of descriptors available */
1922 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
1923 	/* XXX make configurable */
1924 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
1925 
1926 	/* Reset indices */
1927 	txq->ift_cidx_processed = 0;
1928 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
1929 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
1930 
1931 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1932 		bzero((void *)di->idi_vaddr, di->idi_size);
1933 
1934 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
1935 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
1936 		bus_dmamap_sync(di->idi_tag, di->idi_map,
1937 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1938 	return (0);
1939 }
1940 
1941 /*********************************************************************
1942  *
1943  *  Allocate DMA resources for RX buffers as well as memory for the RX
1944  *  mbuf map, direct RX cluster pointer map and RX cluster bus address
1945  *  map.  RX DMA map, RX mbuf map, direct RX cluster pointer map and
1946  *  RX cluster map are kept in an iflib_sw_rx_desc_array structure.
1947  *  Since we use one entry in iflib_sw_rx_desc_array per received
1948  *  packet, the maximum number of entries we'll need is equal to the
1949  *  number of hardware receive descriptors that we've allocated.
1950  *
1951  **********************************************************************/
1952 static int
1953 iflib_rxsd_alloc(iflib_rxq_t rxq)
1954 {
1955 	if_ctx_t ctx = rxq->ifr_ctx;
1956 	if_shared_ctx_t sctx = ctx->ifc_sctx;
1957 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
1958 	device_t dev = ctx->ifc_dev;
1959 	iflib_fl_t fl;
1960 	bus_addr_t lowaddr;
1961 	int err;
1962 
1963 	MPASS(scctx->isc_nrxd[0] > 0);
1964 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
1965 
1966 	lowaddr = DMA_WIDTH_TO_BUS_LOWADDR(scctx->isc_dma_width);
1967 
1968 	fl = rxq->ifr_fl;
1969 	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
1970 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
1971 		/* Set up DMA tag for RX buffers. */
1972 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1973 			    1, 0,			/* alignment, bounds */
1974 			    lowaddr,			/* lowaddr */
1975 			    BUS_SPACE_MAXADDR,		/* highaddr */
1976 			    NULL, NULL,			/* filter, filterarg */
1977 			    sctx->isc_rx_maxsize,	/* maxsize */
1978 			    sctx->isc_rx_nsegments,	/* nsegments */
1979 			    sctx->isc_rx_maxsegsize,	/* maxsegsize */
1980 			    0,				/* flags */
1981 			    NULL,			/* lockfunc */
1982 			    NULL,			/* lockarg */
1983 			    &fl->ifl_buf_tag);
1984 		if (err) {
1985 			device_printf(dev,
1986 			    "Unable to allocate RX DMA tag: %d\n", err);
1987 			goto fail;
1988 		}
1989 
1990 		/* Allocate memory for the RX mbuf map. */
1991 		if (!(fl->ifl_sds.ifsd_m =
1992 		    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1993 			    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1994 			device_printf(dev,
1995 			    "Unable to allocate RX mbuf map memory\n");
1996 			err = ENOMEM;
1997 			goto fail;
1998 		}
1999 
2000 		/* Allocate memory for the direct RX cluster pointer map. */
2001 		if (!(fl->ifl_sds.ifsd_cl =
2002 		    (caddr_t *) malloc(sizeof(caddr_t) *
2003 			    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2004 			device_printf(dev,
2005 			    "Unable to allocate RX cluster map memory\n");
2006 			err = ENOMEM;
2007 			goto fail;
2008 		}
2009 
2010 		/* Allocate memory for the RX cluster bus address map. */
2011 		if (!(fl->ifl_sds.ifsd_ba =
2012 		    (bus_addr_t *) malloc(sizeof(bus_addr_t) *
2013 			    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2014 			device_printf(dev,
2015 			    "Unable to allocate RX bus address map memory\n");
2016 			err = ENOMEM;
2017 			goto fail;
2018 		}
2019 
2020 		/*
2021 		 * Create the DMA maps for RX buffers.
2022 		 */
2023 		if (!(fl->ifl_sds.ifsd_map =
2024 		    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
2025 			device_printf(dev,
2026 			    "Unable to allocate RX buffer DMA map memory\n");
2027 			err = ENOMEM;
2028 			goto fail;
2029 		}
2030 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
2031 			err = bus_dmamap_create(fl->ifl_buf_tag, 0,
2032 			    &fl->ifl_sds.ifsd_map[i]);
2033 			if (err != 0) {
2034 				device_printf(dev, "Unable to create RX buffer DMA map\n");
2035 				goto fail;
2036 			}
2037 		}
2038 	}
2039 	return (0);
2040 
2041 fail:
2042 	iflib_rx_structures_free(ctx);
2043 	return (err);
2044 }
2045 
2046 /*
2047  * Internal service routines
2048  */
2049 
2050 struct rxq_refill_cb_arg {
2051 	int               error;
2052 	bus_dma_segment_t seg;
2053 	int               nseg;
2054 };
2055 
2056 static void
2057 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2058 {
2059 	struct rxq_refill_cb_arg *cb_arg = arg;
2060 
2061 	cb_arg->error = error;
2062 	cb_arg->seg = segs[0];
2063 	cb_arg->nseg = nseg;
2064 }
2065 
2066 /**
2067  * iflib_fl_refill - refill an rxq free-buffer list
2068  * @ctx: the iflib context
2069  * @fl: the free list to refill
2070  * @count: the number of new buffers to allocate
2071  *
2072  * (Re)populate an rxq free-buffer list with up to @count new packet buffers.
2073  * The caller must ensure that @count does not exceed the queue's capacity
2074  * minus one (since we always leave a descriptor unavailable).
2075  */
2076 static uint8_t
2077 iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
2078 {
2079 	struct if_rxd_update iru;
2080 	struct rxq_refill_cb_arg cb_arg;
2081 	struct mbuf *m;
2082 	caddr_t cl, *sd_cl;
2083 	struct mbuf **sd_m;
2084 	bus_dmamap_t *sd_map;
2085 	bus_addr_t bus_addr, *sd_ba;
2086 	int err, frag_idx, i, idx, n, pidx;
2087 	qidx_t credits;
2088 
2089 	MPASS(count <= fl->ifl_size - fl->ifl_credits - 1);
2090 
2091 	sd_m = fl->ifl_sds.ifsd_m;
2092 	sd_map = fl->ifl_sds.ifsd_map;
2093 	sd_cl = fl->ifl_sds.ifsd_cl;
2094 	sd_ba = fl->ifl_sds.ifsd_ba;
2095 	pidx = fl->ifl_pidx;
2096 	idx = pidx;
2097 	frag_idx = fl->ifl_fragidx;
2098 	credits = fl->ifl_credits;
2099 
2100 	i = 0;
2101 	n = count;
2102 	MPASS(n > 0);
2103 	MPASS(credits + n <= fl->ifl_size);
2104 
2105 	if (pidx < fl->ifl_cidx)
2106 		MPASS(pidx + n <= fl->ifl_cidx);
2107 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
2108 		MPASS(fl->ifl_gen == 0);
2109 	if (pidx > fl->ifl_cidx)
2110 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
2111 
2112 	DBG_COUNTER_INC(fl_refills);
2113 	if (n > 8)
2114 		DBG_COUNTER_INC(fl_refills_large);
2115 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
2116 	while (n-- > 0) {
2117 		/*
2118 		 * We allocate an uninitialized mbuf + cluster; the mbuf is
2119 		 * initialized after rx.
2120 		 *
2121 		 * If the cluster is still set, then we know a minimum-sized
2122 		 * packet was received.
2123 		 */
2124 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
2125 		    &frag_idx);
2126 		if (frag_idx < 0)
2127 			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
2128 		MPASS(frag_idx >= 0);
2129 		if ((cl = sd_cl[frag_idx]) == NULL) {
2130 			cl = uma_zalloc(fl->ifl_zone, M_NOWAIT);
2131 			if (__predict_false(cl == NULL))
2132 				break;
2133 
2134 			cb_arg.error = 0;
2135 			MPASS(sd_map != NULL);
2136 			err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
2137 			    cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
2138 			    BUS_DMA_NOWAIT);
2139 			if (__predict_false(err != 0 || cb_arg.error)) {
2140 				uma_zfree(fl->ifl_zone, cl);
2141 				break;
2142 			}
2143 
2144 			sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
2145 			sd_cl[frag_idx] = cl;
2146 #if MEMORY_LOGGING
2147 			fl->ifl_cl_enqueued++;
2148 #endif
2149 		} else {
2150 			bus_addr = sd_ba[frag_idx];
2151 		}
2152 		bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
2153 		    BUS_DMASYNC_PREREAD);
2154 
2155 		if (sd_m[frag_idx] == NULL) {
2156 			m = m_gethdr_raw(M_NOWAIT, 0);
2157 			if (__predict_false(m == NULL))
2158 				break;
2159 			sd_m[frag_idx] = m;
2160 		}
2161 		bit_set(fl->ifl_rx_bitmap, frag_idx);
2162 #if MEMORY_LOGGING
2163 		fl->ifl_m_enqueued++;
2164 #endif
2165 
2166 		DBG_COUNTER_INC(rx_allocs);
2167 		fl->ifl_rxd_idxs[i] = frag_idx;
2168 		fl->ifl_bus_addrs[i] = bus_addr;
2169 		credits++;
2170 		i++;
2171 		MPASS(credits <= fl->ifl_size);
2172 		if (++idx == fl->ifl_size) {
2173 #ifdef INVARIANTS
2174 			fl->ifl_gen = 1;
2175 #endif
2176 			idx = 0;
2177 		}
2178 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
2179 			iru.iru_pidx = pidx;
2180 			iru.iru_count = i;
2181 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2182 			fl->ifl_pidx = idx;
2183 			fl->ifl_credits = credits;
2184 			pidx = idx;
2185 			i = 0;
2186 		}
2187 	}
2188 
2189 	if (n < count - 1) {
2190 		if (i != 0) {
2191 			iru.iru_pidx = pidx;
2192 			iru.iru_count = i;
2193 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2194 			fl->ifl_pidx = idx;
2195 			fl->ifl_credits = credits;
2196 		}
2197 		DBG_COUNTER_INC(rxd_flush);
2198 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2199 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2200 		ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id,
2201 		    fl->ifl_id, fl->ifl_pidx);
2202 		if (__predict_true(bit_test(fl->ifl_rx_bitmap, frag_idx))) {
2203 			fl->ifl_fragidx = frag_idx + 1;
2204 			if (fl->ifl_fragidx == fl->ifl_size)
2205 				fl->ifl_fragidx = 0;
2206 		} else {
2207 			fl->ifl_fragidx = frag_idx;
2208 		}
2209 	}
2210 
2211 	return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY);
2212 }
2213 
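/*
 * Editor's note: the iru batches built by iflib_fl_refill() above are
 * handed to the driver's isc_rxd_refill callback.  A hedged sketch of one
 * follows; the "mydrv" descriptor layout is hypothetical, while the
 * if_rxd_update fields are those populated above.
 */
#if 0	/* illustrative only, not compiled */
static void
mydrv_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct mydrv_softc *sc = arg;
	struct mydrv_rx_ring *ring = &sc->rx_rings[iru->iru_qsidx];
	uint16_t i;

	for (i = 0; i < iru->iru_count; i++) {
		/* One physical buffer address per posted free-list index. */
		ring->desc[iru->iru_idxs[i]].addr =
		    htole64(iru->iru_paddrs[i]);
	}
}
#endif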
2214 static inline uint8_t
2215 iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl)
2216 {
2217 	/*
2218 	 * We leave one descriptor unused to prevent pidx from catching up
2219 	 * with cidx; pidx == cidx confuses most NICs.  For instance, Intel
2220 	 * NICs have (per receive ring) RDH and RDT registers, where RDH
2221 	 * points to the next receive descriptor to be used by the NIC, and
2222 	 * RDT points to the next receive descriptor to be published by the
2223 	 * driver to the NIC (RDT - 1 is thus the last valid one).
2224 	 * The condition RDH == RDT means no descriptors are available to
2225 	 * the NIC, and thus it would be ambiguous if it also meant that
2226 	 * all the descriptors are available to the NIC.
2227 	 */
2228 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
2229 #ifdef INVARIANTS
2230 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
2231 #endif
2232 
2233 	MPASS(fl->ifl_credits <= fl->ifl_size);
2234 	MPASS(reclaimable == delta);
2235 
2236 	if (reclaimable > 0)
2237 		return (iflib_fl_refill(ctx, fl, reclaimable));
2238 	return (0);
2239 }
2240 
2241 uint8_t
2242 iflib_in_detach(if_ctx_t ctx)
2243 {
2244 	bool in_detach;
2245 
2246 	STATE_LOCK(ctx);
2247 	in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
2248 	STATE_UNLOCK(ctx);
2249 	return (in_detach);
2250 }
2251 
2252 static void
2253 iflib_fl_bufs_free(iflib_fl_t fl)
2254 {
2255 	iflib_dma_info_t idi = fl->ifl_ifdi;
2256 	bus_dmamap_t sd_map;
2257 	uint32_t i;
2258 
2259 	for (i = 0; i < fl->ifl_size; i++) {
2260 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2261 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
2262 
2263 		if (*sd_cl != NULL) {
2264 			sd_map = fl->ifl_sds.ifsd_map[i];
2265 			bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
2266 			    BUS_DMASYNC_POSTREAD);
2267 			bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
2268 			uma_zfree(fl->ifl_zone, *sd_cl);
2269 			*sd_cl = NULL;
2270 			if (*sd_m != NULL) {
2271 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2272 				m_free_raw(*sd_m);
2273 				*sd_m = NULL;
2274 			}
2275 		} else {
2276 			MPASS(*sd_m == NULL);
2277 		}
2278 #if MEMORY_LOGGING
2279 		fl->ifl_m_dequeued++;
2280 		fl->ifl_cl_dequeued++;
2281 #endif
2282 	}
2283 #ifdef INVARIANTS
2284 	for (i = 0; i < fl->ifl_size; i++) {
2285 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
2286 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
2287 	}
2288 #endif
2289 	/*
2290 	 * Reset free list values
2291 	 */
2292 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
2293 	bzero(idi->idi_vaddr, idi->idi_size);
2294 }
2295 
2296 /*********************************************************************
2297  *
2298  *  Initialize a free list and its buffers.
2299  *
2300  **********************************************************************/
2301 static int
2302 iflib_fl_setup(iflib_fl_t fl)
2303 {
2304 	iflib_rxq_t rxq = fl->ifl_rxq;
2305 	if_ctx_t ctx = rxq->ifr_ctx;
2306 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2307 	int qidx;
2308 
2309 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
2310 	/*
2311 	 * Free current RX buffer structs and their mbufs
2312 	 */
2313 	iflib_fl_bufs_free(fl);
2314 	/* Now replenish the mbufs */
2315 	MPASS(fl->ifl_credits == 0);
2316 	qidx = rxq->ifr_fl_offset + fl->ifl_id;
2317 	if (scctx->isc_rxd_buf_size[qidx] != 0)
2318 		fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx];
2319 	else
2320 		fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz;
2321 	/*
2322 	 * ifl_buf_size may be a driver-supplied value, so pull it up
2323 	 * to the selected mbuf size.
2324 	 */
2325 	fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size);
2326 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
2327 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
2328 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
2329 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
2330 
2331 	/*
2332 	 * Avoid pre-allocating zillions of clusters to an idle card,
2333 	 * potentially speeding up attach. In any case, make sure
2334 	 * to leave a descriptor unavailable. See the comment in
2335 	 * iflib_fl_refill_all().
2336 	 */
2337 	MPASS(fl->ifl_size > 0);
2338 	(void)iflib_fl_refill(ctx, fl, min(128, fl->ifl_size - 1));
2339 	if (min(128, fl->ifl_size - 1) != fl->ifl_credits)
2340 		return (ENOBUFS);
2341 	/*
2342 	 * Refill succeeded; the failure case returned ENOBUFS above.
2343 	 */
2344 	MPASS(rxq != NULL);
2345 	MPASS(fl->ifl_ifdi != NULL);
2346 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
2347 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2348 	return (0);
2349 }
2350 
2351 /*********************************************************************
2352  *
2353  *  Free receive ring data structures
2354  *
2355  **********************************************************************/
2356 static void
2357 iflib_rx_sds_free(iflib_rxq_t rxq)
2358 {
2359 	iflib_fl_t fl;
2360 	int i, j;
2361 
2362 	if (rxq->ifr_fl != NULL) {
2363 		for (i = 0; i < rxq->ifr_nfl; i++) {
2364 			fl = &rxq->ifr_fl[i];
2365 			if (fl->ifl_buf_tag != NULL) {
2366 				if (fl->ifl_sds.ifsd_map != NULL) {
2367 					for (j = 0; j < fl->ifl_size; j++) {
2368 						bus_dmamap_sync(
2369 						    fl->ifl_buf_tag,
2370 						    fl->ifl_sds.ifsd_map[j],
2371 						    BUS_DMASYNC_POSTREAD);
2372 						bus_dmamap_unload(
2373 						    fl->ifl_buf_tag,
2374 						    fl->ifl_sds.ifsd_map[j]);
2375 						bus_dmamap_destroy(
2376 						    fl->ifl_buf_tag,
2377 						    fl->ifl_sds.ifsd_map[j]);
2378 					}
2379 				}
2380 				bus_dma_tag_destroy(fl->ifl_buf_tag);
2381 				fl->ifl_buf_tag = NULL;
2382 			}
2383 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2384 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2385 			free(fl->ifl_sds.ifsd_ba, M_IFLIB);
2386 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2387 			free(fl->ifl_rx_bitmap, M_IFLIB);
2388 			fl->ifl_sds.ifsd_m = NULL;
2389 			fl->ifl_sds.ifsd_cl = NULL;
2390 			fl->ifl_sds.ifsd_ba = NULL;
2391 			fl->ifl_sds.ifsd_map = NULL;
2392 			fl->ifl_rx_bitmap = NULL;
2393 		}
2394 		free(rxq->ifr_fl, M_IFLIB);
2395 		rxq->ifr_fl = NULL;
2396 		free(rxq->ifr_ifdi, M_IFLIB);
2397 		rxq->ifr_ifdi = NULL;
2398 		rxq->ifr_cq_cidx = 0;
2399 	}
2400 }
2401 
2402 /*
2403  * Timer routine
2404  */
2405 static void
2406 iflib_timer(void *arg)
2407 {
2408 	iflib_txq_t txq = arg;
2409 	if_ctx_t ctx = txq->ift_ctx;
2410 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2411 	uint64_t this_tick = ticks;
2412 
2413 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
2414 		return;
2415 
2416 	/*
2417 	** Check on the state of the TX queue(s); this
2418 	** can be done without the lock because it's RO
2419 	** and the HUNG state will be static if set.
2420 	*/
2421 	if (this_tick - txq->ift_last_timer_tick >= iflib_timer_default) {
2422 		txq->ift_last_timer_tick = this_tick;
2423 		IFDI_TIMER(ctx, txq->ift_id);
2424 		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2425 		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2426 		     (sctx->isc_pause_frames == 0)))
2427 			goto hung;
2428 
2429 		if (txq->ift_qstatus != IFLIB_QUEUE_IDLE &&
2430 		    ifmp_ring_is_stalled(txq->ift_br)) {
2431 			KASSERT(ctx->ifc_link_state == LINK_STATE_UP,
2432 			    ("queue can't be marked as hung if interface is down"));
2433 			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2434 		}
2435 		txq->ift_cleaned_prev = txq->ift_cleaned;
2436 	}
2437 	/* handle any laggards */
2438 	if (txq->ift_db_pending)
2439 		GROUPTASK_ENQUEUE(&txq->ift_task);
2440 
2441 	sctx->isc_pause_frames = 0;
2442 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2443 		callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer,
2444 		    txq, txq->ift_timer.c_cpu);
2445 	return;
2446 
2447  hung:
2448 	device_printf(ctx->ifc_dev,
2449 	    "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n",
2450 	    txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
2451 	STATE_LOCK(ctx);
2452 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2453 	ctx->ifc_flags |= (IFC_DO_WATCHDOG | IFC_DO_RESET);
2454 	iflib_admin_intr_deferred(ctx);
2455 	STATE_UNLOCK(ctx);
2456 }
2457 
2458 static uint16_t
2459 iflib_get_mbuf_size_for(unsigned int size)
2460 {
2461 
2462 	if (size <= MCLBYTES)
2463 		return (MCLBYTES);
2464 	else
2465 		return (MJUMPAGESIZE);
2466 }
2467 
2468 static void
2469 iflib_calc_rx_mbuf_sz(if_ctx_t ctx)
2470 {
2471 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2472 
2473 	/*
2474 	 * XXX don't set the max_frame_size to larger
2475 	 * than the hardware can handle
2476 	 */
2477 	ctx->ifc_rx_mbuf_sz =
2478 	    iflib_get_mbuf_size_for(sctx->isc_max_frame_size);
2479 }
2480 
2481 uint32_t
2482 iflib_get_rx_mbuf_sz(if_ctx_t ctx)
2483 {
2484 
2485 	return (ctx->ifc_rx_mbuf_sz);
2486 }
2487 
2488 static void
2489 iflib_init_locked(if_ctx_t ctx)
2490 {
2491 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2492 	if_t ifp = ctx->ifc_ifp;
2493 	iflib_fl_t fl;
2494 	iflib_txq_t txq;
2495 	iflib_rxq_t rxq;
2496 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
2497 
2498 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2499 	IFDI_INTR_DISABLE(ctx);
2500 
2501 	/*
2502 	 * See iflib_stop(). Useful in case iflib_init_locked() is
2503 	 * called without first calling iflib_stop().
2504 	 */
2505 	netmap_disable_all_rings(ifp);
2506 
2507 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
2508 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
2509 	/* Set hardware offload abilities */
2510 	if_clearhwassist(ifp);
2511 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
2512 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
2513 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
2514 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
2515 	if (if_getcapenable(ifp) & IFCAP_TSO4)
2516 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
2517 	if (if_getcapenable(ifp) & IFCAP_TSO6)
2518 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
2519 
2520 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
2521 		CALLOUT_LOCK(txq);
2522 		callout_stop(&txq->ift_timer);
2523 #ifdef DEV_NETMAP
2524 		callout_stop(&txq->ift_netmap_timer);
2525 #endif /* DEV_NETMAP */
2526 		CALLOUT_UNLOCK(txq);
2527 		(void)iflib_netmap_txq_init(ctx, txq);
2528 	}
2529 
2530 	/*
2531 	 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so
2532 	 * that drivers can use the value when setting up the hardware receive
2533 	 * buffers.
2534 	 */
2535 	iflib_calc_rx_mbuf_sz(ctx);
2536 
2537 #ifdef INVARIANTS
2538 	i = if_getdrvflags(ifp);
2539 #endif
2540 	IFDI_INIT(ctx);
2541 	MPASS(if_getdrvflags(ifp) == i);
2542 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
2543 		if (iflib_netmap_rxq_init(ctx, rxq) > 0) {
2544 			/* This rxq is in netmap mode. Skip normal init. */
2545 			continue;
2546 		}
2547 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
2548 			if (iflib_fl_setup(fl)) {
2549 				device_printf(ctx->ifc_dev,
2550 				    "setting up free list %d failed - "
2551 				    "check cluster settings\n", j);
2552 				goto done;
2553 			}
2554 		}
2555 	}
2556 done:
2557 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2558 	IFDI_INTR_ENABLE(ctx);
2559 	txq = ctx->ifc_txqs;
2560 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++)
2561 		callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
2562 			txq->ift_timer.c_cpu);
2563 
2564 	/* Re-enable txsync/rxsync. */
2565 	netmap_enable_all_rings(ifp);
2566 }
2567 
2568 static int
2569 iflib_media_change(if_t ifp)
2570 {
2571 	if_ctx_t ctx = if_getsoftc(ifp);
2572 	int err;
2573 
2574 	CTX_LOCK(ctx);
2575 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
2576 		iflib_if_init_locked(ctx);
2577 	CTX_UNLOCK(ctx);
2578 	return (err);
2579 }
2580 
2581 static void
2582 iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
2583 {
2584 	if_ctx_t ctx = if_getsoftc(ifp);
2585 
2586 	CTX_LOCK(ctx);
2587 	IFDI_UPDATE_ADMIN_STATUS(ctx);
2588 	IFDI_MEDIA_STATUS(ctx, ifmr);
2589 	CTX_UNLOCK(ctx);
2590 }
2591 
2592 static void
2593 iflib_stop(if_ctx_t ctx)
2594 {
2595 	iflib_txq_t txq = ctx->ifc_txqs;
2596 	iflib_rxq_t rxq = ctx->ifc_rxqs;
2597 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2598 	if_shared_ctx_t sctx = ctx->ifc_sctx;
2599 	iflib_dma_info_t di;
2600 	iflib_fl_t fl;
2601 	int i, j;
2602 
2603 	/* Tell the stack that the interface is no longer active */
2604 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2605 
2606 	IFDI_INTR_DISABLE(ctx);
2607 	DELAY(1000);
2608 	IFDI_STOP(ctx);
2609 	DELAY(1000);
2610 
2611 	/*
2612 	 * Stop any pending txsync/rxsync and prevent new ones
2613 	 * from starting. Processes blocked in poll() will get
2614 	 * POLLERR.
2615 	 */
2616 	netmap_disable_all_rings(ctx->ifc_ifp);
2617 
2618 	iflib_debug_reset();
2619 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
2620 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
2621 		/* make sure all transmitters have completed before proceeding XXX */
2622 
2623 		CALLOUT_LOCK(txq);
2624 		callout_stop(&txq->ift_timer);
2625 #ifdef DEV_NETMAP
2626 		callout_stop(&txq->ift_netmap_timer);
2627 #endif /* DEV_NETMAP */
2628 		CALLOUT_UNLOCK(txq);
2629 
2630 		if (!ctx->ifc_sysctl_simple_tx) {
2631 			/* clean any enqueued buffers */
2632 			iflib_ifmp_purge(txq);
2633 		}
2634 		/* Free any existing tx buffers. */
2635 		for (j = 0; j < txq->ift_size; j++) {
2636 			iflib_txsd_free(ctx, txq, j);
2637 		}
2638 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2639 		txq->ift_in_use = txq->ift_gen = txq->ift_no_desc_avail = 0;
2640 		if (sctx->isc_flags & IFLIB_PRESERVE_TX_INDICES)
2641 			txq->ift_cidx = txq->ift_pidx;
2642 		else
2643 			txq->ift_cidx = txq->ift_pidx = 0;
2644 
2645 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
2646 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2647 		txq->ift_pullups = 0;
2648 		ifmp_ring_reset_stats(txq->ift_br);
2649 		for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
2650 			bzero((void *)di->idi_vaddr, di->idi_size);
2651 	}
2652 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
2653 		if (rxq->ifr_task.gt_taskqueue != NULL)
2654 			gtaskqueue_drain(rxq->ifr_task.gt_taskqueue,
2655 				 &rxq->ifr_task.gt_task);
2656 
2657 		rxq->ifr_cq_cidx = 0;
2658 		for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
2659 			bzero((void *)di->idi_vaddr, di->idi_size);
2660 		/* also resets the free lists pidx/cidx */
2661 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
2662 			iflib_fl_bufs_free(fl);
2663 	}
2664 }
2665 
2666 static inline caddr_t
2667 calc_next_rxd(iflib_fl_t fl, int cidx)
2668 {
2669 	qidx_t size;
2670 	int nrxd;
2671 	caddr_t start, end, cur, next;
2672 
2673 	nrxd = fl->ifl_size;
2674 	size = fl->ifl_rxd_size;
2675 	start = fl->ifl_ifdi->idi_vaddr;
2676 
2677 	if (__predict_false(size == 0))
2678 		return (start);
2679 	cur = start + size * cidx;
2680 	end = start + size * nrxd;
2681 	next = CACHE_PTR_NEXT(cur);
2682 	return (next < end ? next : start);
2683 }
2684 
2685 static inline void
2686 prefetch_pkts(iflib_fl_t fl, int cidx)
2687 {
2688 	int nextptr;
2689 	int nrxd = fl->ifl_size;
2690 	caddr_t next_rxd;
2691 
2692 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd - 1);
2693 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2694 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
2695 	next_rxd = calc_next_rxd(fl, cidx);
2696 	prefetch(next_rxd);
2697 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd - 1)]);
2698 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd - 1)]);
2699 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd - 1)]);
2700 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd - 1)]);
2701 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd - 1)]);
2702 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd - 1)]);
2703 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd - 1)]);
2704 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd - 1)]);
2705 }
2706 
2707 static struct mbuf *
2708 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd,
2709     int *pf_rv, if_rxd_info_t ri)
2710 {
2711 	bus_dmamap_t map;
2712 	iflib_fl_t fl;
2713 	caddr_t payload;
2714 	struct mbuf *m;
2715 	int flid, cidx, len, next;
2716 
2717 	map = NULL;
2718 	flid = irf->irf_flid;
2719 	cidx = irf->irf_idx;
2720 	fl = &rxq->ifr_fl[flid];
2721 	sd->ifsd_fl = fl;
2722 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
2723 	fl->ifl_credits--;
2724 #if MEMORY_LOGGING
2725 	fl->ifl_m_dequeued++;
2726 #endif
2727 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2728 		prefetch_pkts(fl, cidx);
2729 	next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size - 1);
2730 	prefetch(&fl->ifl_sds.ifsd_map[next]);
2731 	map = fl->ifl_sds.ifsd_map[cidx];
2732 
2733 	bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
2734 
2735 	if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL &&
2736 	    irf->irf_len != 0) {
2737 		payload  = *sd->ifsd_cl;
2738 		payload +=  ri->iri_pad;
2739 		len = ri->iri_len - ri->iri_pad;
2740 		*pf_rv = pfil_mem_in(rxq->pfil, payload, len, ri->iri_ifp, &m);
2741 		switch (*pf_rv) {
2742 		case PFIL_DROPPED:
2743 		case PFIL_CONSUMED:
2744 			/*
2745 			 * The filter ate it.  Everything is recycled.
2746 			 */
2747 			m = NULL;
2748 			unload = false;
2749 			break;
2750 		case PFIL_REALLOCED:
2751 			/*
2752 			 * The filter copied it.  Everything is recycled.
2753 			 * 'm' points at new mbuf.
2754 			 */
2755 			unload = false;
2756 			break;
2757 		case PFIL_PASS:
2758 			/*
2759 			 * Filter said it was OK, so receive like
2760 			 * normal
2761 			 */
2762 			m = fl->ifl_sds.ifsd_m[cidx];
2763 			fl->ifl_sds.ifsd_m[cidx] = NULL;
2764 			break;
2765 		default:
2766 			MPASS(0);
2767 		}
2768 	} else {
2769 		m = fl->ifl_sds.ifsd_m[cidx];
2770 		fl->ifl_sds.ifsd_m[cidx] = NULL;
2771 		if (pf_rv != NULL)
2772 			*pf_rv = PFIL_PASS;
2773 	}
2774 
2775 	if (unload && irf->irf_len != 0)
2776 		bus_dmamap_unload(fl->ifl_buf_tag, map);
2777 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size - 1);
2778 	if (__predict_false(fl->ifl_cidx == 0))
2779 		fl->ifl_gen = 0;
2780 	bit_clear(fl->ifl_rx_bitmap, cidx);
2781 	return (m);
2782 }
2783 
2784 static struct mbuf *
2785 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv)
2786 {
2787 	struct mbuf *m, *mh, *mt;
2788 	caddr_t cl;
2789 	int  *pf_rv_ptr, flags, i, padlen;
2790 	bool consumed;
2791 
2792 	i = 0;
2793 	mh = NULL;
2794 	consumed = false;
2795 	*pf_rv = PFIL_PASS;
2796 	pf_rv_ptr = pf_rv;
2797 	do {
2798 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd,
2799 		    pf_rv_ptr, ri);
2800 
2801 		MPASS(*sd->ifsd_cl != NULL);
2802 
2803 		/*
2804 		 * Exclude zero-length frags & frags from
2805 		 * packets the filter has consumed or dropped
2806 		 */
2807 		if (ri->iri_frags[i].irf_len == 0 || consumed ||
2808 		    *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) {
2809 			if (mh == NULL) {
2810 				/* everything saved here */
2811 				consumed = true;
2812 				pf_rv_ptr = NULL;
2813 				continue;
2814 			}
2815 			/* XXX we can save the cluster here, but not the mbuf */
2816 			m_init(m, M_NOWAIT, MT_DATA, 0);
2817 			m_free(m);
2818 			continue;
2819 		}
2820 		if (mh == NULL) {
2821 			flags = M_PKTHDR | M_EXT;
2822 			mh = mt = m;
2823 			padlen = ri->iri_pad;
2824 		} else {
2825 			flags = M_EXT;
2826 			mt->m_next = m;
2827 			mt = m;
2828 			/* assuming padding is only on the first fragment */
2829 			padlen = 0;
2830 		}
2831 		cl = *sd->ifsd_cl;
2832 		*sd->ifsd_cl = NULL;
2833 
2834 		/* Can these two be made one? */
2835 		m_init(m, M_NOWAIT, MT_DATA, flags);
2836 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
2837 		/*
2838 		 * These must follow m_init and m_cljset
2839 		 */
2840 		m->m_data += padlen;
2841 		ri->iri_len -= padlen;
2842 		m->m_len = ri->iri_frags[i].irf_len;
2843 	} while (++i < ri->iri_nfrags);
2844 
2845 	return (mh);
2846 }
2847 
2848 /*
2849  * Process one software descriptor
2850  */
2851 static struct mbuf *
2852 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
2853 {
2854 	struct if_rxsd sd;
2855 	struct mbuf *m;
2856 	int pf_rv;
2857 
2858 	/* should I merge this back in now that the two paths are basically duplicated? */
2859 	if (ri->iri_nfrags == 1 &&
2860 	    ri->iri_frags[0].irf_len != 0 &&
2861 	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
2862 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd,
2863 		    &pf_rv, ri);
2864 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
2865 			return (m);
2866 		if (pf_rv == PFIL_PASS) {
2867 			m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
2868 #ifndef __NO_STRICT_ALIGNMENT
2869 			if (!IP_ALIGNED(m) && ri->iri_pad == 0)
2870 				m->m_data += 2;
2871 #endif
2872 			memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
2873 			m->m_len = ri->iri_frags[0].irf_len;
2874 			m->m_data += ri->iri_pad;
2875 			ri->iri_len -= ri->iri_pad;
2876 		}
2877 	} else {
2878 		m = assemble_segments(rxq, ri, &sd, &pf_rv);
2879 		if (m == NULL)
2880 			return (NULL);
2881 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
2882 			return (m);
2883 	}
2884 	m->m_pkthdr.len = ri->iri_len;
2885 	m->m_pkthdr.rcvif = ri->iri_ifp;
2886 	m->m_flags |= ri->iri_flags;
2887 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
2888 	m->m_pkthdr.flowid = ri->iri_flowid;
2889 #ifdef NUMA
2890 	m->m_pkthdr.numa_domain = if_getnumadomain(ri->iri_ifp);
2891 #endif
2892 	M_HASHTYPE_SET(m, ri->iri_rsstype);
2893 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
2894 	m->m_pkthdr.csum_data = ri->iri_csum_data;
2895 	return (m);
2896 }
2897 
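/*
 * Editor's note: iflib_rxd_pkt_get() above consumes an if_rxd_info that
 * the driver's isc_rxd_pkt_get callback filled in.  A hedged sketch of a
 * single-fragment callback follows; the "mydrv" descriptor fields are
 * hypothetical, while the iri_* fields are those used above.
 */
#if 0	/* illustrative only, not compiled */
static int
mydrv_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct mydrv_softc *sc = arg;
	struct mydrv_rx_ring *ring = &sc->rx_rings[ri->iri_qsidx];
	struct mydrv_rx_desc *d = &ring->desc[ri->iri_cidx];

	ri->iri_len = le16toh(d->len);
	ri->iri_frags[0].irf_flid = 0;		/* single free list */
	ri->iri_frags[0].irf_idx = ri->iri_cidx;
	ri->iri_frags[0].irf_len = ri->iri_len;
	ri->iri_nfrags = 1;
	ri->iri_flowid = le32toh(d->rss_hash);
	ri->iri_rsstype = M_HASHTYPE_OPAQUE;	/* hash type not decoded */
	return (0);
}
#endif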
2898 static void
2899 _task_fn_rx_watchdog(void *context)
2900 {
2901 	iflib_rxq_t rxq = context;
2902 
2903 	GROUPTASK_ENQUEUE(&rxq->ifr_task);
2904 }
2905 
2906 static uint8_t
2907 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
2908 {
2909 	if_t ifp;
2910 	if_ctx_t ctx = rxq->ifr_ctx;
2911 	if_shared_ctx_t sctx = ctx->ifc_sctx;
2912 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2913 	int avail, i;
2914 	qidx_t *cidxp;
2915 	struct if_rxd_info ri;
2916 	int err, budget_left, rx_bytes, rx_pkts;
2917 	iflib_fl_t fl;
2918 #if defined(INET6) || defined(INET)
2919 	int lro_enabled;
2920 #endif
2921 	uint8_t retval = 0;
2922 
2923 	/*
2924 	 * XXX early demux data packets so that if_input processing only handles
2925 	 * acks in interrupt context
2926 	 */
2927 	struct mbuf *m, *mh, *mt;
2928 
2929 	NET_EPOCH_ASSERT();
2930 
2931 	ifp = ctx->ifc_ifp;
2932 	mh = mt = NULL;
2933 	MPASS(budget > 0);
2934 	rx_pkts	= rx_bytes = 0;
2935 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
2936 		cidxp = &rxq->ifr_cq_cidx;
2937 	else
2938 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
2939 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
2940 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2941 			retval |= iflib_fl_refill_all(ctx, fl);
2942 		DBG_COUNTER_INC(rx_unavail);
2943 		return (retval);
2944 	}
2945 
2946 #if defined(INET6) || defined(INET)
2947 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2948 #endif
2949 
2950 	/* pfil needs the vnet to be set */
2951 	CURVNET_SET_QUIET(if_getvnet(ifp));
2952 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
2953 		if (__predict_false(!CTX_ACTIVE(ctx))) {
2954 			DBG_COUNTER_INC(rx_ctx_inactive);
2955 			break;
2956 		}
2957 		/*
2958 		 * Reset client set fields to their default values
2959 		 */
2960 		rxd_info_zero(&ri);
2961 		ri.iri_qsidx = rxq->ifr_id;
2962 		ri.iri_cidx = *cidxp;
2963 		ri.iri_ifp = ifp;
2964 		ri.iri_frags = rxq->ifr_frags;
2965 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
2966 
2967 		if (err)
2968 			goto err;
2969 		rx_pkts += 1;
2970 		rx_bytes += ri.iri_len;
2971 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
2972 			*cidxp = ri.iri_cidx;
2973 			/* Update our consumer index */
2974 			/* XXX NB: shurd - check if this is still safe */
2975 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0])
2976 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
2977 			/* was this only a completion queue message? */
2978 			if (__predict_false(ri.iri_nfrags == 0))
2979 				continue;
2980 		}
2981 		MPASS(ri.iri_nfrags != 0);
2982 		MPASS(ri.iri_len != 0);
2983 
2984 		/* will advance the cidx on the corresponding free lists */
2985 		m = iflib_rxd_pkt_get(rxq, &ri);
2986 		avail--;
2987 		budget_left--;
2988 		if (avail == 0 && budget_left)
2989 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
2990 
2991 		if (__predict_false(m == NULL))
2992 			continue;
2993 
2994 #ifndef __NO_STRICT_ALIGNMENT
2995 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
2996 			continue;
2997 #endif
2998 #if defined(INET6) || defined(INET)
2999 		if (lro_enabled) {
3000 			tcp_lro_queue_mbuf(&rxq->ifr_lc, m);
3001 			continue;
3002 		}
3003 #endif
3004 
3005 		if (mh == NULL)
3006 			mh = mt = m;
3007 		else {
3008 			mt->m_nextpkt = m;
3009 			mt = m;
3010 		}
3011 	}
3012 	CURVNET_RESTORE();
3013 	/* make sure that we can refill faster than drain */
3014 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
3015 		retval |= iflib_fl_refill_all(ctx, fl);
3016 
3017 	if (mh != NULL) {
3018 		if_input(ifp, mh);
3019 		DBG_COUNTER_INC(rx_if_input);
3020 	}
3021 
3022 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
3023 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
3024 
3025 	/*
3026 	 * Flush any outstanding LRO work
3027 	 */
3028 #if defined(INET6) || defined(INET)
3029 	tcp_lro_flush_all(&rxq->ifr_lc);
3030 #endif
3031 	if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0)
3032 		retval |= IFLIB_RXEOF_MORE;
3033 	return (retval);
3034 err:
3035 	STATE_LOCK(ctx);
3036 	ctx->ifc_flags |= IFC_DO_RESET;
3037 	iflib_admin_intr_deferred(ctx);
3038 	STATE_UNLOCK(ctx);
3039 	return (0);
3040 }
3041 
3042 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq) - 1)
3043 static inline qidx_t
3044 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
3045 {
3046 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
3047 	qidx_t minthresh = txq->ift_size / 8;
3048 	if (in_use > 4 * minthresh)
3049 		return (notify_count);
3050 	if (in_use > 2 * minthresh)
3051 		return (notify_count >> 1);
3052 	if (in_use > minthresh)
3053 		return (notify_count >> 3);
3054 	return (0);
3055 }
3056 
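/*
 * Editor's note: a worked example of the thresholds above, assuming
 * ift_size = 1024 and ift_update_freq = 16 (so notify_count = 63 and
 * minthresh = 128):
 *
 *	in_use > 512  -> defer up to 63 descriptors before a doorbell
 *	in_use > 256  -> defer up to 31
 *	in_use > 128  -> defer up to 7
 *	otherwise     -> ring the doorbell immediately
 */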
3057 static inline qidx_t
3058 txq_max_rs_deferred(iflib_txq_t txq)
3059 {
3060 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
3061 	qidx_t minthresh = txq->ift_size / 8;
3062 	if (txq->ift_in_use > 4 * minthresh)
3063 		return (notify_count);
3064 	if (txq->ift_in_use > 2 * minthresh)
3065 		return (notify_count >> 1);
3066 	if (txq->ift_in_use > minthresh)
3067 		return (notify_count >> 2);
3068 	return (2);
3069 }
3070 
3071 #define M_CSUM_FLAGS(m)		((m)->m_pkthdr.csum_flags)
3072 #define M_HAS_VLANTAG(m)	(m->m_flags & M_VLANTAG)
3073 
3074 #define TXQ_MAX_DB_DEFERRED(txq, in_use)	txq_max_db_deferred((txq), (in_use))
3075 #define TXQ_MAX_RS_DEFERRED(txq)	txq_max_rs_deferred(txq)
3076 #define TXQ_MAX_DB_CONSUMED(size)	(size >> 4)
3077 
3078 /* forward compatibility for cxgb */
3079 #define FIRST_QSET(ctx) 0
3080 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
3081 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
3082 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
3083 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
3084 
3085 /* XXX we should be setting this to something other than zero */
3086 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
3087 #define	MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
3088     (ctx)->ifc_softc_ctx.isc_tx_nsegments)
3089 
3090 static inline bool
3091 iflib_txd_db_check(iflib_txq_t txq, int ring)
3092 {
3093 	if_ctx_t ctx = txq->ift_ctx;
3094 	qidx_t dbval, max;
3095 
3096 	max = TXQ_MAX_DB_DEFERRED(txq, txq->ift_in_use);
3097 
3098 	/* force || threshold exceeded || at the edge of the ring */
3099 	if (ring || (txq->ift_db_pending >= max) || (TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2)) {
3100 
3101 		/*
3102 		 * 'npending' is used if the card's doorbell is in terms of the number of descriptors
3103 		 * pending flush (BRCM). 'pidx' is used in cases where the card's doorbell uses the
3104 		 * producer index explicitly (INTC).
3105 		 */
3106 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
3107 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3108 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3109 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
3110 
3111 		/*
3112 		 * Absent bugs, there are zero packets pending, so reset the pending counts to zero.
3113 		 */
3114 		txq->ift_db_pending = txq->ift_npending = 0;
3115 		return (true);
3116 	}
3117 	return (false);
3118 }
3119 
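/*
 * Editor's note: the flush call above lands in the driver's isc_txd_flush
 * callback.  A hedged sketch of an INTC-style (producer-index) doorbell
 * follows; the register name and write helper are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void
mydrv_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct mydrv_softc *sc = arg;

	/* Publish the new producer index to the hardware. */
	mydrv_write_reg(sc, MYDRV_TDT(txqid), pidx);
}
#endif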
3120 #ifdef PKT_DEBUG
3121 static void
3122 print_pkt(if_pkt_info_t pi)
3123 {
3124 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
3125 	    pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
3126 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
3127 	    pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
3128 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
3129 	    pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
3130 }
3131 #endif
3132 
3133 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
3134 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
3135 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
3136 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
3137 
3138 /**
3139  * Parses out ethernet header information in the given mbuf.
3140  * Returns in pi: ipi_etype (EtherType) and ipi_ehdrlen (Ethernet header length)
3141  *
3142  * This will account for the VLAN header if present.
3143  *
3144  * XXX: This doesn't handle QinQ, which could prevent TX offloads for those
3145  * types of packets.
3146  */
3147 static int
3148 iflib_parse_ether_header(if_pkt_info_t pi, struct mbuf **mp, uint64_t *pullups)
3149 {
3150 	struct ether_vlan_header *eh;
3151 	struct mbuf *m;
3152 
3153 	m = *mp;
3154 	if (__predict_false(m->m_len < sizeof(*eh))) {
3155 		(*pullups)++;
3156 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
3157 			return (ENOMEM);
3158 	}
3159 	eh = mtod(m, struct ether_vlan_header *);
3160 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3161 		pi->ipi_etype = ntohs(eh->evl_proto);
3162 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3163 	} else {
3164 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
3165 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
3166 	}
3167 	*mp = m;
3168 
3169 	return (0);
3170 }
3171 
3172 /**
3173  * Parse up to the L3 header and extract IPv4/IPv6 header information into pi.
3174  * Currently this information includes: IP ToS value, IP header version/presence
3175  *
3176  * This is missing some checks and doesn't edit the packet content as it goes,
3177  * unlike iflib_parse_header(), in order to keep the amount of code here minimal.
3178  */
3179 static int
3180 iflib_parse_header_partial(if_pkt_info_t pi, struct mbuf **mp, uint64_t *pullups)
3181 {
3182 	struct mbuf *m;
3183 	int err;
3184 
3185 	*pullups = 0;
3186 	m = *mp;
3187 	if (!M_WRITABLE(m)) {
3188 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
3189 			return (ENOMEM);
3190 		} else {
3191 			m_freem(*mp);
3192 			DBG_COUNTER_INC(tx_frees);
3193 			*mp = m;
3194 		}
3195 	}
3196 
3197 	/* Fills out pi->ipi_etype */
3198 	err = iflib_parse_ether_header(pi, mp, pullups);
3199 	if (err)
3200 		return (err);
3201 	m = *mp;
3202 
3203 	switch (pi->ipi_etype) {
3204 #ifdef INET
3205 	case ETHERTYPE_IP:
3206 	{
3207 		struct mbuf *n;
3208 		struct ip *ip = NULL;
3209 		int miniplen;
3210 
3211 		miniplen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip));
3212 		if (__predict_false(m->m_len < miniplen)) {
3213 			/*
3214 			 * Check for the common case where the first mbuf only
3215 			 * contains the Ethernet header.
3216 			 */
3217 			if (m->m_len == pi->ipi_ehdrlen) {
3218 				n = m->m_next;
3219 				MPASS(n);
3220 				/* If next mbuf contains at least the minimal IP header, then stop */
3221 				if (n->m_len >= sizeof(*ip)) {
3222 					ip = (struct ip *)n->m_data;
3223 				} else {
3224 					(*pullups)++;
3225 					if (__predict_false((m = m_pullup(m, miniplen)) == NULL))
3226 						return (ENOMEM);
3227 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3228 				}
3229 			} else {
3230 				(*pullups)++;
3231 				if (__predict_false((m = m_pullup(m, miniplen)) == NULL))
3232 					return (ENOMEM);
3233 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3234 			}
3235 		} else {
3236 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3237 		}
3238 
3239 		/* Have the IPv4 header w/ no options here */
3240 		pi->ipi_ip_hlen = ip->ip_hl << 2;
3241 		pi->ipi_ipproto = ip->ip_p;
3242 		pi->ipi_ip_tos = ip->ip_tos;
3243 		pi->ipi_flags |= IPI_TX_IPV4;
3244 
3245 		break;
3246 	}
3247 #endif
3248 #ifdef INET6
3249 	case ETHERTYPE_IPV6:
3250 	{
3251 		struct ip6_hdr *ip6;
3252 
3253 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
3254 			(*pullups)++;
3255 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
3256 				return (ENOMEM);
3257 		}
3258 		ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3259 
3260 		/* Have the IPv6 fixed header here */
3261 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
3262 		pi->ipi_ipproto = ip6->ip6_nxt;
3263 		pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6);
3264 		pi->ipi_flags |= IPI_TX_IPV6;
3265 
3266 		break;
3267 	}
3268 #endif
3269 	default:
3270 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3271 		pi->ipi_ip_hlen = 0;
3272 		break;
3273 	}
3274 	*mp = m;
3275 
3276 	return (0);
3277 
3278 }
3279 
3280 static int
3281 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
3282 {
3283 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
3284 	struct mbuf *m;
3285 	int err;
3286 
3287 	m = *mp;
3288 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
3289 	    M_WRITABLE(m) == 0) {
3290 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
3291 			return (ENOMEM);
3292 		} else {
3293 			m_freem(*mp);
3294 			DBG_COUNTER_INC(tx_frees);
3295 			*mp = m;
3296 		}
3297 	}
3298 
3299 	/* Fills out pi->ipi_etype */
3300 	err = iflib_parse_ether_header(pi, mp, &txq->ift_pullups);
3301 	if (__predict_false(err))
3302 		return (err);
3303 	m = *mp;
3304 
3305 	switch (pi->ipi_etype) {
3306 #ifdef INET
3307 	case ETHERTYPE_IP:
3308 	{
3309 		struct ip *ip;
3310 		struct tcphdr *th;
3311 		uint8_t hlen;
3312 
3313 		hlen = pi->ipi_ehdrlen + sizeof(*ip);
3314 		if (__predict_false(m->m_len < hlen)) {
3315 			txq->ift_pullups++;
3316 			if (__predict_false((m = m_pullup(m, hlen)) == NULL))
3317 				return (ENOMEM);
3318 		}
3319 		ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
3320 		hlen = pi->ipi_ehdrlen + (ip->ip_hl << 2);
3321 		if (ip->ip_p == IPPROTO_TCP) {
3322 			hlen += sizeof(*th);
3323 			th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
3324 		} else if (ip->ip_p == IPPROTO_UDP) {
3325 			hlen += sizeof(struct udphdr);
3326 		}
3327 		if (__predict_false(m->m_len < hlen)) {
3328 			txq->ift_pullups++;
3329 			if ((m = m_pullup(m, hlen)) == NULL)
3330 				return (ENOMEM);
3331 		}
3332 		pi->ipi_ip_hlen = ip->ip_hl << 2;
3333 		pi->ipi_ipproto = ip->ip_p;
3334 		pi->ipi_ip_tos = ip->ip_tos;
3335 		pi->ipi_flags |= IPI_TX_IPV4;
3336 
3337 		/* TCP checksum offload may require TCP header length */
3338 		if (IS_TX_OFFLOAD4(pi)) {
3339 			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
3340 				pi->ipi_tcp_hflags = tcp_get_flags(th);
3341 				pi->ipi_tcp_hlen = th->th_off << 2;
3342 				pi->ipi_tcp_seq = th->th_seq;
3343 			}
3344 			if (IS_TSO4(pi)) {
3345 				if (__predict_false(ip->ip_p != IPPROTO_TCP))
3346 					return (ENXIO);
3347 				/*
3348 				 * TSO always requires hardware checksum offload.
3349 				 */
3350 				pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
3351 				th->th_sum = in_pseudo(ip->ip_src.s_addr,
3352 						       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3353 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3354 				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
3355 					ip->ip_sum = 0;
3356 					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
3357 				}
3358 			}
3359 		}
3360 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
3361 			ip->ip_sum = 0;
3362 
3363 		break;
3364 	}
3365 #endif
3366 #ifdef INET6
3367 	case ETHERTYPE_IPV6:
3368 	{
3369 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
3370 		struct tcphdr *th;
3371 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
3372 
3373 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
3374 			txq->ift_pullups++;
3375 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
3376 				return (ENOMEM);
3377 		}
3378 		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3379 
3380 		/* XXX-BZ this will go badly in case of ext hdrs. */
3381 		pi->ipi_ipproto = ip6->ip6_nxt;
3382 		pi->ipi_ip_tos = IPV6_TRAFFIC_CLASS(ip6);
3383 		pi->ipi_flags |= IPI_TX_IPV6;
3384 
3385 		/* TCP checksum offload may require TCP header length */
3386 		if (IS_TX_OFFLOAD6(pi)) {
3387 			if (pi->ipi_ipproto == IPPROTO_TCP) {
3388 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3389 					txq->ift_pullups++;
3390 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
3391 						return (ENOMEM);
3392 				}
3393 				pi->ipi_tcp_hflags = tcp_get_flags(th);
3394 				pi->ipi_tcp_hlen = th->th_off << 2;
3395 				pi->ipi_tcp_seq = th->th_seq;
3396 			}
3397 			if (IS_TSO6(pi)) {
3398 				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
3399 					return (ENXIO);
3400 				/*
3401 				 * TSO always requires hardware checksum offload.
3402 				 */
3403 				pi->ipi_csum_flags |= CSUM_IP6_TCP;
3404 				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3405 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3406 			}
3407 		}
3408 		break;
3409 	}
3410 #endif
3411 	default:
3412 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3413 		pi->ipi_ip_hlen = 0;
3414 		break;
3415 	}
3416 	*mp = m;
3417 
3418 	return (0);
3419 }
3420 
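/*
 * Worked example (a plain frame; the values below are illustrative,
 * not device-specific): for a TSO-eligible TCP/IPv4 packet with an
 * untagged 14-byte Ethernet header, no IP options, and no TCP options,
 * iflib_parse_header() leaves:
 *
 *	pi->ipi_ehdrlen  = 14
 *	pi->ipi_ip_hlen  = 20		(ip_hl == 5)
 *	pi->ipi_tcp_hlen = 20		(th_off == 5)
 *	pi->ipi_ipproto  = IPPROTO_TCP
 *	pi->ipi_flags   |= IPI_TX_IPV4
 *
 * so a driver's isc_txd_encap routine can program its offload context
 * descriptors without re-parsing the mbuf chain.
 */
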
3421 /*
3422  * If dodgy hardware rejects the scatter-gather chain we've handed it,
3423  * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
3424  * m_defrag'd mbufs.
3425  */
3426 static __noinline struct mbuf *
3427 iflib_remove_mbuf(iflib_txq_t txq)
3428 {
3429 	int ntxd, pidx;
3430 	struct mbuf *m, **ifsd_m;
3431 
3432 	ifsd_m = txq->ift_sds.ifsd_m;
3433 	ntxd = txq->ift_size;
3434 	pidx = txq->ift_pidx & (ntxd - 1);
3436 	m = ifsd_m[pidx];
3437 	ifsd_m[pidx] = NULL;
3438 	bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
3439 	if (txq->ift_sds.ifsd_tso_map != NULL)
3440 		bus_dmamap_unload(txq->ift_tso_buf_tag,
3441 		    txq->ift_sds.ifsd_tso_map[pidx]);
3442 #if MEMORY_LOGGING
3443 	txq->ift_dequeued++;
3444 #endif
3445 	return (m);
3446 }
3447 
3448 static inline caddr_t
3449 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
3450 {
3451 	qidx_t size;
3452 	int ntxd;
3453 	caddr_t start, end, cur, next;
3454 
3455 	ntxd = txq->ift_size;
3456 	size = txq->ift_txd_size[qid];
3457 	start = txq->ift_ifdi[qid].idi_vaddr;
3458 
3459 	if (__predict_false(size == 0))
3460 		return (start);
3461 	cur = start + size * cidx;
3462 	end = start + size * ntxd;
3463 	next = CACHE_PTR_NEXT(cur);
3464 	return (next < end ? next : start);
3465 }
3466 
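#if 0
/*
 * Illustrative standalone model (not compiled; names hypothetical) of
 * the pointer arithmetic in calc_next_txd() above: descriptors are a
 * contiguous array of fixed-size entries, so the prefetch pointer
 * advances one cache line at a time and wraps to the ring base when
 * it runs off the end.
 */
static char *
next_prefetch_ptr(char *start, size_t desc_size, int ndesc, int cidx)
{
	char *cur = start + desc_size * cidx;	/* current descriptor */
	char *end = start + desc_size * ndesc;	/* one past the ring */
	/* round up to the next cache-line boundary */
	char *next = (char *)(((uintptr_t)cur + CACHE_LINE_SIZE) &
	    ~((uintptr_t)CACHE_LINE_SIZE - 1));

	return (next < end ? next : start);
}
#endif
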
3467 /*
3468  * Pad an mbuf to ensure a minimum ethernet frame size.
3469  * min_frame_size is the frame size (less CRC) to pad the mbuf to
3470  */
3471 static __noinline int
3472 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3473 {
3474 	/*
3475 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3476 	 * an ARP message is the smallest common payload I can think of
3477 	 */
3478 	static char pad[18];	/* just zeros */
3479 	int n;
3480 	struct mbuf *new_head;
3481 
3482 	if (!M_WRITABLE(*m_head)) {
3483 		new_head = m_dup(*m_head, M_NOWAIT);
3484 		if (new_head == NULL) {
3485 			m_freem(*m_head);
3486 			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
3487 			DBG_COUNTER_INC(encap_pad_mbuf_fail);
3488 			DBG_COUNTER_INC(tx_frees);
3489 			return (ENOMEM);
3490 		}
3491 		m_freem(*m_head);
3492 		*m_head = new_head;
3493 	}
3494 
3495 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3496 	     n > 0; n -= sizeof(pad))
3497 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3498 			break;
3499 
3500 	if (n > 0) {
3501 		m_freem(*m_head);
3502 		device_printf(dev, "cannot pad short frame\n");
3503 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
3504 		DBG_COUNTER_INC(tx_frees);
3505 		return (ENOBUFS);
3506 	}
3507 
3508 	return (0);
3509 }
3510 
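/*
 * Worked example (standard Ethernet, values not device-specific): the
 * minimum frame is 64 bytes including the 4-byte CRC, so a driver
 * typically sets isc_min_frame_size to 60.  A 42-byte ARP request
 * (14-byte Ethernet header + 28-byte ARP payload) therefore needs 18
 * bytes of zero padding, which is exactly why pad[] above is 18 bytes.
 */
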
3511 static int
3512 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
3513 {
3514 	if_ctx_t		ctx;
3515 	if_shared_ctx_t		sctx;
3516 	if_softc_ctx_t		scctx;
3517 	bus_dma_tag_t		buf_tag;
3518 	bus_dma_segment_t	*segs;
3519 	struct mbuf		*m_head, **ifsd_m;
3520 	void			*next_txd;
3521 	bus_dmamap_t		map;
3522 	struct if_pkt_info	pi;
3523 	int remap = 0;
3524 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
3525 
3526 	ctx = txq->ift_ctx;
3527 	sctx = ctx->ifc_sctx;
3528 	scctx = &ctx->ifc_softc_ctx;
3529 	segs = txq->ift_segs;
3530 	ntxd = txq->ift_size;
3531 	m_head = *m_headp;
3532 	map = NULL;
3533 
3534 	/*
3535 	 * If we're doing TSO the next descriptor to clean may be quite far ahead
3536 	 */
3537 	cidx = txq->ift_cidx;
3538 	pidx = txq->ift_pidx;
3539 	if (ctx->ifc_flags & IFC_PREFETCH) {
3540 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd - 1);
3541 		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
3542 			next_txd = calc_next_txd(txq, cidx, 0);
3543 			prefetch(next_txd);
3544 		}
3545 
3546 		/* prefetch the next cache line of mbuf pointers and flags */
3547 		prefetch(&txq->ift_sds.ifsd_m[next]);
3548 		prefetch(&txq->ift_sds.ifsd_map[next]);
3549 		next = (cidx + CACHE_LINE_SIZE) & (ntxd - 1);
3550 	}
3551 	map = txq->ift_sds.ifsd_map[pidx];
3552 	ifsd_m = txq->ift_sds.ifsd_m;
3553 
3554 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3555 		buf_tag = txq->ift_tso_buf_tag;
3556 		max_segs = scctx->isc_tx_tso_segments_max;
3557 		map = txq->ift_sds.ifsd_tso_map[pidx];
3558 		MPASS(buf_tag != NULL);
3559 		MPASS(max_segs > 0);
3560 	} else {
3561 		buf_tag = txq->ift_buf_tag;
3562 		max_segs = scctx->isc_tx_nsegments;
3563 		map = txq->ift_sds.ifsd_map[pidx];
3564 	}
3565 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3566 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3567 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
3568 		if (err) {
3569 			DBG_COUNTER_INC(encap_txd_encap_fail);
3570 			return (err);
3571 		}
3572 	}
3573 	m_head = *m_headp;
3574 
3575 	pkt_info_zero(&pi);
3576 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG | M_BCAST | M_MCAST));
3577 	pi.ipi_pidx = pidx;
3578 	pi.ipi_qsidx = txq->ift_id;
3579 	pi.ipi_len = m_head->m_pkthdr.len;
3580 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
3581 	pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? m_head->m_pkthdr.ether_vtag : 0;
3582 
3583 	/* deliberate bitwise OR to make one condition */
3584 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
3585 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) {
3586 			DBG_COUNTER_INC(encap_txd_encap_fail);
3587 			return (err);
3588 		}
3589 		m_head = *m_headp;
3590 	}
3591 
3592 retry:
3593 	err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
3594 	    BUS_DMA_NOWAIT);
3595 defrag:
3596 	if (__predict_false(err)) {
3597 		switch (err) {
3598 		case EFBIG:
3599 			/* try collapse once and defrag once */
3600 			if (remap == 0) {
3601 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3602 				/* try defrag if collapsing fails */
3603 				if (m_head == NULL)
3604 					remap++;
3605 			}
3606 			if (remap == 1) {
3607 				txq->ift_mbuf_defrag++;
3608 				m_head = m_defrag(*m_headp, M_NOWAIT);
3609 			}
3610 			/*
3611 			 * remap should never be >1 unless bus_dmamap_load_mbuf_sg
3612 			 * failed to map an mbuf that was run through m_defrag
3613 			 */
3614 			MPASS(remap <= 1);
3615 			if (__predict_false(m_head == NULL || remap > 1))
3616 				goto defrag_failed;
3617 			remap++;
3618 			*m_headp = m_head;
3619 			goto retry;
3621 		case ENOMEM:
3622 			txq->ift_no_tx_dma_setup++;
3623 			break;
3624 		default:
3625 			txq->ift_no_tx_dma_setup++;
3626 			m_freem(*m_headp);
3627 			DBG_COUNTER_INC(tx_frees);
3628 			*m_headp = NULL;
3629 			break;
3630 		}
3631 		txq->ift_map_failed++;
3632 		DBG_COUNTER_INC(encap_load_mbuf_fail);
3633 		DBG_COUNTER_INC(encap_txd_encap_fail);
3634 		return (err);
3635 	}
3636 	ifsd_m[pidx] = m_head;
3637 	/*
3638 	 * XXX assumes a 1 to 1 relationship between segments and
3639 	 *        descriptors - this does not hold true on all drivers, e.g.
3640 	 *        cxgb
3641 	 */
3642 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3643 		(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
3644 		if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3645 			txq->ift_no_desc_avail++;
3646 			bus_dmamap_unload(buf_tag, map);
3647 			DBG_COUNTER_INC(encap_txq_avail_fail);
3648 			DBG_COUNTER_INC(encap_txd_encap_fail);
3649 			if (ctx->ifc_sysctl_simple_tx) {
3650 				*m_headp = m_head = iflib_remove_mbuf(txq);
3651 				m_freem(*m_headp);
3652 				DBG_COUNTER_INC(tx_frees);
3653 				*m_headp = NULL;
3654 			}
3655 			if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
3656 				GROUPTASK_ENQUEUE(&txq->ift_task);
3657 			return (ENOBUFS);
3658 		}
3659 	}
3660 	/*
3661 	 * On Intel cards we can greatly reduce the number of TX interrupts
3662 	 * we see by only setting report status on every Nth descriptor.
3663 	 * However, this also means that the driver will need to keep track
3664 	 * of the descriptors that RS was set on to check them for the DD bit.
3665 	 */
3666 	txq->ift_rs_pending += nsegs + 1;
3667 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
3668 	    iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
3669 		pi.ipi_flags |= IPI_TX_INTR;
3670 		txq->ift_rs_pending = 0;
3671 	}
3672 
3673 	pi.ipi_segs = segs;
3674 	pi.ipi_nsegs = nsegs;
3675 
3676 	MPASS(pidx >= 0 && pidx < txq->ift_size);
3677 #ifdef PKT_DEBUG
3678 	print_pkt(&pi);
3679 #endif
3680 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
3681 		bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
3682 		DBG_COUNTER_INC(tx_encap);
3683 		MPASS(pi.ipi_new_pidx < txq->ift_size);
3684 
3685 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
3686 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
3687 			ndesc += txq->ift_size;
3688 			txq->ift_gen = 1;
3689 		}
3690 		/*
3691 		 * drivers may need as many as
3692 		 * two sentinels
3693 		 */
3694 		MPASS(ndesc <= pi.ipi_nsegs + 2);
3695 		MPASS(pi.ipi_new_pidx != pidx);
3696 		MPASS(ndesc > 0);
3697 		txq->ift_in_use += ndesc;
3698 		txq->ift_db_pending += ndesc;
3699 
3700 		/*
3701 		 * We update the last software descriptor again here because there may
3702 		 * be a sentinel and/or there may be more mbufs than segments
3703 		 */
3704 		txq->ift_pidx = pi.ipi_new_pidx;
3705 		txq->ift_npending += pi.ipi_ndescs;
3706 	} else {
3707 		*m_headp = m_head = iflib_remove_mbuf(txq);
3708 		if (err == EFBIG) {
3709 			txq->ift_txd_encap_efbig++;
3710 			if (remap < 2) {
3711 				remap = 1;
3712 				goto defrag;
3713 			}
3714 		}
3715 		goto defrag_failed;
3716 	}
3717 	/*
3718 	 * err can't possibly be non-zero here, so we don't need to test it
3719 	 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
3720 	 */
3721 	return (err);
3722 
3723 defrag_failed:
3724 	txq->ift_mbuf_defrag_failed++;
3725 	txq->ift_map_failed++;
3726 	m_freem(*m_headp);
3727 	DBG_COUNTER_INC(tx_frees);
3728 	*m_headp = NULL;
3729 	DBG_COUNTER_INC(encap_txd_encap_fail);
3730 	return (ENOMEM);
3731 }
3732 
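#if 0
/*
 * Minimal model (hypothetical names, not the driver's API) of the
 * deferred report-status policy in iflib_encap() above: request a TX
 * completion interrupt only once enough descriptors have accumulated
 * since the last one, or when the ring is nearly full and completions
 * are needed for forward progress.
 */
static bool
tx_intr_wanted(int *rs_pending, int nsegs, int max_deferred,
    int txq_avail, int max_pkt_desc)
{
	*rs_pending += nsegs + 1;
	if (*rs_pending > max_deferred ||
	    (txq_avail - nsegs) <= max_pkt_desc + 2) {
		*rs_pending = 0;	/* interrupt requested; restart count */
		return (true);		/* caller sets IPI_TX_INTR / RS */
	}
	return (false);
}
#endif
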
3733 static void
3734 iflib_tx_desc_free(iflib_txq_t txq, int n)
3735 {
3736 	uint32_t qsize, cidx, mask, gen;
3737 	struct mbuf *m, **ifsd_m;
3738 	bool do_prefetch;
3739 
3740 	cidx = txq->ift_cidx;
3741 	gen = txq->ift_gen;
3742 	qsize = txq->ift_size;
3743 	mask = qsize - 1;
3744 	ifsd_m = txq->ift_sds.ifsd_m;
3745 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
3746 
3747 	while (n-- > 0) {
3748 		if (do_prefetch) {
3749 			prefetch(ifsd_m[(cidx + 3) & mask]);
3750 			prefetch(ifsd_m[(cidx + 4) & mask]);
3751 		}
3752 		if ((m = ifsd_m[cidx]) != NULL) {
3753 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
3754 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
3755 				bus_dmamap_sync(txq->ift_tso_buf_tag,
3756 				    txq->ift_sds.ifsd_tso_map[cidx],
3757 				    BUS_DMASYNC_POSTWRITE);
3758 				bus_dmamap_unload(txq->ift_tso_buf_tag,
3759 				    txq->ift_sds.ifsd_tso_map[cidx]);
3760 			} else {
3761 				bus_dmamap_sync(txq->ift_buf_tag,
3762 				    txq->ift_sds.ifsd_map[cidx],
3763 				    BUS_DMASYNC_POSTWRITE);
3764 				bus_dmamap_unload(txq->ift_buf_tag,
3765 				    txq->ift_sds.ifsd_map[cidx]);
3766 			}
3767 			/* XXX we don't support any drivers that batch packets yet */
3768 			MPASS(m->m_nextpkt == NULL);
3769 			m_freem(m);
3770 			ifsd_m[cidx] = NULL;
3771 #if MEMORY_LOGGING
3772 			txq->ift_dequeued++;
3773 #endif
3774 			DBG_COUNTER_INC(tx_frees);
3775 		}
3776 		if (__predict_false(++cidx == qsize)) {
3777 			cidx = 0;
3778 			gen = 0;
3779 		}
3780 	}
3781 	txq->ift_cidx = cidx;
3782 	txq->ift_gen = gen;
3783 }
3784 
3785 static __inline int
3786 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
3787 {
3788 	int reclaim;
3789 	if_ctx_t ctx = txq->ift_ctx;
3790 
3791 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
3792 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
3793 
3794 	/*
3795 	 * Need a rate-limiting check so that this isn't called every time
3796 	 */
3797 	iflib_tx_credits_update(ctx, txq);
3798 	reclaim = DESC_RECLAIMABLE(txq);
3799 
3800 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
3801 #ifdef INVARIANTS
3802 		if (iflib_verbose_debug) {
3803 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __func__,
3804 			    txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
3805 			    reclaim, thresh);
3806 		}
3807 #endif
3808 		return (0);
3809 	}
3810 	iflib_tx_desc_free(txq, reclaim);
3811 	txq->ift_cleaned += reclaim;
3812 	txq->ift_in_use -= reclaim;
3813 
3814 	return (reclaim);
3815 }
3816 
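#if 0
/*
 * Sketch (hypothetical names, not the driver's macros) of the
 * two-counter scheme behind the reclaim above: the hardware-completion
 * count only ever grows and the cleaned count chases it.
 */
static int
reclaim_completed(uint64_t processed, uint64_t *cleaned, int thresh)
{
	int reclaim = (int)(processed - *cleaned);

	if (reclaim <= thresh)
		return (0);	/* not enough work to be worth it yet */
	/* ... free mbufs and unload DMA maps for 'reclaim' slots ... */
	*cleaned += reclaim;
	return (reclaim);
}
#endif
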
3817 static struct mbuf **
3818 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
3819 {
3820 	int next, size;
3821 	struct mbuf **items;
3822 
3823 	size = r->size;
3824 	next = (cidx + CACHE_PTR_INCREMENT) & (size - 1);
3825 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
3826 
3827 	prefetch(items[(cidx + offset) & (size - 1)]);
3828 	if (remaining > 1) {
3829 		prefetch2cachelines(&items[next]);
3830 		prefetch2cachelines(items[(cidx + offset + 1) & (size - 1)]);
3831 		prefetch2cachelines(items[(cidx + offset + 2) & (size - 1)]);
3832 		prefetch2cachelines(items[(cidx + offset + 3) & (size - 1)]);
3833 	}
3834 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size - 1)]));
3835 }
3836 
3837 static void
3838 iflib_txq_check_drain(iflib_txq_t txq, int budget)
3839 {
3840 
3841 	ifmp_ring_check_drainage(txq->ift_br, budget);
3842 }
3843 
3844 static uint32_t
3845 iflib_txq_can_drain(struct ifmp_ring *r)
3846 {
3847 	iflib_txq_t txq = r->cookie;
3848 	if_ctx_t ctx = txq->ift_ctx;
3849 
3850 	if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2)
3851 		return (1);
3852 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3853 	    BUS_DMASYNC_POSTREAD);
3854 	return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
3855 	    false));
3856 }
3857 
3858 static uint32_t
3859 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3860 {
3861 	iflib_txq_t txq = r->cookie;
3862 	if_ctx_t ctx = txq->ift_ctx;
3863 	if_t ifp = ctx->ifc_ifp;
3864 	struct mbuf *m, **mp;
3865 	int avail, bytes_sent, skipped, count, err, i;
3866 	int mcast_sent, pkt_sent, reclaimed;
3867 	bool do_prefetch, rang, ring;
3868 
3869 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
3870 			    !LINK_ACTIVE(ctx))) {
3871 		DBG_COUNTER_INC(txq_drain_notready);
3872 		return (0);
3873 	}
3874 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
3875 	rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending);
3876 	avail = IDXDIFF(pidx, cidx, r->size);
3877 
3878 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
3879 		/*
3880 		 * The driver is unloading so we need to free all pending packets.
3881 		 */
3882 		DBG_COUNTER_INC(txq_drain_flushing);
3883 		for (i = 0; i < avail; i++) {
3884 			if (__predict_true(r->items[(cidx + i) & (r->size - 1)] != (void *)txq))
3885 				m_freem(r->items[(cidx + i) & (r->size - 1)]);
3886 			r->items[(cidx + i) & (r->size - 1)] = NULL;
3887 		}
3888 		return (avail);
3889 	}
3890 
3891 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3892 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3893 		CALLOUT_LOCK(txq);
3894 		callout_stop(&txq->ift_timer);
3895 		CALLOUT_UNLOCK(txq);
3896 		DBG_COUNTER_INC(txq_drain_oactive);
3897 		return (0);
3898 	}
3899 
3900 	/*
3901 	 * If we've reclaimed any packets this queue cannot be hung.
3902 	 */
3903 	if (reclaimed)
3904 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3905 	skipped = mcast_sent = bytes_sent = pkt_sent = 0;
3906 	count = MIN(avail, TX_BATCH_SIZE);
3907 #ifdef INVARIANTS
3908 	if (iflib_verbose_debug)
3909 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __func__,
3910 		    avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3911 #endif
3912 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3913 	err = 0;
3914 	for (i = 0; i < count && TXQ_AVAIL(txq) >= MAX_TX_DESC(ctx) + 2; i++) {
3915 		int rem = do_prefetch ? count - i : 0;
3916 
3917 		mp = _ring_peek_one(r, cidx, i, rem);
3918 		MPASS(mp != NULL && *mp != NULL);
3919 
3920 		/*
3921 		 * Completion interrupts will use the address of the txq
3922 		 * as a sentinel to enqueue _something_ in order to acquire
3923 		 * the lock on the mp_ring (there's no direct lock call).
3924 		 * We obviously have to check for these sentinel cases
3925 		 * and skip them.
3926 		 */
3927 		if (__predict_false(*mp == (struct mbuf *)txq)) {
3928 			skipped++;
3929 			continue;
3930 		}
3931 		err = iflib_encap(txq, mp);
3932 		if (__predict_false(err)) {
3933 			/* no room - bail out */
3934 			if (err == ENOBUFS)
3935 				break;
3936 			skipped++;
3937 			/* we can't send this packet - skip it */
3938 			continue;
3939 		}
3940 		pkt_sent++;
3941 		m = *mp;
3942 		DBG_COUNTER_INC(tx_sent);
3943 		bytes_sent += m->m_pkthdr.len;
3944 		mcast_sent += !!(m->m_flags & M_MCAST);
3945 
3946 		if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
3947 			break;
3948 		ETHER_BPF_MTAP(ifp, m);
3949 		rang = iflib_txd_db_check(txq, false);
3950 	}
3951 
3952 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
3953 	ring = rang ? false : (iflib_min_tx_latency | err);
3954 	iflib_txd_db_check(txq, ring);
3955 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
3956 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
3957 	if (mcast_sent)
3958 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3959 #ifdef INVARIANTS
3960 	if (iflib_verbose_debug)
3961 		printf("consumed=%d\n", skipped + pkt_sent);
3962 #endif
3963 	return (skipped + pkt_sent);
3964 }
3965 
3966 static uint32_t
3967 iflib_txq_drain_always(struct ifmp_ring *r)
3968 {
3969 	return (1);
3970 }
3971 
3972 static uint32_t
3973 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3974 {
3975 	int i, avail;
3976 	struct mbuf **mp;
3977 	iflib_txq_t txq;
3978 
3979 	txq = r->cookie;
3980 
3981 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3982 	CALLOUT_LOCK(txq);
3983 	callout_stop(&txq->ift_timer);
3984 	CALLOUT_UNLOCK(txq);
3985 
3986 	avail = IDXDIFF(pidx, cidx, r->size);
3987 	for (i = 0; i < avail; i++) {
3988 		mp = _ring_peek_one(r, cidx, i, avail - i);
3989 		if (__predict_false(*mp == (struct mbuf *)txq))
3990 			continue;
3991 		m_freem(*mp);
3992 		DBG_COUNTER_INC(tx_frees);
3993 	}
3994 	MPASS(ifmp_ring_is_stalled(r) == 0);
3995 	return (avail);
3996 }
3997 
3998 static void
3999 iflib_ifmp_purge(iflib_txq_t txq)
4000 {
4001 	struct ifmp_ring *r;
4002 
4003 	r = txq->ift_br;
4004 	r->drain = iflib_txq_drain_free;
4005 	r->can_drain = iflib_txq_drain_always;
4006 
4007 	ifmp_ring_check_drainage(r, r->size);
4008 
4009 	r->drain = iflib_txq_drain;
4010 	r->can_drain = iflib_txq_can_drain;
4011 }
4012 
4013 static void
4014 _task_fn_tx(void *context)
4015 {
4016 	iflib_txq_t txq = context;
4017 	if_ctx_t ctx = txq->ift_ctx;
4018 	if_t ifp = ctx->ifc_ifp;
4019 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
4020 
4021 #ifdef IFLIB_DIAGNOSTICS
4022 	txq->ift_cpu_exec_count[curcpu]++;
4023 #endif
4024 	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
4025 		return;
4026 #ifdef DEV_NETMAP
4027 	if ((if_getcapenable(ifp) & IFCAP_NETMAP) &&
4028 	    netmap_tx_irq(ifp, txq->ift_id))
4029 		goto skip_ifmp;
4030 #endif
4031 	if (ctx->ifc_sysctl_simple_tx) {
4032 		mtx_lock(&txq->ift_mtx);
4033 		(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
4034 		mtx_unlock(&txq->ift_mtx);
4035 		goto skip_ifmp;
4036 	}
4037 #ifdef ALTQ
4038 	if (if_altq_is_enabled(ifp))
4039 		iflib_altq_if_start(ifp);
4040 #endif
4041 	if (txq->ift_db_pending)
4042 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
4043 	else if (!abdicate)
4044 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4045 	/*
4046 	 * When abdicating, we always need to check drainage, not just when we don't enqueue
4047 	 */
4048 	if (abdicate)
4049 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4050 
4051 skip_ifmp:
4052 	if (ctx->ifc_flags & IFC_LEGACY)
4053 		IFDI_INTR_ENABLE(ctx);
4054 	else
4055 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
4056 }
4057 
4058 static void
4059 _task_fn_rx(void *context)
4060 {
4061 	iflib_rxq_t rxq = context;
4062 	if_ctx_t ctx = rxq->ifr_ctx;
4063 	uint8_t more;
4064 	uint16_t budget;
4065 #ifdef DEV_NETMAP
4066 	u_int work = 0;
4067 	int nmirq;
4068 #endif
4069 
4070 #ifdef IFLIB_DIAGNOSTICS
4071 	rxq->ifr_cpu_exec_count[curcpu]++;
4072 #endif
4073 	DBG_COUNTER_INC(task_fn_rxs);
4074 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
4075 		return;
4076 #ifdef DEV_NETMAP
4077 	nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work);
4078 	if (nmirq != NM_IRQ_PASS) {
4079 		more = (nmirq == NM_IRQ_RESCHED) ? IFLIB_RXEOF_MORE : 0;
4080 		goto skip_rxeof;
4081 	}
4082 #endif
4083 	budget = ctx->ifc_sysctl_rx_budget;
4084 	if (budget == 0)
4085 		budget = 16;	/* XXX */
4086 	more = iflib_rxeof(rxq, budget);
4087 #ifdef DEV_NETMAP
4088 skip_rxeof:
4089 #endif
4090 	if ((more & IFLIB_RXEOF_MORE) == 0) {
4091 		if (ctx->ifc_flags & IFC_LEGACY)
4092 			IFDI_INTR_ENABLE(ctx);
4093 		else
4094 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
4095 		DBG_COUNTER_INC(rx_intr_enables);
4096 	}
4097 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
4098 		return;
4099 
4100 	if (more & IFLIB_RXEOF_MORE)
4101 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
4102 	else if (more & IFLIB_RXEOF_EMPTY)
4103 		callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq);
4104 }
4105 
4106 static void
4107 _task_fn_admin(void *context, int pending)
4108 {
4109 	if_ctx_t ctx = context;
4110 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
4111 	iflib_txq_t txq;
4112 	int i;
4113 	bool oactive, running, do_reset, do_watchdog, in_detach;
4114 
4115 	STATE_LOCK(ctx);
4116 	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
4117 	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
4118 	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
4119 	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
4120 	in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
4121 	ctx->ifc_flags &= ~(IFC_DO_RESET | IFC_DO_WATCHDOG);
4122 	STATE_UNLOCK(ctx);
4123 
4124 	if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
4125 		return;
4126 	if (in_detach)
4127 		return;
4128 
4129 	CTX_LOCK(ctx);
4130 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
4131 		CALLOUT_LOCK(txq);
4132 		callout_stop(&txq->ift_timer);
4133 		CALLOUT_UNLOCK(txq);
4134 	}
4135 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_ADMINCQ)
4136 		IFDI_ADMIN_COMPLETION_HANDLE(ctx);
4137 	if (do_watchdog) {
4138 		ctx->ifc_watchdog_events++;
4139 		IFDI_WATCHDOG_RESET(ctx);
4140 	}
4141 	IFDI_UPDATE_ADMIN_STATUS(ctx);
4142 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
4143 		callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq,
4144 		    txq->ift_timer.c_cpu);
4145 	}
4146 	IFDI_LINK_INTR_ENABLE(ctx);
4147 	if (do_reset)
4148 		iflib_if_init_locked(ctx);
4149 	CTX_UNLOCK(ctx);
4150 
4151 	if (LINK_ACTIVE(ctx) == 0)
4152 		return;
4153 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
4154 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
4155 }
4156 
4157 static void
4158 _task_fn_iov(void *context, int pending)
4159 {
4160 	if_ctx_t ctx = context;
4161 
4162 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
4163 	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
4164 		return;
4165 
4166 	CTX_LOCK(ctx);
4167 	IFDI_VFLR_HANDLE(ctx);
4168 	CTX_UNLOCK(ctx);
4169 }
4170 
4171 static int
4172 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4173 {
4174 	int err;
4175 	if_int_delay_info_t info;
4176 	if_ctx_t ctx;
4177 
4178 	info = (if_int_delay_info_t)arg1;
4179 	ctx = info->iidi_ctx;
4180 	info->iidi_req = req;
4181 	info->iidi_oidp = oidp;
4182 	CTX_LOCK(ctx);
4183 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
4184 	CTX_UNLOCK(ctx);
4185 	return (err);
4186 }
4187 
4188 /*********************************************************************
4189  *
4190  *  IFNET FUNCTIONS
4191  *
4192  **********************************************************************/
4193 
4194 static void
4195 iflib_if_init_locked(if_ctx_t ctx)
4196 {
4197 	iflib_stop(ctx);
4198 	iflib_init_locked(ctx);
4199 }
4200 
4201 static void
4202 iflib_if_init(void *arg)
4203 {
4204 	if_ctx_t ctx = arg;
4205 
4206 	CTX_LOCK(ctx);
4207 	iflib_if_init_locked(ctx);
4208 	CTX_UNLOCK(ctx);
4209 }
4210 
4211 static int
4212 iflib_if_transmit(if_t ifp, struct mbuf *m)
4213 {
4214 	if_ctx_t ctx = if_getsoftc(ifp);
4215 	iflib_txq_t txq;
4216 	int err, qidx;
4217 	int abdicate;
4218 
4219 	if (__predict_false((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
4220 		DBG_COUNTER_INC(tx_frees);
4221 		m_freem(m);
4222 		return (ENETDOWN);
4223 	}
4224 
4225 	MPASS(m->m_nextpkt == NULL);
4226 	/* ALTQ-enabled interfaces always use queue 0. */
4227 	qidx = 0;
4228 	/* Use driver-supplied queue selection method if it exists */
4229 	if (ctx->isc_txq_select_v2) {
4230 		struct if_pkt_info pi;
4231 		uint64_t early_pullups = 0;
4232 		pkt_info_zero(&pi);
4233 
4234 		err = iflib_parse_header_partial(&pi, &m, &early_pullups);
4235 		if (__predict_false(err != 0)) {
4236 			/* Assign pullups for bad pkts to default queue */
4237 			ctx->ifc_txqs[0].ift_pullups += early_pullups;
4238 			DBG_COUNTER_INC(encap_txd_encap_fail);
4239 			return (err);
4240 		}
4241 		/* Let driver make queueing decision */
4242 		qidx = ctx->isc_txq_select_v2(ctx->ifc_softc, m, &pi);
4243 		ctx->ifc_txqs[qidx].ift_pullups += early_pullups;
4244 	}
4245 	/* Backwards compatibility w/ simpler queue select */
4246 	else if (ctx->isc_txq_select)
4247 		qidx = ctx->isc_txq_select(ctx->ifc_softc, m);
4248 	/* If not, use iflib's standard method */
4249 	else if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !if_altq_is_enabled(ifp))
4250 		qidx = QIDX(ctx, m);
4251 
4252 	/* Set TX queue */
4253 	txq = &ctx->ifc_txqs[qidx];
4254 
4255 #ifdef DRIVER_BACKPRESSURE
4256 	if (txq->ift_closed) {
4257 		while (m != NULL) {
4258 			next = m->m_nextpkt;
4259 			m->m_nextpkt = NULL;
4260 			m_freem(m);
4261 			DBG_COUNTER_INC(tx_frees);
4262 			m = next;
4263 		}
4264 		return (ENOBUFS);
4265 	}
4266 #endif
4267 #ifdef notyet
4268 	qidx = count = 0;
4269 	mp = marr;
4270 	next = m;
4271 	do {
4272 		count++;
4273 		next = next->m_nextpkt;
4274 	} while (next != NULL);
4275 
4276 	if (count > nitems(marr))
4277 		if ((mp = malloc(count * sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
4278 			/* XXX check nextpkt */
4279 			m_freem(m);
4280 			/* XXX simplify for now */
4281 			DBG_COUNTER_INC(tx_frees);
4282 			return (ENOBUFS);
4283 		}
4284 	for (next = m, i = 0; next != NULL; i++) {
4285 		mp[i] = next;
4286 		next = next->m_nextpkt;
4287 		mp[i]->m_nextpkt = NULL;
4288 	}
4289 #endif
4290 	DBG_COUNTER_INC(tx_seen);
4291 	abdicate = ctx->ifc_sysctl_tx_abdicate;
4292 
4293 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
4294 
4295 	if (abdicate)
4296 		GROUPTASK_ENQUEUE(&txq->ift_task);
4297 	if (err) {
4298 		if (!abdicate)
4299 			GROUPTASK_ENQUEUE(&txq->ift_task);
4300 		/* support forthcoming later */
4301 #ifdef DRIVER_BACKPRESSURE
4302 		txq->ift_closed = TRUE;
4303 #endif
4304 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
4305 		m_freem(m);
4306 		DBG_COUNTER_INC(tx_frees);
4307 		if (err == ENOBUFS)
4308 			if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
4309 		else
4310 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
4311 	}
4312 
4313 	return (err);
4314 }
4315 
4316 #ifdef ALTQ
4317 /*
4318  * The overall approach to integrating iflib with ALTQ is to continue to use
4319  * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
4320  * ring.  Technically, when using ALTQ, queueing to an intermediate mp_ring
4321  * is redundant/unnecessary, but doing so minimizes the amount of
4322  * ALTQ-specific code required in iflib.  It is assumed that the overhead of
4323  * redundantly queueing to an intermediate mp_ring is swamped by the
4324  * performance limitations inherent in using ALTQ.
4325  *
4326  * When ALTQ support is compiled in, all iflib drivers will use a transmit
4327  * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
4328  * given interface.  If ALTQ is enabled for an interface, then all
4329  * transmitted packets for that interface will be submitted to the ALTQ
4330  * subsystem via IFQ_ENQUEUE().  We don't use the legacy if_transmit()
4331  * implementation because it uses IFQ_HANDOFF(), which will duplicatively
4332  * update stats that the iflib machinery handles, and which is sensitive to
4333  * the disused IFF_DRV_OACTIVE flag.  Additionally, iflib_altq_if_start()
4334  * will be installed as the start routine for use by ALTQ facilities that
4335  * need to trigger queue drains on a scheduled basis.
4336  *
4337  */
4338 static void
4339 iflib_altq_if_start(if_t ifp)
4340 {
4341 	struct ifaltq *ifq = &ifp->if_snd; /* XXX - DRVAPI */
4342 	struct mbuf *m;
4343 
4344 	IFQ_LOCK(ifq);
4345 	IFQ_DEQUEUE_NOLOCK(ifq, m);
4346 	while (m != NULL) {
4347 		iflib_if_transmit(ifp, m);
4348 		IFQ_DEQUEUE_NOLOCK(ifq, m);
4349 	}
4350 	IFQ_UNLOCK(ifq);
4351 }
4352 
4353 static int
4354 iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
4355 {
4356 	int err;
4357 
4358 	if (if_altq_is_enabled(ifp)) {
4359 		IFQ_ENQUEUE(&ifp->if_snd, m, err); /* XXX - DRVAPI */
4360 		if (err == 0)
4361 			iflib_altq_if_start(ifp);
4362 	} else
4363 		err = iflib_if_transmit(ifp, m);
4364 
4365 	return (err);
4366 }
4367 #endif /* ALTQ */
4368 
4369 static void
4370 iflib_if_qflush(if_t ifp)
4371 {
4372 	if_ctx_t ctx = if_getsoftc(ifp);
4373 	iflib_txq_t txq = ctx->ifc_txqs;
4374 	int i;
4375 
4376 	STATE_LOCK(ctx);
4377 	ctx->ifc_flags |= IFC_QFLUSH;
4378 	STATE_UNLOCK(ctx);
4379 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
4380 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
4381 			iflib_txq_check_drain(txq, 0);
4382 	STATE_LOCK(ctx);
4383 	ctx->ifc_flags &= ~IFC_QFLUSH;
4384 	STATE_UNLOCK(ctx);
4385 
4386 	/*
4387 	 * When ALTQ is enabled, this will also take care of purging the
4388 	 * ALTQ queue(s).
4389 	 */
4390 	if_qflush(ifp);
4391 }
4392 
4393 #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
4394 		    IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
4395 		    IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
4396 		    IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM | IFCAP_MEXTPG)
4397 
4398 static int
4399 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
4400 {
4401 	if_ctx_t ctx = if_getsoftc(ifp);
4402 	struct ifreq	*ifr = (struct ifreq *)data;
4403 #if defined(INET) || defined(INET6)
4404 	struct ifaddr	*ifa = (struct ifaddr *)data;
4405 #endif
4406 	bool		avoid_reset = false;
4407 	int		err = 0, reinit = 0, bits;
4408 
4409 	switch (command) {
4410 	case SIOCSIFADDR:
4411 #ifdef INET
4412 		if (ifa->ifa_addr->sa_family == AF_INET)
4413 			avoid_reset = true;
4414 #endif
4415 #ifdef INET6
4416 		if (ifa->ifa_addr->sa_family == AF_INET6)
4417 			avoid_reset = true;
4418 #endif
4419 		/*
4420 		 * Calling init results in link renegotiation,
4421 		 * so we avoid doing it when possible.
4422 		 */
4423 		if (avoid_reset) {
4424 			if_setflagbits(ifp, IFF_UP, 0);
4425 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
4426 				reinit = 1;
4427 #ifdef INET
4428 			if (!(if_getflags(ifp) & IFF_NOARP))
4429 				arp_ifinit(ifp, ifa);
4430 #endif
4431 		} else
4432 			err = ether_ioctl(ifp, command, data);
4433 		break;
4434 	case SIOCSIFMTU:
4435 		CTX_LOCK(ctx);
4436 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
4437 			CTX_UNLOCK(ctx);
4438 			break;
4439 		}
4440 		bits = if_getdrvflags(ifp);
4441 		/* stop the driver and free any clusters before proceeding */
4442 		iflib_stop(ctx);
4443 
4444 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
4445 			STATE_LOCK(ctx);
4446 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
4447 				ctx->ifc_flags |= IFC_MULTISEG;
4448 			else
4449 				ctx->ifc_flags &= ~IFC_MULTISEG;
4450 			STATE_UNLOCK(ctx);
4451 			err = if_setmtu(ifp, ifr->ifr_mtu);
4452 		}
4453 		iflib_init_locked(ctx);
4454 		STATE_LOCK(ctx);
4455 		if_setdrvflags(ifp, bits);
4456 		STATE_UNLOCK(ctx);
4457 		CTX_UNLOCK(ctx);
4458 		break;
4459 	case SIOCSIFFLAGS:
4460 		CTX_LOCK(ctx);
4461 		if (if_getflags(ifp) & IFF_UP) {
4462 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4463 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4464 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4465 					CTX_UNLOCK(ctx);
4466 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4467 					CTX_LOCK(ctx);
4468 				}
4469 			} else
4470 				reinit = 1;
4471 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4472 			iflib_stop(ctx);
4473 		}
4474 		ctx->ifc_if_flags = if_getflags(ifp);
4475 		CTX_UNLOCK(ctx);
4476 		break;
4477 	case SIOCADDMULTI:
4478 	case SIOCDELMULTI:
4479 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4480 			CTX_LOCK(ctx);
4481 			IFDI_INTR_DISABLE(ctx);
4482 			IFDI_MULTI_SET(ctx);
4483 			IFDI_INTR_ENABLE(ctx);
4484 			CTX_UNLOCK(ctx);
4485 		}
4486 		break;
4487 	case SIOCSIFMEDIA:
4488 		CTX_LOCK(ctx);
4489 		IFDI_MEDIA_SET(ctx);
4490 		CTX_UNLOCK(ctx);
4491 		/* FALLTHROUGH */
4492 	case SIOCGIFMEDIA:
4493 	case SIOCGIFXMEDIA:
4494 		err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command);
4495 		break;
4496 	case SIOCGI2C:
4497 	{
4498 		struct ifi2creq i2c;
4499 
4500 		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
4501 		if (err != 0)
4502 			break;
4503 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4504 			err = EINVAL;
4505 			break;
4506 		}
4507 		if (i2c.len > sizeof(i2c.data)) {
4508 			err = EINVAL;
4509 			break;
4510 		}
4511 
4512 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4513 			err = copyout(&i2c, ifr_data_get_ptr(ifr),
4514 			    sizeof(i2c));
4515 		break;
4516 	}
4517 	case SIOCSIFCAP:
4518 	{
4519 		int mask, setmask, oldmask;
4520 
4521 		oldmask = if_getcapenable(ifp);
4522 		mask = ifr->ifr_reqcap ^ oldmask;
4523 		mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_MEXTPG;
4524 		setmask = 0;
4525 #ifdef TCP_OFFLOAD
4526 		setmask |= mask & (IFCAP_TOE4 | IFCAP_TOE6);
4527 #endif
4528 		setmask |= (mask & IFCAP_FLAGS);
4529 		setmask |= (mask & IFCAP_WOL);
4530 
4531 		/*
4532 		 * If any RX csum has changed, change all the ones that
4533 		 * are supported by the driver.
4534 		 */
4535 		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4536 			setmask |= ctx->ifc_softc_ctx.isc_capabilities &
4537 			    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4538 		}
4539 
4540 		/*
4541 		 * want to ensure that traffic has stopped before we change any of the flags
4542 		 */
4543 		if (setmask) {
4544 			CTX_LOCK(ctx);
4545 			bits = if_getdrvflags(ifp);
4546 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
4547 				iflib_stop(ctx);
4548 			STATE_LOCK(ctx);
4549 			if_togglecapenable(ifp, setmask);
4550 			ctx->ifc_softc_ctx.isc_capenable ^= setmask;
4551 			STATE_UNLOCK(ctx);
4552 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
4553 				iflib_init_locked(ctx);
4554 			STATE_LOCK(ctx);
4555 			if_setdrvflags(ifp, bits);
4556 			STATE_UNLOCK(ctx);
4557 			CTX_UNLOCK(ctx);
4558 		}
4559 		if_vlancap(ifp);
4560 		break;
4561 	}
4562 	case SIOCGPRIVATE_0:
4563 	case SIOCSDRVSPEC:
4564 	case SIOCGDRVSPEC:
4565 		CTX_LOCK(ctx);
4566 		err = IFDI_PRIV_IOCTL(ctx, command, data);
4567 		CTX_UNLOCK(ctx);
4568 		break;
4569 	default:
4570 		err = ether_ioctl(ifp, command, data);
4571 		break;
4572 	}
4573 	if (reinit)
4574 		iflib_if_init(ctx);
4575 	return (err);
4576 }
4577 
4578 static uint64_t
4579 iflib_if_get_counter(if_t ifp, ift_counter cnt)
4580 {
4581 	if_ctx_t ctx = if_getsoftc(ifp);
4582 
4583 	return (IFDI_GET_COUNTER(ctx, cnt));
4584 }
4585 
4586 /*********************************************************************
4587  *
4588  *  OTHER FUNCTIONS EXPORTED TO THE STACK
4589  *
4590  **********************************************************************/
4591 
4592 static void
4593 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
4594 {
4595 	if_ctx_t ctx = if_getsoftc(ifp);
4596 
4597 	if ((void *)ctx != arg)
4598 		return;
4599 
4600 	if ((vtag == 0) || (vtag > 4095))
4601 		return;
4602 
4603 	if (iflib_in_detach(ctx))
4604 		return;
4605 
4606 	CTX_LOCK(ctx);
4607 	/* Driver may need all untagged packets to be flushed */
4608 	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4609 		iflib_stop(ctx);
4610 	IFDI_VLAN_REGISTER(ctx, vtag);
4611 	/* Re-init to load the changes, if required */
4612 	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4613 		iflib_init_locked(ctx);
4614 	CTX_UNLOCK(ctx);
4615 }
4616 
4617 static void
4618 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
4619 {
4620 	if_ctx_t ctx = if_getsoftc(ifp);
4621 
4622 	if ((void *)ctx != arg)
4623 		return;
4624 
4625 	if ((vtag == 0) || (vtag > 4095))
4626 		return;
4627 
4628 	CTX_LOCK(ctx);
4629 	/* Driver may need all tagged packets to be flushed */
4630 	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4631 		iflib_stop(ctx);
4632 	IFDI_VLAN_UNREGISTER(ctx, vtag);
4633 	/* Re-init to load the changes, if required */
4634 	if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG))
4635 		iflib_init_locked(ctx);
4636 	CTX_UNLOCK(ctx);
4637 }
4638 
4639 static void
4640 iflib_led_func(void *arg, int onoff)
4641 {
4642 	if_ctx_t ctx = arg;
4643 
4644 	CTX_LOCK(ctx);
4645 	IFDI_LED_FUNC(ctx, onoff);
4646 	CTX_UNLOCK(ctx);
4647 }
4648 
4649 /*********************************************************************
4650  *
4651  *  BUS FUNCTION DEFINITIONS
4652  *
4653  **********************************************************************/
4654 
4655 int
4656 iflib_device_probe(device_t dev)
4657 {
4658 	const pci_vendor_info_t *ent;
4659 	if_shared_ctx_t sctx;
4660 	uint16_t pci_device_id, pci_rev_id, pci_subdevice_id, pci_subvendor_id;
4661 	uint16_t pci_vendor_id;
4662 
4663 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4664 		return (ENOTSUP);
4665 
4666 	pci_vendor_id = pci_get_vendor(dev);
4667 	pci_device_id = pci_get_device(dev);
4668 	pci_subvendor_id = pci_get_subvendor(dev);
4669 	pci_subdevice_id = pci_get_subdevice(dev);
4670 	pci_rev_id = pci_get_revid(dev);
4671 	if (sctx->isc_parse_devinfo != NULL)
4672 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4673 
4674 	ent = sctx->isc_vendor_info;
4675 	while (ent->pvi_vendor_id != 0) {
4676 		if (pci_vendor_id != ent->pvi_vendor_id) {
4677 			ent++;
4678 			continue;
4679 		}
4680 		if ((pci_device_id == ent->pvi_device_id) &&
4681 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4682 		     (ent->pvi_subvendor_id == 0)) &&
4683 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4684 		     (ent->pvi_subdevice_id == 0)) &&
4685 		    ((pci_rev_id == ent->pvi_rev_id) ||
4686 		     (ent->pvi_rev_id == 0))) {
4687 			device_set_desc_copy(dev, ent->pvi_name);
4688 			/* this needs to be changed to zero if the bus probing code
4689 			 * ever stops re-probing on best match because the sctx
4690 			 * may have its values overwritten by register calls
4691 			 * in subsequent probes
4692 			 */
4693 			return (BUS_PROBE_DEFAULT);
4694 		}
4695 		ent++;
4696 	}
4697 	return (ENXIO);
4698 }
4699 
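#if 0
/*
 * Sketch of the vendor table iflib_device_probe() walks above, using
 * the PVID()/PVID_END conventions from <net/iflib.h>; the IDs and
 * description below are made up:
 */
static const pci_vendor_info_t example_vendor_info_array[] = {
	PVID(0x8086, 0x1234, "Example iflib-attached Ethernet adapter"),
	PVID_END	/* zeroed pvi_vendor_id terminates the walk */
};
#endif
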
4700 int
4701 iflib_device_probe_vendor(device_t dev)
4702 {
4703 	int probe;
4704 
4705 	probe = iflib_device_probe(dev);
4706 	if (probe == BUS_PROBE_DEFAULT)
4707 		return (BUS_PROBE_VENDOR);
4708 	else
4709 		return (probe);
4710 }
4711 
4712 static void
4713 iflib_reset_qvalues(if_ctx_t ctx)
4714 {
4715 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4716 	if_shared_ctx_t sctx = ctx->ifc_sctx;
4717 	device_t dev = ctx->ifc_dev;
4718 	int i;
4719 
4720 	if (ctx->ifc_sysctl_ntxqs != 0)
4721 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4722 	if (ctx->ifc_sysctl_nrxqs != 0)
4723 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4724 
4725 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4726 		if (ctx->ifc_sysctl_ntxds[i] != 0)
4727 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4728 		else
4729 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4730 	}
4731 
4732 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4733 		if (ctx->ifc_sysctl_nrxds[i] != 0)
4734 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4735 		else
4736 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4737 	}
4738 
4739 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4740 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4741 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4742 			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4743 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4744 		}
4745 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4746 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4747 			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4748 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4749 		}
4750 		if (!powerof2(scctx->isc_nrxd[i])) {
4751 			device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n",
4752 			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]);
4753 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4754 		}
4755 	}
4756 
4757 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4758 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4759 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4760 			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4761 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4762 		}
4763 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4764 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4765 			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4766 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4767 		}
4768 		if (!powerof2(scctx->isc_ntxd[i])) {
4769 			device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n",
4770 			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]);
4771 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4772 		}
4773 	}
4774 }
4775 
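/*
 * The power-of-two requirement enforced above exists because ring
 * indices throughout iflib wrap with a mask rather than a modulo,
 * e.g. (pidx + 1) & (ntxd - 1), which is only correct when the ring
 * size is 1 << k.  As a hypothetical example, a tunable of ntxd=1000
 * would be rejected here and replaced with a driver default such as
 * 1024.
 */
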
4776 static void
4777 iflib_add_pfil(if_ctx_t ctx)
4778 {
4779 	struct pfil_head *pfil;
4780 	struct pfil_head_args pa;
4781 	iflib_rxq_t rxq;
4782 	int i;
4783 
4784 	pa.pa_version = PFIL_VERSION;
4785 	pa.pa_flags = PFIL_IN;
4786 	pa.pa_type = PFIL_TYPE_ETHERNET;
4787 	pa.pa_headname = if_name(ctx->ifc_ifp);
4788 	pfil = pfil_head_register(&pa);
4789 
4790 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4791 		rxq->pfil = pfil;
4792 	}
4793 }
4794 
4795 static void
4796 iflib_rem_pfil(if_ctx_t ctx)
4797 {
4798 	struct pfil_head *pfil;
4799 	iflib_rxq_t rxq;
4800 	int i;
4801 
4802 	rxq = ctx->ifc_rxqs;
4803 	pfil = rxq->pfil;
4804 	for (i = 0; i < NRXQSETS(ctx); i++, rxq++) {
4805 		rxq->pfil = NULL;
4806 	}
4807 	pfil_head_unregister(pfil);
4808 }
4809 
4810 
4811 /*
4812  * Advance forward by n members of the cpuset ctx->ifc_cpus starting from
4813  * cpuid and wrapping as necessary.
4814  */
4815 static unsigned int
4816 cpuid_advance(if_ctx_t ctx, unsigned int cpuid, unsigned int n)
4817 {
4818 	unsigned int first_valid;
4819 	unsigned int last_valid;
4820 
4821 	/* cpuid should always be in the valid set */
4822 	MPASS(CPU_ISSET(cpuid, &ctx->ifc_cpus));
4823 
4824 	/* valid set should never be empty */
4825 	MPASS(!CPU_EMPTY(&ctx->ifc_cpus));
4826 
4827 	first_valid = CPU_FFS(&ctx->ifc_cpus) - 1;
4828 	last_valid = CPU_FLS(&ctx->ifc_cpus) - 1;
4829 	n = n % CPU_COUNT(&ctx->ifc_cpus);
4830 	while (n > 0) {
4831 		do {
4832 			cpuid++;
4833 			if (cpuid > last_valid)
4834 				cpuid = first_valid;
4835 		} while (!CPU_ISSET(cpuid, &ctx->ifc_cpus));
4836 		n--;
4837 	}
4838 
4839 	return (cpuid);
4840 }
4841 
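/*
 * Worked example (hypothetical CPU set): with ifc_cpus = {0, 2, 4, 6},
 * cpuid_advance(ctx, 4, 3) visits 6 -> 0 -> 2 and returns 2.  The
 * initial n % CPU_COUNT() keeps a large n from walking the set more
 * than once.
 */
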
4842 #if defined(SMP) && defined(SCHED_ULE)
4843 extern struct cpu_group *cpu_top;	/* CPU topology */
4844 
4845 static int
4846 find_child_with_core(int cpu, struct cpu_group *grp)
4847 {
4848 	int i;
4849 
4850 	if (grp->cg_children == 0)
4851 		return (-1);
4852 
4853 	MPASS(grp->cg_child);
4854 	for (i = 0; i < grp->cg_children; i++) {
4855 		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
4856 			return (i);
4857 	}
4858 
4859 	return (-1);
4860 }
4861 
4862 
4863 /*
4864  * Find an L2 neighbor of the given CPU or return -1 if none found.  This
4865  * does not distinguish among multiple L2 neighbors if the given CPU has
4866  * more than one (it will always return the same result in that case).
4867  */
4868 static int
4869 find_l2_neighbor(int cpu)
4870 {
4871 	struct cpu_group *grp;
4872 	int i;
4873 
4874 	grp = cpu_top;
4875 	if (grp == NULL)
4876 		return (-1);
4877 
4878 	/*
4879 	 * Find the smallest CPU group that contains the given core.
4880 	 */
4881 	i = 0;
4882 	while ((i = find_child_with_core(cpu, grp)) != -1) {
4883 		/*
4884 		 * If the smallest group containing the given CPU has fewer
4885 		 * than two members, we conclude the given CPU has no
4886 		 * L2 neighbor.
4887 		 */
4888 		if (grp->cg_child[i].cg_count <= 1)
4889 			return (-1);
4890 		grp = &grp->cg_child[i];
4891 	}
4892 
4893 	/* Must share L2. */
4894 	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
4895 		return (-1);
4896 
4897 	/*
4898 	 * Select the first member of the set that isn't the reference
4899 	 * CPU, which at this point is guaranteed to exist.
4900 	 */
4901 	for (i = 0; i < CPU_SETSIZE; i++) {
4902 		if (CPU_ISSET(i, &grp->cg_mask) && i != cpu)
4903 			return (i);
4904 	}
4905 
4906 	/* Should never be reached */
4907 	return (-1);
4908 }
4909 
4910 #else
4911 static int
4912 find_l2_neighbor(int cpu)
4913 {
4914 
4915 	return (-1);
4916 }
4917 #endif
4918 
4919 /*
4920  * CPU mapping behaviors
4921  * ---------------------
4922  * 'separate txrx' refers to the separate_txrx sysctl
4923  * 'use logical' refers to the use_logical_cores sysctl
4924  * 'INTR CPUS' indicates whether bus_get_cpus(INTR_CPUS) succeeded
4925  *
4926  *  separate     use     INTR
4927  *    txrx     logical   CPUS   result
4928  * ---------- --------- ------ ------------------------------------------------
4929  *     -          -       X     RX and TX queues mapped to consecutive physical
4930  *                              cores with RX/TX pairs on same core and excess
4931  *                              of either following
4932  *     -          X       X     RX and TX queues mapped to consecutive cores
4933  *                              of any type with RX/TX pairs on same core and
4934  *                              excess of either following
4935  *     X          -       X     RX and TX queues mapped to consecutive physical
4936  *                              cores; all RX then all TX
4937  *     X          X       X     RX queues mapped to consecutive physical cores
4938  *                              first, then TX queues mapped to L2 neighbor of
4939  *                              the corresponding RX queue if one exists,
4940  *                              otherwise to consecutive physical cores
4941  *     -         n/a      -     RX and TX queues mapped to consecutive cores of
4942  *                              any type with RX/TX pairs on same core and excess
4943  *                              of either following
4944  *     X         n/a      -     RX and TX queues mapped to consecutive cores of
4945  *                              any type; all RX then all TX
4946  */
4947 static unsigned int
4948 get_cpuid_for_queue(if_ctx_t ctx, unsigned int base_cpuid, unsigned int qid,
4949     bool is_tx)
4950 {
4951 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4952 	unsigned int core_index;
4953 
4954 	if (ctx->ifc_sysctl_separate_txrx) {
4955 		/*
4956 		 * When using separate CPUs for TX and RX, the assignment
4957 		 * will always be of a consecutive CPU out of the set of
4958 		 * context CPUs, except for the specific case where the
4959 		 * context CPUs are physical cores, the use of logical cores
4960 		 * has been enabled, the assignment is for TX, the TX qid
4961 		 * corresponds to an RX qid, and the CPU assigned to the
4962 		 * corresponding RX queue has an L2 neighbor.
4963 		 */
4964 		if (ctx->ifc_sysctl_use_logical_cores &&
4965 		    ctx->ifc_cpus_are_physical_cores &&
4966 		    is_tx && qid < scctx->isc_nrxqsets) {
4967 			int l2_neighbor;
4968 			unsigned int rx_cpuid;
4969 
4970 			rx_cpuid = cpuid_advance(ctx, base_cpuid, qid);
4971 			l2_neighbor = find_l2_neighbor(rx_cpuid);
4972 			if (l2_neighbor != -1) {
4973 				return (l2_neighbor);
4974 			}
4975 			/*
4976 			 * ... else fall through to the normal
4977 			 * consecutive-after-RX assignment scheme.
4978 			 *
4979 			 * Note that we are assuming that all RX queue CPUs
4980 			 * have an L2 neighbor, or all do not.  If a mixed
4981 			 * scenario is possible, we will have to keep track
4982 			 * separately of how many queues prior to this one
4983 			 * were not able to be assigned to an L2 neighbor.
4984 			 */
4985 		}
4986 		if (is_tx)
4987 			core_index = scctx->isc_nrxqsets + qid;
4988 		else
4989 			core_index = qid;
4990 	} else {
4991 		core_index = qid;
4992 	}
4993 
4994 	return (cpuid_advance(ctx, base_cpuid, core_index));
4995 }
4996 
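/*
 * Worked example (hypothetical configuration): ifc_cpus = {0, 2, 4, 6}
 * (physical cores), two RX and two TX queue sets, separate_txrx
 * enabled, use_logical_cores disabled, base_cpuid = 0.  RX queues 0
 * and 1 get core_index 0 and 1 (CPUs 0 and 2); TX queues 0 and 1 get
 * core_index nrxqsets + qid = 2 and 3 (CPUs 4 and 6).  With
 * use_logical_cores enabled on SMT hardware, each TX queue would
 * instead land on the L2 neighbor (e.g. the sibling hyperthread) of
 * its RX queue's CPU whenever one exists.
 */
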
4997 static uint16_t
4998 get_ctx_core_offset(if_ctx_t ctx)
4999 {
5000 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5001 	struct cpu_offset *op;
5002 	cpuset_t assigned_cpus;
5003 	unsigned int cores_consumed;
5004 	unsigned int base_cpuid = ctx->ifc_sysctl_core_offset;
5005 	unsigned int first_valid;
5006 	unsigned int last_valid;
5007 	unsigned int i;
5008 
5009 	first_valid = CPU_FFS(&ctx->ifc_cpus) - 1;
5010 	last_valid = CPU_FLS(&ctx->ifc_cpus) - 1;
5011 
5012 	if (base_cpuid != CORE_OFFSET_UNSPECIFIED) {
5013 		/*
5014 		 * Align the user-chosen base CPU ID to the next valid CPU
5015 		 * for this device.  If the chosen base CPU ID is smaller
5016 		 * than the first valid CPU or larger than the last valid
5017 		 * CPU, we assume the user does not know what the valid
5018 		 * range is for this device and is thinking in terms of a
5019 		 * zero-based reference frame, and so we shift the given
5020 		 * value into the valid range (and wrap accordingly) so the
5021 		 * intent is translated to the proper frame of reference.
5022 		 * If the base CPU ID is within the valid first/last, but
5023 		 * does not correspond to a valid CPU, it is advanced to the
5024 		 * next valid CPU (wrapping if necessary).
5025 		 */
5026 		if (base_cpuid < first_valid || base_cpuid > last_valid) {
5027 			/* shift from zero-based to first_valid-based */
5028 			base_cpuid += first_valid;
5029 			/* wrap to range [first_valid, last_valid] */
5030 			base_cpuid = (base_cpuid - first_valid) %
5031 			    (last_valid - first_valid + 1);
5032 		}
5033 		if (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus)) {
5034 			/*
5035 			 * base_cpuid is in [first_valid, last_valid], but
5036 			 * not a member of the valid set.  In this case,
5037 			 * there will always be a member of the valid set
5038 			 * with a CPU ID that is greater than base_cpuid,
5039 			 * and we simply advance to it.
5040 			 */
5041 			while (!CPU_ISSET(base_cpuid, &ctx->ifc_cpus))
5042 				base_cpuid++;
5043 		}
5044 		return (base_cpuid);
5045 	}
5046 
5047 	/*
5048 	 * Determine how many cores will be consumed by performing the CPU
5049 	 * assignments and counting how many of the assigned CPUs correspond
5050 	 * to CPUs in the set of context CPUs.  This is done using the CPU
5051 	 * ID first_valid as the base CPU ID, as the base CPU must be within
5052 	 * the set of context CPUs.
5053 	 *
5054 	 * Note not all assigned CPUs will be in the set of context CPUs
5055 	 * when separate CPUs are being allocated to TX and RX queues,
5056 	 * assignment to logical cores has been enabled, the set of context
5057 	 * CPUs contains only physical CPUs, and TX queues are mapped to L2
5058 	 * neighbors of CPUs that RX queues have been mapped to - in this
5059 	 * case we do only want to count how many CPUs in the set of context
5060 	 * CPUs have been consumed, as that determines the next CPU in that
5061 	 * set to start allocating at for the next device for which
5062 	 * core_offset is not set.
5063 	 */
5064 	CPU_ZERO(&assigned_cpus);
5065 	for (i = 0; i < scctx->isc_ntxqsets; i++)
5066 		CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, true),
5067 		    &assigned_cpus);
5068 	for (i = 0; i < scctx->isc_nrxqsets; i++)
5069 		CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, false),
5070 		    &assigned_cpus);
5071 	CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
5072 	cores_consumed = CPU_COUNT(&assigned_cpus);
5073 
5074 	mtx_lock(&cpu_offset_mtx);
5075 	SLIST_FOREACH(op, &cpu_offsets, entries) {
5076 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
5077 			base_cpuid = op->next_cpuid;
5078 			op->next_cpuid = cpuid_advance(ctx, op->next_cpuid,
5079 			    cores_consumed);
5080 			MPASS(op->refcount < UINT_MAX);
5081 			op->refcount++;
5082 			break;
5083 		}
5084 	}
5085 	if (base_cpuid == CORE_OFFSET_UNSPECIFIED) {
5086 		base_cpuid = first_valid;
5087 		op = malloc(sizeof(struct cpu_offset), M_IFLIB,
5088 		    M_NOWAIT | M_ZERO);
5089 		if (op == NULL) {
5090 			device_printf(ctx->ifc_dev,
5091 			    "allocation for cpu offset failed.\n");
5092 		} else {
5093 			op->next_cpuid = cpuid_advance(ctx, base_cpuid,
5094 			    cores_consumed);
5095 			op->refcount = 1;
5096 			CPU_COPY(&ctx->ifc_cpus, &op->set);
5097 			SLIST_INSERT_HEAD(&cpu_offsets, op, entries);
5098 		}
5099 	}
5100 	mtx_unlock(&cpu_offset_mtx);
5101 
5102 	return (base_cpuid);
5103 }
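
/*
 * Illustrative sketch (hypothetical device names, not from this file): two
 * ports sharing the same interrupt CPU set, neither with the core_offset
 * tunable set, are spread across that set by the cpu_offsets list above:
 *
 *	hypo0 attaches: no matching entry, so it starts at first_valid and
 *	    records next_cpuid just past the cores it consumed;
 *	hypo1 attaches: CPU_CMP() matches the shared set, so it starts at
 *	    op->next_cpuid and the offset advances (wrapping) once more.
 */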
5104 
5105 static void
5106 unref_ctx_core_offset(if_ctx_t ctx)
5107 {
5108 	struct cpu_offset *op, *top;
5109 
5110 	mtx_lock(&cpu_offset_mtx);
5111 	SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) {
5112 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
5113 			MPASS(op->refcount > 0);
5114 			op->refcount--;
5115 			if (op->refcount == 0) {
5116 				SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries);
5117 				free(op, M_IFLIB);
5118 			}
5119 			break;
5120 		}
5121 	}
5122 	mtx_unlock(&cpu_offset_mtx);
5123 }
5124 
5125 int
5126 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
5127 {
5128 	if_ctx_t ctx;
5129 	if_t ifp;
5130 	if_softc_ctx_t scctx;
5131 	kobjop_desc_t kobj_desc;
5132 	kobj_method_t *kobj_method;
5133 	int err, msix, rid;
5134 	int num_txd, num_rxd;
5135 	char namebuf[TASKQUEUE_NAMELEN];
5136 
5137 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK | M_ZERO);
5138 
5139 	if (sc == NULL) {
5140 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK | M_ZERO);
5141 		device_set_softc(dev, ctx);
5142 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
5143 	}
5144 
5145 	ctx->ifc_sctx = sctx;
5146 	ctx->ifc_dev = dev;
5147 	ctx->ifc_softc = sc;
5148 
5149 	iflib_register(ctx);
5150 	iflib_add_device_sysctl_pre(ctx);
5151 
5152 	scctx = &ctx->ifc_softc_ctx;
5153 	ifp = ctx->ifc_ifp;
5154 	if (ctx->ifc_sysctl_simple_tx) {
5155 #ifndef ALTQ
5156 		if_settransmitfn(ifp, iflib_simple_transmit);
5157 		device_printf(dev, "using simple if_transmit\n");
5158 #else
5159 		device_printf(dev, "ALTQ prevents using simple if_transmit\n");
5160 #endif
5161 	}
5162 	iflib_reset_qvalues(ctx);
5163 	IFNET_WLOCK();
5164 	CTX_LOCK(ctx);
5165 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
5166 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
5167 		goto fail_unlock;
5168 	}
5169 	_iflib_pre_assert(scctx);
5170 	ctx->ifc_txrx = *scctx->isc_txrx;
5171 
5172 	MPASS(scctx->isc_dma_width <= flsll(BUS_SPACE_MAXADDR));
5173 
5174 	if (sctx->isc_flags & IFLIB_DRIVER_MEDIA)
5175 		ctx->ifc_mediap = scctx->isc_media;
5176 
5177 #ifdef INVARIANTS
5178 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
5179 		MPASS(scctx->isc_tx_csum_flags);
5180 #endif
5181 
5182 	if_setcapabilities(ifp,
5183 	    scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_MEXTPG);
5184 	if_setcapenable(ifp,
5185 	    scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_MEXTPG);
5186 
5187 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
5188 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
5189 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
5190 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
5191 
5192 	num_txd = iflib_num_tx_descs(ctx);
5193 	num_rxd = iflib_num_rx_descs(ctx);
5194 
5195 	/* XXX change for per-queue sizes */
5196 	device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
5197 	    num_txd, num_rxd);
5198 
5199 	if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION)
5200 		scctx->isc_tx_nsegments = max(1, num_txd /
5201 		    MAX_SINGLE_PACKET_FRACTION);
5202 	if (scctx->isc_tx_tso_segments_max > num_txd /
5203 	    MAX_SINGLE_PACKET_FRACTION)
5204 		scctx->isc_tx_tso_segments_max = max(1,
5205 		    num_txd / MAX_SINGLE_PACKET_FRACTION);
5206 
5207 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
5208 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
5209 		/*
5210 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
5211 		 * but some MACs do.
5212 		 */
5213 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
5214 		    IP_MAXPACKET));
5215 		/*
5216 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
5217 		 * into account.  In the worst case, each of these calls will
5218 		 * add another mbuf and, thus, the requirement for another DMA
5219 		 * segment.  So for best performance, it doesn't make sense to
5220 		 * advertise a maximum of TSO segments that typically will
5221 		 * require defragmentation in iflib_encap().
5222 		 */
5223 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
5224 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
5225 	}
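	/*
	 * Worked example (illustrative numbers): a driver reporting
	 * isc_tx_tso_segments_max = 32 advertises a hardware TSO segment
	 * count of 29 above, keeping three segments in reserve for mbufs
	 * that m_pullup(9) may add in iflib_parse_header().
	 */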
5226 	if (scctx->isc_rss_table_size == 0)
5227 		scctx->isc_rss_table_size = 64;
5228 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
5229 
5230 	/* Create and start admin taskqueue */
5231 	snprintf(namebuf, TASKQUEUE_NAMELEN, "if_%s_tq", device_get_nameunit(dev));
5232 	ctx->ifc_tq = taskqueue_create_fast(namebuf, M_NOWAIT,
5233 	    taskqueue_thread_enqueue, &ctx->ifc_tq);
5234 	if (ctx->ifc_tq == NULL) {
5235 		device_printf(dev, "Unable to create admin taskqueue\n");
5236 		err = ENOMEM;
5237 		goto fail_unlock;
5238 	}
5238 
5239 	err = taskqueue_start_threads(&ctx->ifc_tq, 1, PI_NET, "%s", namebuf);
5240 	if (err) {
5241 		device_printf(dev,
5242 		    "Unable to start admin taskqueue threads, error: %d\n",
5243 		    err);
5244 		taskqueue_free(ctx->ifc_tq);
5245 		goto fail_unlock;
5246 	}
5247 
5248 	TASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
5249 
5250 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
5251 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
5252 		device_printf(dev, "Unable to fetch CPU list\n");
5253 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
5254 		ctx->ifc_cpus_are_physical_cores = false;
5255 	} else
5256 		ctx->ifc_cpus_are_physical_cores = true;
5257 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
5258 
5259 	/*
5260 	 * Now set up MSI or MSI-X, should return us the number of supported
5261 	 * vectors (will be 1 for a legacy interrupt and MSI).
5262 	 */
5263 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
5264 		msix = scctx->isc_vectors;
5265 	} else if (scctx->isc_msix_bar != 0)
5266 		/*
5267 		 * The simple fact that isc_msix_bar is not 0 does not mean
5268 		 * we have a good value there that is known to work.
5269 		 */
5270 		msix = iflib_msix_init(ctx);
5271 	else {
5272 		scctx->isc_vectors = 1;
5273 		scctx->isc_ntxqsets = 1;
5274 		scctx->isc_nrxqsets = 1;
5275 		scctx->isc_intr = IFLIB_INTR_LEGACY;
5276 		msix = 0;
5277 	}
5278 	/* Get memory for the station queues */
5279 	if ((err = iflib_queues_alloc(ctx))) {
5280 		device_printf(dev, "Unable to allocate queue memory\n");
5281 		goto fail_intr_free;
5282 	}
5283 
5284 	if ((err = iflib_qset_structures_setup(ctx)))
5285 		goto fail_queues;
5286 
5287 	/*
5288 	 * Now that we know how many queues there are, get the core offset.
5289 	 */
5290 	ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx);
5291 
5292 	if (msix > 1) {
5293 		/*
5294 		 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable
5295 		 * aren't the default NULL implementation.
5296 		 */
5297 		kobj_desc = &ifdi_rx_queue_intr_enable_desc;
5298 		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
5299 		    kobj_desc);
5300 		if (kobj_method == &kobj_desc->deflt) {
5301 			device_printf(dev,
5302 			    "MSI-X requires ifdi_rx_queue_intr_enable method\n");
5303 			err = EOPNOTSUPP;
5304 			goto fail_queues;
5305 		}
5306 		kobj_desc = &ifdi_tx_queue_intr_enable_desc;
5307 		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
5308 		    kobj_desc);
5309 		if (kobj_method == &kobj_desc->deflt) {
5310 			device_printf(dev,
5311 			    "MSI-X requires ifdi_tx_queue_intr_enable method\n");
5312 			err = EOPNOTSUPP;
5313 			goto fail_queues;
5314 		}
5315 
5316 		/*
5317 		 * Assign the MSI-X vectors.
5318 		 * Note that the default NULL ifdi_msix_intr_assign method will
5319 		 * fail here, too.
5320 		 */
5321 		err = IFDI_MSIX_INTR_ASSIGN(ctx, msix);
5322 		if (err != 0) {
5323 			device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n",
5324 			    err);
5325 			goto fail_queues;
5326 		}
5327 	} else if (scctx->isc_intr != IFLIB_INTR_MSIX) {
5328 		rid = 0;
5329 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
5330 			MPASS(msix == 1);
5331 			rid = 1;
5332 		}
5333 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
5334 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
5335 			goto fail_queues;
5336 		}
5337 	} else {
5338 		device_printf(dev,
5339 		    "Cannot use iflib with only 1 MSI-X interrupt!\n");
5340 		err = ENODEV;
5341 		goto fail_queues;
5342 	}
5343 
5344 	/*
5345 	 * Drop the context lock around ether_ifattach() to prevent a
5346 	 * double-locking panic in iflib_media_status when the driver loads.
5347 	 */
5348 	CTX_UNLOCK(ctx);
5349 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
5350 	CTX_LOCK(ctx);
5351 
5352 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
5353 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
5354 		goto fail_detach;
5355 	}
5356 
5357 	/*
5358 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
5359 	 * This must appear after the call to ether_ifattach() because
5360 	 * ether_ifattach() sets if_hdrlen to the default value.
5361 	 */
5362 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
5363 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
5364 
5365 	if ((err = iflib_netmap_attach(ctx))) {
5366 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
5367 		goto fail_detach;
5368 	}
5369 	*ctxp = ctx;
5370 
5371 	DEBUGNET_SET(ctx->ifc_ifp, iflib);
5372 
5373 	iflib_add_device_sysctl_post(ctx);
5374 	iflib_add_pfil(ctx);
5375 	ctx->ifc_flags |= IFC_INIT_DONE;
5376 	CTX_UNLOCK(ctx);
5377 	IFNET_WUNLOCK();
5378 
5379 	return (0);
5380 
5381 fail_detach:
5382 	ether_ifdetach(ctx->ifc_ifp);
5383 fail_queues:
5384 	taskqueue_free(ctx->ifc_tq);
5385 	iflib_tqg_detach(ctx);
5386 	iflib_tx_structures_free(ctx);
5387 	iflib_rx_structures_free(ctx);
5388 	IFDI_DETACH(ctx);
5389 	IFDI_QUEUES_FREE(ctx);
5390 fail_intr_free:
5391 	iflib_free_intr_mem(ctx);
5392 fail_unlock:
5393 	CTX_UNLOCK(ctx);
5394 	IFNET_WUNLOCK();
5395 	iflib_deregister(ctx);
5396 	device_set_softc(ctx->ifc_dev, NULL);
5397 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5398 		free(ctx->ifc_softc, M_IFLIB);
5399 	free(ctx, M_IFLIB);
5400 	return (err);
5401 }
5402 
5403 int
5404 iflib_device_attach(device_t dev)
5405 {
5406 	if_ctx_t ctx;
5407 	if_shared_ctx_t sctx;
5408 
5409 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
5410 		return (ENOTSUP);
5411 
5412 	pci_enable_busmaster(dev);
5413 
5414 	return (iflib_device_register(dev, NULL, sctx, &ctx));
5415 }
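
/*
 * A minimal sketch of the bus glue an iflib driver supplies so that the
 * attach path above runs; the "hypo" names are hypothetical, not part of
 * iflib:
 *
 *	static device_method_t hypo_methods[] = {
 *		DEVMETHOD(device_register, hypo_register),
 *		DEVMETHOD(device_probe, iflib_device_probe),
 *		DEVMETHOD(device_attach, iflib_device_attach),
 *		DEVMETHOD(device_detach, iflib_device_detach),
 *		DEVMETHOD_END
 *	};
 *
 *	static driver_t hypo_driver = {
 *		"hypo", hypo_methods, sizeof(struct hypo_softc)
 *	};
 *
 * hypo_register() returns the driver's if_shared_ctx_t, which is the
 * DEVICE_REGISTER() result validated at the top of iflib_device_attach().
 */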
5416 
5417 int
5418 iflib_device_deregister(if_ctx_t ctx)
5419 {
5420 	if_t ifp = ctx->ifc_ifp;
5421 	device_t dev = ctx->ifc_dev;
5422 
5423 	/* Make sure VLANs are not using the driver */
5424 	if (if_vlantrunkinuse(ifp)) {
5425 		device_printf(dev, "Vlan in use, detach first\n");
5426 		return (EBUSY);
5427 	}
5428 #ifdef PCI_IOV
5429 	if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
5430 		device_printf(dev, "SR-IOV in use; detach first.\n");
5431 		return (EBUSY);
5432 	}
5433 #endif
5434 
5435 	STATE_LOCK(ctx);
5436 	ctx->ifc_flags |= IFC_IN_DETACH;
5437 	STATE_UNLOCK(ctx);
5438 
5439 	/* Unregister VLAN handlers before calling iflib_stop() */
5440 	iflib_unregister_vlan_handlers(ctx);
5441 
5442 	iflib_netmap_detach(ifp);
5443 	ether_ifdetach(ifp);
5444 
5445 	CTX_LOCK(ctx);
5446 	iflib_stop(ctx);
5447 	CTX_UNLOCK(ctx);
5448 
5449 	iflib_rem_pfil(ctx);
5450 	if (ctx->ifc_led_dev != NULL)
5451 		led_destroy(ctx->ifc_led_dev);
5452 
5453 	iflib_tqg_detach(ctx);
5454 	iflib_tx_structures_free(ctx);
5455 	iflib_rx_structures_free(ctx);
5456 
5457 	CTX_LOCK(ctx);
5458 	IFDI_DETACH(ctx);
5459 	IFDI_QUEUES_FREE(ctx);
5460 	CTX_UNLOCK(ctx);
5461 
5462 	taskqueue_free(ctx->ifc_tq);
5463 	ctx->ifc_tq = NULL;
5464 
5465 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
5466 	iflib_free_intr_mem(ctx);
5467 
5468 	bus_generic_detach(dev);
5469 
5470 	iflib_deregister(ctx);
5471 
5472 	device_set_softc(ctx->ifc_dev, NULL);
5473 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
5474 		free(ctx->ifc_softc, M_IFLIB);
5475 	unref_ctx_core_offset(ctx);
5476 	free(ctx, M_IFLIB);
5477 	return (0);
5478 }
5479 
5480 static void
5481 iflib_tqg_detach(if_ctx_t ctx)
5482 {
5483 	iflib_txq_t txq;
5484 	iflib_rxq_t rxq;
5485 	int i;
5486 	struct taskqgroup *tqg;
5487 
5488 	/* XXX drain any dependent tasks */
5489 	tqg = qgroup_if_io_tqg;
5490 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
5491 		callout_drain(&txq->ift_timer);
5492 #ifdef DEV_NETMAP
5493 		callout_drain(&txq->ift_netmap_timer);
5494 #endif /* DEV_NETMAP */
5495 		if (txq->ift_task.gt_uniq != NULL)
5496 			taskqgroup_detach(tqg, &txq->ift_task);
5497 	}
5498 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
5499 		if (rxq->ifr_task.gt_uniq != NULL)
5500 			taskqgroup_detach(tqg, &rxq->ifr_task);
5501 	}
5502 }
5503 
5504 static void
5505 iflib_free_intr_mem(if_ctx_t ctx)
5506 {
5507 
5508 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
5509 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
5510 	}
5511 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
5512 		pci_release_msi(ctx->ifc_dev);
5513 	}
5514 	if (ctx->ifc_msix_mem != NULL) {
5515 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
5516 		    rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
5517 		ctx->ifc_msix_mem = NULL;
5518 	}
5519 }
5520 
5521 int
5522 iflib_device_detach(device_t dev)
5523 {
5524 	if_ctx_t ctx = device_get_softc(dev);
5525 
5526 	return (iflib_device_deregister(ctx));
5527 }
5528 
5529 int
5530 iflib_device_suspend(device_t dev)
5531 {
5532 	if_ctx_t ctx = device_get_softc(dev);
5533 
5534 	CTX_LOCK(ctx);
5535 	IFDI_SUSPEND(ctx);
5536 	CTX_UNLOCK(ctx);
5537 
5538 	return (bus_generic_suspend(dev));
5539 }

5540 int
5541 iflib_device_shutdown(device_t dev)
5542 {
5543 	if_ctx_t ctx = device_get_softc(dev);
5544 
5545 	CTX_LOCK(ctx);
5546 	IFDI_SHUTDOWN(ctx);
5547 	CTX_UNLOCK(ctx);
5548 
5549 	return (bus_generic_suspend(dev));
5550 }
5551 
5552 int
5553 iflib_device_resume(device_t dev)
5554 {
5555 	if_ctx_t ctx = device_get_softc(dev);
5556 	iflib_txq_t txq = ctx->ifc_txqs;
5557 
5558 	CTX_LOCK(ctx);
5559 	IFDI_RESUME(ctx);
5560 	iflib_if_init_locked(ctx);
5561 	CTX_UNLOCK(ctx);
5562 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
5563 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
5564 
5565 	return (bus_generic_resume(dev));
5566 }
5567 
5568 int
5569 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
5570 {
5571 	int error;
5572 	if_ctx_t ctx = device_get_softc(dev);
5573 
5574 	CTX_LOCK(ctx);
5575 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
5576 	CTX_UNLOCK(ctx);
5577 
5578 	return (error);
5579 }
5580 
5581 void
5582 iflib_device_iov_uninit(device_t dev)
5583 {
5584 	if_ctx_t ctx = device_get_softc(dev);
5585 
5586 	CTX_LOCK(ctx);
5587 	IFDI_IOV_UNINIT(ctx);
5588 	CTX_UNLOCK(ctx);
5589 }
5590 
5591 int
5592 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
5593 {
5594 	int error;
5595 	if_ctx_t ctx = device_get_softc(dev);
5596 
5597 	CTX_LOCK(ctx);
5598 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
5599 	CTX_UNLOCK(ctx);
5600 
5601 	return (error);
5602 }
5603 
5604 /*********************************************************************
5605  *
5606  *  MODULE FUNCTION DEFINITIONS
5607  *
5608  **********************************************************************/
5609 
5610 /*
5611  * - Start a fast taskqueue thread for each core
5612  * - Start a taskqueue for control operations
5613  */
5614 static int
5615 iflib_module_init(void)
5616 {
5617 	iflib_timer_default = hz / 2;
5618 	return (0);
5619 }
5620 
5621 static int
5622 iflib_module_event_handler(module_t mod, int what, void *arg)
5623 {
5624 	int err;
5625 
5626 	switch (what) {
5627 	case MOD_LOAD:
5628 		if ((err = iflib_module_init()) != 0)
5629 			return (err);
5630 		break;
5631 	case MOD_UNLOAD:
5632 		return (EBUSY);
5633 	default:
5634 		return (EOPNOTSUPP);
5635 	}
5636 
5637 	return (0);
5638 }
5639 
5640 /*********************************************************************
5641  *
5642  *  PUBLIC FUNCTION DEFINITIONS
5643  *     ordered as in iflib.h
5644  *
5645  **********************************************************************/
5646 
5647 static void
5648 _iflib_assert(if_shared_ctx_t sctx)
5649 {
5650 	int i;
5651 
5652 	MPASS(sctx->isc_tx_maxsize);
5653 	MPASS(sctx->isc_tx_maxsegsize);
5654 
5655 	MPASS(sctx->isc_rx_maxsize);
5656 	MPASS(sctx->isc_rx_nsegments);
5657 	MPASS(sctx->isc_rx_maxsegsize);
5658 
5659 	MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8);
5660 	for (i = 0; i < sctx->isc_nrxqs; i++) {
5661 		MPASS(sctx->isc_nrxd_min[i]);
5662 		MPASS(powerof2(sctx->isc_nrxd_min[i]));
5663 		MPASS(sctx->isc_nrxd_max[i]);
5664 		MPASS(powerof2(sctx->isc_nrxd_max[i]));
5665 		MPASS(sctx->isc_nrxd_default[i]);
5666 		MPASS(powerof2(sctx->isc_nrxd_default[i]));
5667 	}
5668 
5669 	MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8);
5670 	for (i = 0; i < sctx->isc_ntxqs; i++) {
5671 		MPASS(sctx->isc_ntxd_min[i]);
5672 		MPASS(powerof2(sctx->isc_ntxd_min[i]));
5673 		MPASS(sctx->isc_ntxd_max[i]);
5674 		MPASS(powerof2(sctx->isc_ntxd_max[i]));
5675 		MPASS(sctx->isc_ntxd_default[i]);
5676 		MPASS(powerof2(sctx->isc_ntxd_default[i]));
5677 	}
5678 }
5679 
5680 static void
5681 _iflib_pre_assert(if_softc_ctx_t scctx)
5682 {
5683 
5684 	MPASS(scctx->isc_txrx->ift_txd_encap);
5685 	MPASS(scctx->isc_txrx->ift_txd_flush);
5686 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
5687 	MPASS(scctx->isc_txrx->ift_rxd_available);
5688 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
5689 	MPASS(scctx->isc_txrx->ift_rxd_refill);
5690 	MPASS(scctx->isc_txrx->ift_rxd_flush);
5691 }
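
/*
 * Illustrative sketch (hypothetical names): the if_txrx ops that the
 * assertions above require a driver to have filled in by the time
 * IFDI_ATTACH_PRE() returns:
 *
 *	static struct if_txrx hypo_txrx = {
 *		.ift_txd_encap = hypo_txd_encap,
 *		.ift_txd_flush = hypo_txd_flush,
 *		.ift_txd_credits_update = hypo_txd_credits_update,
 *		.ift_rxd_available = hypo_rxd_available,
 *		.ift_rxd_pkt_get = hypo_rxd_pkt_get,
 *		.ift_rxd_refill = hypo_rxd_refill,
 *		.ift_rxd_flush = hypo_rxd_flush,
 *	};
 *
 * with scctx->isc_txrx pointed at it from the driver's attach-pre path.
 */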
5692 
5693 static void
5694 iflib_register(if_ctx_t ctx)
5695 {
5696 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5697 	driver_t *driver = sctx->isc_driver;
5698 	device_t dev = ctx->ifc_dev;
5699 	if_t ifp;
5700 
5701 	_iflib_assert(sctx);
5702 
5703 	CTX_LOCK_INIT(ctx);
5704 	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
5705 	ifp = ctx->ifc_ifp = if_alloc_dev(IFT_ETHER, dev);
5706 
5707 	/*
5708 	 * Initialize our context's device specific methods
5709 	 */
5710 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
5711 	kobj_class_compile((kobj_class_t) driver);
5712 
5713 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
5714 	if_setsoftc(ifp, ctx);
5715 	if_setdev(ifp, dev);
5716 	if_setinitfn(ifp, iflib_if_init);
5717 	if_setioctlfn(ifp, iflib_if_ioctl);
5718 #ifdef ALTQ
5719 	if_setstartfn(ifp, iflib_altq_if_start);
5720 	if_settransmitfn(ifp, iflib_altq_if_transmit);
5721 	if_setsendqready(ifp);
5722 #else
5723 	if_settransmitfn(ifp, iflib_if_transmit);
5724 #endif
5725 	if_setqflushfn(ifp, iflib_if_qflush);
5726 	if_setgetcounterfn(ifp, iflib_if_get_counter);
5727 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
5728 	ctx->ifc_vlan_attach_event =
5729 	    EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
5730 		    EVENTHANDLER_PRI_FIRST);
5731 	ctx->ifc_vlan_detach_event =
5732 	    EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
5733 		    EVENTHANDLER_PRI_FIRST);
5734 
5735 	if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) {
5736 		ctx->ifc_mediap = &ctx->ifc_media;
5737 		ifmedia_init(ctx->ifc_mediap, IFM_IMASK,
5738 		    iflib_media_change, iflib_media_status);
5739 	}
5740 }
5741 
5742 static void
5743 iflib_unregister_vlan_handlers(if_ctx_t ctx)
5744 {
5745 	/* Unregister VLAN events */
5746 	if (ctx->ifc_vlan_attach_event != NULL) {
5747 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
5748 		ctx->ifc_vlan_attach_event = NULL;
5749 	}
5750 	if (ctx->ifc_vlan_detach_event != NULL) {
5751 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
5752 		ctx->ifc_vlan_detach_event = NULL;
5753 	}
5754 
5755 }
5756 
5757 static void
5758 iflib_deregister(if_ctx_t ctx)
5759 {
5760 	if_t ifp = ctx->ifc_ifp;
5761 
5762 	/* Remove all media */
5763 	ifmedia_removeall(&ctx->ifc_media);
5764 
5765 	/* Ensure that VLAN event handlers are unregistered */
5766 	iflib_unregister_vlan_handlers(ctx);
5767 
5768 	/* Release kobject reference */
5769 	kobj_delete((kobj_t) ctx, NULL);
5770 
5771 	/* Free the ifnet structure */
5772 	if_free(ifp);
5773 
5774 	STATE_LOCK_DESTROY(ctx);
5775 
5776 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
5777 	CTX_LOCK_DESTROY(ctx);
5778 }
5779 
5780 static int
5781 iflib_queues_alloc(if_ctx_t ctx)
5782 {
5783 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5784 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
5785 	device_t dev = ctx->ifc_dev;
5786 	int nrxqsets = scctx->isc_nrxqsets;
5787 	int ntxqsets = scctx->isc_ntxqsets;
5788 	iflib_txq_t txq;
5789 	iflib_rxq_t rxq;
5790 	iflib_fl_t fl = NULL;
5791 	int i, j, cpu, err, txconf, rxconf;
5792 	iflib_dma_info_t ifdip;
5793 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
5794 	uint32_t *txqsizes = scctx->isc_txqsizes;
5795 	uint8_t nrxqs = sctx->isc_nrxqs;
5796 	uint8_t ntxqs = sctx->isc_ntxqs;
5797 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
5798 	int fl_offset = (sctx->isc_flags & IFLIB_HAS_RXCQ ? 1 : 0);
5799 	caddr_t *vaddrs;
5800 	uint64_t *paddrs;
5801 
5802 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
5803 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
5804 	KASSERT(nrxqs >= fl_offset + nfree_lists,
5805 	    ("there must be at least a rxq for each free list"));
5806 
5807 	/* Allocate the TX ring struct memory */
5808 	if (!(ctx->ifc_txqs =
5809 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5810 		    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5811 		device_printf(dev, "Unable to allocate TX ring memory\n");
5812 		err = ENOMEM;
5813 		goto fail;
5814 	}
5815 
5816 	/* Now allocate the RX */
5817 	if (!(ctx->ifc_rxqs =
5818 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5819 		    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
5820 		device_printf(dev, "Unable to allocate RX ring memory\n");
5821 		err = ENOMEM;
5822 		goto rx_fail;
5823 	}
5824 
5825 	txq = ctx->ifc_txqs;
5826 	rxq = ctx->ifc_rxqs;
5827 
5828 	/*
5829 	 * XXX handle allocation failure
5830 	 */
5831 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
5832 		/* Set up some basics */
5833 
5834 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
5835 		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5836 			device_printf(dev,
5837 			    "Unable to allocate TX DMA info memory\n");
5838 			err = ENOMEM;
5839 			goto err_tx_desc;
5840 		}
5841 		txq->ift_ifdi = ifdip;
5842 		for (j = 0; j < ntxqs; j++, ifdip++) {
5843 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
5844 				device_printf(dev,
5845 				    "Unable to allocate TX descriptors\n");
5846 				err = ENOMEM;
5847 				goto err_tx_desc;
5848 			}
5849 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
5850 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
5851 		}
5852 		txq->ift_ctx = ctx;
5853 		txq->ift_id = i;
5854 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
5855 			txq->ift_br_offset = 1;
5856 		} else {
5857 			txq->ift_br_offset = 0;
5858 		}
5859 
5860 		if (iflib_txsd_alloc(txq)) {
5861 			device_printf(dev, "Critical Failure setting up TX buffers\n");
5862 			err = ENOMEM;
5863 			goto err_tx_desc;
5864 		}
5865 
5866 		/* Initialize the TX lock */
5867 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout",
5868 		    device_get_nameunit(dev), txq->ift_id);
5869 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
5870 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
5871 		txq->ift_timer.c_cpu = cpu;
5872 #ifdef DEV_NETMAP
5873 		callout_init_mtx(&txq->ift_netmap_timer, &txq->ift_mtx, 0);
5874 		txq->ift_netmap_timer.c_cpu = cpu;
5875 #endif /* DEV_NETMAP */
5876 
5877 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
5878 		    iflib_txq_can_drain, M_IFLIB, M_WAITOK);
5879 		if (err) {
5880 			/* XXX free any allocated rings */
5881 			device_printf(dev, "Unable to allocate buf_ring\n");
5882 			goto err_tx_desc;
5883 		}
5884 	}
5885 
5886 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
5887 		/* Set up some basics */
5888 		callout_init(&rxq->ifr_watchdog, 1);
5889 
5890 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
5891 		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5892 			device_printf(dev,
5893 			    "Unable to allocate RX DMA info memory\n");
5894 			err = ENOMEM;
5895 			goto err_tx_desc;
5896 		}
5897 
5898 		rxq->ifr_ifdi = ifdip;
5899 		/* XXX this needs to be changed if #rx queues != #tx queues */
5900 		rxq->ifr_ntxqirq = 1;
5901 		rxq->ifr_txqid[0] = i;
5902 		for (j = 0; j < nrxqs; j++, ifdip++) {
5903 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
5904 				device_printf(dev,
5905 				    "Unable to allocate RX descriptors\n");
5906 				err = ENOMEM;
5907 				goto err_tx_desc;
5908 			}
5909 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
5910 		}
5911 		rxq->ifr_ctx = ctx;
5912 		rxq->ifr_id = i;
5913 		rxq->ifr_fl_offset = fl_offset;
5914 		rxq->ifr_nfl = nfree_lists;
5915 		if (!(fl =
5916 		    (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
5917 			device_printf(dev, "Unable to allocate free list memory\n");
5918 			err = ENOMEM;
5919 			goto err_tx_desc;
5920 		}
5921 		rxq->ifr_fl = fl;
5922 		for (j = 0; j < nfree_lists; j++) {
5923 			fl[j].ifl_rxq = rxq;
5924 			fl[j].ifl_id = j;
5925 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
5926 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
5927 		}
5928 		/* Allocate receive buffers for the ring */
5929 		if (iflib_rxsd_alloc(rxq)) {
5930 			device_printf(dev,
5931 			    "Critical Failure setting up receive buffers\n");
5932 			err = ENOMEM;
5933 			goto err_rx_desc;
5934 		}
5935 
5936 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
5937 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
5938 			    M_WAITOK);
5939 	}
5940 
5941 	/* TXQs */
5942 	vaddrs = malloc(sizeof(caddr_t)  * ntxqsets * ntxqs, M_IFLIB, M_WAITOK);
5943 	paddrs = malloc(sizeof(uint64_t) * ntxqsets * ntxqs, M_IFLIB, M_WAITOK);
5944 	for (i = 0; i < ntxqsets; i++) {
5945 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
5946 
5947 		for (j = 0; j < ntxqs; j++, di++) {
5948 			vaddrs[i * ntxqs + j] = di->idi_vaddr;
5949 			paddrs[i * ntxqs + j] = di->idi_paddr;
5950 		}
5951 	}
5952 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
5953 		device_printf(ctx->ifc_dev,
5954 		    "Unable to allocate device TX queue\n");
5955 		iflib_tx_structures_free(ctx);
5956 		free(vaddrs, M_IFLIB);
5957 		free(paddrs, M_IFLIB);
5958 		goto err_rx_desc;
5959 	}
5960 	free(vaddrs, M_IFLIB);
5961 	free(paddrs, M_IFLIB);
5962 
5963 	/* RXQs */
5964 	vaddrs = malloc(sizeof(caddr_t)  * nrxqsets * nrxqs, M_IFLIB, M_WAITOK);
5965 	paddrs = malloc(sizeof(uint64_t) * nrxqsets * nrxqs, M_IFLIB, M_WAITOK);
5966 	for (i = 0; i < nrxqsets; i++) {
5967 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
5968 
5969 		for (j = 0; j < nrxqs; j++, di++) {
5970 			vaddrs[i * nrxqs + j] = di->idi_vaddr;
5971 			paddrs[i * nrxqs + j] = di->idi_paddr;
5972 		}
5973 	}
5974 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
5975 		device_printf(ctx->ifc_dev,
5976 		    "Unable to allocate device RX queue\n");
5977 		iflib_tx_structures_free(ctx);
5978 		free(vaddrs, M_IFLIB);
5979 		free(paddrs, M_IFLIB);
5980 		goto err_rx_desc;
5981 	}
5982 	free(vaddrs, M_IFLIB);
5983 	free(paddrs, M_IFLIB);
5984 
5985 	return (0);
5986 
5987 /* XXX handle allocation failure changes */
5988 err_rx_desc:
5989 err_tx_desc:
5990 rx_fail:
5991 	if (ctx->ifc_rxqs != NULL)
5992 		free(ctx->ifc_rxqs, M_IFLIB);
5993 	ctx->ifc_rxqs = NULL;
5994 	if (ctx->ifc_txqs != NULL)
5995 		free(ctx->ifc_txqs, M_IFLIB);
5996 	ctx->ifc_txqs = NULL;
5997 fail:
5998 	return (err);
5999 }
6000 
6001 static int
6002 iflib_tx_structures_setup(if_ctx_t ctx)
6003 {
6004 	iflib_txq_t txq = ctx->ifc_txqs;
6005 	int i;
6006 
6007 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
6008 		iflib_txq_setup(txq);
6009 
6010 	return (0);
6011 }
6012 
6013 static void
6014 iflib_tx_structures_free(if_ctx_t ctx)
6015 {
6016 	iflib_txq_t txq = ctx->ifc_txqs;
6017 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6018 	int i, j;
6019 
6020 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
6021 		for (j = 0; j < sctx->isc_ntxqs; j++)
6022 			iflib_dma_free(&txq->ift_ifdi[j]);
6023 		iflib_txq_destroy(txq);
6024 	}
6025 	free(ctx->ifc_txqs, M_IFLIB);
6026 	ctx->ifc_txqs = NULL;
6027 }
6028 
6029 /*********************************************************************
6030  *
6031  *  Initialize all receive rings.
6032  *
6033  **********************************************************************/
6034 static int
6035 iflib_rx_structures_setup(if_ctx_t ctx)
6036 {
6037 	iflib_rxq_t rxq = ctx->ifc_rxqs;
6038 	int q;
6039 #if defined(INET6) || defined(INET)
6040 	int err, i;
6041 #endif
6042 
6043 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
6044 #if defined(INET6) || defined(INET)
6045 		err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
6046 		    TCP_LRO_ENTRIES, min(1024,
6047 		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]));
6048 		if (err != 0) {
6049 			device_printf(ctx->ifc_dev,
6050 			    "LRO Initialization failed!\n");
6051 			goto fail;
6052 		}
6053 #endif
6054 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
6055 	}
6056 	return (0);
6057 #if defined(INET6) || defined(INET)
6058 fail:
6059 	/*
6060 	 * Free the LRO resources allocated so far; we only need to handle
6061 	 * the rings that completed, as the failing case will have cleaned
6062 	 * up after itself.  'q' failed, so it's the terminus.
6063 	 */
6064 	rxq = ctx->ifc_rxqs;
6065 	for (i = 0; i < q; ++i, rxq++) {
6066 		tcp_lro_free(&rxq->ifr_lc);
6067 	}
6068 	return (err);
6069 #endif
6070 }
6071 
6072 /*********************************************************************
6073  *
6074  *  Free all receive rings.
6075  *
6076  **********************************************************************/
6077 static void
6078 iflib_rx_structures_free(if_ctx_t ctx)
6079 {
6080 	iflib_rxq_t rxq = ctx->ifc_rxqs;
6081 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6082 	int i, j;
6083 
6084 	for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
6085 		for (j = 0; j < sctx->isc_nrxqs; j++)
6086 			iflib_dma_free(&rxq->ifr_ifdi[j]);
6087 		iflib_rx_sds_free(rxq);
6088 #if defined(INET6) || defined(INET)
6089 		tcp_lro_free(&rxq->ifr_lc);
6090 #endif
6091 	}
6092 	free(ctx->ifc_rxqs, M_IFLIB);
6093 	ctx->ifc_rxqs = NULL;
6094 }
6095 
6096 static int
6097 iflib_qset_structures_setup(if_ctx_t ctx)
6098 {
6099 	int err;
6100 
6101 	/*
6102 	 * It is expected that the caller takes care of freeing queues if this
6103 	 * fails.
6104 	 */
6105 	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
6106 		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
6107 		return (err);
6108 	}
6109 
6110 	if ((err = iflib_rx_structures_setup(ctx)) != 0)
6111 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
6112 
6113 	return (err);
6114 }
6115 
6116 int
6117 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
6118 		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
6119 {
6120 
6121 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
6122 }
6123 
6124 /* Just to avoid copy/paste */
6125 static inline int
6126 iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
6127     int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
6128     const char *name)
6129 {
6130 	device_t dev;
6131 	unsigned int base_cpuid, cpuid;
6132 	int err;
6133 
6134 	dev = ctx->ifc_dev;
6135 	base_cpuid = ctx->ifc_sysctl_core_offset;
6136 	cpuid = get_cpuid_for_queue(ctx, base_cpuid, qid, type == IFLIB_INTR_TX);
6137 	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev,
6138 	    irq ? irq->ii_res : NULL, name);
6139 	if (err) {
6140 		device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
6141 		return (err);
6142 	}
6143 #ifdef notyet
6144 	if (cpuid > ctx->ifc_cpuid_highest)
6145 		ctx->ifc_cpuid_highest = cpuid;
6146 #endif
6147 	return (0);
6148 }
6149 
6150 /*
6151  * Allocate a hardware interrupt for subctx using the parent (ctx)'s hardware
6152  * resources.
6153  *
6154  * Similar to iflib_irq_alloc_generic(), but for interrupt type IFLIB_INTR_RXTX
6155  * only.
6156  *
6157  * XXX: Could be removed if subctx's dev has its intr resource allocation
6158  * methods replaced with custom ones?
6159  */
6160 int
6161 iflib_irq_alloc_generic_subctx(if_ctx_t ctx, if_ctx_t subctx, if_irq_t irq,
6162 			       int rid, iflib_intr_type_t type,
6163 			       driver_filter_t *filter, void *filter_arg,
6164 			       int qid, const char *name)
6165 {
6166 	device_t dev, subdev;
6167 	struct grouptask *gtask;
6168 	struct taskqgroup *tqg;
6169 	iflib_filter_info_t info;
6170 	gtask_fn_t *fn;
6171 	int tqrid, err;
6172 	driver_filter_t *intr_fast;
6173 	void *q;
6174 
6175 	MPASS(ctx != NULL);
6176 	MPASS(subctx != NULL);
6177 
6178 	tqrid = rid;
6179 	dev = ctx->ifc_dev;
6180 	subdev = subctx->ifc_dev;
6181 
6182 	switch (type) {
6183 	case IFLIB_INTR_RXTX:
6184 		q = &subctx->ifc_rxqs[qid];
6185 		info = &subctx->ifc_rxqs[qid].ifr_filter_info;
6186 		gtask = &subctx->ifc_rxqs[qid].ifr_task;
6187 		tqg = qgroup_if_io_tqg;
6188 		fn = _task_fn_rx;
6189 		intr_fast = iflib_fast_intr_rxtx;
6190 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
6191 		break;
6192 	default:
6193 		device_printf(dev, "%s: unknown net intr type for subctx %s (%d)\n",
6194 		    __func__, device_get_nameunit(subdev), type);
6195 		return (EINVAL);
6196 	}
6197 
6198 	info->ifi_filter = filter;
6199 	info->ifi_filter_arg = filter_arg;
6200 	info->ifi_task = gtask;
6201 	info->ifi_ctx = q;
6202 
6205 	/* Allocate interrupts from hardware using parent context */
6206 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
6207 	if (err != 0) {
6208 		device_printf(dev, "_iflib_irq_alloc failed for subctx %s: %d\n",
6209 		    device_get_nameunit(subdev), err);
6210 		return (err);
6211 	}
6212 
6213 	if (tqrid != -1) {
6214 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q,
6215 		    name);
6216 		if (err)
6217 			return (err);
6218 	} else {
6219 		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
6220 	}
6221 
6222 	return (0);
6223 }
6224 
6225 int
6226 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
6227 			iflib_intr_type_t type, driver_filter_t *filter,
6228 			void *filter_arg, int qid, const char *name)
6229 {
6230 	device_t dev;
6231 	struct grouptask *gtask;
6232 	struct taskqgroup *tqg;
6233 	iflib_filter_info_t info;
6234 	gtask_fn_t *fn;
6235 	int tqrid, err;
6236 	driver_filter_t *intr_fast;
6237 	void *q;
6238 
6239 	info = &ctx->ifc_filter_info;
6240 	tqrid = rid;
6241 
6242 	switch (type) {
6243 	/* XXX merge tx/rx for netmap? */
6244 	case IFLIB_INTR_TX:
6245 		q = &ctx->ifc_txqs[qid];
6246 		info = &ctx->ifc_txqs[qid].ift_filter_info;
6247 		gtask = &ctx->ifc_txqs[qid].ift_task;
6248 		tqg = qgroup_if_io_tqg;
6249 		fn = _task_fn_tx;
6250 		intr_fast = iflib_fast_intr;
6251 		GROUPTASK_INIT(gtask, 0, fn, q);
6252 		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
6253 		break;
6254 	case IFLIB_INTR_RX:
6255 		q = &ctx->ifc_rxqs[qid];
6256 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
6257 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6258 		tqg = qgroup_if_io_tqg;
6259 		fn = _task_fn_rx;
6260 		intr_fast = iflib_fast_intr;
6261 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
6262 		break;
6263 	case IFLIB_INTR_RXTX:
6264 		q = &ctx->ifc_rxqs[qid];
6265 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
6266 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6267 		tqg = qgroup_if_io_tqg;
6268 		fn = _task_fn_rx;
6269 		intr_fast = iflib_fast_intr_rxtx;
6270 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
6271 		break;
6272 	case IFLIB_INTR_ADMIN:
6273 		q = ctx;
6274 		tqrid = -1;
6275 		info = &ctx->ifc_filter_info;
6276 		gtask = NULL;
6277 		intr_fast = iflib_fast_intr_ctx;
6278 		break;
6279 	default:
6280 		device_printf(ctx->ifc_dev, "%s: unknown net intr type\n",
6281 		    __func__);
6282 		return (EINVAL);
6283 	}
6284 
6285 	info->ifi_filter = filter;
6286 	info->ifi_filter_arg = filter_arg;
6287 	info->ifi_task = gtask;
6288 	info->ifi_ctx = q;
6289 
6290 	dev = ctx->ifc_dev;
6291 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info,  name);
6292 	if (err != 0) {
6293 		device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
6294 		return (err);
6295 	}
6296 	if (type == IFLIB_INTR_ADMIN)
6297 		return (0);
6298 
6299 	if (tqrid != -1) {
6300 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q,
6301 		    name);
6302 		if (err)
6303 			return (err);
6304 	} else {
6305 		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
6306 	}
6307 
6308 	return (0);
6309 }
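
/*
 * Typical use, sketched with hypothetical names: a driver's
 * IFDI_MSIX_INTR_ASSIGN implementation allocates one vector per RX queue
 * and attaches each TX queue as a softirq on a matching CPU:
 *
 *	for (i = 0; i < sc->num_rx_queues; i++) {
 *		snprintf(buf, sizeof(buf), "rxq%d", i);
 *		err = iflib_irq_alloc_generic(ctx, &sc->rx_irqs[i], i + 1,
 *		    IFLIB_INTR_RXTX, hypo_msix_que, &sc->rx_queues[i], i,
 *		    buf);
 *		if (err != 0)
 *			return (err);
 *	}
 *	for (i = 0; i < sc->num_tx_queues; i++) {
 *		snprintf(buf, sizeof(buf), "txq%d", i);
 *		iflib_softirq_alloc_generic(ctx,
 *		    &sc->rx_irqs[i % sc->num_rx_queues], IFLIB_INTR_TX,
 *		    &sc->tx_queues[i], i, buf);
 *	}
 */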
6310 
6311 void
6312 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
6313 			    void *arg, int qid, const char *name)
6314 {
6315 	device_t dev;
6316 	struct grouptask *gtask;
6317 	struct taskqgroup *tqg;
6318 	gtask_fn_t *fn;
6319 	void *q;
6320 	int err;
6321 
6322 	switch (type) {
6323 	case IFLIB_INTR_TX:
6324 		q = &ctx->ifc_txqs[qid];
6325 		gtask = &ctx->ifc_txqs[qid].ift_task;
6326 		tqg = qgroup_if_io_tqg;
6327 		fn = _task_fn_tx;
6328 		GROUPTASK_INIT(gtask, 0, fn, q);
6329 		break;
6330 	case IFLIB_INTR_RX:
6331 		q = &ctx->ifc_rxqs[qid];
6332 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6333 		tqg = qgroup_if_io_tqg;
6334 		fn = _task_fn_rx;
6335 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
6336 		break;
6337 	case IFLIB_INTR_IOV:
6338 		TASK_INIT(&ctx->ifc_vflr_task, 0, _task_fn_iov, ctx);
6339 		return;
6340 	default:
6341 		panic("unknown net intr type");
6342 	}
6343 	err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q, name);
6344 	if (err) {
6345 		dev = ctx->ifc_dev;
6346 		taskqgroup_attach(tqg, gtask, q, dev, irq ? irq->ii_res : NULL,
6347 		    name);
6348 	}
6349 }
6350 
6351 void
6352 iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
6353 {
6354 
6355 	if (irq->ii_tag)
6356 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
6357 
6358 	if (irq->ii_res)
6359 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
6360 		    rman_get_rid(irq->ii_res), irq->ii_res);
6361 }
6362 
6363 static int
6364 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
6365 {
6366 	iflib_txq_t txq = ctx->ifc_txqs;
6367 	iflib_rxq_t rxq = ctx->ifc_rxqs;
6368 	if_irq_t irq = &ctx->ifc_legacy_irq;
6369 	iflib_filter_info_t info;
6370 	device_t dev;
6371 	struct grouptask *gtask;
6372 	struct resource *res;
6373 	int err, tqrid;
6374 	bool rx_only;
6375 
6376 	info = &rxq->ifr_filter_info;
6377 	gtask = &rxq->ifr_task;
6378 	tqrid = *rid;
6379 	rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0;
6380 
6381 	ctx->ifc_flags |= IFC_LEGACY;
6382 	info->ifi_filter = filter;
6383 	info->ifi_filter_arg = filter_arg;
6384 	info->ifi_task = gtask;
6385 	info->ifi_ctx = rxq;
6386 
6387 	dev = ctx->ifc_dev;
6388 	/* We allocate a single interrupt resource */
6389 	err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? iflib_fast_intr :
6390 	    iflib_fast_intr_rxtx, NULL, info, name);
6391 	if (err != 0)
6392 		return (err);
6393 	NET_GROUPTASK_INIT(gtask, 0, _task_fn_rx, rxq);
6394 	res = irq->ii_res;
6395 	taskqgroup_attach(qgroup_if_io_tqg, gtask, rxq, dev, res, name);
6396 
6397 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
6398 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
6399 	    "tx");
6400 	return (0);
6401 }
6402 
6403 void
6404 iflib_led_create(if_ctx_t ctx)
6405 {
6406 
6407 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
6408 	    device_get_nameunit(ctx->ifc_dev));
6409 }
6410 
6411 void
6412 iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
6413 {
6414 
6415 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
6416 }
6417 
6418 void
6419 iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
6420 {
6421 
6422 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
6423 }
6424 
6425 void
6426 iflib_admin_intr_deferred(if_ctx_t ctx)
6427 {
6428 
6429 	taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_admin_task);
6430 }
6431 
6432 void
6433 iflib_iov_intr_deferred(if_ctx_t ctx)
6434 {
6435 
6436 	taskqueue_enqueue(ctx->ifc_tq, &ctx->ifc_vflr_task);
6437 }
6438 
6439 void
6440 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, const char *name)
6441 {
6442 
6443 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
6444 	    name);
6445 }
6446 
6447 void
6448 iflib_config_task_init(if_ctx_t ctx, struct task *config_task, task_fn_t *fn)
6449 {
6450 	TASK_INIT(config_task, 0, fn, ctx);
6451 }
6452 
6453 void
6454 iflib_config_task_enqueue(if_ctx_t ctx, struct task *config_task)
6455 {
6456 	taskqueue_enqueue(ctx->ifc_tq, config_task);
6457 }
6458 
6459 void
6460 iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
6461 {
6462 	if_t ifp = ctx->ifc_ifp;
6463 	iflib_txq_t txq = ctx->ifc_txqs;
6464 
6465 	if_setbaudrate(ifp, baudrate);
6466 	if (baudrate >= IF_Gbps(10)) {
6467 		STATE_LOCK(ctx);
6468 		ctx->ifc_flags |= IFC_PREFETCH;
6469 		STATE_UNLOCK(ctx);
6470 	}
6471 	/* If link down, disable watchdog */
6472 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
6473 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
6474 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
6475 	}
6476 	ctx->ifc_link_state = link_state;
6477 	if_link_state_change(ifp, link_state);
6478 }
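
/*
 * Drivers call the above from their admin/update path once PHY state has
 * settled, e.g. (hypothetical softc field names):
 *
 *	iflib_link_state_change(sc->ctx, LINK_STATE_UP, IF_Gbps(25));
 */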
6479 
6480 static int
6481 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
6482 {
6483 	int credits;
6484 #ifdef INVARIANTS
6485 	int credits_pre = txq->ift_cidx_processed;
6486 #endif
6487 
6488 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
6489 	    BUS_DMASYNC_POSTREAD);
6490 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
6491 		return (0);
6492 
6493 	txq->ift_processed += credits;
6494 	txq->ift_cidx_processed += credits;
6495 
6496 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
6497 	if (txq->ift_cidx_processed >= txq->ift_size)
6498 		txq->ift_cidx_processed -= txq->ift_size;
6499 	return (credits);
6500 }
6501 
6502 static int
6503 iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
6504 {
6505 	iflib_fl_t fl;
6506 	u_int i;
6507 
6508 	for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
6509 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
6510 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6511 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
6512 	    budget));
6513 }
6514 
6515 void
6516 iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
6517 	const char *description, if_int_delay_info_t info,
6518 	int offset, int value)
6519 {
6520 	info->iidi_ctx = ctx;
6521 	info->iidi_offset = offset;
6522 	info->iidi_value = value;
6523 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
6524 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
6525 	    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
6526 	    info, 0, iflib_sysctl_int_delay, "I", description);
6527 }
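
/*
 * Example use (hypothetical register offset and default value): an
 * e1000-style driver can expose its RX interrupt delay register as a
 * tunable with:
 *
 *	iflib_add_int_delay_sysctl(ctx, "rx_int_delay",
 *	    "receive interrupt delay in usecs", &sc->rx_int_delay,
 *	    HYPO_RDTR_OFFSET, hypo_rx_int_delay_dflt);
 */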
6528 
6529 struct sx *
6530 iflib_ctx_lock_get(if_ctx_t ctx)
6531 {
6532 
6533 	return (&ctx->ifc_ctx_sx);
6534 }
6535 
6536 static int
6537 iflib_msix_init(if_ctx_t ctx)
6538 {
6539 	device_t dev = ctx->ifc_dev;
6540 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6541 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6542 	int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues;
6543 	int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors;
6544 
6545 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
6546 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
6547 
6548 	if (bootverbose)
6549 		device_printf(dev, "msix_init qsets capped at %d\n",
6550 		    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
6551 
6552 	/* Override by tunable */
6553 	if (scctx->isc_disable_msix)
6554 		goto msi;
6555 
6556 	/* First try MSI-X */
6557 	if ((msgs = pci_msix_count(dev)) == 0) {
6558 		if (bootverbose)
6559 			device_printf(dev, "MSI-X not supported or disabled\n");
6560 		goto msi;
6561 	}
6562 
6563 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
6564 	/*
6565 	 * bar == -1 => "trust me I know what I'm doing"
6566 	 * Some drivers are for hardware that is so shoddily
6567 	 * documented that no one knows which bars are which
6568 	 * so the developer has to map all bars. This hack
6569 	 * allows shoddy garbage to use MSI-X in this framework.
6570 	 */
6571 	if (bar != -1) {
6572 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
6573 		    SYS_RES_MEMORY, &bar, RF_ACTIVE);
6574 		if (ctx->ifc_msix_mem == NULL) {
6575 			device_printf(dev, "Unable to map MSI-X table\n");
6576 			goto msi;
6577 		}
6578 	}
6579 
6580 	admincnt = sctx->isc_admin_intrcnt;
6581 #if IFLIB_DEBUG
6582 	/* use only 1 qset in debug mode */
6583 	queuemsgs = min(msgs - admincnt, 1);
6584 #else
6585 	queuemsgs = msgs - admincnt;
6586 #endif
6587 #ifdef RSS
6588 	queues = imin(queuemsgs, rss_getnumbuckets());
6589 #else
6590 	queues = queuemsgs;
6591 #endif
6592 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6593 	if (bootverbose)
6594 		device_printf(dev,
6595 		    "intr CPUs: %d queue msgs: %d admincnt: %d\n",
6596 		    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
6597 #ifdef  RSS
6598 	/* If we're doing RSS, clamp at the number of RSS buckets */
6599 	if (queues > rss_getnumbuckets())
6600 		queues = rss_getnumbuckets();
6601 #endif
6602 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
6603 		rx_queues = iflib_num_rx_queues;
6604 	else
6605 		rx_queues = queues;
6606 
6607 	if (rx_queues > scctx->isc_nrxqsets)
6608 		rx_queues = scctx->isc_nrxqsets;
6609 
6610 	/*
6611 	 * We want this to be all logical CPUs by default
6612 	 */
6613 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
6614 		tx_queues = iflib_num_tx_queues;
6615 	else
6616 		tx_queues = mp_ncpus;
6617 
6618 	if (tx_queues > scctx->isc_ntxqsets)
6619 		tx_queues = scctx->isc_ntxqsets;
6620 
6621 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
6622 #ifdef INVARIANTS
6623 		if (tx_queues != rx_queues)
6624 			device_printf(dev,
6625 			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
6626 			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
6627 #endif
6628 		tx_queues = min(rx_queues, tx_queues);
6629 		rx_queues = min(rx_queues, tx_queues);
6630 	}
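	/*
	 * Worked example (illustrative): on an 8-CPU system with 16 MSI-X
	 * messages and admincnt == 1, queuemsgs is 15 and queues is clamped
	 * to 8, so with no tunables set this requests
	 * rx_queues = tx_queues = 8 and vectors = 9 below.
	 */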
6631 
6632 	vectors = rx_queues + admincnt;
6633 	if (msgs < vectors) {
6634 		device_printf(dev,
6635 		    "insufficient number of MSI-X vectors "
6636 		    "(supported %d, need %d)\n", msgs, vectors);
6637 		goto msi;
6638 	}
6639 
6640 	device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues,
6641 	    tx_queues);
6642 	msgs = vectors;
6643 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
6644 		if (vectors != msgs) {
6645 			device_printf(dev,
6646 			    "Unable to allocate sufficient MSI-X vectors "
6647 			    "(got %d, need %d)\n", vectors, msgs);
6648 			pci_release_msi(dev);
6649 			if (bar != -1) {
6650 				bus_release_resource(dev, SYS_RES_MEMORY, bar,
6651 				    ctx->ifc_msix_mem);
6652 				ctx->ifc_msix_mem = NULL;
6653 			}
6654 			goto msi;
6655 		}
6656 		device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
6657 		    vectors);
6658 		scctx->isc_vectors = vectors;
6659 		scctx->isc_nrxqsets = rx_queues;
6660 		scctx->isc_ntxqsets = tx_queues;
6661 		scctx->isc_intr = IFLIB_INTR_MSIX;
6662 
6663 		return (vectors);
6664 	} else {
6665 		device_printf(dev,
6666 		    "failed to allocate %d MSI-X vectors, err: %d\n", vectors,
6667 		    err);
6668 		if (bar != -1) {
6669 			bus_release_resource(dev, SYS_RES_MEMORY, bar,
6670 			    ctx->ifc_msix_mem);
6671 			ctx->ifc_msix_mem = NULL;
6672 		}
6673 	}
6674 
6675 msi:
6676 	vectors = pci_msi_count(dev);
6677 	scctx->isc_nrxqsets = 1;
6678 	scctx->isc_ntxqsets = 1;
6679 	scctx->isc_vectors = vectors;
6680 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
6681 		device_printf(dev, "Using an MSI interrupt\n");
6682 		scctx->isc_intr = IFLIB_INTR_MSI;
6683 	} else {
6684 		scctx->isc_vectors = 1;
6685 		device_printf(dev, "Using a Legacy interrupt\n");
6686 		scctx->isc_intr = IFLIB_INTR_LEGACY;
6687 	}
6688 
6689 	return (vectors);
6690 }
6691 
6692 static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
6693 
6694 static int
6695 mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
6696 {
6697 	int rc;
6698 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
6699 	struct sbuf *sb;
6700 	const char *ring_state = "UNKNOWN";
6701 
6702 	/* XXX needed ? */
6703 	rc = sysctl_wire_old_buffer(req, 0);
6704 	MPASS(rc == 0);
6705 	if (rc != 0)
6706 		return (rc);
6707 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
6708 	MPASS(sb != NULL);
6709 	if (sb == NULL)
6710 		return (ENOMEM);
6711 	if (state[3] <= 3)
6712 		ring_state = ring_states[state[3]];
6713 
6714 	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
6715 		    state[0], state[1], state[2], ring_state);
6716 	rc = sbuf_finish(sb);
6717 	sbuf_delete(sb);
6718 	return (rc);
6719 }
6720 
6721 enum iflib_ndesc_handler {
6722 	IFLIB_NTXD_HANDLER,
6723 	IFLIB_NRXD_HANDLER,
6724 };
6725 
6726 static int
6727 mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
6728 {
6729 	if_ctx_t ctx = (void *)arg1;
6730 	enum iflib_ndesc_handler type = arg2;
6731 	char buf[256] = {0};
6732 	qidx_t *ndesc;
6733 	char *p, *next;
6734 	int nqs, rc, i;
6735 
6736 	nqs = 8;
6737 	switch (type) {
6738 	case IFLIB_NTXD_HANDLER:
6739 		ndesc = ctx->ifc_sysctl_ntxds;
6740 		if (ctx->ifc_sctx)
6741 			nqs = ctx->ifc_sctx->isc_ntxqs;
6742 		break;
6743 	case IFLIB_NRXD_HANDLER:
6744 		ndesc = ctx->ifc_sysctl_nrxds;
6745 		if (ctx->ifc_sctx)
6746 			nqs = ctx->ifc_sctx->isc_nrxqs;
6747 		break;
6748 	default:
6749 		printf("%s: unhandled type\n", __func__);
6750 		return (EINVAL);
6751 	}
6752 	if (nqs == 0)
6753 		nqs = 8;
6754 
6755 	for (i = 0; i < 8; i++) {
6756 		if (i >= nqs)
6757 			break;
6758 		if (i)
6759 			strcat(buf, ",");
6760 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
6761 	}
6762 
6763 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
6764 	if (rc || req->newptr == NULL)
6765 		return (rc);
6766 
6767 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
6768 	    i++, p = strsep(&next, " ,")) {
6769 		ndesc[i] = strtoul(p, NULL, 10);
6770 	}
6771 
6772 	return (rc);
6773 }
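
/*
 * From userland (hypothetical device name), the handler above accepts a
 * comma- or space-separated list with one entry per hardware queue in the
 * qset:
 *
 *	# sysctl dev.hypo.0.iflib.override_ntxds=4096
 *	# sysctl dev.hypo.0.iflib.override_nrxds="2048,2048"
 */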
6774 
6775 #define NAME_BUFLEN 32
6776 static void
6777 iflib_add_device_sysctl_pre(if_ctx_t ctx)
6778 {
6779 	device_t dev = iflib_get_dev(ctx);
6780 	struct sysctl_oid_list *child, *oid_list;
6781 	struct sysctl_ctx_list *ctx_list;
6782 	struct sysctl_oid *node;
6783 
6784 	ctx_list = device_get_sysctl_ctx(dev);
6785 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
6786 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child,
6787 	    OID_AUTO, "iflib", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
6788 	    "IFLIB fields");
6789 	oid_list = SYSCTL_CHILDREN(node);
6790 
6791 	SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
6792 	    CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, "driver version");
6793 
6794 	SYSCTL_ADD_BOOL(ctx_list, oid_list, OID_AUTO, "simple_tx",
6795 	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_simple_tx, 0,
6796 	    "use simple tx ring");
6797 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
6798 	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
6799 	    "# of txqs to use, 0 => use default #");
6800 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
6801 	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
6802 	    "# of rxqs to use, 0 => use default #");
6803 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
6804 	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
6805 	    "permit #txq != #rxq");
6806 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6807 	    CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6808 	    "disable MSI-X (default 0)");
6809 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6810 	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the RX budget");
6811 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6812 	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
6813 	    "cause TX to abdicate instead of running to completion");
6814 	ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
6815 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset",
6816 	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
6817 	    "offset to start using cores at");
6818 	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx",
6819 	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
6820 	    "use separate cores for TX and RX");
6821 	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "use_logical_cores",
6822 	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_use_logical_cores, 0,
6823 	    "try to make use of logical cores for TX and RX");
6824 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "use_extra_msix_vectors",
6825 	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_extra_msix_vectors, 0,
6826 	    "attempt to reserve the given number of extra MSI-X vectors during driver load for the creation of additional interfaces later");
6827 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "allocated_msix_vectors",
6828 	    CTLFLAG_RDTUN, &ctx->ifc_softc_ctx.isc_vectors, 0,
6829 	    "total # of MSI-X vectors allocated by driver");
6830 
6831 	/* XXX change for per-queue sizes */
6832 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
6833 	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
6834 	    IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A",
6835 	    "list of # of TX descriptors to use, 0 = use default #");
6836 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
6837 	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
6838 	    IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A",
6839 	    "list of # of RX descriptors to use, 0 = use default #");
6840 }
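/*
 * The knobs above are rooted at dev.<driver>.<unit>.iflib.  The
 * CTLFLAG_RDTUN entries can only be set as boot-time tunables, while
 * the CTLFLAG_RWTUN entries may also be changed at runtime.  A sketch
 * for a hypothetical "foo" driver in loader.conf:
 *
 *	dev.foo.0.iflib.override_nrxqs="4"
 *	dev.foo.0.iflib.core_offset="2"
 */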
6841 
6842 static void
6843 iflib_add_device_sysctl_post(if_ctx_t ctx)
6844 {
6845 	if_shared_ctx_t sctx = ctx->ifc_sctx;
6846 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
6847 	device_t dev = iflib_get_dev(ctx);
6848 	struct sysctl_oid_list *child;
6849 	struct sysctl_ctx_list *ctx_list;
6850 	iflib_fl_t fl;
6851 	iflib_txq_t txq;
6852 	iflib_rxq_t rxq;
6853 	int i, j;
6854 	char namebuf[NAME_BUFLEN];
6855 	char *qfmt;
6856 	struct sysctl_oid *queue_node, *fl_node, *node;
6857 	struct sysctl_oid_list *queue_list, *fl_list;
6858 	ctx_list = device_get_sysctl_ctx(dev);
6859 
6860 	node = ctx->ifc_sysctl_node;
6861 	child = SYSCTL_CHILDREN(node);
6862 
6863 	if (scctx->isc_ntxqsets > 100)
6864 		qfmt = "txq%03d";
6865 	else if (scctx->isc_ntxqsets > 10)
6866 		qfmt = "txq%02d";
6867 	else
6868 		qfmt = "txq%d";
6869 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
6870 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6871 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6872 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
6873 		queue_list = SYSCTL_CHILDREN(queue_node);
6874 		SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
6875 		    CTLFLAG_RD, &txq->ift_task.gt_cpu, 0,
6876 		    "cpu this queue is bound to");
6877 #if MEMORY_LOGGING
6878 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
6879 		    CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed");
6880 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
6881 		    CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued");
6882 #endif
6883 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
6884 		    CTLFLAG_RD, &txq->ift_mbuf_defrag,
6885 		    "# of times m_defrag was called");
6886 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
6887 		    CTLFLAG_RD, &txq->ift_pullups,
6888 		    "# of times m_pullup was called");
6889 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6890 		    "mbuf_defrag_failed", CTLFLAG_RD,
6891 		    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
6892 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6893 		    "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail,
6894 		    "# of times no descriptors were available");
6895 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6896 		    "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed,
6897 		    "# of times DMA map failed");
6898 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6899 		    "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig,
6900 		    "# of times txd_encap returned EFBIG");
6901 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6902 		    "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup,
6903 		    "# of times map failed for other than EFBIG");
6904 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
6905 		    CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index");
6906 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
6907 		    CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index");
6908 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO,
6909 		    "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed,
6910 		    1, "Consumer Index seen by credit update");
6911 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
6912 		    CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use");
6913 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO,
6914 		    "txq_processed", CTLFLAG_RD, &txq->ift_processed,
6915 		    "descriptors processed for clean");
6916 		SYSCTL_ADD_UQUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
6917 		    CTLFLAG_RD, &txq->ift_cleaned, "total cleaned");
6918 		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
6919 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
6920 		    __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
6921 		    mp_ring_state_handler, "A", "soft ring state");
6922 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6923 		    "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues,
6924 		    "# of enqueues to the mp_ring for this queue");
6925 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6926 		    "r_drops", CTLFLAG_RD, &txq->ift_br->drops,
6927 		    "# of drops in the mp_ring for this queue");
6928 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6929 		    "r_starts", CTLFLAG_RD, &txq->ift_br->starts,
6930 		    "# of normal consumer starts in mp_ring for this queue");
6931 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6932 		    "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls,
6933 		    "# of consumer stalls in the mp_ring for this queue");
6934 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6935 		    "r_restarts", CTLFLAG_RD, &txq->ift_br->restarts,
6936 		    "# of consumer restarts in the mp_ring for this queue");
6937 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO,
6938 		    "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications,
6939 		    "# of consumer abdications in the mp_ring for this queue");
6940 	}
6941 
6942 	if (scctx->isc_nrxqsets > 100)
6943 		qfmt = "rxq%03d";
6944 	else if (scctx->isc_nrxqsets > 10)
6945 		qfmt = "rxq%02d";
6946 	else
6947 		qfmt = "rxq%d";
6948 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
6949 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6950 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6951 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
6952 		queue_list = SYSCTL_CHILDREN(queue_node);
6953 		SYSCTL_ADD_INT(ctx_list, queue_list, OID_AUTO, "cpu",
6954 		    CTLFLAG_RD, &rxq->ifr_task.gt_cpu, 0,
6955 		    "cpu this queue is bound to");
6956 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
6957 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO,
6958 			    "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1,
6959 			    "Consumer Index");
6960 		}
6961 
6962 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
6963 			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
6964 			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list,
6965 			    OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE,
6966 			    NULL, "freelist Name");
6967 			fl_list = SYSCTL_CHILDREN(fl_node);
6968 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
6969 			    CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index");
6970 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
6971 			    CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index");
6972 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
6973 			    CTLFLAG_RD, &fl->ifl_credits, 1,
6974 			    "credits available");
6975 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "buf_size",
6976 			    CTLFLAG_RD, &fl->ifl_buf_size, 1, "buffer size");
6977 #if MEMORY_LOGGING
6978 			SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
6979 			    "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued,
6980 			    "mbufs allocated");
6981 			SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
6982 			    "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued,
6983 			    "mbufs freed");
6984 			SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
6985 			    "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued,
6986 			    "clusters allocated");
6987 			SYSCTL_ADD_UQUAD(ctx_list, fl_list, OID_AUTO,
6988 			    "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued,
6989 			    "clusters freed");
6990 #endif
6991 		}
6992 	}
6993 
6994 }
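/*
 * Example: after iflib_add_device_sysctl_post() runs, per-queue state
 * for a hypothetical "foo" device is visible via sysctl(8), e.g.:
 *
 *	# sysctl dev.foo.0.iflib.txq0.ring_state
 *	# sysctl dev.foo.0.iflib.rxq0.rxq_fl0.credits
 */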
6995 
6996 void
6997 iflib_request_reset(if_ctx_t ctx)
6998 {
6999 
7000 	STATE_LOCK(ctx);
7001 	ctx->ifc_flags |= IFC_DO_RESET;
7002 	STATE_UNLOCK(ctx);
7003 }
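/*
 * A minimal sketch of how a driver might use iflib_request_reset():
 * the flag only marks the context; the reinit itself is performed by
 * iflib's admin task, so the caller typically kicks that task next.
 * foo_watchdog() is a hypothetical driver routine.
 */
#if 0
static void
foo_watchdog(if_ctx_t ctx)
{
	iflib_request_reset(ctx);	/* sets IFC_DO_RESET under the state lock */
	iflib_admin_intr_deferred(ctx);	/* admin task performs the reset */
}
#endif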
7004 
7005 #ifndef __NO_STRICT_ALIGNMENT
7006 static struct mbuf *
7007 iflib_fixup_rx(struct mbuf *m)
7008 {
7009 	struct mbuf *n;
7010 
7011 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
7012 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
7013 		m->m_data += ETHER_HDR_LEN;
7014 		n = m;
7015 	} else {
7016 		MGETHDR(n, M_NOWAIT, MT_DATA);
7017 		if (n == NULL) {
7018 			m_freem(m);
7019 			return (NULL);
7020 		}
7021 		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
7022 		m->m_data += ETHER_HDR_LEN;
7023 		m->m_len -= ETHER_HDR_LEN;
7024 		n->m_len = ETHER_HDR_LEN;
7025 		M_MOVE_PKTHDR(n, m);
7026 		n->m_next = m;
7027 	}
7028 	return (n);
7029 }
7030 #endif
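/*
 * Worked example for iflib_fixup_rx() above: assume the received frame
 * begins on a 4-byte boundary.  The 14-byte Ethernet header then
 * leaves the IP header at offset 14 (14 % 4 == 2), which traps on
 * strict-alignment machines.  Sliding the whole frame forward by
 * ETHER_HDR_LEN puts the IP header at offset 28 (28 % 4 == 0).  Frames
 * too long to slide within the cluster instead get a separate header
 * mbuf prepended, with the original mbuf's data pointer advanced past
 * the Ethernet header.
 */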
7031 
7032 #ifdef DEBUGNET
7033 static void
7034 iflib_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
7035 {
7036 	if_ctx_t ctx;
7037 
7038 	ctx = if_getsoftc(ifp);
7039 	CTX_LOCK(ctx);
7040 	*nrxr = NRXQSETS(ctx);
7041 	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
7042 	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
7043 	CTX_UNLOCK(ctx);
7044 }
7045 
7046 static void
7047 iflib_debugnet_event(if_t ifp, enum debugnet_ev event)
7048 {
7049 	if_ctx_t ctx;
7050 	if_softc_ctx_t scctx;
7051 	iflib_fl_t fl;
7052 	iflib_rxq_t rxq;
7053 	int i, j;
7054 
7055 	ctx = if_getsoftc(ifp);
7056 	scctx = &ctx->ifc_softc_ctx;
7057 
7058 	switch (event) {
7059 	case DEBUGNET_START:
7060 		for (i = 0; i < scctx->isc_nrxqsets; i++) {
7061 			rxq = &ctx->ifc_rxqs[i];
7062 			for (j = 0; j < rxq->ifr_nfl; j++) {
7063 				fl = &rxq->ifr_fl[j];	/* advance with j, not just fl[0] */
7064 				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
7065 			}
7066 		}
7067 		iflib_no_tx_batch = 1;
7068 		break;
7069 	default:
7070 		break;
7071 	}
7072 }
7073 
7074 static int
7075 iflib_debugnet_transmit(if_t ifp, struct mbuf *m)
7076 {
7077 	if_ctx_t ctx;
7078 	iflib_txq_t txq;
7079 	int error;
7080 
7081 	ctx = if_getsoftc(ifp);
7082 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
7083 	    IFF_DRV_RUNNING)
7084 		return (EBUSY);
7085 
7086 	txq = &ctx->ifc_txqs[0];
7087 	error = iflib_encap(txq, &m);
7088 	if (error == 0)
7089 		(void)iflib_txd_db_check(txq, true);
7090 	return (error);
7091 }
7092 
7093 static int
7094 iflib_debugnet_poll(if_t ifp, int count)
7095 {
7096 	struct epoch_tracker et;
7097 	if_ctx_t ctx;
7098 	if_softc_ctx_t scctx;
7099 	iflib_txq_t txq;
7100 	int i;
7101 
7102 	ctx = if_getsoftc(ifp);
7103 	scctx = &ctx->ifc_softc_ctx;
7104 
7105 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
7106 	    IFF_DRV_RUNNING)
7107 		return (EBUSY);
7108 
7109 	txq = &ctx->ifc_txqs[0];
7110 	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
7111 
7112 	NET_EPOCH_ENTER(et);
7113 	for (i = 0; i < scctx->isc_nrxqsets; i++)
7114 		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
7115 	NET_EPOCH_EXIT(et);
7116 	return (0);
7117 }
7118 #endif /* DEBUGNET */
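/*
 * The four functions above implement iflib's debugnet (netdump/netgdb)
 * methods.  A sketch of how they are attached to an ifnet, assuming
 * the methods table is defined with DEBUGNET_DEFINE(iflib) and the
 * interface has already been allocated by iflib:
 */
#if 0
	DEBUGNET_SET(ctx->ifc_ifp, iflib);	/* wires up iflib_debugnet_* */
#endif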
7119 
7120 
7121 static inline iflib_txq_t
7122 iflib_simple_select_queue(if_ctx_t ctx, struct mbuf *m)
7123 {
7124 	int qidx;
7125 
7126 	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
7127 		qidx = QIDX(ctx, m);
7128 	else
7129 		qidx = NTXQSETS(ctx) + FIRST_QSET(ctx) - 1;
7130 	return (&ctx->ifc_txqs[qidx]);
7131 }
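/*
 * Note on the fallback above: mbufs that carry no RSS hash are all
 * steered to the highest-numbered TX queue (queue 0 in the
 * single-queue case), so unhashed traffic is serialized on one ring
 * rather than being spread across queues.
 */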
7132 
7133 static int
7134 iflib_simple_transmit(if_t ifp, struct mbuf *m)
7135 {
7136 	if_ctx_t ctx;
7137 	iflib_txq_t txq;
7138 	int error;
7139 	int bytes_sent = 0, pkt_sent = 0, mcast_sent = 0;
7140 
7141 
7142 	ctx = if_getsoftc(ifp);
7143 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
7144 	    IFF_DRV_RUNNING)
7145 		return (EBUSY);
7146 	txq = iflib_simple_select_queue(ctx, m);
7147 	mtx_lock(&txq->ift_mtx);
7148 	error = iflib_encap(txq, &m);
7149 	if (error == 0) {
7150 		pkt_sent++;
7151 		bytes_sent += m->m_pkthdr.len;
7152 		mcast_sent += !!(m->m_flags & M_MCAST);
7153 		(void)iflib_txd_db_check(txq, true);
7154 	} else {
7155 		if (error == ENOBUFS)
7156 			if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
7157 		else
7158 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
7159 	}
7160 	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
7161 	mtx_unlock(&txq->ift_mtx);
7162 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
7163 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
7164 	if (mcast_sent)
7165 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
7166 
7167 	return (error);
7168 }
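/*
 * A usage note on the simple TX path above: it is selected with the
 * "simple_tx" knob added in iflib_add_device_sysctl_pre()
 * (CTLFLAG_RDTUN, so boot-time only), e.g. in loader.conf for a
 * hypothetical "foo" device:
 *
 *	dev.foo.0.iflib.simple_tx="1"
 *
 * When enabled, transmission bypasses the mp_ring producer/consumer
 * machinery: each packet is encapsulated and the doorbell rung
 * directly under the per-queue mutex, with reclaim done inline.
 */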
7169