xref: /freebsd/sys/net/iflib.c (revision ed6611cc8c996a47d334096533caf874a6e27fd7)
14c7070dbSScott Long /*-
27b610b60SSean Bruno  * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
34c7070dbSScott Long  * All rights reserved.
44c7070dbSScott Long  *
54c7070dbSScott Long  * Redistribution and use in source and binary forms, with or without
64c7070dbSScott Long  * modification, are permitted provided that the following conditions are met:
74c7070dbSScott Long  *
84c7070dbSScott Long  *  1. Redistributions of source code must retain the above copyright notice,
94c7070dbSScott Long  *     this list of conditions and the following disclaimer.
104c7070dbSScott Long  *
114c7070dbSScott Long  *  2. Neither the name of Matthew Macy nor the names of its
124c7070dbSScott Long  *     contributors may be used to endorse or promote products derived from
134c7070dbSScott Long  *     this software without specific prior written permission.
144c7070dbSScott Long  *
154c7070dbSScott Long  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
164c7070dbSScott Long  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
174c7070dbSScott Long  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
184c7070dbSScott Long  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
194c7070dbSScott Long  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
204c7070dbSScott Long  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
214c7070dbSScott Long  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
224c7070dbSScott Long  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
234c7070dbSScott Long  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
244c7070dbSScott Long  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
254c7070dbSScott Long  * POSSIBILITY OF SUCH DAMAGE.
264c7070dbSScott Long  */
274c7070dbSScott Long 
284c7070dbSScott Long #include <sys/cdefs.h>
294c7070dbSScott Long __FBSDID("$FreeBSD$");
304c7070dbSScott Long 
31aaeb188aSBjoern A. Zeeb #include "opt_inet.h"
32aaeb188aSBjoern A. Zeeb #include "opt_inet6.h"
33aaeb188aSBjoern A. Zeeb #include "opt_acpi.h"
34b103855eSStephen Hurd #include "opt_sched.h"
35aaeb188aSBjoern A. Zeeb 
364c7070dbSScott Long #include <sys/param.h>
374c7070dbSScott Long #include <sys/types.h>
384c7070dbSScott Long #include <sys/bus.h>
394c7070dbSScott Long #include <sys/eventhandler.h>
404c7070dbSScott Long #include <sys/kernel.h>
414c7070dbSScott Long #include <sys/lock.h>
424c7070dbSScott Long #include <sys/mutex.h>
434c7070dbSScott Long #include <sys/module.h>
444c7070dbSScott Long #include <sys/kobj.h>
454c7070dbSScott Long #include <sys/rman.h>
464c7070dbSScott Long #include <sys/sbuf.h>
474c7070dbSScott Long #include <sys/smp.h>
484c7070dbSScott Long #include <sys/socket.h>
4909f6ff4fSMatt Macy #include <sys/sockio.h>
504c7070dbSScott Long #include <sys/sysctl.h>
514c7070dbSScott Long #include <sys/syslog.h>
524c7070dbSScott Long #include <sys/taskqueue.h>
5323ac9029SStephen Hurd #include <sys/limits.h>
544c7070dbSScott Long 
554c7070dbSScott Long #include <net/if.h>
564c7070dbSScott Long #include <net/if_var.h>
574c7070dbSScott Long #include <net/if_types.h>
584c7070dbSScott Long #include <net/if_media.h>
594c7070dbSScott Long #include <net/bpf.h>
604c7070dbSScott Long #include <net/ethernet.h>
614c7070dbSScott Long #include <net/mp_ring.h>
627790c8c1SConrad Meyer #include <net/debugnet.h>
636d49b41eSAndrew Gallatin #include <net/pfil.h>
6435e4e998SStephen Hurd #include <net/vnet.h>
654c7070dbSScott Long 
664c7070dbSScott Long #include <netinet/in.h>
674c7070dbSScott Long #include <netinet/in_pcb.h>
684c7070dbSScott Long #include <netinet/tcp_lro.h>
694c7070dbSScott Long #include <netinet/in_systm.h>
704c7070dbSScott Long #include <netinet/if_ether.h>
714c7070dbSScott Long #include <netinet/ip.h>
724c7070dbSScott Long #include <netinet/ip6.h>
734c7070dbSScott Long #include <netinet/tcp.h>
7435e4e998SStephen Hurd #include <netinet/ip_var.h>
7535e4e998SStephen Hurd #include <netinet6/ip6_var.h>
764c7070dbSScott Long 
774c7070dbSScott Long #include <machine/bus.h>
784c7070dbSScott Long #include <machine/in_cksum.h>
794c7070dbSScott Long 
804c7070dbSScott Long #include <vm/vm.h>
814c7070dbSScott Long #include <vm/pmap.h>
824c7070dbSScott Long 
834c7070dbSScott Long #include <dev/led/led.h>
844c7070dbSScott Long #include <dev/pci/pcireg.h>
854c7070dbSScott Long #include <dev/pci/pcivar.h>
864c7070dbSScott Long #include <dev/pci/pci_private.h>
874c7070dbSScott Long 
884c7070dbSScott Long #include <net/iflib.h>
8909f6ff4fSMatt Macy #include <net/iflib_private.h>
904c7070dbSScott Long 
914c7070dbSScott Long #include "ifdi_if.h"
924c7070dbSScott Long 
9377c1fcecSEric Joyner #ifdef PCI_IOV
9477c1fcecSEric Joyner #include <dev/pci/pci_iov.h>
9577c1fcecSEric Joyner #endif
9677c1fcecSEric Joyner 
9787890dbaSSean Bruno #include <sys/bitstring.h>
984c7070dbSScott Long /*
9995246abbSSean Bruno  * Enable accounting of every mbuf as it comes into and goes out of
10095246abbSSean Bruno  * iflib's software descriptor references.
1014c7070dbSScott Long  */
1024c7070dbSScott Long #define MEMORY_LOGGING 0
1034c7070dbSScott Long /*
1044c7070dbSScott Long  * Enable mbuf vectors for compressing long mbuf chains
1054c7070dbSScott Long  */
1064c7070dbSScott Long 
1074c7070dbSScott Long /*
1084c7070dbSScott Long  * NB:
1094c7070dbSScott Long  * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
1104c7070dbSScott Long  *   we prefetch needs to be determined by the time spent in m_free relative to
1114c7070dbSScott Long  *   the cost of a prefetch. This will of course vary based on the workload:
1124c7070dbSScott Long  *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
1134c7070dbSScott Long  *        is quite expensive, thus suggesting very little prefetch.
1144c7070dbSScott Long  *      - small packet forwarding which is just returning a single mbuf to
1154c7070dbSScott Long  *        UMA will typically be very fast relative to the cost of a memory
1164c7070dbSScott Long  *        access.
1174c7070dbSScott Long  */
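/*
 * Editorial sketch (not iflib code): one way a tunable prefetch distance
 * could be applied in a tx-clean loop.  "dist", "sds", "cidx", "pidx" and
 * "size" are illustrative names only, and the ring size is assumed to be a
 * power of two.
 *
 *	for (i = cidx; i != pidx; i = (i + 1) & (size - 1)) {
 *		__builtin_prefetch(sds->ifsd_m[(i + dist) & (size - 1)]);
 *		m_free(sds->ifsd_m[i]);
 *		sds->ifsd_m[i] = NULL;
 *	}
 */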
1184c7070dbSScott Long 
1194c7070dbSScott Long 
1204c7070dbSScott Long /*
1214c7070dbSScott Long  * File organization:
1224c7070dbSScott Long  *  - private structures
1234c7070dbSScott Long  *  - iflib private utility functions
1244c7070dbSScott Long  *  - ifnet functions
1254c7070dbSScott Long  *  - vlan registry and other exported functions
1264c7070dbSScott Long  *  - iflib public core functions
1274c7070dbSScott Long  *
1284c7070dbSScott Long  *
1294c7070dbSScott Long  */
13009f6ff4fSMatt Macy MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
1314c7070dbSScott Long 
132fb1a29b4SHans Petter Selasky #define	IFLIB_RXEOF_MORE (1U << 0)
133fb1a29b4SHans Petter Selasky #define	IFLIB_RXEOF_EMPTY (1U << 1)
134fb1a29b4SHans Petter Selasky 
1354c7070dbSScott Long struct iflib_txq;
1364c7070dbSScott Long typedef struct iflib_txq *iflib_txq_t;
1374c7070dbSScott Long struct iflib_rxq;
1384c7070dbSScott Long typedef struct iflib_rxq *iflib_rxq_t;
1394c7070dbSScott Long struct iflib_fl;
1404c7070dbSScott Long typedef struct iflib_fl *iflib_fl_t;
1414c7070dbSScott Long 
1424ecb427aSSean Bruno struct iflib_ctx;
1434ecb427aSSean Bruno 
1442d873474SStephen Hurd static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
145dd7fbcf1SStephen Hurd static void iflib_timer(void *arg);
1462d873474SStephen Hurd 
1474c7070dbSScott Long typedef struct iflib_filter_info {
1484c7070dbSScott Long 	driver_filter_t *ifi_filter;
1494c7070dbSScott Long 	void *ifi_filter_arg;
1504c7070dbSScott Long 	struct grouptask *ifi_task;
15195246abbSSean Bruno 	void *ifi_ctx;
1524c7070dbSScott Long } *iflib_filter_info_t;
1534c7070dbSScott Long 
1544c7070dbSScott Long struct iflib_ctx {
1554c7070dbSScott Long 	KOBJ_FIELDS;
1564c7070dbSScott Long 	/*
1574c7070dbSScott Long 	 * Pointer to hardware driver's softc
1584c7070dbSScott Long 	 */
1594c7070dbSScott Long 	void *ifc_softc;
1604c7070dbSScott Long 	device_t ifc_dev;
1614c7070dbSScott Long 	if_t ifc_ifp;
1624c7070dbSScott Long 
1634c7070dbSScott Long 	cpuset_t ifc_cpus;
1644c7070dbSScott Long 	if_shared_ctx_t ifc_sctx;
1654c7070dbSScott Long 	struct if_softc_ctx ifc_softc_ctx;
1664c7070dbSScott Long 
167aa8a24d3SStephen Hurd 	struct sx ifc_ctx_sx;
1687b610b60SSean Bruno 	struct mtx ifc_state_mtx;
1694c7070dbSScott Long 
1704c7070dbSScott Long 	iflib_txq_t ifc_txqs;
1714c7070dbSScott Long 	iflib_rxq_t ifc_rxqs;
1724c7070dbSScott Long 	uint32_t ifc_if_flags;
1734c7070dbSScott Long 	uint32_t ifc_flags;
1744c7070dbSScott Long 	uint32_t ifc_max_fl_buf_size;
1751b9d9394SEric Joyner 	uint32_t ifc_rx_mbuf_sz;
1764c7070dbSScott Long 
1774c7070dbSScott Long 	int ifc_link_state;
1784c7070dbSScott Long 	int ifc_watchdog_events;
1794c7070dbSScott Long 	struct cdev *ifc_led_dev;
1804c7070dbSScott Long 	struct resource *ifc_msix_mem;
1814c7070dbSScott Long 
1824c7070dbSScott Long 	struct if_irq ifc_legacy_irq;
1834c7070dbSScott Long 	struct grouptask ifc_admin_task;
1844c7070dbSScott Long 	struct grouptask ifc_vflr_task;
1854c7070dbSScott Long 	struct iflib_filter_info ifc_filter_info;
1864c7070dbSScott Long 	struct ifmedia	ifc_media;
187e2621d96SMatt Macy 	struct ifmedia	*ifc_mediap;
1884c7070dbSScott Long 
1894c7070dbSScott Long 	struct sysctl_oid *ifc_sysctl_node;
1904c7070dbSScott Long 	uint16_t ifc_sysctl_ntxqs;
1914c7070dbSScott Long 	uint16_t ifc_sysctl_nrxqs;
19223ac9029SStephen Hurd 	uint16_t ifc_sysctl_qs_eq_override;
193f4d2154eSStephen Hurd 	uint16_t ifc_sysctl_rx_budget;
194fe51d4cdSStephen Hurd 	uint16_t ifc_sysctl_tx_abdicate;
195f154ece0SStephen Hurd 	uint16_t ifc_sysctl_core_offset;
196f154ece0SStephen Hurd #define	CORE_OFFSET_UNSPECIFIED	0xffff
197f154ece0SStephen Hurd 	uint8_t  ifc_sysctl_separate_txrx;
19823ac9029SStephen Hurd 
19995246abbSSean Bruno 	qidx_t ifc_sysctl_ntxds[8];
20095246abbSSean Bruno 	qidx_t ifc_sysctl_nrxds[8];
2014c7070dbSScott Long 	struct if_txrx ifc_txrx;
2024c7070dbSScott Long #define isc_txd_encap  ifc_txrx.ift_txd_encap
2034c7070dbSScott Long #define isc_txd_flush  ifc_txrx.ift_txd_flush
2044c7070dbSScott Long #define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
2054c7070dbSScott Long #define isc_rxd_available ifc_txrx.ift_rxd_available
2064c7070dbSScott Long #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
2074c7070dbSScott Long #define isc_rxd_refill ifc_txrx.ift_rxd_refill
2084c7070dbSScott Long #define isc_rxd_flush ifc_txrx.ift_rxd_flush
2114c7070dbSScott Long #define isc_legacy_intr ifc_txrx.ift_legacy_intr
2124c7070dbSScott Long 	eventhandler_tag ifc_vlan_attach_event;
2134c7070dbSScott Long 	eventhandler_tag ifc_vlan_detach_event;
2141fd8c72cSKyle Evans 	struct ether_addr ifc_mac;
2154c7070dbSScott Long };
2164c7070dbSScott Long 
2174c7070dbSScott Long void *
2184c7070dbSScott Long iflib_get_softc(if_ctx_t ctx)
2194c7070dbSScott Long {
2204c7070dbSScott Long 
2214c7070dbSScott Long 	return (ctx->ifc_softc);
2224c7070dbSScott Long }
2234c7070dbSScott Long 
2244c7070dbSScott Long device_t
2254c7070dbSScott Long iflib_get_dev(if_ctx_t ctx)
2264c7070dbSScott Long {
2274c7070dbSScott Long 
2284c7070dbSScott Long 	return (ctx->ifc_dev);
2294c7070dbSScott Long }
2304c7070dbSScott Long 
2314c7070dbSScott Long if_t
2324c7070dbSScott Long iflib_get_ifp(if_ctx_t ctx)
2334c7070dbSScott Long {
2344c7070dbSScott Long 
2354c7070dbSScott Long 	return (ctx->ifc_ifp);
2364c7070dbSScott Long }
2374c7070dbSScott Long 
2384c7070dbSScott Long struct ifmedia *
2394c7070dbSScott Long iflib_get_media(if_ctx_t ctx)
2404c7070dbSScott Long {
2414c7070dbSScott Long 
242e2621d96SMatt Macy 	return (ctx->ifc_mediap);
2434c7070dbSScott Long }
2444c7070dbSScott Long 
24509f6ff4fSMatt Macy uint32_t
24609f6ff4fSMatt Macy iflib_get_flags(if_ctx_t ctx)
24709f6ff4fSMatt Macy {
24809f6ff4fSMatt Macy 	return (ctx->ifc_flags);
24909f6ff4fSMatt Macy }
25009f6ff4fSMatt Macy 
25109f6ff4fSMatt Macy void
2524c7070dbSScott Long iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
2534c7070dbSScott Long {
2544c7070dbSScott Long 
2551fd8c72cSKyle Evans 	bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
2564c7070dbSScott Long }
2574c7070dbSScott Long 
2584c7070dbSScott Long if_softc_ctx_t
2594c7070dbSScott Long iflib_get_softc_ctx(if_ctx_t ctx)
2604c7070dbSScott Long {
2614c7070dbSScott Long 
2624c7070dbSScott Long 	return (&ctx->ifc_softc_ctx);
2634c7070dbSScott Long }
2644c7070dbSScott Long 
2654c7070dbSScott Long if_shared_ctx_t
2664c7070dbSScott Long iflib_get_sctx(if_ctx_t ctx)
2674c7070dbSScott Long {
2684c7070dbSScott Long 
2694c7070dbSScott Long 	return (ctx->ifc_sctx);
2704c7070dbSScott Long }
2714c7070dbSScott Long 
27295246abbSSean Bruno #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
2734c7070dbSScott Long #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
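/* CACHE_PTR_NEXT(): round a pointer up to the next cache-line boundary. */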
2745e888388SSean Bruno #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
2754c7070dbSScott Long 
2764c7070dbSScott Long #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
2774c7070dbSScott Long #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
2784c7070dbSScott Long 
279e035717eSSean Bruno typedef struct iflib_sw_rx_desc_array {
280e035717eSSean Bruno 	bus_dmamap_t	*ifsd_map;         /* bus_dma maps for packet */
281e035717eSSean Bruno 	struct mbuf	**ifsd_m;           /* pkthdr mbufs */
282e035717eSSean Bruno 	caddr_t		*ifsd_cl;          /* direct cluster pointer for rx */
283fbec776dSAndrew Gallatin 	bus_addr_t	*ifsd_ba;          /* bus addr of cluster for rx */
284e035717eSSean Bruno } iflib_rxsd_array_t;
2854c7070dbSScott Long 
2864c7070dbSScott Long typedef struct iflib_sw_tx_desc_array {
2874c7070dbSScott Long 	bus_dmamap_t    *ifsd_map;         /* bus_dma maps for packet */
2888a04b53dSKonstantin Belousov 	bus_dmamap_t	*ifsd_tso_map;     /* bus_dma maps for TSO packet */
2894c7070dbSScott Long 	struct mbuf    **ifsd_m;           /* pkthdr mbufs */
29095246abbSSean Bruno } if_txsd_vec_t;
2914c7070dbSScott Long 
2924c7070dbSScott Long /* magic number that should be high enough for any hardware */
2934c7070dbSScott Long #define IFLIB_MAX_TX_SEGS		128
29495246abbSSean Bruno #define IFLIB_RX_COPY_THRESH		128
2954c7070dbSScott Long #define IFLIB_MAX_RX_REFRESH		32
29695246abbSSean Bruno /* The minimum number of descriptors per second before we start coalescing */
29795246abbSSean Bruno #define IFLIB_MIN_DESC_SEC		16384
29895246abbSSean Bruno #define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
2994c7070dbSScott Long #define IFLIB_QUEUE_IDLE		0
3004c7070dbSScott Long #define IFLIB_QUEUE_HUNG		1
3014c7070dbSScott Long #define IFLIB_QUEUE_WORKING		2
30295246abbSSean Bruno /* maximum number of txqs that can share an rx interrupt */
30395246abbSSean Bruno #define IFLIB_MAX_TX_SHARED_INTR	4
3044c7070dbSScott Long 
30595246abbSSean Bruno /* This should really scale with ring size; the current value is fairly arbitrary */
30695246abbSSean Bruno #define TX_BATCH_SIZE			32
3074c7070dbSScott Long 
3084c7070dbSScott Long #define IFLIB_RESTART_BUDGET		8
3094c7070dbSScott Long 
3104c7070dbSScott Long #define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
3114c7070dbSScott Long 				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
3124c7070dbSScott Long 				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
3131722eeacSMarius Strobl 
3144c7070dbSScott Long struct iflib_txq {
31595246abbSSean Bruno 	qidx_t		ift_in_use;
31695246abbSSean Bruno 	qidx_t		ift_cidx;
31795246abbSSean Bruno 	qidx_t		ift_cidx_processed;
31895246abbSSean Bruno 	qidx_t		ift_pidx;
3194c7070dbSScott Long 	uint8_t		ift_gen;
32023ac9029SStephen Hurd 	uint8_t		ift_br_offset;
32195246abbSSean Bruno 	uint16_t	ift_npending;
32295246abbSSean Bruno 	uint16_t	ift_db_pending;
32395246abbSSean Bruno 	uint16_t	ift_rs_pending;
3244c7070dbSScott Long 	/* implicit pad */
32595246abbSSean Bruno 	uint8_t		ift_txd_size[8];
3264c7070dbSScott Long 	uint64_t	ift_processed;
3274c7070dbSScott Long 	uint64_t	ift_cleaned;
32895246abbSSean Bruno 	uint64_t	ift_cleaned_prev;
3294c7070dbSScott Long #if MEMORY_LOGGING
3304c7070dbSScott Long 	uint64_t	ift_enqueued;
3314c7070dbSScott Long 	uint64_t	ift_dequeued;
3324c7070dbSScott Long #endif
3334c7070dbSScott Long 	uint64_t	ift_no_tx_dma_setup;
3344c7070dbSScott Long 	uint64_t	ift_no_desc_avail;
3354c7070dbSScott Long 	uint64_t	ift_mbuf_defrag_failed;
3364c7070dbSScott Long 	uint64_t	ift_mbuf_defrag;
3374c7070dbSScott Long 	uint64_t	ift_map_failed;
3384c7070dbSScott Long 	uint64_t	ift_txd_encap_efbig;
3394c7070dbSScott Long 	uint64_t	ift_pullups;
340dd7fbcf1SStephen Hurd 	uint64_t	ift_last_timer_tick;
3414c7070dbSScott Long 
3424c7070dbSScott Long 	struct mtx	ift_mtx;
3434c7070dbSScott Long 	struct mtx	ift_db_mtx;
3444c7070dbSScott Long 
3454c7070dbSScott Long 	/* constant values */
3464c7070dbSScott Long 	if_ctx_t	ift_ctx;
34795246abbSSean Bruno 	struct ifmp_ring        *ift_br;
3484c7070dbSScott Long 	struct grouptask	ift_task;
34995246abbSSean Bruno 	qidx_t		ift_size;
3504c7070dbSScott Long 	uint16_t	ift_id;
3514c7070dbSScott Long 	struct callout	ift_timer;
3524c7070dbSScott Long 
35395246abbSSean Bruno 	if_txsd_vec_t	ift_sds;
3544c7070dbSScott Long 	uint8_t		ift_qstatus;
3554c7070dbSScott Long 	uint8_t		ift_closed;
35695246abbSSean Bruno 	uint8_t		ift_update_freq;
3574c7070dbSScott Long 	struct iflib_filter_info ift_filter_info;
358bfce461eSMarius Strobl 	bus_dma_tag_t	ift_buf_tag;
359bfce461eSMarius Strobl 	bus_dma_tag_t	ift_tso_buf_tag;
3604c7070dbSScott Long 	iflib_dma_info_t	ift_ifdi;
3614c7070dbSScott Long #define MTX_NAME_LEN 16
3624c7070dbSScott Long 	char                    ift_mtx_name[MTX_NAME_LEN];
3634c7070dbSScott Long 	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
3641248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
3651248952aSSean Bruno 	uint64_t ift_cpu_exec_count[256];
3661248952aSSean Bruno #endif
3674c7070dbSScott Long } __aligned(CACHE_LINE_SIZE);
3684c7070dbSScott Long 
3694c7070dbSScott Long struct iflib_fl {
37095246abbSSean Bruno 	qidx_t		ifl_cidx;
37195246abbSSean Bruno 	qidx_t		ifl_pidx;
37295246abbSSean Bruno 	qidx_t		ifl_credits;
3734c7070dbSScott Long 	uint8_t		ifl_gen;
37495246abbSSean Bruno 	uint8_t		ifl_rxd_size;
3754c7070dbSScott Long #if MEMORY_LOGGING
3764c7070dbSScott Long 	uint64_t	ifl_m_enqueued;
3774c7070dbSScott Long 	uint64_t	ifl_m_dequeued;
3784c7070dbSScott Long 	uint64_t	ifl_cl_enqueued;
3794c7070dbSScott Long 	uint64_t	ifl_cl_dequeued;
3804c7070dbSScott Long #endif
3814c7070dbSScott Long 	/* implicit pad */
38287890dbaSSean Bruno 	bitstr_t 	*ifl_rx_bitmap;
38387890dbaSSean Bruno 	qidx_t		ifl_fragidx;
3844c7070dbSScott Long 	/* constant */
38595246abbSSean Bruno 	qidx_t		ifl_size;
3864c7070dbSScott Long 	uint16_t	ifl_buf_size;
3874c7070dbSScott Long 	uint16_t	ifl_cltype;
3884c7070dbSScott Long 	uma_zone_t	ifl_zone;
389e035717eSSean Bruno 	iflib_rxsd_array_t	ifl_sds;
3904c7070dbSScott Long 	iflib_rxq_t	ifl_rxq;
3914c7070dbSScott Long 	uint8_t		ifl_id;
392bfce461eSMarius Strobl 	bus_dma_tag_t	ifl_buf_tag;
3934c7070dbSScott Long 	iflib_dma_info_t	ifl_ifdi;
3944c7070dbSScott Long 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
3954c7070dbSScott Long 	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
39695246abbSSean Bruno 	qidx_t	ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
3974c7070dbSScott Long }  __aligned(CACHE_LINE_SIZE);
3984c7070dbSScott Long 
39995246abbSSean Bruno static inline qidx_t
40095246abbSSean Bruno get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
4014c7070dbSScott Long {
40295246abbSSean Bruno 	qidx_t used;
4034c7070dbSScott Long 
4044c7070dbSScott Long 	if (pidx > cidx)
4054c7070dbSScott Long 		used = pidx - cidx;
4064c7070dbSScott Long 	else if (pidx < cidx)
4074c7070dbSScott Long 		used = size - cidx + pidx;
4084c7070dbSScott Long 	else if (gen == 0 && pidx == cidx)
4094c7070dbSScott Long 		used = 0;
4104c7070dbSScott Long 	else if (gen == 1 && pidx == cidx)
4114c7070dbSScott Long 		used = size;
4124c7070dbSScott Long 	else
4134c7070dbSScott Long 		panic("bad state");
4144c7070dbSScott Long 
4154c7070dbSScott Long 	return (used);
4164c7070dbSScott Long }
4174c7070dbSScott Long 
4184c7070dbSScott Long #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
4194c7070dbSScott Long 
4204c7070dbSScott Long #define IDXDIFF(head, tail, wrap) \
4214c7070dbSScott Long 	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
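/*
 * Example (editorial): with a 1024-descriptor ring, cidx = 1000 and
 * pidx = 8, get_inuse() returns 1024 - 1000 + 8 = 32 and TXQ_AVAIL() is
 * therefore 992.  The gen bit only matters when pidx == cidx: gen == 0
 * means the ring is empty, gen == 1 means it is completely full.
 */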
4224c7070dbSScott Long 
4234c7070dbSScott Long struct iflib_rxq {
4244c7070dbSScott Long 	if_ctx_t	ifr_ctx;
4254c7070dbSScott Long 	iflib_fl_t	ifr_fl;
4264c7070dbSScott Long 	uint64_t	ifr_rx_irq;
4276d49b41eSAndrew Gallatin 	struct pfil_head	*pfil;
4281722eeacSMarius Strobl 	/*
4291722eeacSMarius Strobl 	 * If there is a separate completion queue (IFLIB_HAS_RXCQ), this is
4301722eeacSMarius Strobl 	 * the command queue consumer index.  Otherwise it's unused.
4311722eeacSMarius Strobl 	 */
4321722eeacSMarius Strobl 	qidx_t		ifr_cq_cidx;
4334c7070dbSScott Long 	uint16_t	ifr_id;
4344c7070dbSScott Long 	uint8_t		ifr_nfl;
43595246abbSSean Bruno 	uint8_t		ifr_ntxqirq;
43695246abbSSean Bruno 	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
4371722eeacSMarius Strobl 	uint8_t		ifr_fl_offset;
4384c7070dbSScott Long 	struct lro_ctrl			ifr_lc;
4394c7070dbSScott Long 	struct grouptask        ifr_task;
440fb1a29b4SHans Petter Selasky 	struct callout		ifr_watchdog;
4414c7070dbSScott Long 	struct iflib_filter_info ifr_filter_info;
4424c7070dbSScott Long 	iflib_dma_info_t		ifr_ifdi;
443ab2e3f79SStephen Hurd 
4444c7070dbSScott Long 	/* dynamically allocate if any drivers need a value substantially larger than this */
4454c7070dbSScott Long 	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
4461248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
4471248952aSSean Bruno 	uint64_t ifr_cpu_exec_count[256];
4481248952aSSean Bruno #endif
4494c7070dbSScott Long }  __aligned(CACHE_LINE_SIZE);
4504c7070dbSScott Long 
45195246abbSSean Bruno typedef struct if_rxsd {
45295246abbSSean Bruno 	caddr_t *ifsd_cl;
45395246abbSSean Bruno 	iflib_fl_t ifsd_fl;
45495246abbSSean Bruno } *if_rxsd_t;
45595246abbSSean Bruno 
45695246abbSSean Bruno /* multiple of word size */
45795246abbSSean Bruno #ifdef __LP64__
458ab2e3f79SStephen Hurd #define PKT_INFO_SIZE	6
45995246abbSSean Bruno #define RXD_INFO_SIZE	5
46095246abbSSean Bruno #define PKT_TYPE uint64_t
46195246abbSSean Bruno #else
462ab2e3f79SStephen Hurd #define PKT_INFO_SIZE	11
46395246abbSSean Bruno #define RXD_INFO_SIZE	8
46495246abbSSean Bruno #define PKT_TYPE uint32_t
46595246abbSSean Bruno #endif
46695246abbSSean Bruno #define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
46795246abbSSean Bruno #define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
46895246abbSSean Bruno 
46995246abbSSean Bruno typedef struct if_pkt_info_pad {
47095246abbSSean Bruno 	PKT_TYPE pkt_val[PKT_INFO_SIZE];
47195246abbSSean Bruno } *if_pkt_info_pad_t;
47295246abbSSean Bruno typedef struct if_rxd_info_pad {
47395246abbSSean Bruno 	PKT_TYPE rxd_val[RXD_INFO_SIZE];
47495246abbSSean Bruno } *if_rxd_info_pad_t;
47595246abbSSean Bruno 
47695246abbSSean Bruno CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
47795246abbSSean Bruno CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
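/*
 * Note (editorial): the *_pad overlays above expose if_pkt_info and
 * if_rxd_info as arrays of machine words so that pkt_info_zero() and
 * rxd_info_zero() can clear them word-by-word instead of calling bzero();
 * RXD_LOOP_BOUND rounds RXD_INFO_SIZE down to a multiple of the four-way
 * unrolled loop.  The CTASSERTs keep the overlay sizes in sync with the
 * real structures.
 */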
47895246abbSSean Bruno 
47995246abbSSean Bruno 
48095246abbSSean Bruno static inline void
48195246abbSSean Bruno pkt_info_zero(if_pkt_info_t pi)
48295246abbSSean Bruno {
48395246abbSSean Bruno 	if_pkt_info_pad_t pi_pad;
48495246abbSSean Bruno 
48595246abbSSean Bruno 	pi_pad = (if_pkt_info_pad_t)pi;
48695246abbSSean Bruno 	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
48795246abbSSean Bruno 	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
48895246abbSSean Bruno #ifndef __LP64__
489ab2e3f79SStephen Hurd 	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
490ab2e3f79SStephen Hurd 	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
49195246abbSSean Bruno #endif
49295246abbSSean Bruno }
49395246abbSSean Bruno 
49409f6ff4fSMatt Macy static device_method_t iflib_pseudo_methods[] = {
49509f6ff4fSMatt Macy 	DEVMETHOD(device_attach, noop_attach),
49609f6ff4fSMatt Macy 	DEVMETHOD(device_detach, iflib_pseudo_detach),
49709f6ff4fSMatt Macy 	DEVMETHOD_END
49809f6ff4fSMatt Macy };
49909f6ff4fSMatt Macy 
50009f6ff4fSMatt Macy driver_t iflib_pseudodriver = {
50109f6ff4fSMatt Macy 	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
50209f6ff4fSMatt Macy };
50309f6ff4fSMatt Macy 
50495246abbSSean Bruno static inline void
50595246abbSSean Bruno rxd_info_zero(if_rxd_info_t ri)
50695246abbSSean Bruno {
50795246abbSSean Bruno 	if_rxd_info_pad_t ri_pad;
50895246abbSSean Bruno 	int i;
50995246abbSSean Bruno 
51095246abbSSean Bruno 	ri_pad = (if_rxd_info_pad_t)ri;
51195246abbSSean Bruno 	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
51295246abbSSean Bruno 		ri_pad->rxd_val[i] = 0;
51395246abbSSean Bruno 		ri_pad->rxd_val[i+1] = 0;
51495246abbSSean Bruno 		ri_pad->rxd_val[i+2] = 0;
51595246abbSSean Bruno 		ri_pad->rxd_val[i+3] = 0;
51695246abbSSean Bruno 	}
51795246abbSSean Bruno #ifdef __LP64__
51895246abbSSean Bruno 	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
51995246abbSSean Bruno #endif
52095246abbSSean Bruno }
52195246abbSSean Bruno 
5224c7070dbSScott Long /*
5234c7070dbSScott Long  * Only allow a single packet to take up at most 1/nth of the tx ring
5244c7070dbSScott Long  */
5254c7070dbSScott Long #define MAX_SINGLE_PACKET_FRACTION 12
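/*
 * Example (editorial): with a 1024-descriptor tx ring, a single packet may
 * consume at most 1024 / MAX_SINGLE_PACKET_FRACTION = 85 descriptors.
 */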
5264c7070dbSScott Long #define IF_BAD_DMA (bus_addr_t)-1
5274c7070dbSScott Long 
5284c7070dbSScott Long #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
5294c7070dbSScott Long 
530aa8a24d3SStephen Hurd #define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
531aa8a24d3SStephen Hurd #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
532aa8a24d3SStephen Hurd #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
533aa8a24d3SStephen Hurd #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
5344c7070dbSScott Long 
5357b610b60SSean Bruno #define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
5367b610b60SSean Bruno #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
5377b610b60SSean Bruno #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
5387b610b60SSean Bruno #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
5397b610b60SSean Bruno 
5404c7070dbSScott Long #define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
5414c7070dbSScott Long #define CALLOUT_UNLOCK(txq) 	mtx_unlock(&txq->ift_mtx)
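/*
 * Note (editorial): ifc_ctx_sx is a sleepable sx lock, so CTX_LOCK() may be
 * held across operations that can sleep, whereas ifc_state_mtx is a mutex,
 * making STATE_LOCK() usable from contexts that must not sleep.
 */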
5424c7070dbSScott Long 
54377c1fcecSEric Joyner void
54477c1fcecSEric Joyner iflib_set_detach(if_ctx_t ctx)
54577c1fcecSEric Joyner {
54677c1fcecSEric Joyner 	STATE_LOCK(ctx);
54777c1fcecSEric Joyner 	ctx->ifc_flags |= IFC_IN_DETACH;
54877c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
54977c1fcecSEric Joyner }
5504c7070dbSScott Long 
5514c7070dbSScott Long /* Our boot-time initialization hook */
5524c7070dbSScott Long static int	iflib_module_event_handler(module_t, int, void *);
5534c7070dbSScott Long 
5544c7070dbSScott Long static moduledata_t iflib_moduledata = {
5554c7070dbSScott Long 	"iflib",
5564c7070dbSScott Long 	iflib_module_event_handler,
5574c7070dbSScott Long 	NULL
5584c7070dbSScott Long };
5594c7070dbSScott Long 
5604c7070dbSScott Long DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
5614c7070dbSScott Long MODULE_VERSION(iflib, 1);
5624c7070dbSScott Long 
5634c7070dbSScott Long MODULE_DEPEND(iflib, pci, 1, 1, 1);
5644c7070dbSScott Long MODULE_DEPEND(iflib, ether, 1, 1, 1);
5654c7070dbSScott Long 
566ab2e3f79SStephen Hurd TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
567ab2e3f79SStephen Hurd TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
568ab2e3f79SStephen Hurd 
5694c7070dbSScott Long #ifndef IFLIB_DEBUG_COUNTERS
5704c7070dbSScott Long #ifdef INVARIANTS
5714c7070dbSScott Long #define IFLIB_DEBUG_COUNTERS 1
5724c7070dbSScott Long #else
5734c7070dbSScott Long #define IFLIB_DEBUG_COUNTERS 0
5744c7070dbSScott Long #endif /* !INVARIANTS */
5754c7070dbSScott Long #endif
5764c7070dbSScott Long 
5777029da5cSPawel Biernacki static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
578ab2e3f79SStephen Hurd     "iflib driver parameters");
579ab2e3f79SStephen Hurd 
5804c7070dbSScott Long /*
5814c7070dbSScott Long  * XXX need to ensure that this can't accidentally cause the head to be moved backwards
5824c7070dbSScott Long  */
5834c7070dbSScott Long static int iflib_min_tx_latency = 0;
5844c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
585da69b8f9SSean Bruno 		   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
58695246abbSSean Bruno static int iflib_no_tx_batch = 0;
58795246abbSSean Bruno SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
58895246abbSSean Bruno 		   &iflib_no_tx_batch, 0, "avoid batching transmits at the possible expense of throughput");
5894c7070dbSScott Long 
5904c7070dbSScott Long 
5914c7070dbSScott Long #if IFLIB_DEBUG_COUNTERS
5924c7070dbSScott Long 
5934c7070dbSScott Long static int iflib_tx_seen;
5944c7070dbSScott Long static int iflib_tx_sent;
5954c7070dbSScott Long static int iflib_tx_encap;
5964c7070dbSScott Long static int iflib_rx_allocs;
5974c7070dbSScott Long static int iflib_fl_refills;
5984c7070dbSScott Long static int iflib_fl_refills_large;
5994c7070dbSScott Long static int iflib_tx_frees;
6004c7070dbSScott Long 
6014c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
6021722eeacSMarius Strobl 		   &iflib_tx_seen, 0, "# TX mbufs seen");
6034c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
6041722eeacSMarius Strobl 		   &iflib_tx_sent, 0, "# TX mbufs sent");
6054c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
6061722eeacSMarius Strobl 		   &iflib_tx_encap, 0, "# TX mbufs encapped");
6074c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
6081722eeacSMarius Strobl 		   &iflib_tx_frees, 0, "# TX frees");
6094c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
6101722eeacSMarius Strobl 		   &iflib_rx_allocs, 0, "# RX allocations");
6114c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
6124c7070dbSScott Long 		   &iflib_fl_refills, 0, "# refills");
6134c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
6144c7070dbSScott Long 		   &iflib_fl_refills_large, 0, "# large refills");
6154c7070dbSScott Long 
6164c7070dbSScott Long 
6174c7070dbSScott Long static int iflib_txq_drain_flushing;
6184c7070dbSScott Long static int iflib_txq_drain_oactive;
6194c7070dbSScott Long static int iflib_txq_drain_notready;
6204c7070dbSScott Long 
6214c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
6224c7070dbSScott Long 		   &iflib_txq_drain_flushing, 0, "# drain flushes");
6234c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
6244c7070dbSScott Long 		   &iflib_txq_drain_oactive, 0, "# drain oactives");
6254c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
6264c7070dbSScott Long 		   &iflib_txq_drain_notready, 0, "# drain notready");
6274c7070dbSScott Long 
6284c7070dbSScott Long 
6294c7070dbSScott Long static int iflib_encap_load_mbuf_fail;
630d14c853bSStephen Hurd static int iflib_encap_pad_mbuf_fail;
6314c7070dbSScott Long static int iflib_encap_txq_avail_fail;
6324c7070dbSScott Long static int iflib_encap_txd_encap_fail;
6334c7070dbSScott Long 
6344c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
6354c7070dbSScott Long 		   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
636d14c853bSStephen Hurd SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
637d14c853bSStephen Hurd 		   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
6384c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
6394c7070dbSScott Long 		   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
6404c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
6414c7070dbSScott Long 		   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
6424c7070dbSScott Long 
6434c7070dbSScott Long static int iflib_task_fn_rxs;
6444c7070dbSScott Long static int iflib_rx_intr_enables;
6454c7070dbSScott Long static int iflib_fast_intrs;
6464c7070dbSScott Long static int iflib_rx_unavail;
6474c7070dbSScott Long static int iflib_rx_ctx_inactive;
6484c7070dbSScott Long static int iflib_rx_if_input;
6494c7070dbSScott Long static int iflib_rxd_flush;
6504c7070dbSScott Long 
6514c7070dbSScott Long static int iflib_verbose_debug;
6524c7070dbSScott Long 
6534c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
6544c7070dbSScott Long 		   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
6554c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
6561722eeacSMarius Strobl 		   &iflib_rx_intr_enables, 0, "# RX intr enables");
6574c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
6584c7070dbSScott Long 		   &iflib_fast_intrs, 0, "# fast_intr calls");
6594c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
6604c7070dbSScott Long 		   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
6614c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
6624c7070dbSScott Long 		   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
6634c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
6644c7070dbSScott Long 		   &iflib_rx_if_input, 0, "# times rxeof called if_input");
6654c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
6664c7070dbSScott Long 	         &iflib_rxd_flush, 0, "# times rxd_flush called");
6674c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
6684c7070dbSScott Long 		   &iflib_verbose_debug, 0, "enable verbose debugging");
6694c7070dbSScott Long 
6704c7070dbSScott Long #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
671da69b8f9SSean Bruno static void
672da69b8f9SSean Bruno iflib_debug_reset(void)
673da69b8f9SSean Bruno {
674da69b8f9SSean Bruno 	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
675da69b8f9SSean Bruno 		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
676da69b8f9SSean Bruno 		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
67764e6fc13SStephen Hurd 		iflib_txq_drain_notready =
678d14c853bSStephen Hurd 		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
679d14c853bSStephen Hurd 		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
680d14c853bSStephen Hurd 		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
68164e6fc13SStephen Hurd 		iflib_rx_unavail =
68264e6fc13SStephen Hurd 		iflib_rx_ctx_inactive = iflib_rx_if_input =
6836d49b41eSAndrew Gallatin 		iflib_rxd_flush = 0;
684da69b8f9SSean Bruno }
6854c7070dbSScott Long 
6864c7070dbSScott Long #else
6874c7070dbSScott Long #define DBG_COUNTER_INC(name)
688da69b8f9SSean Bruno static void iflib_debug_reset(void) {}
6894c7070dbSScott Long #endif
6904c7070dbSScott Long 
6914c7070dbSScott Long #define IFLIB_DEBUG 0
6924c7070dbSScott Long 
6934c7070dbSScott Long static void iflib_tx_structures_free(if_ctx_t ctx);
6944c7070dbSScott Long static void iflib_rx_structures_free(if_ctx_t ctx);
6954c7070dbSScott Long static int iflib_queues_alloc(if_ctx_t ctx);
6964c7070dbSScott Long static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
69795246abbSSean Bruno static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
6984c7070dbSScott Long static int iflib_qset_structures_setup(if_ctx_t ctx);
6994c7070dbSScott Long static int iflib_msix_init(if_ctx_t ctx);
7003e0e6330SStephen Hurd static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
7014c7070dbSScott Long static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
7024c7070dbSScott Long static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
703b8ca4756SPatrick Kelsey #ifdef ALTQ
704b8ca4756SPatrick Kelsey static void iflib_altq_if_start(if_t ifp);
705b8ca4756SPatrick Kelsey static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
706b8ca4756SPatrick Kelsey #endif
7074c7070dbSScott Long static int iflib_register(if_ctx_t);
70856614414SEric Joyner static void iflib_deregister(if_ctx_t);
7091558015eSEric Joyner static void iflib_unregister_vlan_handlers(if_ctx_t ctx);
710b3813609SPatrick Kelsey static uint16_t iflib_get_mbuf_size_for(unsigned int size);
7114c7070dbSScott Long static void iflib_init_locked(if_ctx_t ctx);
7124c7070dbSScott Long static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
7134c7070dbSScott Long static void iflib_add_device_sysctl_post(if_ctx_t ctx);
714da69b8f9SSean Bruno static void iflib_ifmp_purge(iflib_txq_t txq);
7151248952aSSean Bruno static void _iflib_pre_assert(if_softc_ctx_t scctx);
71695246abbSSean Bruno static void iflib_if_init_locked(if_ctx_t ctx);
71777c1fcecSEric Joyner static void iflib_free_intr_mem(if_ctx_t ctx);
71895246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
71995246abbSSean Bruno static struct mbuf * iflib_fixup_rx(struct mbuf *m);
72095246abbSSean Bruno #endif
7214c7070dbSScott Long 
722f154ece0SStephen Hurd static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
723f154ece0SStephen Hurd     SLIST_HEAD_INITIALIZER(cpu_offsets);
724f154ece0SStephen Hurd struct cpu_offset {
725f154ece0SStephen Hurd 	SLIST_ENTRY(cpu_offset) entries;
726f154ece0SStephen Hurd 	cpuset_t	set;
727f154ece0SStephen Hurd 	unsigned int	refcount;
728f154ece0SStephen Hurd 	uint16_t	offset;
729f154ece0SStephen Hurd };
730f154ece0SStephen Hurd static struct mtx cpu_offset_mtx;
731f154ece0SStephen Hurd MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
732f154ece0SStephen Hurd     MTX_DEF);
733f154ece0SStephen Hurd 
7347790c8c1SConrad Meyer DEBUGNET_DEFINE(iflib);
73594618825SMark Johnston 
7364c7070dbSScott Long #ifdef DEV_NETMAP
7374c7070dbSScott Long #include <sys/selinfo.h>
7384c7070dbSScott Long #include <net/netmap.h>
7394c7070dbSScott Long #include <dev/netmap/netmap_kern.h>
7404c7070dbSScott Long 
7414c7070dbSScott Long MODULE_DEPEND(iflib, netmap, 1, 1, 1);
7424c7070dbSScott Long 
7432d873474SStephen Hurd static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);
7442d873474SStephen Hurd 
7454c7070dbSScott Long /*
7464c7070dbSScott Long  * device-specific sysctl variables:
7474c7070dbSScott Long  *
74891d546a0SConrad Meyer  * iflib_crcstrip: 1: strip CRC on RX frames (default), 0: keep it.
7494c7070dbSScott Long  *	During regular operations the CRC is stripped, but on some
7504c7070dbSScott Long  *	hardware reception of frames whose length is not a multiple of 64
7514c7070dbSScott Long  *	is slower, so using crcstrip=0 helps in benchmarks.
7524c7070dbSScott Long  *
75391d546a0SConrad Meyer  * iflib_rx_miss, iflib_rx_miss_bufs:
7544c7070dbSScott Long  *	count packets that might be missed due to lost interrupts.
7554c7070dbSScott Long  */
7564c7070dbSScott Long SYSCTL_DECL(_dev_netmap);
7574c7070dbSScott Long /*
7584c7070dbSScott Long  * The xl driver by default strips CRCs and we do not override it.
7594c7070dbSScott Long  */
7604c7070dbSScott Long 
7614c7070dbSScott Long int iflib_crcstrip = 1;
7624c7070dbSScott Long SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
7631722eeacSMarius Strobl     CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames");
7644c7070dbSScott Long 
7654c7070dbSScott Long int iflib_rx_miss, iflib_rx_miss_bufs;
7664c7070dbSScott Long SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
7671722eeacSMarius Strobl     CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr");
76891d546a0SConrad Meyer SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
7691722eeacSMarius Strobl     CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs");
7704c7070dbSScott Long 
7714c7070dbSScott Long /*
7724c7070dbSScott Long  * Register/unregister. We are already under netmap lock.
7734c7070dbSScott Long  * Only called on the first register or the last unregister.
7744c7070dbSScott Long  */
7754c7070dbSScott Long static int
7764c7070dbSScott Long iflib_netmap_register(struct netmap_adapter *na, int onoff)
7774c7070dbSScott Long {
7781722eeacSMarius Strobl 	if_t ifp = na->ifp;
7794c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
78095246abbSSean Bruno 	int status;
7814c7070dbSScott Long 
7824c7070dbSScott Long 	CTX_LOCK(ctx);
7834c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
7844c7070dbSScott Long 
7854c7070dbSScott Long 	/* Tell the stack that the interface is no longer active */
7864c7070dbSScott Long 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
7874c7070dbSScott Long 
7884c7070dbSScott Long 	if (!CTX_IS_VF(ctx))
7891248952aSSean Bruno 		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
7904c7070dbSScott Long 
7914c7070dbSScott Long 	/* enable or disable flags and callbacks in na and ifp */
7924c7070dbSScott Long 	if (onoff) {
7934c7070dbSScott Long 		nm_set_native_flags(na);
7944c7070dbSScott Long 	} else {
7954c7070dbSScott Long 		nm_clear_native_flags(na);
7964c7070dbSScott Long 	}
79795246abbSSean Bruno 	iflib_stop(ctx);
79895246abbSSean Bruno 	iflib_init_locked(ctx);
7991248952aSSean Bruno 	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
80095246abbSSean Bruno 	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
80195246abbSSean Bruno 	if (status)
80295246abbSSean Bruno 		nm_clear_native_flags(na);
8034c7070dbSScott Long 	CTX_UNLOCK(ctx);
80495246abbSSean Bruno 	return (status);
8054c7070dbSScott Long }
8064c7070dbSScott Long 
8072d873474SStephen Hurd static int
8082d873474SStephen Hurd netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
8092d873474SStephen Hurd {
8102d873474SStephen Hurd 	struct netmap_adapter *na = kring->na;
8112d873474SStephen Hurd 	u_int const lim = kring->nkr_num_slots - 1;
8122d873474SStephen Hurd 	u_int head = kring->rhead;
8132d873474SStephen Hurd 	struct netmap_ring *ring = kring->ring;
8142d873474SStephen Hurd 	bus_dmamap_t *map;
8152d873474SStephen Hurd 	struct if_rxd_update iru;
8162d873474SStephen Hurd 	if_ctx_t ctx = rxq->ifr_ctx;
8172d873474SStephen Hurd 	iflib_fl_t fl = &rxq->ifr_fl[0];
8182d873474SStephen Hurd 	uint32_t refill_pidx, nic_i;
81964e6fc13SStephen Hurd #if IFLIB_DEBUG_COUNTERS
82064e6fc13SStephen Hurd 	int rf_count = 0;
82164e6fc13SStephen Hurd #endif
8222d873474SStephen Hurd 
8232d873474SStephen Hurd 	if (nm_i == head && __predict_true(!init))
8242d873474SStephen Hurd 		return (0);
8252d873474SStephen Hurd 	iru_init(&iru, rxq, 0 /* flid */);
8262d873474SStephen Hurd 	map = fl->ifl_sds.ifsd_map;
8272d873474SStephen Hurd 	refill_pidx = netmap_idx_k2n(kring, nm_i);
8282d873474SStephen Hurd 	/*
8292d873474SStephen Hurd 	 * IMPORTANT: we must leave one free slot in the ring,
8302d873474SStephen Hurd 	 * so move head back by one unit
8312d873474SStephen Hurd 	 */
8322d873474SStephen Hurd 	head = nm_prev(head, lim);
8331ae4848cSMatt Macy 	nic_i = UINT_MAX;
83464e6fc13SStephen Hurd 	DBG_COUNTER_INC(fl_refills);
8352d873474SStephen Hurd 	while (nm_i != head) {
83664e6fc13SStephen Hurd #if IFLIB_DEBUG_COUNTERS
83764e6fc13SStephen Hurd 		if (++rf_count == 9)
83864e6fc13SStephen Hurd 			DBG_COUNTER_INC(fl_refills_large);
83964e6fc13SStephen Hurd #endif
8402d873474SStephen Hurd 		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
8412d873474SStephen Hurd 			struct netmap_slot *slot = &ring->slot[nm_i];
8422d873474SStephen Hurd 			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
8432d873474SStephen Hurd 			uint32_t nic_i_dma = refill_pidx;
8442d873474SStephen Hurd 			nic_i = netmap_idx_k2n(kring, nm_i);
8452d873474SStephen Hurd 
8462d873474SStephen Hurd 			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
8472d873474SStephen Hurd 
8482d873474SStephen Hurd 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
8492d873474SStephen Hurd 			        return netmap_ring_reinit(kring);
8502d873474SStephen Hurd 
8512d873474SStephen Hurd 			fl->ifl_vm_addrs[tmp_pidx] = addr;
85295dcf343SMarius Strobl 			if (__predict_false(init)) {
85395dcf343SMarius Strobl 				netmap_load_map(na, fl->ifl_buf_tag,
85495dcf343SMarius Strobl 				    map[nic_i], addr);
85595dcf343SMarius Strobl 			} else if (slot->flags & NS_BUF_CHANGED) {
8562d873474SStephen Hurd 				/* buffer has changed, reload map */
85795dcf343SMarius Strobl 				netmap_reload_map(na, fl->ifl_buf_tag,
85895dcf343SMarius Strobl 				    map[nic_i], addr);
8592d873474SStephen Hurd 			}
8602d873474SStephen Hurd 			slot->flags &= ~NS_BUF_CHANGED;
8612d873474SStephen Hurd 
8622d873474SStephen Hurd 			nm_i = nm_next(nm_i, lim);
8632d873474SStephen Hurd 			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
8642d873474SStephen Hurd 			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
8652d873474SStephen Hurd 				continue;
8662d873474SStephen Hurd 
8672d873474SStephen Hurd 			iru.iru_pidx = refill_pidx;
8682d873474SStephen Hurd 			iru.iru_count = tmp_pidx+1;
8692d873474SStephen Hurd 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
8702d873474SStephen Hurd 			refill_pidx = nic_i;
8712d873474SStephen Hurd 			for (int n = 0; n < iru.iru_count; n++) {
87295dcf343SMarius Strobl 				bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i_dma],
8732d873474SStephen Hurd 						BUS_DMASYNC_PREREAD);
8742d873474SStephen Hurd 				/* XXX - change this to not use the netmap func*/
8752d873474SStephen Hurd 				nic_i_dma = nm_next(nic_i_dma, lim);
8762d873474SStephen Hurd 			}
8772d873474SStephen Hurd 		}
8782d873474SStephen Hurd 	}
8792d873474SStephen Hurd 	kring->nr_hwcur = head;
8802d873474SStephen Hurd 
8812d873474SStephen Hurd 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
8822d873474SStephen Hurd 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
88364e6fc13SStephen Hurd 	if (__predict_true(nic_i != UINT_MAX)) {
8842d873474SStephen Hurd 		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
88564e6fc13SStephen Hurd 		DBG_COUNTER_INC(rxd_flush);
88664e6fc13SStephen Hurd 	}
8872d873474SStephen Hurd 	return (0);
8882d873474SStephen Hurd }
8892d873474SStephen Hurd 
8904c7070dbSScott Long /*
8914c7070dbSScott Long  * Reconcile kernel and user view of the transmit ring.
8924c7070dbSScott Long  *
8934c7070dbSScott Long  * All information is in the kring.
8944c7070dbSScott Long  * Userspace wants to send packets up to the one before kring->rhead,
8954c7070dbSScott Long  * kernel knows kring->nr_hwcur is the first unsent packet.
8964c7070dbSScott Long  *
8974c7070dbSScott Long  * Here we push packets out (as many as possible), and possibly
8984c7070dbSScott Long  * reclaim buffers from previously completed transmission.
8994c7070dbSScott Long  *
9004c7070dbSScott Long  * The caller (netmap) guarantees that there is only one instance
9014c7070dbSScott Long  * running at any time. Any interference with other driver
9024c7070dbSScott Long  * methods should be handled by the individual drivers.
9034c7070dbSScott Long  */
9044c7070dbSScott Long static int
9054c7070dbSScott Long iflib_netmap_txsync(struct netmap_kring *kring, int flags)
9064c7070dbSScott Long {
9074c7070dbSScott Long 	struct netmap_adapter *na = kring->na;
9081722eeacSMarius Strobl 	if_t ifp = na->ifp;
9094c7070dbSScott Long 	struct netmap_ring *ring = kring->ring;
910dd7fbcf1SStephen Hurd 	u_int nm_i;	/* index into the netmap kring */
9114c7070dbSScott Long 	u_int nic_i;	/* index into the NIC ring */
9124c7070dbSScott Long 	u_int n;
9134c7070dbSScott Long 	u_int const lim = kring->nkr_num_slots - 1;
9144c7070dbSScott Long 	u_int const head = kring->rhead;
9154c7070dbSScott Long 	struct if_pkt_info pi;
9164c7070dbSScott Long 
9174c7070dbSScott Long 	/*
9184c7070dbSScott Long 	 * interrupts on every tx packet are expensive so request
9194c7070dbSScott Long 	 * them every half ring, or where NS_REPORT is set
9204c7070dbSScott Long 	 */
9214c7070dbSScott Long 	u_int report_frequency = kring->nkr_num_slots >> 1;
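	/*
	 * Example (editorial): with a 2048-slot ring, report_frequency is
	 * 1024, so IPI_TX_INTR is requested at slot 0 and slot 1024 on each
	 * pass over the ring (and whenever NS_REPORT is set on a slot).
	 */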
9224c7070dbSScott Long 	/* device-specific */
9234c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
9244c7070dbSScott Long 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
9254c7070dbSScott Long 
92695dcf343SMarius Strobl 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
9274c7070dbSScott Long 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9284c7070dbSScott Long 
9294c7070dbSScott Long 	/*
9304c7070dbSScott Long 	 * First part: process new packets to send.
931dd7fbcf1SStephen Hurd 	 * nm_i is the current index in the netmap kring,
9324c7070dbSScott Long 	 * nic_i is the corresponding index in the NIC ring.
9334c7070dbSScott Long 	 *
9344c7070dbSScott Long 	 * If we have packets to send (nm_i != head)
9354c7070dbSScott Long 	 * iterate over the netmap ring, fetch length and update
9364c7070dbSScott Long 	 * the corresponding slot in the NIC ring. Some drivers also
9374c7070dbSScott Long 	 * need to update the buffer's physical address in the NIC slot
9384c7070dbSScott Long 	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
9394c7070dbSScott Long 	 *
9404c7070dbSScott Long 	 * The netmap_reload_map() call is especially expensive,
9414c7070dbSScott Long 	 * even when (as in this case) the tag is 0, so only do it
9424c7070dbSScott Long 	 * when the buffer has actually changed.
9434c7070dbSScott Long 	 *
9444c7070dbSScott Long 	 * If possible do not set the report/intr bit on all slots,
9454c7070dbSScott Long 	 * but only a few times per ring or when NS_REPORT is set.
9464c7070dbSScott Long 	 *
9474c7070dbSScott Long 	 * Finally, on 10G and faster drivers, it might be useful
9484c7070dbSScott Long 	 * to prefetch the next slot and txr entry.
9494c7070dbSScott Long 	 */
9504c7070dbSScott Long 
951dd7fbcf1SStephen Hurd 	nm_i = kring->nr_hwcur;
9525ee36c68SStephen Hurd 	if (nm_i != head) {	/* we have new packets to send */
95395246abbSSean Bruno 		pkt_info_zero(&pi);
95495246abbSSean Bruno 		pi.ipi_segs = txq->ift_segs;
95595246abbSSean Bruno 		pi.ipi_qsidx = kring->ring_id;
9564c7070dbSScott Long 		nic_i = netmap_idx_k2n(kring, nm_i);
9574c7070dbSScott Long 
9584c7070dbSScott Long 		__builtin_prefetch(&ring->slot[nm_i]);
9594c7070dbSScott Long 		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
9604c7070dbSScott Long 		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
9614c7070dbSScott Long 
9624c7070dbSScott Long 		for (n = 0; nm_i != head; n++) {
9634c7070dbSScott Long 			struct netmap_slot *slot = &ring->slot[nm_i];
9644c7070dbSScott Long 			u_int len = slot->len;
9650a1b74a3SSean Bruno 			uint64_t paddr;
9664c7070dbSScott Long 			void *addr = PNMB(na, slot, &paddr);
9674c7070dbSScott Long 			int flags = (slot->flags & NS_REPORT ||
9684c7070dbSScott Long 				nic_i == 0 || nic_i == report_frequency) ?
9694c7070dbSScott Long 				IPI_TX_INTR : 0;
9704c7070dbSScott Long 
9714c7070dbSScott Long 			/* device-specific */
97295246abbSSean Bruno 			pi.ipi_len = len;
97395246abbSSean Bruno 			pi.ipi_segs[0].ds_addr = paddr;
97495246abbSSean Bruno 			pi.ipi_segs[0].ds_len = len;
97595246abbSSean Bruno 			pi.ipi_nsegs = 1;
97695246abbSSean Bruno 			pi.ipi_ndescs = 0;
9774c7070dbSScott Long 			pi.ipi_pidx = nic_i;
9784c7070dbSScott Long 			pi.ipi_flags = flags;
9794c7070dbSScott Long 
9804c7070dbSScott Long 			/* Fill the slot in the NIC ring. */
9814c7070dbSScott Long 			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
98264e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_encap);
9834c7070dbSScott Long 
9844c7070dbSScott Long 			/* prefetch for next round */
9854c7070dbSScott Long 			__builtin_prefetch(&ring->slot[nm_i + 1]);
9864c7070dbSScott Long 			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
9874c7070dbSScott Long 			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
9884c7070dbSScott Long 
9894c7070dbSScott Long 			NM_CHECK_ADDR_LEN(na, addr, len);
9904c7070dbSScott Long 
9914c7070dbSScott Long 			if (slot->flags & NS_BUF_CHANGED) {
9924c7070dbSScott Long 				/* buffer has changed, reload map */
993bfce461eSMarius Strobl 				netmap_reload_map(na, txq->ift_buf_tag,
994bfce461eSMarius Strobl 				    txq->ift_sds.ifsd_map[nic_i], addr);
9954c7070dbSScott Long 			}
9964c7070dbSScott Long 			/* make sure changes to the buffer are synced */
99795dcf343SMarius Strobl 			bus_dmamap_sync(txq->ift_buf_tag,
99895dcf343SMarius Strobl 			    txq->ift_sds.ifsd_map[nic_i],
9994c7070dbSScott Long 			    BUS_DMASYNC_PREWRITE);
100095dcf343SMarius Strobl 
100195246abbSSean Bruno 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
10024c7070dbSScott Long 			nm_i = nm_next(nm_i, lim);
10034c7070dbSScott Long 			nic_i = nm_next(nic_i, lim);
10044c7070dbSScott Long 		}
1005dd7fbcf1SStephen Hurd 		kring->nr_hwcur = nm_i;
10064c7070dbSScott Long 
10074c7070dbSScott Long 		/* synchronize the NIC ring */
100895dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
10094c7070dbSScott Long 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10104c7070dbSScott Long 
10114c7070dbSScott Long 		/* (re)start the tx unit up to slot nic_i (excluded) */
10124c7070dbSScott Long 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
10134c7070dbSScott Long 	}
10144c7070dbSScott Long 
10154c7070dbSScott Long 	/*
10164c7070dbSScott Long 	 * Second part: reclaim buffers for completed transmissions.
10175ee36c68SStephen Hurd 	 *
10185ee36c68SStephen Hurd 	 * If there are unclaimed buffers, attempt to reclaim them.
10195ee36c68SStephen Hurd 	 * If none are reclaimed, and TX IRQs are not in use, do an initial
10205ee36c68SStephen Hurd 	 * minimal delay, then trigger the tx handler which will spin in the
10215ee36c68SStephen Hurd 	 * group task queue.
10224c7070dbSScott Long 	 */
1023dd7fbcf1SStephen Hurd 	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
10244c7070dbSScott Long 		if (iflib_tx_credits_update(ctx, txq)) {
10254c7070dbSScott Long 			/* some tx completed, increment avail */
10264c7070dbSScott Long 			nic_i = txq->ift_cidx_processed;
10274c7070dbSScott Long 			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
10284c7070dbSScott Long 		}
10295ee36c68SStephen Hurd 	}
1030dd7fbcf1SStephen Hurd 	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
1031dd7fbcf1SStephen Hurd 		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1032dd7fbcf1SStephen Hurd 			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
1033dd7fbcf1SStephen Hurd 			    iflib_timer, txq, txq->ift_timer.c_cpu);
10345ee36c68SStephen Hurd 		}
10354c7070dbSScott Long 	return (0);
10364c7070dbSScott Long }
10374c7070dbSScott Long 
10384c7070dbSScott Long /*
10394c7070dbSScott Long  * Reconcile kernel and user view of the receive ring.
10404c7070dbSScott Long  * Same as for the txsync, this routine must be efficient.
10414c7070dbSScott Long  * The caller guarantees a single invocation, but races against
10424c7070dbSScott Long  * the rest of the driver should be handled here.
10434c7070dbSScott Long  *
10444c7070dbSScott Long  * On call, kring->rhead is the first packet that userspace wants
10454c7070dbSScott Long  * to keep, and kring->rcur is the wakeup point.
10464c7070dbSScott Long  * The kernel has previously reported packets up to kring->rtail.
10474c7070dbSScott Long  *
10484c7070dbSScott Long  * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
10494c7070dbSScott Long  * of whether or not we received an interrupt.
10504c7070dbSScott Long  */
10514c7070dbSScott Long static int
10524c7070dbSScott Long iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
10534c7070dbSScott Long {
10544c7070dbSScott Long 	struct netmap_adapter *na = kring->na;
10554c7070dbSScott Long 	struct netmap_ring *ring = kring->ring;
10561722eeacSMarius Strobl 	if_t ifp = na->ifp;
105795dcf343SMarius Strobl 	iflib_fl_t fl;
105895246abbSSean Bruno 	uint32_t nm_i;	/* index into the netmap ring */
10592d873474SStephen Hurd 	uint32_t nic_i;	/* index into the NIC ring */
10604c7070dbSScott Long 	u_int i, n;
10614c7070dbSScott Long 	u_int const lim = kring->nkr_num_slots - 1;
1062dd7fbcf1SStephen Hurd 	u_int const head = kring->rhead;
10634c7070dbSScott Long 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1064ab2e3f79SStephen Hurd 	struct if_rxd_info ri;
106595246abbSSean Bruno 
10664c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
10674c7070dbSScott Long 	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
10684c7070dbSScott Long 	if (head > lim)
10694c7070dbSScott Long 		return netmap_ring_reinit(kring);
10704c7070dbSScott Long 
107195dcf343SMarius Strobl 	/*
107295dcf343SMarius Strobl 	 * XXX netmap_fl_refill() only ever (re)fills free list 0 so far.
107395dcf343SMarius Strobl 	 */
107495dcf343SMarius Strobl 
107595246abbSSean Bruno 	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
107695dcf343SMarius Strobl 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
10774c7070dbSScott Long 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
107895246abbSSean Bruno 	}
107995dcf343SMarius Strobl 
10804c7070dbSScott Long 	/*
10814c7070dbSScott Long 	 * First part: import newly received packets.
10824c7070dbSScott Long 	 *
10834c7070dbSScott Long 	 * nm_i is the index of the next free slot in the netmap ring,
10844c7070dbSScott Long 	 * nic_i is the index of the next received packet in the NIC ring,
10854c7070dbSScott Long 	 * and they may differ in case if_init() has been called while
10864c7070dbSScott Long 	 * in netmap mode. For the receive ring we have
10874c7070dbSScott Long 	 *
10884c7070dbSScott Long 	 *	nic_i = rxr->next_check;
10894c7070dbSScott Long 	 *	nm_i = kring->nr_hwtail (previous)
10904c7070dbSScott Long 	 * and
10914c7070dbSScott Long 	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
10924c7070dbSScott Long 	 *
10934c7070dbSScott Long 	 * rxr->next_check is set to 0 on a ring reinit
10944c7070dbSScott Long 	 */
10954c7070dbSScott Long 	if (netmap_no_pendintr || force_update) {
10964c7070dbSScott Long 		int crclen = iflib_crcstrip ? 0 : 4;
10974c7070dbSScott Long 		int error, avail;
10984c7070dbSScott Long 
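		/*
		 * Walk every free list of this queue.  When CRC stripping is
		 * disabled, the length reported by isc_rxd_pkt_get() still
		 * includes the 4-byte FCS, so crclen is subtracted below.
		 */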
10992d873474SStephen Hurd 		for (i = 0; i < rxq->ifr_nfl; i++) {
11002d873474SStephen Hurd 			fl = &rxq->ifr_fl[i];
11014c7070dbSScott Long 			nic_i = fl->ifl_cidx;
11024c7070dbSScott Long 			nm_i = netmap_idx_n2k(kring, nic_i);
110395dcf343SMarius Strobl 			avail = ctx->isc_rxd_available(ctx->ifc_softc,
110495dcf343SMarius Strobl 			    rxq->ifr_id, nic_i, USHRT_MAX);
11054c7070dbSScott Long 			for (n = 0; avail > 0; n++, avail--) {
1106ab2e3f79SStephen Hurd 				rxd_info_zero(&ri);
1107ab2e3f79SStephen Hurd 				ri.iri_frags = rxq->ifr_frags;
1108ab2e3f79SStephen Hurd 				ri.iri_qsidx = kring->ring_id;
1109ab2e3f79SStephen Hurd 				ri.iri_ifp = ctx->ifc_ifp;
1110ab2e3f79SStephen Hurd 				ri.iri_cidx = nic_i;
111195246abbSSean Bruno 
1112ab2e3f79SStephen Hurd 				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1113ab2e3f79SStephen Hurd 				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
11147cb7c6e3SNavdeep Parhar 				ring->slot[nm_i].flags = 0;
111595dcf343SMarius Strobl 				bus_dmamap_sync(fl->ifl_buf_tag,
1116e035717eSSean Bruno 				    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
11174c7070dbSScott Long 				nm_i = nm_next(nm_i, lim);
11184c7070dbSScott Long 				nic_i = nm_next(nic_i, lim);
11194c7070dbSScott Long 			}
11204c7070dbSScott Long 			if (n) { /* update the state variables */
11214c7070dbSScott Long 				if (netmap_no_pendintr && !force_update) {
11224c7070dbSScott Long 					/* diagnostics */
11234c7070dbSScott Long 					iflib_rx_miss++;
11244c7070dbSScott Long 					iflib_rx_miss_bufs += n;
11254c7070dbSScott Long 				}
11264c7070dbSScott Long 				fl->ifl_cidx = nic_i;
1127dd7fbcf1SStephen Hurd 				kring->nr_hwtail = nm_i;
11284c7070dbSScott Long 			}
11294c7070dbSScott Long 			kring->nr_kflags &= ~NKR_PENDINTR;
11304c7070dbSScott Long 		}
11314c7070dbSScott Long 	}
11324c7070dbSScott Long 	/*
11334c7070dbSScott Long 	 * Second part: skip past packets that userspace has released.
11344c7070dbSScott Long 	 * (kring->nr_hwcur to head excluded),
11354c7070dbSScott Long 	 * and make the buffers available for reception.
11364c7070dbSScott Long 	 * As usual nm_i is the index in the netmap ring,
11374c7070dbSScott Long 	 * nic_i is the index in the NIC ring, and
11384c7070dbSScott Long 	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
11394c7070dbSScott Long 	 */
11404c7070dbSScott Long 	/* XXX not sure how this will work with multiple free lists */
1141dd7fbcf1SStephen Hurd 	nm_i = kring->nr_hwcur;
114295246abbSSean Bruno 
11432d873474SStephen Hurd 	return (netmap_fl_refill(rxq, kring, nm_i, false));
11444c7070dbSScott Long }
11454c7070dbSScott Long 
114695246abbSSean Bruno static void
114795246abbSSean Bruno iflib_netmap_intr(struct netmap_adapter *na, int onoff)
114895246abbSSean Bruno {
11491722eeacSMarius Strobl 	if_ctx_t ctx = na->ifp->if_softc;
115095246abbSSean Bruno 
1151ab2e3f79SStephen Hurd 	CTX_LOCK(ctx);
115295246abbSSean Bruno 	if (onoff) {
115395246abbSSean Bruno 		IFDI_INTR_ENABLE(ctx);
115495246abbSSean Bruno 	} else {
115595246abbSSean Bruno 		IFDI_INTR_DISABLE(ctx);
115695246abbSSean Bruno 	}
1157ab2e3f79SStephen Hurd 	CTX_UNLOCK(ctx);
115895246abbSSean Bruno }
115995246abbSSean Bruno 
116095246abbSSean Bruno 
11614c7070dbSScott Long static int
11624c7070dbSScott Long iflib_netmap_attach(if_ctx_t ctx)
11634c7070dbSScott Long {
11644c7070dbSScott Long 	struct netmap_adapter na;
116523ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
11664c7070dbSScott Long 
11674c7070dbSScott Long 	bzero(&na, sizeof(na));
11684c7070dbSScott Long 
11694c7070dbSScott Long 	na.ifp = ctx->ifc_ifp;
11704c7070dbSScott Long 	na.na_flags = NAF_BDG_MAYSLEEP;
11714c7070dbSScott Long 	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
11724c7070dbSScott Long 	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
11734c7070dbSScott Long 
117423ac9029SStephen Hurd 	na.num_tx_desc = scctx->isc_ntxd[0];
117523ac9029SStephen Hurd 	na.num_rx_desc = scctx->isc_nrxd[0];
11764c7070dbSScott Long 	na.nm_txsync = iflib_netmap_txsync;
11774c7070dbSScott Long 	na.nm_rxsync = iflib_netmap_rxsync;
11784c7070dbSScott Long 	na.nm_register = iflib_netmap_register;
117995246abbSSean Bruno 	na.nm_intr = iflib_netmap_intr;
11804c7070dbSScott Long 	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
11814c7070dbSScott Long 	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
11824c7070dbSScott Long 	return (netmap_attach(&na));
11834c7070dbSScott Long }
11844c7070dbSScott Long 
11854c7070dbSScott Long static void
11864c7070dbSScott Long iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
11874c7070dbSScott Long {
11884c7070dbSScott Long 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
11894c7070dbSScott Long 	struct netmap_slot *slot;
11904c7070dbSScott Long 
11914c7070dbSScott Long 	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1192e099b90bSPedro F. Giffuni 	if (slot == NULL)
11934c7070dbSScott Long 		return;
119423ac9029SStephen Hurd 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
11964c7070dbSScott Long 		/*
11974c7070dbSScott Long 		 * In netmap mode, set the map for the packet buffer.
11984c7070dbSScott Long 		 * NOTE: Some drivers (not this one) also need to set
11994c7070dbSScott Long 		 * the physical buffer address in the NIC ring.
12004c7070dbSScott Long 		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
12014c7070dbSScott Long 		 * netmap slot index, si
12024c7070dbSScott Long 		 */
12032ff91c17SVincenzo Maffione 		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
1204bfce461eSMarius Strobl 		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
1205bfce461eSMarius Strobl 		    NMB(na, slot + si));
12064c7070dbSScott Long 	}
12074c7070dbSScott Long }
12082d873474SStephen Hurd 
12094c7070dbSScott Long static void
12104c7070dbSScott Long iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
12114c7070dbSScott Long {
12124c7070dbSScott Long 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
12132ff91c17SVincenzo Maffione 	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
12144c7070dbSScott Long 	struct netmap_slot *slot;
12152d873474SStephen Hurd 	uint32_t nm_i;
12164c7070dbSScott Long 
12174c7070dbSScott Long 	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1218e099b90bSPedro F. Giffuni 	if (slot == NULL)
12194c7070dbSScott Long 		return;
12202d873474SStephen Hurd 	nm_i = netmap_idx_n2k(kring, 0);
12212d873474SStephen Hurd 	netmap_fl_refill(rxq, kring, nm_i, true);
12224c7070dbSScott Long }
12234c7070dbSScott Long 
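/*
 * Timer-path helper while in netmap mode: reclaim any completed TX slots
 * and, if the driver lacks a TX completion interrupt, ask the caller (via
 * *reset_on) to rearm the queue timer at roughly one millisecond.
 */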
1224dd7fbcf1SStephen Hurd static void
122595dcf343SMarius Strobl iflib_netmap_timer_adjust(if_ctx_t ctx, iflib_txq_t txq, uint32_t *reset_on)
1226dd7fbcf1SStephen Hurd {
1227dd7fbcf1SStephen Hurd 	struct netmap_kring *kring;
122895dcf343SMarius Strobl 	uint16_t txqid;
1229dd7fbcf1SStephen Hurd 
123095dcf343SMarius Strobl 	txqid = txq->ift_id;
1231dd7fbcf1SStephen Hurd 	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];
1232dd7fbcf1SStephen Hurd 
1233dd7fbcf1SStephen Hurd 	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
123495dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
123595dcf343SMarius Strobl 		    BUS_DMASYNC_POSTREAD);
1236dd7fbcf1SStephen Hurd 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
1237dd7fbcf1SStephen Hurd 			netmap_tx_irq(ctx->ifc_ifp, txqid);
1238dd7fbcf1SStephen Hurd 		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
1239dd7fbcf1SStephen Hurd 			if (hz < 2000)
1240dd7fbcf1SStephen Hurd 				*reset_on = 1;
1241dd7fbcf1SStephen Hurd 			else
1242dd7fbcf1SStephen Hurd 				*reset_on = hz / 1000;
1243dd7fbcf1SStephen Hurd 		}
1244dd7fbcf1SStephen Hurd 	}
1245dd7fbcf1SStephen Hurd }
1246dd7fbcf1SStephen Hurd 
12474c7070dbSScott Long #define iflib_netmap_detach(ifp) netmap_detach(ifp)
12484c7070dbSScott Long 
12494c7070dbSScott Long #else
12504c7070dbSScott Long #define iflib_netmap_txq_init(ctx, txq)
12514c7070dbSScott Long #define iflib_netmap_rxq_init(ctx, rxq)
12524c7070dbSScott Long #define iflib_netmap_detach(ifp)
12534c7070dbSScott Long 
12544c7070dbSScott Long #define iflib_netmap_attach(ctx) (0)
12554c7070dbSScott Long #define netmap_rx_irq(ifp, qid, budget) (0)
125695246abbSSean Bruno #define netmap_tx_irq(ifp, qid) do {} while (0)
125795dcf343SMarius Strobl #define iflib_netmap_timer_adjust(ctx, txq, reset_on)
12584c7070dbSScott Long #endif
12594c7070dbSScott Long 
12604c7070dbSScott Long #if defined(__i386__) || defined(__amd64__)
12614c7070dbSScott Long static __inline void
12624c7070dbSScott Long prefetch(void *x)
12634c7070dbSScott Long {
12644c7070dbSScott Long 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
12654c7070dbSScott Long }
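/*
 * Prefetch the start of the object and, when cache lines are smaller than
 * 128 bytes, the following cache line as well, so structures spanning up
 * to two lines are covered by a single call.
 */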
12663429c02fSStephen Hurd static __inline void
12673429c02fSStephen Hurd prefetch2cachelines(void *x)
12683429c02fSStephen Hurd {
12693429c02fSStephen Hurd 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
12703429c02fSStephen Hurd #if (CACHE_LINE_SIZE < 128)
12713429c02fSStephen Hurd 	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
12723429c02fSStephen Hurd #endif
12733429c02fSStephen Hurd }
12744c7070dbSScott Long #else
12754c7070dbSScott Long #define prefetch(x)
12763429c02fSStephen Hurd #define prefetch2cachelines(x)
12774c7070dbSScott Long #endif
12784c7070dbSScott Long 
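/*
 * Point an if_rxd_update at the free list's refill arrays so the driver's
 * isc_rxd_refill() callback can consume them directly.
 */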
12794c7070dbSScott Long static void
128010e0d938SStephen Hurd iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
128110e0d938SStephen Hurd {
128210e0d938SStephen Hurd 	iflib_fl_t fl;
128310e0d938SStephen Hurd 
128410e0d938SStephen Hurd 	fl = &rxq->ifr_fl[flid];
128510e0d938SStephen Hurd 	iru->iru_paddrs = fl->ifl_bus_addrs;
128610e0d938SStephen Hurd 	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
128710e0d938SStephen Hurd 	iru->iru_idxs = fl->ifl_rxd_idxs;
128810e0d938SStephen Hurd 	iru->iru_qsidx = rxq->ifr_id;
128910e0d938SStephen Hurd 	iru->iru_buf_size = fl->ifl_buf_size;
129010e0d938SStephen Hurd 	iru->iru_flidx = fl->ifl_id;
129110e0d938SStephen Hurd }
129210e0d938SStephen Hurd 
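/* bus_dmamap_load() callback: record the bus address of the single segment. */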
129310e0d938SStephen Hurd static void
12944c7070dbSScott Long _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
12954c7070dbSScott Long {
12964c7070dbSScott Long 	if (err)
12974c7070dbSScott Long 		return;
12984c7070dbSScott Long 	*(bus_addr_t *) arg = segs[0].ds_addr;
12994c7070dbSScott Long }
13004c7070dbSScott Long 
13014c7070dbSScott Long int
13028f82136aSPatrick Kelsey iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
13034c7070dbSScott Long {
13044c7070dbSScott Long 	int err;
13054c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
13064c7070dbSScott Long 
13074c7070dbSScott Long 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
13088f82136aSPatrick Kelsey 				align, 0,		/* alignment, bounds */
13094c7070dbSScott Long 				BUS_SPACE_MAXADDR,	/* lowaddr */
13104c7070dbSScott Long 				BUS_SPACE_MAXADDR,	/* highaddr */
13114c7070dbSScott Long 				NULL, NULL,		/* filter, filterarg */
13124c7070dbSScott Long 				size,			/* maxsize */
13134c7070dbSScott Long 				1,			/* nsegments */
13144c7070dbSScott Long 				size,			/* maxsegsize */
13154c7070dbSScott Long 				BUS_DMA_ALLOCNOW,	/* flags */
13164c7070dbSScott Long 				NULL,			/* lockfunc */
13174c7070dbSScott Long 				NULL,			/* lockarg */
13184c7070dbSScott Long 				&dma->idi_tag);
13194c7070dbSScott Long 	if (err) {
13204c7070dbSScott Long 		device_printf(dev,
13214c7070dbSScott Long 		    "%s: bus_dma_tag_create failed: %d\n",
13224c7070dbSScott Long 		    __func__, err);
13234c7070dbSScott Long 		goto fail_0;
13244c7070dbSScott Long 	}
13254c7070dbSScott Long 
13264c7070dbSScott Long 	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
13274c7070dbSScott Long 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
13284c7070dbSScott Long 	if (err) {
13294c7070dbSScott Long 		device_printf(dev,
13304c7070dbSScott Long 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
13314c7070dbSScott Long 		    __func__, (uintmax_t)size, err);
13324c7070dbSScott Long 		goto fail_1;
13334c7070dbSScott Long 	}
13344c7070dbSScott Long 
13354c7070dbSScott Long 	dma->idi_paddr = IF_BAD_DMA;
13364c7070dbSScott Long 	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
13374c7070dbSScott Long 	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
13384c7070dbSScott Long 	if (err || dma->idi_paddr == IF_BAD_DMA) {
13394c7070dbSScott Long 		device_printf(dev,
13404c7070dbSScott Long 		    "%s: bus_dmamap_load failed: %d\n",
13414c7070dbSScott Long 		    __func__, err);
13424c7070dbSScott Long 		goto fail_2;
13434c7070dbSScott Long 	}
13444c7070dbSScott Long 
13454c7070dbSScott Long 	dma->idi_size = size;
13464c7070dbSScott Long 	return (0);
13474c7070dbSScott Long 
13484c7070dbSScott Long fail_2:
13494c7070dbSScott Long 	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
13504c7070dbSScott Long fail_1:
13514c7070dbSScott Long 	bus_dma_tag_destroy(dma->idi_tag);
13524c7070dbSScott Long fail_0:
13534c7070dbSScott Long 	dma->idi_tag = NULL;
13544c7070dbSScott Long 
13554c7070dbSScott Long 	return (err);
13564c7070dbSScott Long }
13574c7070dbSScott Long 
13584c7070dbSScott Long int
13598f82136aSPatrick Kelsey iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
13608f82136aSPatrick Kelsey {
13618f82136aSPatrick Kelsey 	if_shared_ctx_t sctx = ctx->ifc_sctx;
13628f82136aSPatrick Kelsey 
13638f82136aSPatrick Kelsey 	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
13648f82136aSPatrick Kelsey 
13658f82136aSPatrick Kelsey 	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
13668f82136aSPatrick Kelsey }
13678f82136aSPatrick Kelsey 
13688f82136aSPatrick Kelsey int
13694c7070dbSScott Long iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
13704c7070dbSScott Long {
13714c7070dbSScott Long 	int i, err;
13724c7070dbSScott Long 	iflib_dma_info_t *dmaiter;
13734c7070dbSScott Long 
13744c7070dbSScott Long 	dmaiter = dmalist;
13754c7070dbSScott Long 	for (i = 0; i < count; i++, dmaiter++) {
13764c7070dbSScott Long 		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
13774c7070dbSScott Long 			break;
13784c7070dbSScott Long 	}
13794c7070dbSScott Long 	if (err)
13804c7070dbSScott Long 		iflib_dma_free_multi(dmalist, i);
13814c7070dbSScott Long 	return (err);
13824c7070dbSScott Long }
13834c7070dbSScott Long 
13844c7070dbSScott Long void
13854c7070dbSScott Long iflib_dma_free(iflib_dma_info_t dma)
13864c7070dbSScott Long {
13874c7070dbSScott Long 	if (dma->idi_tag == NULL)
13884c7070dbSScott Long 		return;
13894c7070dbSScott Long 	if (dma->idi_paddr != IF_BAD_DMA) {
13904c7070dbSScott Long 		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
13914c7070dbSScott Long 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
13924c7070dbSScott Long 		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
13934c7070dbSScott Long 		dma->idi_paddr = IF_BAD_DMA;
13944c7070dbSScott Long 	}
13954c7070dbSScott Long 	if (dma->idi_vaddr != NULL) {
13964c7070dbSScott Long 		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
13974c7070dbSScott Long 		dma->idi_vaddr = NULL;
13984c7070dbSScott Long 	}
13994c7070dbSScott Long 	bus_dma_tag_destroy(dma->idi_tag);
14004c7070dbSScott Long 	dma->idi_tag = NULL;
14014c7070dbSScott Long }
14024c7070dbSScott Long 
14034c7070dbSScott Long void
14044c7070dbSScott Long iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
14054c7070dbSScott Long {
14064c7070dbSScott Long 	int i;
14074c7070dbSScott Long 	iflib_dma_info_t *dmaiter = dmalist;
14084c7070dbSScott Long 
14094c7070dbSScott Long 	for (i = 0; i < count; i++, dmaiter++)
14104c7070dbSScott Long 		iflib_dma_free(*dmaiter);
14114c7070dbSScott Long }
14124c7070dbSScott Long 
1413bd84f700SSean Bruno #ifdef EARLY_AP_STARTUP
1414bd84f700SSean Bruno static const int iflib_started = 1;
1415bd84f700SSean Bruno #else
1416bd84f700SSean Bruno /*
1417bd84f700SSean Bruno  * We used to abuse the smp_started flag to decide if the queues have been
1418bd84f700SSean Bruno  * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
1419bd84f700SSean Bruno  * That gave bad races, since the SYSINIT() runs strictly after smp_started
1420bd84f700SSean Bruno  * is set.  Run a SYSINIT() strictly after that to just set a usable
1421bd84f700SSean Bruno  * completion flag.
1422bd84f700SSean Bruno  */
1423bd84f700SSean Bruno 
1424bd84f700SSean Bruno static int iflib_started;
1425bd84f700SSean Bruno 
1426bd84f700SSean Bruno static void
1427bd84f700SSean Bruno iflib_record_started(void *arg)
1428bd84f700SSean Bruno {
1429bd84f700SSean Bruno 	iflib_started = 1;
1430bd84f700SSean Bruno }
1431bd84f700SSean Bruno 
1432bd84f700SSean Bruno SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
1433bd84f700SSean Bruno 	iflib_record_started, NULL);
1434bd84f700SSean Bruno #endif
1435bd84f700SSean Bruno 
14364c7070dbSScott Long static int
14374c7070dbSScott Long iflib_fast_intr(void *arg)
14384c7070dbSScott Long {
14394c7070dbSScott Long 	iflib_filter_info_t info = arg;
14404c7070dbSScott Long 	struct grouptask *gtask = info->ifi_task;
1441ca62461bSStephen Hurd 	int result;
1442ca62461bSStephen Hurd 
144395246abbSSean Bruno 	if (!iflib_started)
1444ca62461bSStephen Hurd 		return (FILTER_STRAY);
144595246abbSSean Bruno 
144695246abbSSean Bruno 	DBG_COUNTER_INC(fast_intrs);
1447ca62461bSStephen Hurd 	if (info->ifi_filter != NULL) {
1448ca62461bSStephen Hurd 		result = info->ifi_filter(info->ifi_filter_arg);
1449ca62461bSStephen Hurd 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1450ca62461bSStephen Hurd 			return (result);
1451ca62461bSStephen Hurd 	}
145295246abbSSean Bruno 
145395246abbSSean Bruno 	GROUPTASK_ENQUEUE(gtask);
145495246abbSSean Bruno 	return (FILTER_HANDLED);
145595246abbSSean Bruno }
145695246abbSSean Bruno 
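/*
 * Fast interrupt handler for an RX queue that also services the TX queues
 * sharing its vector: update TX credits and schedule the TX tasks that have
 * work, then either schedule the RX task or re-enable the interrupt when
 * nothing is pending.
 */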
145795246abbSSean Bruno static int
145895246abbSSean Bruno iflib_fast_intr_rxtx(void *arg)
145995246abbSSean Bruno {
146095246abbSSean Bruno 	iflib_filter_info_t info = arg;
146195246abbSSean Bruno 	struct grouptask *gtask = info->ifi_task;
146295dcf343SMarius Strobl 	if_ctx_t ctx;
146395246abbSSean Bruno 	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
146495dcf343SMarius Strobl 	iflib_txq_t txq;
146595dcf343SMarius Strobl 	void *sc;
1466ca62461bSStephen Hurd 	int i, cidx, result;
146795dcf343SMarius Strobl 	qidx_t txqid;
14683d10e9edSMarius Strobl 	bool intr_enable, intr_legacy;
146995246abbSSean Bruno 
147095246abbSSean Bruno 	if (!iflib_started)
1471ca62461bSStephen Hurd 		return (FILTER_STRAY);
147295246abbSSean Bruno 
147395246abbSSean Bruno 	DBG_COUNTER_INC(fast_intrs);
1474ca62461bSStephen Hurd 	if (info->ifi_filter != NULL) {
1475ca62461bSStephen Hurd 		result = info->ifi_filter(info->ifi_filter_arg);
1476ca62461bSStephen Hurd 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1477ca62461bSStephen Hurd 			return (result);
1478ca62461bSStephen Hurd 	}
147995246abbSSean Bruno 
148095dcf343SMarius Strobl 	ctx = rxq->ifr_ctx;
148195dcf343SMarius Strobl 	sc = ctx->ifc_softc;
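	/*
	 * For a legacy (INTx) interrupt the per-queue enable methods are not
	 * used; re-enabling of the single shared interrupt is instead
	 * deferred until both the TX and RX checks below have completed.
	 */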
14823d10e9edSMarius Strobl 	intr_enable = false;
14833d10e9edSMarius Strobl 	intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY);
14841ae4848cSMatt Macy 	MPASS(rxq->ifr_ntxqirq);
148595246abbSSean Bruno 	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
148695dcf343SMarius Strobl 		txqid = rxq->ifr_txqid[i];
148795dcf343SMarius Strobl 		txq = &ctx->ifc_txqs[txqid];
148895dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
14898a04b53dSKonstantin Belousov 		    BUS_DMASYNC_POSTREAD);
149095dcf343SMarius Strobl 		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
14913d10e9edSMarius Strobl 			if (intr_legacy)
14923d10e9edSMarius Strobl 				intr_enable = true;
14933d10e9edSMarius Strobl 			else
149495246abbSSean Bruno 				IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
149595246abbSSean Bruno 			continue;
149695246abbSSean Bruno 		}
149795dcf343SMarius Strobl 		GROUPTASK_ENQUEUE(&txq->ift_task);
149895246abbSSean Bruno 	}
149995246abbSSean Bruno 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
150095246abbSSean Bruno 		cidx = rxq->ifr_cq_cidx;
150195246abbSSean Bruno 	else
150295246abbSSean Bruno 		cidx = rxq->ifr_fl[0].ifl_cidx;
150395246abbSSean Bruno 	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
150495246abbSSean Bruno 		GROUPTASK_ENQUEUE(gtask);
150564e6fc13SStephen Hurd 	else {
15063d10e9edSMarius Strobl 		if (intr_legacy)
15073d10e9edSMarius Strobl 			intr_enable = true;
15083d10e9edSMarius Strobl 		else
150995246abbSSean Bruno 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
151064e6fc13SStephen Hurd 		DBG_COUNTER_INC(rx_intr_enables);
151164e6fc13SStephen Hurd 	}
15123d10e9edSMarius Strobl 	if (intr_enable)
15133d10e9edSMarius Strobl 		IFDI_INTR_ENABLE(ctx);
151495246abbSSean Bruno 	return (FILTER_HANDLED);
151595246abbSSean Bruno }
151695246abbSSean Bruno 
151795246abbSSean Bruno 
151895246abbSSean Bruno static int
151995246abbSSean Bruno iflib_fast_intr_ctx(void *arg)
152095246abbSSean Bruno {
152195246abbSSean Bruno 	iflib_filter_info_t info = arg;
152295246abbSSean Bruno 	struct grouptask *gtask = info->ifi_task;
1523ca62461bSStephen Hurd 	int result;
15244c7070dbSScott Long 
1525bd84f700SSean Bruno 	if (!iflib_started)
1526ca62461bSStephen Hurd 		return (FILTER_STRAY);
15271248952aSSean Bruno 
15284c7070dbSScott Long 	DBG_COUNTER_INC(fast_intrs);
1529ca62461bSStephen Hurd 	if (info->ifi_filter != NULL) {
1530ca62461bSStephen Hurd 		result = info->ifi_filter(info->ifi_filter_arg);
1531ca62461bSStephen Hurd 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1532ca62461bSStephen Hurd 			return (result);
1533ca62461bSStephen Hurd 	}
15344c7070dbSScott Long 
15354c7070dbSScott Long 	GROUPTASK_ENQUEUE(gtask);
15364c7070dbSScott Long 	return (FILTER_HANDLED);
15374c7070dbSScott Long }
15384c7070dbSScott Long 
15394c7070dbSScott Long static int
15404c7070dbSScott Long _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
15414c7070dbSScott Long 		 driver_filter_t filter, driver_intr_t handler, void *arg,
15423e0e6330SStephen Hurd 		 const char *name)
15434c7070dbSScott Long {
15444c7070dbSScott Long 	struct resource *res;
15452b2fc973SSean Bruno 	void *tag = NULL;
15464c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
1547d49e83eaSMarius Strobl 	int flags, i, rc;
15484c7070dbSScott Long 
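	/* A legacy INTx line may be shared with other devices. */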
15492b2fc973SSean Bruno 	flags = RF_ACTIVE;
15502b2fc973SSean Bruno 	if (ctx->ifc_flags & IFC_LEGACY)
15512b2fc973SSean Bruno 		flags |= RF_SHAREABLE;
15524c7070dbSScott Long 	MPASS(rid < 512);
1553d49e83eaSMarius Strobl 	i = rid;
1554d49e83eaSMarius Strobl 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, flags);
15554c7070dbSScott Long 	if (res == NULL) {
15564c7070dbSScott Long 		device_printf(dev,
15574c7070dbSScott Long 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
15584c7070dbSScott Long 		return (ENOMEM);
15594c7070dbSScott Long 	}
15604c7070dbSScott Long 	irq->ii_res = res;
15614c7070dbSScott Long 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
15624c7070dbSScott Long 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
15634c7070dbSScott Long 						filter, handler, arg, &tag);
15644c7070dbSScott Long 	if (rc != 0) {
15654c7070dbSScott Long 		device_printf(dev,
15664c7070dbSScott Long 		    "failed to setup interrupt for rid %d, name %s: %d\n",
15674c7070dbSScott Long 					  rid, name ? name : "unknown", rc);
15684c7070dbSScott Long 		return (rc);
15694c7070dbSScott Long 	} else if (name)
1570f454e7ebSJohn Baldwin 		bus_describe_intr(dev, res, tag, "%s", name);
15714c7070dbSScott Long 
15724c7070dbSScott Long 	irq->ii_tag = tag;
15734c7070dbSScott Long 	return (0);
15744c7070dbSScott Long }
15754c7070dbSScott Long 
15764c7070dbSScott Long /*********************************************************************
15774c7070dbSScott Long  *
1578bfce461eSMarius Strobl  *  Allocate DMA resources for TX buffers as well as memory for the TX
1579bfce461eSMarius Strobl  *  mbuf map.  TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in an
1580bfce461eSMarius Strobl  *  iflib_sw_tx_desc_array structure, storing all the information that
1581bfce461eSMarius Strobl  *  is needed to transmit a packet on the wire.  This is called only
1582bfce461eSMarius Strobl  *  once at attach, setup is done every reset.
15834c7070dbSScott Long  *
15844c7070dbSScott Long  **********************************************************************/
15854c7070dbSScott Long static int
15864c7070dbSScott Long iflib_txsd_alloc(iflib_txq_t txq)
15874c7070dbSScott Long {
15884c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
15894c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
15904c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
15914c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
15927f87c040SMarius Strobl 	bus_size_t tsomaxsize;
15934c7070dbSScott Long 	int err, nsegments, ntsosegments;
15948a04b53dSKonstantin Belousov 	bool tso;
15954c7070dbSScott Long 
15964c7070dbSScott Long 	nsegments = scctx->isc_tx_nsegments;
15974c7070dbSScott Long 	ntsosegments = scctx->isc_tx_tso_segments_max;
15987f87c040SMarius Strobl 	tsomaxsize = scctx->isc_tx_tso_size_max;
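	/* Leave room for an 802.1Q header in the largest TSO frame. */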
15997f87c040SMarius Strobl 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
16007f87c040SMarius Strobl 		tsomaxsize += sizeof(struct ether_vlan_header);
160123ac9029SStephen Hurd 	MPASS(scctx->isc_ntxd[0] > 0);
160223ac9029SStephen Hurd 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
16034c7070dbSScott Long 	MPASS(nsegments > 0);
16047f87c040SMarius Strobl 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
16054c7070dbSScott Long 		MPASS(ntsosegments > 0);
16067f87c040SMarius Strobl 		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
16077f87c040SMarius Strobl 	}
16087f87c040SMarius Strobl 
16094c7070dbSScott Long 	/*
1610bfce461eSMarius Strobl 	 * Set up DMA tags for TX buffers.
16114c7070dbSScott Long 	 */
16124c7070dbSScott Long 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
16134c7070dbSScott Long 			       1, 0,			/* alignment, bounds */
16144c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* lowaddr */
16154c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* highaddr */
16164c7070dbSScott Long 			       NULL, NULL,		/* filter, filterarg */
16174c7070dbSScott Long 			       sctx->isc_tx_maxsize,		/* maxsize */
16184c7070dbSScott Long 			       nsegments,	/* nsegments */
16194c7070dbSScott Long 			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
16204c7070dbSScott Long 			       0,			/* flags */
16214c7070dbSScott Long 			       NULL,			/* lockfunc */
16224c7070dbSScott Long 			       NULL,			/* lockfuncarg */
1623bfce461eSMarius Strobl 			       &txq->ift_buf_tag))) {
16244c7070dbSScott Long 		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
16259d0a88deSDimitry Andric 		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
16269d0a88deSDimitry Andric 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
16274c7070dbSScott Long 		goto fail;
16284c7070dbSScott Long 	}
16298a04b53dSKonstantin Belousov 	tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
16308a04b53dSKonstantin Belousov 	if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
16314c7070dbSScott Long 			       1, 0,			/* alignment, bounds */
16324c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* lowaddr */
16334c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* highaddr */
16344c7070dbSScott Long 			       NULL, NULL,		/* filter, filterarg */
16357f87c040SMarius Strobl 			       tsomaxsize,		/* maxsize */
16364c7070dbSScott Long 			       ntsosegments,	/* nsegments */
16377f87c040SMarius Strobl 			       sctx->isc_tso_maxsegsize,/* maxsegsize */
16384c7070dbSScott Long 			       0,			/* flags */
16394c7070dbSScott Long 			       NULL,			/* lockfunc */
16404c7070dbSScott Long 			       NULL,			/* lockfuncarg */
1641bfce461eSMarius Strobl 			       &txq->ift_tso_buf_tag))) {
1642bfce461eSMarius Strobl 		device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
1643bfce461eSMarius Strobl 		    err);
16444c7070dbSScott Long 		goto fail;
16454c7070dbSScott Long 	}
1646bfce461eSMarius Strobl 
1647bfce461eSMarius Strobl 	/* Allocate memory for the TX mbuf map. */
16484c7070dbSScott Long 	if (!(txq->ift_sds.ifsd_m =
1649ac2fffa4SPedro F. Giffuni 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1650ac2fffa4SPedro F. Giffuni 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1651bfce461eSMarius Strobl 		device_printf(dev, "Unable to allocate TX mbuf map memory\n");
16524c7070dbSScott Long 		err = ENOMEM;
16534c7070dbSScott Long 		goto fail;
16544c7070dbSScott Long 	}
16554c7070dbSScott Long 
1656bfce461eSMarius Strobl 	/*
1657bfce461eSMarius Strobl 	 * Create the DMA maps for TX buffers.
1658bfce461eSMarius Strobl 	 */
16598a04b53dSKonstantin Belousov 	if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
16608a04b53dSKonstantin Belousov 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
16618a04b53dSKonstantin Belousov 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1662bfce461eSMarius Strobl 		device_printf(dev,
1663bfce461eSMarius Strobl 		    "Unable to allocate TX buffer DMA map memory\n");
16644c7070dbSScott Long 		err = ENOMEM;
16654c7070dbSScott Long 		goto fail;
16664c7070dbSScott Long 	}
16678a04b53dSKonstantin Belousov 	if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
16688a04b53dSKonstantin Belousov 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
16698a04b53dSKonstantin Belousov 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1670bfce461eSMarius Strobl 		device_printf(dev,
1671bfce461eSMarius Strobl 		    "Unable to allocate TSO TX buffer map memory\n");
16728a04b53dSKonstantin Belousov 		err = ENOMEM;
16738a04b53dSKonstantin Belousov 		goto fail;
16748a04b53dSKonstantin Belousov 	}
167523ac9029SStephen Hurd 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1676bfce461eSMarius Strobl 		err = bus_dmamap_create(txq->ift_buf_tag, 0,
16778a04b53dSKonstantin Belousov 		    &txq->ift_sds.ifsd_map[i]);
16784c7070dbSScott Long 		if (err != 0) {
16794c7070dbSScott Long 			device_printf(dev, "Unable to create TX DMA map\n");
16804c7070dbSScott Long 			goto fail;
16814c7070dbSScott Long 		}
16828a04b53dSKonstantin Belousov 		if (!tso)
16838a04b53dSKonstantin Belousov 			continue;
1684bfce461eSMarius Strobl 		err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
16858a04b53dSKonstantin Belousov 		    &txq->ift_sds.ifsd_tso_map[i]);
16868a04b53dSKonstantin Belousov 		if (err != 0) {
16878a04b53dSKonstantin Belousov 			device_printf(dev, "Unable to create TSO TX DMA map\n");
16888a04b53dSKonstantin Belousov 			goto fail;
16898a04b53dSKonstantin Belousov 		}
16904c7070dbSScott Long 	}
16914c7070dbSScott Long 	return (0);
16924c7070dbSScott Long fail:
16934c7070dbSScott Long 	/* We free all, it handles case where we are in the middle */
16944c7070dbSScott Long 	iflib_tx_structures_free(ctx);
16954c7070dbSScott Long 	return (err);
16964c7070dbSScott Long }
16974c7070dbSScott Long 
16984c7070dbSScott Long static void
16994c7070dbSScott Long iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
17004c7070dbSScott Long {
17014c7070dbSScott Long 	bus_dmamap_t map;
17024c7070dbSScott Long 
1703db8e8f1eSEric Joyner 	if (txq->ift_sds.ifsd_map != NULL) {
17044c7070dbSScott Long 		map = txq->ift_sds.ifsd_map[i];
1705bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
1706bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_buf_tag, map);
1707bfce461eSMarius Strobl 		bus_dmamap_destroy(txq->ift_buf_tag, map);
17084c7070dbSScott Long 		txq->ift_sds.ifsd_map[i] = NULL;
17094c7070dbSScott Long 	}
17108a04b53dSKonstantin Belousov 
1711db8e8f1eSEric Joyner 	if (txq->ift_sds.ifsd_tso_map != NULL) {
17128a04b53dSKonstantin Belousov 		map = txq->ift_sds.ifsd_tso_map[i];
1713bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_tso_buf_tag, map,
17148a04b53dSKonstantin Belousov 		    BUS_DMASYNC_POSTWRITE);
1715bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_tso_buf_tag, map);
1716bfce461eSMarius Strobl 		bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
17178a04b53dSKonstantin Belousov 		txq->ift_sds.ifsd_tso_map[i] = NULL;
17188a04b53dSKonstantin Belousov 	}
17194c7070dbSScott Long }
17204c7070dbSScott Long 
17214c7070dbSScott Long static void
17224c7070dbSScott Long iflib_txq_destroy(iflib_txq_t txq)
17234c7070dbSScott Long {
17244c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
17254c7070dbSScott Long 
172623ac9029SStephen Hurd 	for (int i = 0; i < txq->ift_size; i++)
17274c7070dbSScott Long 		iflib_txsd_destroy(ctx, txq, i);
1728244e7cffSEric Joyner 
1729244e7cffSEric Joyner 	if (txq->ift_br != NULL) {
1730244e7cffSEric Joyner 		ifmp_ring_free(txq->ift_br);
1731244e7cffSEric Joyner 		txq->ift_br = NULL;
1732244e7cffSEric Joyner 	}
1733244e7cffSEric Joyner 
1734244e7cffSEric Joyner 	mtx_destroy(&txq->ift_mtx);
1735244e7cffSEric Joyner 
17364c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL) {
17374c7070dbSScott Long 		free(txq->ift_sds.ifsd_map, M_IFLIB);
17384c7070dbSScott Long 		txq->ift_sds.ifsd_map = NULL;
17394c7070dbSScott Long 	}
17408a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL) {
17418a04b53dSKonstantin Belousov 		free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
17428a04b53dSKonstantin Belousov 		txq->ift_sds.ifsd_tso_map = NULL;
17438a04b53dSKonstantin Belousov 	}
17444c7070dbSScott Long 	if (txq->ift_sds.ifsd_m != NULL) {
17454c7070dbSScott Long 		free(txq->ift_sds.ifsd_m, M_IFLIB);
17464c7070dbSScott Long 		txq->ift_sds.ifsd_m = NULL;
17474c7070dbSScott Long 	}
1748bfce461eSMarius Strobl 	if (txq->ift_buf_tag != NULL) {
1749bfce461eSMarius Strobl 		bus_dma_tag_destroy(txq->ift_buf_tag);
1750bfce461eSMarius Strobl 		txq->ift_buf_tag = NULL;
17514c7070dbSScott Long 	}
1752bfce461eSMarius Strobl 	if (txq->ift_tso_buf_tag != NULL) {
1753bfce461eSMarius Strobl 		bus_dma_tag_destroy(txq->ift_tso_buf_tag);
1754bfce461eSMarius Strobl 		txq->ift_tso_buf_tag = NULL;
17554c7070dbSScott Long 	}
1756244e7cffSEric Joyner 	if (txq->ift_ifdi != NULL) {
1757244e7cffSEric Joyner 		free(txq->ift_ifdi, M_IFLIB);
1758244e7cffSEric Joyner 	}
17594c7070dbSScott Long }
17604c7070dbSScott Long 
17614c7070dbSScott Long static void
17624c7070dbSScott Long iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
17634c7070dbSScott Long {
17644c7070dbSScott Long 	struct mbuf **mp;
17654c7070dbSScott Long 
17664c7070dbSScott Long 	mp = &txq->ift_sds.ifsd_m[i];
17674c7070dbSScott Long 	if (*mp == NULL)
17684c7070dbSScott Long 		return;
17694c7070dbSScott Long 
17704c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL) {
1771bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_buf_tag,
17728a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
1773bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
17748a04b53dSKonstantin Belousov 	}
17758a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1776bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_tso_buf_tag,
17778a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
1778bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_tso_buf_tag,
17798a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_tso_map[i]);
17804c7070dbSScott Long 	}
178123ac9029SStephen Hurd 	m_free(*mp);
17824c7070dbSScott Long 	DBG_COUNTER_INC(tx_frees);
17834c7070dbSScott Long 	*mp = NULL;
17844c7070dbSScott Long }
17854c7070dbSScott Long 
17864c7070dbSScott Long static int
17874c7070dbSScott Long iflib_txq_setup(iflib_txq_t txq)
17884c7070dbSScott Long {
17894c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
179023ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
17914d261ce2SStephen Hurd 	if_shared_ctx_t sctx = ctx->ifc_sctx;
17924c7070dbSScott Long 	iflib_dma_info_t di;
17934c7070dbSScott Long 	int i;
17944c7070dbSScott Long 
17954c7070dbSScott Long 	/* Set number of descriptors available */
17964c7070dbSScott Long 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
179795246abbSSean Bruno 	/* XXX make configurable */
179895246abbSSean Bruno 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
17994c7070dbSScott Long 
18004c7070dbSScott Long 	/* Reset indices */
180195246abbSSean Bruno 	txq->ift_cidx_processed = 0;
180295246abbSSean Bruno 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
180323ac9029SStephen Hurd 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
18044c7070dbSScott Long 
18054d261ce2SStephen Hurd 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
18064c7070dbSScott Long 		bzero((void *)di->idi_vaddr, di->idi_size);
18074c7070dbSScott Long 
18084c7070dbSScott Long 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
18094d261ce2SStephen Hurd 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
18104c7070dbSScott Long 		bus_dmamap_sync(di->idi_tag, di->idi_map,
18114c7070dbSScott Long 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
18124c7070dbSScott Long 	return (0);
18134c7070dbSScott Long }
18144c7070dbSScott Long 
18154c7070dbSScott Long /*********************************************************************
18164c7070dbSScott Long  *
1817bfce461eSMarius Strobl  *  Allocate DMA resources for RX buffers as well as memory for the RX
1818bfce461eSMarius Strobl  *  mbuf map, direct RX cluster pointer map and RX cluster bus address
1819bfce461eSMarius Strobl  *  map.  RX DMA map, RX mbuf map, direct RX cluster pointer map and
1820bfce461eSMarius Strobl  *  RX cluster map are kept in an iflib_sw_rx_desc_array structure.
1821bfce461eSMarius Strobl  *  Since we use one entry in iflib_sw_rx_desc_array per received
1822bfce461eSMarius Strobl  *  packet, the maximum number of entries we'll need is equal to the
1823bfce461eSMarius Strobl  *  number of hardware receive descriptors that we've allocated.
18244c7070dbSScott Long  *
18254c7070dbSScott Long  **********************************************************************/
18264c7070dbSScott Long static int
18274c7070dbSScott Long iflib_rxsd_alloc(iflib_rxq_t rxq)
18284c7070dbSScott Long {
18294c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
18304c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
183123ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
18324c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
18334c7070dbSScott Long 	iflib_fl_t fl;
18344c7070dbSScott Long 	int			err;
18354c7070dbSScott Long 
183623ac9029SStephen Hurd 	MPASS(scctx->isc_nrxd[0] > 0);
183723ac9029SStephen Hurd 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
18384c7070dbSScott Long 
18394c7070dbSScott Long 	fl = rxq->ifr_fl;
18404c7070dbSScott Long 	for (int i = 0; i <  rxq->ifr_nfl; i++, fl++) {
184123ac9029SStephen Hurd 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
1842bfce461eSMarius Strobl 		/* Set up DMA tag for RX buffers. */
18434c7070dbSScott Long 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
18444c7070dbSScott Long 					 1, 0,			/* alignment, bounds */
18454c7070dbSScott Long 					 BUS_SPACE_MAXADDR,	/* lowaddr */
18464c7070dbSScott Long 					 BUS_SPACE_MAXADDR,	/* highaddr */
18474c7070dbSScott Long 					 NULL, NULL,		/* filter, filterarg */
18484c7070dbSScott Long 					 sctx->isc_rx_maxsize,	/* maxsize */
18494c7070dbSScott Long 					 sctx->isc_rx_nsegments,	/* nsegments */
18504c7070dbSScott Long 					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
18514c7070dbSScott Long 					 0,			/* flags */
18524c7070dbSScott Long 					 NULL,			/* lockfunc */
18534c7070dbSScott Long 					 NULL,			/* lockarg */
1854bfce461eSMarius Strobl 					 &fl->ifl_buf_tag);
18554c7070dbSScott Long 		if (err) {
1856bfce461eSMarius Strobl 			device_printf(dev,
1857bfce461eSMarius Strobl 			    "Unable to allocate RX DMA tag: %d\n", err);
18584c7070dbSScott Long 			goto fail;
18594c7070dbSScott Long 		}
1860bfce461eSMarius Strobl 
1861bfce461eSMarius Strobl 		/* Allocate memory for the RX mbuf map. */
1862e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_m =
1863ac2fffa4SPedro F. Giffuni 		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
1864ac2fffa4SPedro F. Giffuni 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1865bfce461eSMarius Strobl 			device_printf(dev,
1866bfce461eSMarius Strobl 			    "Unable to allocate RX mbuf map memory\n");
1867e035717eSSean Bruno 			err = ENOMEM;
1868e035717eSSean Bruno 			goto fail;
1869e035717eSSean Bruno 		}
1870bfce461eSMarius Strobl 
1871bfce461eSMarius Strobl 		/* Allocate memory for the direct RX cluster pointer map. */
1872e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_cl =
1873ac2fffa4SPedro F. Giffuni 		      (caddr_t *) malloc(sizeof(caddr_t) *
1874ac2fffa4SPedro F. Giffuni 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1875bfce461eSMarius Strobl 			device_printf(dev,
1876bfce461eSMarius Strobl 			    "Unable to allocate RX cluster map memory\n");
1877e035717eSSean Bruno 			err = ENOMEM;
1878e035717eSSean Bruno 			goto fail;
1879e035717eSSean Bruno 		}
18804c7070dbSScott Long 
1881bfce461eSMarius Strobl 		/* Allocate memory for the RX cluster bus address map. */
1882fbec776dSAndrew Gallatin 		if (!(fl->ifl_sds.ifsd_ba =
1883fbec776dSAndrew Gallatin 		      (bus_addr_t *) malloc(sizeof(bus_addr_t) *
1884fbec776dSAndrew Gallatin 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1885bfce461eSMarius Strobl 			device_printf(dev,
1886bfce461eSMarius Strobl 			    "Unable to allocate RX bus address map memory\n");
1887fbec776dSAndrew Gallatin 			err = ENOMEM;
1888fbec776dSAndrew Gallatin 			goto fail;
1889fbec776dSAndrew Gallatin 		}
1890e035717eSSean Bruno 
1891bfce461eSMarius Strobl 		/*
1892bfce461eSMarius Strobl 		 * Create the DMA maps for RX buffers.
1893bfce461eSMarius Strobl 		 */
1894e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_map =
1895ac2fffa4SPedro F. Giffuni 		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1896bfce461eSMarius Strobl 			device_printf(dev,
1897bfce461eSMarius Strobl 			    "Unable to allocate RX buffer DMA map memory\n");
1898e035717eSSean Bruno 			err = ENOMEM;
1899e035717eSSean Bruno 			goto fail;
1900e035717eSSean Bruno 		}
1901e035717eSSean Bruno 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
1902bfce461eSMarius Strobl 			err = bus_dmamap_create(fl->ifl_buf_tag, 0,
1903bfce461eSMarius Strobl 			    &fl->ifl_sds.ifsd_map[i]);
1904e035717eSSean Bruno 			if (err != 0) {
190595246abbSSean Bruno 				device_printf(dev, "Unable to create RX buffer DMA map\n");
19064c7070dbSScott Long 				goto fail;
19074c7070dbSScott Long 			}
19084c7070dbSScott Long 		}
1909835809f9SSean Bruno 	}
19104c7070dbSScott Long 	return (0);
19114c7070dbSScott Long 
19124c7070dbSScott Long fail:
19134c7070dbSScott Long 	iflib_rx_structures_free(ctx);
19144c7070dbSScott Long 	return (err);
19154c7070dbSScott Long }
19164c7070dbSScott Long 
19174c7070dbSScott Long 
19184c7070dbSScott Long /*
19194c7070dbSScott Long  * Internal service routines
19204c7070dbSScott Long  */
19214c7070dbSScott Long 
19224c7070dbSScott Long struct rxq_refill_cb_arg {
19234c7070dbSScott Long 	int               error;
19244c7070dbSScott Long 	bus_dma_segment_t seg;
19254c7070dbSScott Long 	int               nseg;
19264c7070dbSScott Long };
19274c7070dbSScott Long 
19284c7070dbSScott Long static void
19294c7070dbSScott Long _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
19304c7070dbSScott Long {
19314c7070dbSScott Long 	struct rxq_refill_cb_arg *cb_arg = arg;
19324c7070dbSScott Long 
19334c7070dbSScott Long 	cb_arg->error = error;
19344c7070dbSScott Long 	cb_arg->seg = segs[0];
19354c7070dbSScott Long 	cb_arg->nseg = nseg;
19364c7070dbSScott Long }
19374c7070dbSScott Long 
19384c7070dbSScott Long /**
19391722eeacSMarius Strobl  * _iflib_fl_refill - refill an rxq free-buffer list
19404c7070dbSScott Long  * @ctx: the iflib context
19411722eeacSMarius Strobl  * @fl: the free list to refill
19421722eeacSMarius Strobl  * @count: the number of new buffers to allocate
19434c7070dbSScott Long  *
19441722eeacSMarius Strobl  * (Re)populate an rxq free-buffer list with up to @count new packet buffers.
19451722eeacSMarius Strobl  * The caller must ensure that @count does not exceed the queue's capacity.
19464c7070dbSScott Long  */
1947fb1a29b4SHans Petter Selasky static uint8_t
19484c7070dbSScott Long _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
19494c7070dbSScott Long {
195095246abbSSean Bruno 	struct if_rxd_update iru;
1951fbec776dSAndrew Gallatin 	struct rxq_refill_cb_arg cb_arg;
19523db348b5SMarius Strobl 	struct mbuf *m;
19533db348b5SMarius Strobl 	caddr_t cl, *sd_cl;
19543db348b5SMarius Strobl 	struct mbuf **sd_m;
1955e035717eSSean Bruno 	bus_dmamap_t *sd_map;
1956fbec776dSAndrew Gallatin 	bus_addr_t bus_addr, *sd_ba;
19573db348b5SMarius Strobl 	int err, frag_idx, i, idx, n, pidx;
1958a1b799caSStephen Hurd 	qidx_t credits;
19594c7070dbSScott Long 
1960e035717eSSean Bruno 	sd_m = fl->ifl_sds.ifsd_m;
1961e035717eSSean Bruno 	sd_map = fl->ifl_sds.ifsd_map;
1962e035717eSSean Bruno 	sd_cl = fl->ifl_sds.ifsd_cl;
1963fbec776dSAndrew Gallatin 	sd_ba = fl->ifl_sds.ifsd_ba;
19643db348b5SMarius Strobl 	pidx = fl->ifl_pidx;
1965e035717eSSean Bruno 	idx = pidx;
19663db348b5SMarius Strobl 	frag_idx = fl->ifl_fragidx;
1967a1b799caSStephen Hurd 	credits = fl->ifl_credits;
1968e035717eSSean Bruno 
19693db348b5SMarius Strobl 	i = 0;
19704c7070dbSScott Long 	n = count;
19714c7070dbSScott Long 	MPASS(n > 0);
1972a1b799caSStephen Hurd 	MPASS(credits + n <= fl->ifl_size);
19734c7070dbSScott Long 
19744c7070dbSScott Long 	if (pidx < fl->ifl_cidx)
19754c7070dbSScott Long 		MPASS(pidx + n <= fl->ifl_cidx);
1976a1b799caSStephen Hurd 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
19774c7070dbSScott Long 		MPASS(fl->ifl_gen == 0);
19784c7070dbSScott Long 	if (pidx > fl->ifl_cidx)
19794c7070dbSScott Long 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
19804c7070dbSScott Long 
19814c7070dbSScott Long 	DBG_COUNTER_INC(fl_refills);
19824c7070dbSScott Long 	if (n > 8)
19834c7070dbSScott Long 		DBG_COUNTER_INC(fl_refills_large);
19842d873474SStephen Hurd 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
19854c7070dbSScott Long 	while (n--) {
19864c7070dbSScott Long 		/*
19874c7070dbSScott Long 		 * We allocate an uninitialized mbuf + cluster, mbuf is
19884c7070dbSScott Long 		 * initialized after rx.
19894c7070dbSScott Long 		 *
19904c7070dbSScott Long 		 * If the cluster is still set then we know a minimum sized packet was received
19914c7070dbSScott Long 		 */
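		/*
		 * Find the next descriptor slot that does not already hold a
		 * cluster; wrap the search back to the start of the ring if
		 * nothing is free at or after ifl_fragidx.
		 */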
19923db348b5SMarius Strobl 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
19933db348b5SMarius Strobl 		    &frag_idx);
19943db348b5SMarius Strobl 		if (frag_idx < 0)
199587890dbaSSean Bruno 			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
19963db348b5SMarius Strobl 		MPASS(frag_idx >= 0);
199787890dbaSSean Bruno 		if ((cl = sd_cl[frag_idx]) == NULL) {
1998fbec776dSAndrew Gallatin 			if ((cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
19994c7070dbSScott Long 				break;
20004c7070dbSScott Long 
20014c7070dbSScott Long 			cb_arg.error = 0;
200295246abbSSean Bruno 			MPASS(sd_map != NULL);
2003bfce461eSMarius Strobl 			err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
20048a04b53dSKonstantin Belousov 			    cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
20058a04b53dSKonstantin Belousov 			    BUS_DMA_NOWAIT);
20064c7070dbSScott Long 			if (err != 0 || cb_arg.error) {
20074c7070dbSScott Long 				/*
20084c7070dbSScott Long 				 * !zone_pack ?
20094c7070dbSScott Long 				 */
20104c7070dbSScott Long 				if (fl->ifl_zone == zone_pack)
20114c7070dbSScott Long 					uma_zfree(fl->ifl_zone, cl);
2012fbec776dSAndrew Gallatin 				break;
20134c7070dbSScott Long 			}
20144c7070dbSScott Long 
2015fbec776dSAndrew Gallatin 			sd_ba[frag_idx] =  bus_addr = cb_arg.seg.ds_addr;
201687890dbaSSean Bruno 			sd_cl[frag_idx] = cl;
2017fbec776dSAndrew Gallatin #if MEMORY_LOGGING
2018fbec776dSAndrew Gallatin 			fl->ifl_cl_enqueued++;
2019fbec776dSAndrew Gallatin #endif
2020fbec776dSAndrew Gallatin 		} else {
2021fbec776dSAndrew Gallatin 			bus_addr = sd_ba[frag_idx];
2022fbec776dSAndrew Gallatin 		}
202395dcf343SMarius Strobl 		bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
202495dcf343SMarius Strobl 		    BUS_DMASYNC_PREREAD);
2025fbec776dSAndrew Gallatin 
20266d49b41eSAndrew Gallatin 		if (sd_m[frag_idx] == NULL) {
2027fbec776dSAndrew Gallatin 			if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
2028fbec776dSAndrew Gallatin 				break;
2029fbec776dSAndrew Gallatin 			}
203087890dbaSSean Bruno 			sd_m[frag_idx] = m;
20316d49b41eSAndrew Gallatin 		}
20323db348b5SMarius Strobl 		bit_set(fl->ifl_rx_bitmap, frag_idx);
2033fbec776dSAndrew Gallatin #if MEMORY_LOGGING
2034fbec776dSAndrew Gallatin 		fl->ifl_m_enqueued++;
2035fbec776dSAndrew Gallatin #endif
2036fbec776dSAndrew Gallatin 
2037fbec776dSAndrew Gallatin 		DBG_COUNTER_INC(rx_allocs);
203887890dbaSSean Bruno 		fl->ifl_rxd_idxs[i] = frag_idx;
20394c7070dbSScott Long 		fl->ifl_bus_addrs[i] = bus_addr;
20404c7070dbSScott Long 		fl->ifl_vm_addrs[i] = cl;
2041a1b799caSStephen Hurd 		credits++;
20424c7070dbSScott Long 		i++;
2043a1b799caSStephen Hurd 		MPASS(credits <= fl->ifl_size);
2044e035717eSSean Bruno 		if (++idx == fl->ifl_size) {
20454c7070dbSScott Long 			fl->ifl_gen = 1;
2046e035717eSSean Bruno 			idx = 0;
20474c7070dbSScott Long 		}
20484c7070dbSScott Long 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
204995246abbSSean Bruno 			iru.iru_pidx = pidx;
205095246abbSSean Bruno 			iru.iru_count = i;
205195246abbSSean Bruno 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
20524c7070dbSScott Long 			i = 0;
2053e035717eSSean Bruno 			pidx = idx;
2054fa5416a8SSean Bruno 			fl->ifl_pidx = idx;
2055a1b799caSStephen Hurd 			fl->ifl_credits = credits;
205687890dbaSSean Bruno 		}
20574c7070dbSScott Long 	}
2058fbec776dSAndrew Gallatin 
2059a1b799caSStephen Hurd 	if (i) {
2060a1b799caSStephen Hurd 		iru.iru_pidx = pidx;
2061a1b799caSStephen Hurd 		iru.iru_count = i;
2062a1b799caSStephen Hurd 		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2063a1b799caSStephen Hurd 		fl->ifl_pidx = idx;
2064a1b799caSStephen Hurd 		fl->ifl_credits = credits;
2065a1b799caSStephen Hurd 	}
20664c7070dbSScott Long 	DBG_COUNTER_INC(rxd_flush);
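	/*
	 * Tell the hardware about the last valid descriptor written, i.e.
	 * the slot just before the current producer index (wrapping to the
	 * end of the ring when pidx is 0).
	 */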
20674c7070dbSScott Long 	if (fl->ifl_pidx == 0)
20684c7070dbSScott Long 		pidx = fl->ifl_size - 1;
20694c7070dbSScott Long 	else
20704c7070dbSScott Long 		pidx = fl->ifl_pidx - 1;
207195246abbSSean Bruno 
207295246abbSSean Bruno 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
207395246abbSSean Bruno 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
20744c7070dbSScott Long 	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
20759e9b738aSPatrick Kelsey 	fl->ifl_fragidx = frag_idx + 1;
20769e9b738aSPatrick Kelsey 	if (fl->ifl_fragidx == fl->ifl_size)
20779e9b738aSPatrick Kelsey 		fl->ifl_fragidx = 0;
2078fb1a29b4SHans Petter Selasky 
2079fb1a29b4SHans Petter Selasky 	return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY);
20804c7070dbSScott Long }
20814c7070dbSScott Long 
2082fb1a29b4SHans Petter Selasky static __inline uint8_t
20833caff188SPatrick Kelsey __iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl)
20844c7070dbSScott Long {
20854c7070dbSScott Long 	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
20864c7070dbSScott Long 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
20874c7070dbSScott Long #ifdef INVARIANTS
20884c7070dbSScott Long 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
20894c7070dbSScott Long #endif
20904c7070dbSScott Long 
20914c7070dbSScott Long 	MPASS(fl->ifl_credits <= fl->ifl_size);
20924c7070dbSScott Long 	MPASS(reclaimable == delta);
20934c7070dbSScott Long 
20944c7070dbSScott Long 	if (reclaimable > 0)
20953caff188SPatrick Kelsey 		return (_iflib_fl_refill(ctx, fl, reclaimable));
2096fb1a29b4SHans Petter Selasky 	return (0);
20974c7070dbSScott Long }
20984c7070dbSScott Long 
209977c1fcecSEric Joyner uint8_t
210077c1fcecSEric Joyner iflib_in_detach(if_ctx_t ctx)
210177c1fcecSEric Joyner {
210277c1fcecSEric Joyner 	bool in_detach;
21031722eeacSMarius Strobl 
210477c1fcecSEric Joyner 	STATE_LOCK(ctx);
210577c1fcecSEric Joyner 	in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
210677c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
210777c1fcecSEric Joyner 	return (in_detach);
210877c1fcecSEric Joyner }
210977c1fcecSEric Joyner 
21104c7070dbSScott Long static void
21114c7070dbSScott Long iflib_fl_bufs_free(iflib_fl_t fl)
21124c7070dbSScott Long {
21134c7070dbSScott Long 	iflib_dma_info_t idi = fl->ifl_ifdi;
21148a04b53dSKonstantin Belousov 	bus_dmamap_t sd_map;
21154c7070dbSScott Long 	uint32_t i;
21164c7070dbSScott Long 
21174c7070dbSScott Long 	for (i = 0; i < fl->ifl_size; i++) {
2118e035717eSSean Bruno 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2119e035717eSSean Bruno 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
21204c7070dbSScott Long 
2121fbec776dSAndrew Gallatin 		if (*sd_cl != NULL) {
21228a04b53dSKonstantin Belousov 			sd_map = fl->ifl_sds.ifsd_map[i];
2123bfce461eSMarius Strobl 			bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
21248a04b53dSKonstantin Belousov 			    BUS_DMASYNC_POSTREAD);
2125bfce461eSMarius Strobl 			bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
2126fbec776dSAndrew Gallatin 			if (*sd_cl != NULL)
2127fbec776dSAndrew Gallatin 				uma_zfree(fl->ifl_zone, *sd_cl);
2128e035717eSSean Bruno 			if (*sd_m != NULL) {
2129e035717eSSean Bruno 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2130e035717eSSean Bruno 				uma_zfree(zone_mbuf, *sd_m);
2131e035717eSSean Bruno 			}
21324c7070dbSScott Long 		} else {
2133e035717eSSean Bruno 			MPASS(*sd_cl == NULL);
2134e035717eSSean Bruno 			MPASS(*sd_m == NULL);
21354c7070dbSScott Long 		}
21364c7070dbSScott Long #if MEMORY_LOGGING
21374c7070dbSScott Long 		fl->ifl_m_dequeued++;
21384c7070dbSScott Long 		fl->ifl_cl_dequeued++;
21394c7070dbSScott Long #endif
2140e035717eSSean Bruno 		*sd_cl = NULL;
2141e035717eSSean Bruno 		*sd_m = NULL;
21424c7070dbSScott Long 	}
214395246abbSSean Bruno #ifdef INVARIANTS
214495246abbSSean Bruno 	for (i = 0; i < fl->ifl_size; i++) {
214595246abbSSean Bruno 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
214695246abbSSean Bruno 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
214795246abbSSean Bruno 	}
214895246abbSSean Bruno #endif
21494c7070dbSScott Long 	/*
21504c7070dbSScott Long 	 * Reset free list values
21514c7070dbSScott Long 	 */
215287890dbaSSean Bruno 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
21534c7070dbSScott Long 	bzero(idi->idi_vaddr, idi->idi_size);
21544c7070dbSScott Long }
21554c7070dbSScott Long 
21564c7070dbSScott Long /*********************************************************************
21574c7070dbSScott Long  *
21581722eeacSMarius Strobl  *  Initialize a free list and its buffers.
21594c7070dbSScott Long  *
21604c7070dbSScott Long  **********************************************************************/
21614c7070dbSScott Long static int
21624c7070dbSScott Long iflib_fl_setup(iflib_fl_t fl)
21634c7070dbSScott Long {
21644c7070dbSScott Long 	iflib_rxq_t rxq = fl->ifl_rxq;
21654c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
2166b3813609SPatrick Kelsey 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
2167b3813609SPatrick Kelsey 	int qidx;
21684c7070dbSScott Long 
21697274b2f6SStephen Hurd 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
21704c7070dbSScott Long 	/*
21714c7070dbSScott Long 	** Free current RX buffer structs and their mbufs
21724c7070dbSScott Long 	*/
21734c7070dbSScott Long 	iflib_fl_bufs_free(fl);
21744c7070dbSScott Long 	/* Now replenish the mbufs */
21754c7070dbSScott Long 	MPASS(fl->ifl_credits == 0);
2176b3813609SPatrick Kelsey 	qidx = rxq->ifr_fl_offset + fl->ifl_id;
2177b3813609SPatrick Kelsey 	if (scctx->isc_rxd_buf_size[qidx] != 0)
2178b3813609SPatrick Kelsey 		fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx];
2179b3813609SPatrick Kelsey 	else
21801b9d9394SEric Joyner 		fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz;
2181b3813609SPatrick Kelsey 	/*
2182b3813609SPatrick Kelsey 	 * ifl_buf_size may be a driver-supplied value, so pull it up
2183b3813609SPatrick Kelsey 	 * to the selected mbuf size.
2184b3813609SPatrick Kelsey 	 */
2185b3813609SPatrick Kelsey 	fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size);
21864c7070dbSScott Long 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
21874c7070dbSScott Long 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
21884c7070dbSScott Long 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
21894c7070dbSScott Long 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
21904c7070dbSScott Long 
21914c7070dbSScott Long 
21924c7070dbSScott Long 	/* Avoid pre-allocating zillions of clusters to an idle card,
21934c7070dbSScott Long 	 * potentially speeding up attach.
21944c7070dbSScott Long 	 */
2195fb1a29b4SHans Petter Selasky 	(void) _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
21964c7070dbSScott Long 	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
21974c7070dbSScott Long 	if (min(128, fl->ifl_size) != fl->ifl_credits)
21984c7070dbSScott Long 		return (ENOBUFS);
21994c7070dbSScott Long 	/*
22004c7070dbSScott Long 	 * Sanity-check state before syncing the descriptor ring.
22014c7070dbSScott Long 	 */
22024c7070dbSScott Long 	MPASS(rxq != NULL);
22034c7070dbSScott Long 	MPASS(fl->ifl_ifdi != NULL);
22044c7070dbSScott Long 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
22054c7070dbSScott Long 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
22064c7070dbSScott Long 	return (0);
22074c7070dbSScott Long }
22084c7070dbSScott Long 
22094c7070dbSScott Long /*********************************************************************
22104c7070dbSScott Long  *
22114c7070dbSScott Long  *  Free receive ring data structures
22124c7070dbSScott Long  *
22134c7070dbSScott Long  **********************************************************************/
22144c7070dbSScott Long static void
22154c7070dbSScott Long iflib_rx_sds_free(iflib_rxq_t rxq)
22164c7070dbSScott Long {
22174c7070dbSScott Long 	iflib_fl_t fl;
22188a04b53dSKonstantin Belousov 	int i, j;
22194c7070dbSScott Long 
22204c7070dbSScott Long 	if (rxq->ifr_fl != NULL) {
22214c7070dbSScott Long 		for (i = 0; i < rxq->ifr_nfl; i++) {
22224c7070dbSScott Long 			fl = &rxq->ifr_fl[i];
2223bfce461eSMarius Strobl 			if (fl->ifl_buf_tag != NULL) {
22248a04b53dSKonstantin Belousov 				if (fl->ifl_sds.ifsd_map != NULL) {
222577102fd6SAndrew Gallatin 					for (j = 0; j < fl->ifl_size; j++) {
22268a04b53dSKonstantin Belousov 						bus_dmamap_sync(
2227bfce461eSMarius Strobl 						    fl->ifl_buf_tag,
222877102fd6SAndrew Gallatin 						    fl->ifl_sds.ifsd_map[j],
22298a04b53dSKonstantin Belousov 						    BUS_DMASYNC_POSTREAD);
22308a04b53dSKonstantin Belousov 						bus_dmamap_unload(
2231bfce461eSMarius Strobl 						    fl->ifl_buf_tag,
223277102fd6SAndrew Gallatin 						    fl->ifl_sds.ifsd_map[j]);
2233db8e8f1eSEric Joyner 						bus_dmamap_destroy(
2234db8e8f1eSEric Joyner 						    fl->ifl_buf_tag,
2235db8e8f1eSEric Joyner 						    fl->ifl_sds.ifsd_map[j]);
22368a04b53dSKonstantin Belousov 					}
22378a04b53dSKonstantin Belousov 				}
2238bfce461eSMarius Strobl 				bus_dma_tag_destroy(fl->ifl_buf_tag);
2239bfce461eSMarius Strobl 				fl->ifl_buf_tag = NULL;
22404c7070dbSScott Long 			}
2241e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2242e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2243fbec776dSAndrew Gallatin 			free(fl->ifl_sds.ifsd_ba, M_IFLIB);
2244e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2245e035717eSSean Bruno 			fl->ifl_sds.ifsd_m = NULL;
2246e035717eSSean Bruno 			fl->ifl_sds.ifsd_cl = NULL;
2247fbec776dSAndrew Gallatin 			fl->ifl_sds.ifsd_ba = NULL;
2248e035717eSSean Bruno 			fl->ifl_sds.ifsd_map = NULL;
22494c7070dbSScott Long 		}
22504c7070dbSScott Long 		free(rxq->ifr_fl, M_IFLIB);
22514c7070dbSScott Long 		rxq->ifr_fl = NULL;
2252244e7cffSEric Joyner 		free(rxq->ifr_ifdi, M_IFLIB);
2253244e7cffSEric Joyner 		rxq->ifr_ifdi = NULL;
22541722eeacSMarius Strobl 		rxq->ifr_cq_cidx = 0;
22554c7070dbSScott Long 	}
22564c7070dbSScott Long }
22574c7070dbSScott Long 
22584c7070dbSScott Long /*
22591722eeacSMarius Strobl  * Timer routine
22604c7070dbSScott Long  */
22614c7070dbSScott Long static void
22624c7070dbSScott Long iflib_timer(void *arg)
22634c7070dbSScott Long {
2264ab2e3f79SStephen Hurd 	iflib_txq_t txq = arg;
22654c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
2266ab2e3f79SStephen Hurd 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2267dd7fbcf1SStephen Hurd 	uint64_t this_tick = ticks;
2268dd7fbcf1SStephen Hurd 	uint32_t reset_on = hz / 2;
22694c7070dbSScott Long 
22704c7070dbSScott Long 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
22714c7070dbSScott Long 		return;
22721722eeacSMarius Strobl 
22734c7070dbSScott Long 	/*
22744c7070dbSScott Long 	** Check on the state of the TX queue(s); this
22754c7070dbSScott Long 	** can be done without the lock because it's RO
22764c7070dbSScott Long 	** and the HUNG state will be static if set.
22774c7070dbSScott Long 	*/
2278dd7fbcf1SStephen Hurd 	if (this_tick - txq->ift_last_timer_tick >= hz / 2) {
2279dd7fbcf1SStephen Hurd 		txq->ift_last_timer_tick = this_tick;
2280ab2e3f79SStephen Hurd 		IFDI_TIMER(ctx, txq->ift_id);
2281ab2e3f79SStephen Hurd 		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2282ab2e3f79SStephen Hurd 		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2283ab2e3f79SStephen Hurd 		     (sctx->isc_pause_frames == 0)))
2284ab2e3f79SStephen Hurd 			goto hung;
2285a9693502SSean Bruno 
2286f6afed72SEric Joyner 		if (txq->ift_qstatus != IFLIB_QUEUE_IDLE &&
2287f6afed72SEric Joyner 		    ifmp_ring_is_stalled(txq->ift_br)) {
2288f6afed72SEric Joyner 			KASSERT(ctx->ifc_link_state == LINK_STATE_UP, ("queue can't be marked as hung if interface is down"));
2289ab2e3f79SStephen Hurd 			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2290f6afed72SEric Joyner 		}
2291ab2e3f79SStephen Hurd 		txq->ift_cleaned_prev = txq->ift_cleaned;
2292dd7fbcf1SStephen Hurd 	}
2293dd7fbcf1SStephen Hurd #ifdef DEV_NETMAP
2294dd7fbcf1SStephen Hurd 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
229595dcf343SMarius Strobl 		iflib_netmap_timer_adjust(ctx, txq, &reset_on);
2296dd7fbcf1SStephen Hurd #endif
2297ab2e3f79SStephen Hurd 	/* handle any laggards */
2298ab2e3f79SStephen Hurd 	if (txq->ift_db_pending)
2299ab2e3f79SStephen Hurd 		GROUPTASK_ENQUEUE(&txq->ift_task);
2300a9693502SSean Bruno 
2301ab2e3f79SStephen Hurd 	sctx->isc_pause_frames = 0;
2302d300df01SStephen Hurd 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2303dd7fbcf1SStephen Hurd 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
2304ab2e3f79SStephen Hurd 	return;
23051722eeacSMarius Strobl 
2306ab2e3f79SStephen Hurd  hung:
23071722eeacSMarius Strobl 	device_printf(ctx->ifc_dev,
23081722eeacSMarius Strobl 	    "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n",
2309ab2e3f79SStephen Hurd 	    txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
23107b610b60SSean Bruno 	STATE_LOCK(ctx);
23117b610b60SSean Bruno 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
23127b610b60SSean Bruno 	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
2313940f62d6SEric Joyner 	iflib_admin_intr_deferred(ctx);
231446fa0c25SEric Joyner 	STATE_UNLOCK(ctx);
23154c7070dbSScott Long }
23164c7070dbSScott Long 
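/*
 * Round a requested receive-buffer size up to one of the cluster sizes
 * iflib actually allocates from: MCLBYTES for anything that fits,
 * MJUMPAGESIZE otherwise.
 */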
2317b3813609SPatrick Kelsey static uint16_t
2318b3813609SPatrick Kelsey iflib_get_mbuf_size_for(unsigned int size)
2319b3813609SPatrick Kelsey {
2320b3813609SPatrick Kelsey 
2321b3813609SPatrick Kelsey 	if (size <= MCLBYTES)
2322b3813609SPatrick Kelsey 		return (MCLBYTES);
2323b3813609SPatrick Kelsey 	else
2324b3813609SPatrick Kelsey 		return (MJUMPAGESIZE);
2325b3813609SPatrick Kelsey }
2326b3813609SPatrick Kelsey 
23274c7070dbSScott Long static void
23281b9d9394SEric Joyner iflib_calc_rx_mbuf_sz(if_ctx_t ctx)
23291b9d9394SEric Joyner {
23301b9d9394SEric Joyner 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
23311b9d9394SEric Joyner 
23321b9d9394SEric Joyner 	/*
23331b9d9394SEric Joyner 	 * XXX don't set the max_frame_size to larger
23341b9d9394SEric Joyner 	 * than the hardware can handle
23351b9d9394SEric Joyner 	 */
2336b3813609SPatrick Kelsey 	ctx->ifc_rx_mbuf_sz =
2337b3813609SPatrick Kelsey 	    iflib_get_mbuf_size_for(sctx->isc_max_frame_size);
23381b9d9394SEric Joyner }
23391b9d9394SEric Joyner 
23401b9d9394SEric Joyner uint32_t
23411b9d9394SEric Joyner iflib_get_rx_mbuf_sz(if_ctx_t ctx)
23421b9d9394SEric Joyner {
23431722eeacSMarius Strobl 
23441b9d9394SEric Joyner 	return (ctx->ifc_rx_mbuf_sz);
23451b9d9394SEric Joyner }
23461b9d9394SEric Joyner 
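/*
 * Bring the interface up with the context lock held: program the hardware
 * checksum/TSO assist flags, stop the per-queue timers, compute the RX mbuf
 * size, call the driver's IFDI_INIT method, set up the free lists (or the
 * netmap rings), and finally mark the interface running, re-enable
 * interrupts, and restart the queue timers.
 */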
23471b9d9394SEric Joyner static void
23484c7070dbSScott Long iflib_init_locked(if_ctx_t ctx)
23494c7070dbSScott Long {
23504c7070dbSScott Long 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
23511248952aSSean Bruno 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
23524c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
23534c7070dbSScott Long 	iflib_fl_t fl;
23544c7070dbSScott Long 	iflib_txq_t txq;
23554c7070dbSScott Long 	iflib_rxq_t rxq;
2356ab2e3f79SStephen Hurd 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
23574c7070dbSScott Long 
23584c7070dbSScott Long 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
23594c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
23604c7070dbSScott Long 
23611248952aSSean Bruno 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
23621248952aSSean Bruno 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
23634c7070dbSScott Long 	/* Set hardware offload abilities */
23644c7070dbSScott Long 	if_clearhwassist(ifp);
23654c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
23661248952aSSean Bruno 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
23674c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
23681248952aSSean Bruno 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
23694c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TSO4)
23704c7070dbSScott Long 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
23714c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TSO6)
23724c7070dbSScott Long 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
23734c7070dbSScott Long 
23744c7070dbSScott Long 	for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
23754c7070dbSScott Long 		CALLOUT_LOCK(txq);
23764c7070dbSScott Long 		callout_stop(&txq->ift_timer);
23774c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
23784c7070dbSScott Long 		iflib_netmap_txq_init(ctx, txq);
23794c7070dbSScott Long 	}
23801b9d9394SEric Joyner 
23811b9d9394SEric Joyner 	/*
23821b9d9394SEric Joyner 	 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so
23831b9d9394SEric Joyner 	 * that drivers can use the value when setting up the hardware receive
23841b9d9394SEric Joyner 	 * buffers.
23851b9d9394SEric Joyner 	 */
23861b9d9394SEric Joyner 	iflib_calc_rx_mbuf_sz(ctx);
23871b9d9394SEric Joyner 
238823ac9029SStephen Hurd #ifdef INVARIANTS
238923ac9029SStephen Hurd 	i = if_getdrvflags(ifp);
239023ac9029SStephen Hurd #endif
23914c7070dbSScott Long 	IFDI_INIT(ctx);
239223ac9029SStephen Hurd 	MPASS(if_getdrvflags(ifp) == i);
23934c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
239495246abbSSean Bruno 		/* XXX this should really be done on a per-queue basis */
2395d0d0ad0aSStephen Hurd 		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
2396d0d0ad0aSStephen Hurd 			MPASS(rxq->ifr_id == i);
2397d0d0ad0aSStephen Hurd 			iflib_netmap_rxq_init(ctx, rxq);
239895246abbSSean Bruno 			continue;
2399d0d0ad0aSStephen Hurd 		}
24004c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
24014c7070dbSScott Long 			if (iflib_fl_setup(fl)) {
24023d10e9edSMarius Strobl 				device_printf(ctx->ifc_dev,
24033d10e9edSMarius Strobl 				    "setting up free list %d failed - "
24043d10e9edSMarius Strobl 				    "check cluster settings\n", j);
24054c7070dbSScott Long 				goto done;
24064c7070dbSScott Long 			}
24074c7070dbSScott Long 		}
24084c7070dbSScott Long 	}
24094c7070dbSScott Long done:
24104c7070dbSScott Long 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
24114c7070dbSScott Long 	IFDI_INTR_ENABLE(ctx);
24124c7070dbSScott Long 	txq = ctx->ifc_txqs;
24134c7070dbSScott Long 	for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
2414ab2e3f79SStephen Hurd 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
2415ab2e3f79SStephen Hurd 			txq->ift_timer.c_cpu);
24164c7070dbSScott Long }
24174c7070dbSScott Long 
24184c7070dbSScott Long static int
24194c7070dbSScott Long iflib_media_change(if_t ifp)
24204c7070dbSScott Long {
24214c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
24224c7070dbSScott Long 	int err;
24234c7070dbSScott Long 
24244c7070dbSScott Long 	CTX_LOCK(ctx);
24254c7070dbSScott Long 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
24264c7070dbSScott Long 		iflib_init_locked(ctx);
24274c7070dbSScott Long 	CTX_UNLOCK(ctx);
24284c7070dbSScott Long 	return (err);
24294c7070dbSScott Long }
24304c7070dbSScott Long 
24314c7070dbSScott Long static void
24324c7070dbSScott Long iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
24334c7070dbSScott Long {
24344c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
24354c7070dbSScott Long 
24364c7070dbSScott Long 	CTX_LOCK(ctx);
2437ab2e3f79SStephen Hurd 	IFDI_UPDATE_ADMIN_STATUS(ctx);
24384c7070dbSScott Long 	IFDI_MEDIA_STATUS(ctx, ifmr);
24394c7070dbSScott Long 	CTX_UNLOCK(ctx);
24404c7070dbSScott Long }
24414c7070dbSScott Long 
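/*
 * Stop the interface: mark it no longer running, disable interrupts, call
 * the driver's IFDI_STOP method, then drain every TX queue (purging the
 * mp_ring, freeing buffers, and resetting counters) and release all RX
 * free-list buffers.
 */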
244209f6ff4fSMatt Macy void
24434c7070dbSScott Long iflib_stop(if_ctx_t ctx)
24444c7070dbSScott Long {
24454c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
24464c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
24474c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
24484d261ce2SStephen Hurd 	if_shared_ctx_t sctx = ctx->ifc_sctx;
24494c7070dbSScott Long 	iflib_dma_info_t di;
24504c7070dbSScott Long 	iflib_fl_t fl;
24514c7070dbSScott Long 	int i, j;
24524c7070dbSScott Long 
24534c7070dbSScott Long 	/* Tell the stack that the interface is no longer active */
24544c7070dbSScott Long 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
24554c7070dbSScott Long 
24564c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
2457ab2e3f79SStephen Hurd 	DELAY(1000);
2458da69b8f9SSean Bruno 	IFDI_STOP(ctx);
2459ab2e3f79SStephen Hurd 	DELAY(1000);
24604c7070dbSScott Long 
2461da69b8f9SSean Bruno 	iflib_debug_reset();
24624c7070dbSScott Long 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
24634c7070dbSScott Long 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
24644c7070dbSScott Long 		/* make sure all transmitters have completed before proceeding XXX */
24654c7070dbSScott Long 
2466226fb85dSStephen Hurd 		CALLOUT_LOCK(txq);
2467226fb85dSStephen Hurd 		callout_stop(&txq->ift_timer);
2468226fb85dSStephen Hurd 		CALLOUT_UNLOCK(txq);
2469226fb85dSStephen Hurd 
24704c7070dbSScott Long 		/* clean any enqueued buffers */
2471da69b8f9SSean Bruno 		iflib_ifmp_purge(txq);
24724c7070dbSScott Long 		/* Free any existing tx buffers. */
247323ac9029SStephen Hurd 		for (j = 0; j < txq->ift_size; j++) {
24744c7070dbSScott Long 			iflib_txsd_free(ctx, txq, j);
24754c7070dbSScott Long 		}
2476ab2e3f79SStephen Hurd 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2477ab2e3f79SStephen Hurd 		txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
24784c7070dbSScott Long 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
24794c7070dbSScott Long 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2480ab2e3f79SStephen Hurd 		txq->ift_pullups = 0;
248195246abbSSean Bruno 		ifmp_ring_reset_stats(txq->ift_br);
24824d261ce2SStephen Hurd 		for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
24834c7070dbSScott Long 			bzero((void *)di->idi_vaddr, di->idi_size);
24844c7070dbSScott Long 	}
24854c7070dbSScott Long 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
24864c7070dbSScott Long 		/* make sure all transmitters have completed before proceeding XXX */
24874c7070dbSScott Long 
24881722eeacSMarius Strobl 		rxq->ifr_cq_cidx = 0;
24894d261ce2SStephen Hurd 		for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
24904c7070dbSScott Long 			bzero((void *)di->idi_vaddr, di->idi_size);
24914c7070dbSScott Long 		/* also resets the free lists pidx/cidx */
24924c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
24934c7070dbSScott Long 			iflib_fl_bufs_free(fl);
24944c7070dbSScott Long 	}
24954c7070dbSScott Long }
24964c7070dbSScott Long 
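/*
 * Return a pointer to the cache line following the RX descriptor at cidx,
 * wrapping back to the start of the descriptor ring; used purely for
 * prefetching.
 */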
249795246abbSSean Bruno static inline caddr_t
249895246abbSSean Bruno calc_next_rxd(iflib_fl_t fl, int cidx)
249995246abbSSean Bruno {
250095246abbSSean Bruno 	qidx_t size;
250195246abbSSean Bruno 	int nrxd;
250295246abbSSean Bruno 	caddr_t start, end, cur, next;
250395246abbSSean Bruno 
250495246abbSSean Bruno 	nrxd = fl->ifl_size;
250595246abbSSean Bruno 	size = fl->ifl_rxd_size;
250695246abbSSean Bruno 	start = fl->ifl_ifdi->idi_vaddr;
250795246abbSSean Bruno 
250895246abbSSean Bruno 	if (__predict_false(size == 0))
250995246abbSSean Bruno 		return (start);
251095246abbSSean Bruno 	cur = start + size*cidx;
251195246abbSSean Bruno 	end = start + size*nrxd;
251295246abbSSean Bruno 	next = CACHE_PTR_NEXT(cur);
251395246abbSSean Bruno 	return (next < end ? next : start);
251495246abbSSean Bruno }
251595246abbSSean Bruno 
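/*
 * Prefetch the software-descriptor entries (mbuf and cluster pointers) and
 * the hardware descriptor for the next several free-list entries past cidx.
 */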
2516e035717eSSean Bruno static inline void
2517e035717eSSean Bruno prefetch_pkts(iflib_fl_t fl, int cidx)
2518e035717eSSean Bruno {
2519e035717eSSean Bruno 	int nextptr;
2520e035717eSSean Bruno 	int nrxd = fl->ifl_size;
252195246abbSSean Bruno 	caddr_t next_rxd;
252295246abbSSean Bruno 
2523e035717eSSean Bruno 
2524e035717eSSean Bruno 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2525e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2526e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
252795246abbSSean Bruno 	next_rxd = calc_next_rxd(fl, cidx);
252895246abbSSean Bruno 	prefetch(next_rxd);
2529e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2530e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2531e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2532e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2533e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2534e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2535e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2536e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
2537e035717eSSean Bruno }
2538e035717eSSean Bruno 
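/*
 * Map a received fragment back to its software descriptor: sync and
 * (optionally) unload the DMA map, run any pfil input hooks on the buffer,
 * advance the free list's consumer index, and return the associated mbuf,
 * or NULL if the packet filter consumed or dropped the packet.
 */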
25396d49b41eSAndrew Gallatin static struct mbuf *
25406d49b41eSAndrew Gallatin rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd,
25416d49b41eSAndrew Gallatin     int *pf_rv, if_rxd_info_t ri)
25424c7070dbSScott Long {
2543e035717eSSean Bruno 	bus_dmamap_t map;
25444c7070dbSScott Long 	iflib_fl_t fl;
25456d49b41eSAndrew Gallatin 	caddr_t payload;
25466d49b41eSAndrew Gallatin 	struct mbuf *m;
25476d49b41eSAndrew Gallatin 	int flid, cidx, len, next;
25484c7070dbSScott Long 
254995246abbSSean Bruno 	map = NULL;
25504c7070dbSScott Long 	flid = irf->irf_flid;
25514c7070dbSScott Long 	cidx = irf->irf_idx;
25524c7070dbSScott Long 	fl = &rxq->ifr_fl[flid];
255395246abbSSean Bruno 	sd->ifsd_fl = fl;
25546d49b41eSAndrew Gallatin 	m = fl->ifl_sds.ifsd_m[cidx];
255595246abbSSean Bruno 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
25564c7070dbSScott Long 	fl->ifl_credits--;
25574c7070dbSScott Long #if MEMORY_LOGGING
25584c7070dbSScott Long 	fl->ifl_m_dequeued++;
25594c7070dbSScott Long #endif
256095246abbSSean Bruno 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2561e035717eSSean Bruno 		prefetch_pkts(fl, cidx);
2562e035717eSSean Bruno 	next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2563e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_map[next]);
2564e035717eSSean Bruno 	map = fl->ifl_sds.ifsd_map[cidx];
25654c7070dbSScott Long 
2566bfce461eSMarius Strobl 	bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
25676d49b41eSAndrew Gallatin 
25684f2beb72SPatrick Kelsey 	if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL &&
25694f2beb72SPatrick Kelsey 	    irf->irf_len != 0) {
25706d49b41eSAndrew Gallatin 		payload  = *sd->ifsd_cl;
25716d49b41eSAndrew Gallatin 		payload +=  ri->iri_pad;
25726d49b41eSAndrew Gallatin 		len = ri->iri_len - ri->iri_pad;
25736d49b41eSAndrew Gallatin 		*pf_rv = pfil_run_hooks(rxq->pfil, payload, ri->iri_ifp,
25746d49b41eSAndrew Gallatin 		    len | PFIL_MEMPTR | PFIL_IN, NULL);
25756d49b41eSAndrew Gallatin 		switch (*pf_rv) {
25766d49b41eSAndrew Gallatin 		case PFIL_DROPPED:
25776d49b41eSAndrew Gallatin 		case PFIL_CONSUMED:
25786d49b41eSAndrew Gallatin 			/*
25796d49b41eSAndrew Gallatin 			 * The filter ate it.  Everything is recycled.
25806d49b41eSAndrew Gallatin 			 */
25816d49b41eSAndrew Gallatin 			m = NULL;
25826d49b41eSAndrew Gallatin 			unload = 0;
25836d49b41eSAndrew Gallatin 			break;
25846d49b41eSAndrew Gallatin 		case PFIL_REALLOCED:
25856d49b41eSAndrew Gallatin 			/*
25866d49b41eSAndrew Gallatin 			 * The filter copied it.  Everything is recycled.
25876d49b41eSAndrew Gallatin 			 */
25886d49b41eSAndrew Gallatin 			m = pfil_mem2mbuf(payload);
25896d49b41eSAndrew Gallatin 			unload = 0;
25906d49b41eSAndrew Gallatin 			break;
25916d49b41eSAndrew Gallatin 		case PFIL_PASS:
25926d49b41eSAndrew Gallatin 			/*
25936d49b41eSAndrew Gallatin 			 * Filter said it was OK, so receive like
25946d49b41eSAndrew Gallatin 			 * normal
25956d49b41eSAndrew Gallatin 			 */
25966d49b41eSAndrew Gallatin 			fl->ifl_sds.ifsd_m[cidx] = NULL;
25976d49b41eSAndrew Gallatin 			break;
25986d49b41eSAndrew Gallatin 		default:
25996d49b41eSAndrew Gallatin 			MPASS(0);
26006d49b41eSAndrew Gallatin 		}
26016d49b41eSAndrew Gallatin 	} else {
26026d49b41eSAndrew Gallatin 		fl->ifl_sds.ifsd_m[cidx] = NULL;
26036d49b41eSAndrew Gallatin 		*pf_rv = PFIL_PASS;
26046d49b41eSAndrew Gallatin 	}
26056d49b41eSAndrew Gallatin 
26064f2beb72SPatrick Kelsey 	if (unload && irf->irf_len != 0)
2607bfce461eSMarius Strobl 		bus_dmamap_unload(fl->ifl_buf_tag, map);
260895246abbSSean Bruno 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
260995246abbSSean Bruno 	if (__predict_false(fl->ifl_cidx == 0))
26104c7070dbSScott Long 		fl->ifl_gen = 0;
261187890dbaSSean Bruno 	bit_clear(fl->ifl_rx_bitmap, cidx);
26126d49b41eSAndrew Gallatin 	return (m);
26134c7070dbSScott Long }
26144c7070dbSScott Long 
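/*
 * Walk the fragment list of a multi-descriptor packet and link the
 * fragments into a single mbuf chain, skipping zero-length fragments and
 * anything the packet filter consumed or dropped.
 */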
26154c7070dbSScott Long static struct mbuf *
26166d49b41eSAndrew Gallatin assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv)
26174c7070dbSScott Long {
261895246abbSSean Bruno 	struct mbuf *m, *mh, *mt;
261995246abbSSean Bruno 	caddr_t cl;
26206d49b41eSAndrew Gallatin 	int  *pf_rv_ptr, flags, i, padlen;
26216d49b41eSAndrew Gallatin 	bool consumed;
26224c7070dbSScott Long 
26234c7070dbSScott Long 	i = 0;
262423ac9029SStephen Hurd 	mh = NULL;
26256d49b41eSAndrew Gallatin 	consumed = false;
26266d49b41eSAndrew Gallatin 	*pf_rv = PFIL_PASS;
26276d49b41eSAndrew Gallatin 	pf_rv_ptr = pf_rv;
26284c7070dbSScott Long 	do {
26296d49b41eSAndrew Gallatin 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd,
26306d49b41eSAndrew Gallatin 		    pf_rv_ptr, ri);
26314c7070dbSScott Long 
263295246abbSSean Bruno 		MPASS(*sd->ifsd_cl != NULL);
263323ac9029SStephen Hurd 
26346d49b41eSAndrew Gallatin 		/*
26356d49b41eSAndrew Gallatin 		 * Exclude zero-length frags & frags from
26366d49b41eSAndrew Gallatin 		 * packets the filter has consumed or dropped
26376d49b41eSAndrew Gallatin 		 */
26386d49b41eSAndrew Gallatin 		if (ri->iri_frags[i].irf_len == 0 || consumed ||
26396d49b41eSAndrew Gallatin 		    *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) {
26406d49b41eSAndrew Gallatin 			if (mh == NULL) {
26416d49b41eSAndrew Gallatin 				/* everything saved here */
26426d49b41eSAndrew Gallatin 				consumed = true;
26436d49b41eSAndrew Gallatin 				pf_rv_ptr = NULL;
264423ac9029SStephen Hurd 				continue;
264523ac9029SStephen Hurd 			}
26466d49b41eSAndrew Gallatin 			/* XXX we can save the cluster here, but not the mbuf */
26476d49b41eSAndrew Gallatin 			m_init(m, M_NOWAIT, MT_DATA, 0);
26486d49b41eSAndrew Gallatin 			m_free(m);
26496d49b41eSAndrew Gallatin 			continue;
26506d49b41eSAndrew Gallatin 		}
265123ac9029SStephen Hurd 		if (mh == NULL) {
26524c7070dbSScott Long 			flags = M_PKTHDR|M_EXT;
26534c7070dbSScott Long 			mh = mt = m;
26544c7070dbSScott Long 			padlen = ri->iri_pad;
26554c7070dbSScott Long 		} else {
26564c7070dbSScott Long 			flags = M_EXT;
26574c7070dbSScott Long 			mt->m_next = m;
26584c7070dbSScott Long 			mt = m;
26594c7070dbSScott Long 			/* assuming padding is only on the first fragment */
26604c7070dbSScott Long 			padlen = 0;
26614c7070dbSScott Long 		}
266295246abbSSean Bruno 		cl = *sd->ifsd_cl;
266395246abbSSean Bruno 		*sd->ifsd_cl = NULL;
26644c7070dbSScott Long 
26654c7070dbSScott Long 		/* Can these two be made one ? */
26664c7070dbSScott Long 		m_init(m, M_NOWAIT, MT_DATA, flags);
266795246abbSSean Bruno 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
26684c7070dbSScott Long 		/*
26694c7070dbSScott Long 		 * These must follow m_init and m_cljset
26704c7070dbSScott Long 		 */
26714c7070dbSScott Long 		m->m_data += padlen;
26724c7070dbSScott Long 		ri->iri_len -= padlen;
267323ac9029SStephen Hurd 		m->m_len = ri->iri_frags[i].irf_len;
26744c7070dbSScott Long 	} while (++i < ri->iri_nfrags);
26754c7070dbSScott Long 
26764c7070dbSScott Long 	return (mh);
26774c7070dbSScott Long }
26784c7070dbSScott Long 
26794c7070dbSScott Long /*
26804c7070dbSScott Long  * Process one software descriptor
26814c7070dbSScott Long  */
26824c7070dbSScott Long static struct mbuf *
26834c7070dbSScott Long iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
26844c7070dbSScott Long {
268595246abbSSean Bruno 	struct if_rxsd sd;
26864c7070dbSScott Long 	struct mbuf *m;
26876d49b41eSAndrew Gallatin 	int pf_rv;
26884c7070dbSScott Long 
26894c7070dbSScott Long 	/* should I merge this back in now that the two paths are basically duplicated? */
269023ac9029SStephen Hurd 	if (ri->iri_nfrags == 1 &&
26914f2beb72SPatrick Kelsey 	    ri->iri_frags[0].irf_len != 0 &&
269218628b74SMark Johnston 	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
26936d49b41eSAndrew Gallatin 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd,
26946d49b41eSAndrew Gallatin 		    &pf_rv, ri);
26956d49b41eSAndrew Gallatin 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
26966d49b41eSAndrew Gallatin 			return (m);
26976d49b41eSAndrew Gallatin 		if (pf_rv == PFIL_PASS) {
26984c7070dbSScott Long 			m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
269995246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
270095246abbSSean Bruno 			if (!IP_ALIGNED(m))
270195246abbSSean Bruno 				m->m_data += 2;
270295246abbSSean Bruno #endif
270395246abbSSean Bruno 			memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
270423ac9029SStephen Hurd 			m->m_len = ri->iri_frags[0].irf_len;
27056d49b41eSAndrew Gallatin 		}
27064c7070dbSScott Long 	} else {
27076d49b41eSAndrew Gallatin 		m = assemble_segments(rxq, ri, &sd, &pf_rv);
27084f2beb72SPatrick Kelsey 		if (m == NULL)
27094f2beb72SPatrick Kelsey 			return (NULL);
27106d49b41eSAndrew Gallatin 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
27116d49b41eSAndrew Gallatin 			return (m);
27124c7070dbSScott Long 	}
27134c7070dbSScott Long 	m->m_pkthdr.len = ri->iri_len;
27144c7070dbSScott Long 	m->m_pkthdr.rcvif = ri->iri_ifp;
27154c7070dbSScott Long 	m->m_flags |= ri->iri_flags;
27164c7070dbSScott Long 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
27174c7070dbSScott Long 	m->m_pkthdr.flowid = ri->iri_flowid;
27184c7070dbSScott Long 	M_HASHTYPE_SET(m, ri->iri_rsstype);
27194c7070dbSScott Long 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
27204c7070dbSScott Long 	m->m_pkthdr.csum_data = ri->iri_csum_data;
27214c7070dbSScott Long 	return (m);
27224c7070dbSScott Long }
27234c7070dbSScott Long 
272435e4e998SStephen Hurd #if defined(INET6) || defined(INET)
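/*
 * Record whether IPv4/IPv6 forwarding is enabled in the interface's vnet;
 * the RX path uses this to decide whether received packets may be LROed.
 */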
2725fe1bcadaSStephen Hurd static void
2726fe1bcadaSStephen Hurd iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
2727fe1bcadaSStephen Hurd {
2728fe1bcadaSStephen Hurd 	CURVNET_SET(lc->ifp->if_vnet);
2729fe1bcadaSStephen Hurd #if defined(INET6)
2730188adcb7SMarko Zec 	*v6 = V_ip6_forwarding;
2731fe1bcadaSStephen Hurd #endif
2732fe1bcadaSStephen Hurd #if defined(INET)
2733188adcb7SMarko Zec 	*v4 = V_ipforwarding;
2734fe1bcadaSStephen Hurd #endif
2735fe1bcadaSStephen Hurd 	CURVNET_RESTORE();
2736fe1bcadaSStephen Hurd }
2737fe1bcadaSStephen Hurd 
273835e4e998SStephen Hurd /*
273935e4e998SStephen Hurd  * Returns true if it's possible this packet could be LROed.
274035e4e998SStephen Hurd  * If it returns false, it is guaranteed that tcp_lro_rx()
274135e4e998SStephen Hurd  * would not return zero.
274235e4e998SStephen Hurd  */
274335e4e998SStephen Hurd static bool
2744fe1bcadaSStephen Hurd iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
274535e4e998SStephen Hurd {
274635e4e998SStephen Hurd 	struct ether_header *eh;
274735e4e998SStephen Hurd 
274835e4e998SStephen Hurd 	eh = mtod(m, struct ether_header *);
27496aee0bfaSMarko Zec 	switch (eh->ether_type) {
2750abec4724SSean Bruno #if defined(INET6)
27516aee0bfaSMarko Zec 		case htons(ETHERTYPE_IPV6):
27526aee0bfaSMarko Zec 			return (!v6_forwarding);
2753abec4724SSean Bruno #endif
2754abec4724SSean Bruno #if defined (INET)
27556aee0bfaSMarko Zec 		case htons(ETHERTYPE_IP):
27566aee0bfaSMarko Zec 			return (!v4_forwarding);
2757abec4724SSean Bruno #endif
275835e4e998SStephen Hurd 	}
275935e4e998SStephen Hurd 
276035e4e998SStephen Hurd 	return (false);
276135e4e998SStephen Hurd }
2762fe1bcadaSStephen Hurd #else
2763fe1bcadaSStephen Hurd static void
2764fe1bcadaSStephen Hurd iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
2765fe1bcadaSStephen Hurd {
2766fe1bcadaSStephen Hurd }
276735e4e998SStephen Hurd #endif
276835e4e998SStephen Hurd 
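/*
 * Deferred context that simply re-enqueues the RX group task so that
 * receive processing is retried later.
 */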
2769fb1a29b4SHans Petter Selasky static void
2770fb1a29b4SHans Petter Selasky _task_fn_rx_watchdog(void *context)
2771fb1a29b4SHans Petter Selasky {
2772fb1a29b4SHans Petter Selasky 	iflib_rxq_t rxq = context;
2773fb1a29b4SHans Petter Selasky 
2774fb1a29b4SHans Petter Selasky 	GROUPTASK_ENQUEUE(&rxq->ifr_task);
2775fb1a29b4SHans Petter Selasky }
2776fb1a29b4SHans Petter Selasky 
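/*
 * Process up to 'budget' received packets on one RX queue: pull completed
 * descriptors via isc_rxd_pkt_get, build mbuf chains, optionally run them
 * through pfil and LRO, hand them to if_input, and refill the free lists.
 * Returns IFLIB_RXEOF_* flags indicating whether more work or a refill
 * retry is still pending.
 */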
2777fb1a29b4SHans Petter Selasky static uint8_t
277895246abbSSean Bruno iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
27794c7070dbSScott Long {
27801722eeacSMarius Strobl 	if_t ifp;
27814c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
27824c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
278323ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
27844c7070dbSScott Long 	int avail, i;
278595246abbSSean Bruno 	qidx_t *cidxp;
27864c7070dbSScott Long 	struct if_rxd_info ri;
27874c7070dbSScott Long 	int err, budget_left, rx_bytes, rx_pkts;
27884c7070dbSScott Long 	iflib_fl_t fl;
27894c7070dbSScott Long 	int lro_enabled;
2790f6cb0deaSMatt Macy 	bool v4_forwarding, v6_forwarding, lro_possible;
2791fb1a29b4SHans Petter Selasky 	uint8_t retval = 0;
279295246abbSSean Bruno 
27934c7070dbSScott Long 	/*
27944c7070dbSScott Long 	 * XXX early demux data packets so that if_input processing only handles
27954c7070dbSScott Long 	 * acks in interrupt context
27964c7070dbSScott Long 	 */
279720f63282SStephen Hurd 	struct mbuf *m, *mh, *mt, *mf;
27984c7070dbSScott Long 
27990b8df657SGleb Smirnoff 	NET_EPOCH_ASSERT();
28000b8df657SGleb Smirnoff 
2801f6cb0deaSMatt Macy 	lro_possible = v4_forwarding = v6_forwarding = false;
280295246abbSSean Bruno 	ifp = ctx->ifc_ifp;
28034c7070dbSScott Long 	mh = mt = NULL;
28044c7070dbSScott Long 	MPASS(budget > 0);
28054c7070dbSScott Long 	rx_pkts	= rx_bytes = 0;
280623ac9029SStephen Hurd 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
28074c7070dbSScott Long 		cidxp = &rxq->ifr_cq_cidx;
28084c7070dbSScott Long 	else
28094c7070dbSScott Long 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
281023ac9029SStephen Hurd 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
28114c7070dbSScott Long 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
28123caff188SPatrick Kelsey 			retval |= __iflib_fl_refill_all(ctx, fl);
28134c7070dbSScott Long 		DBG_COUNTER_INC(rx_unavail);
2814fb1a29b4SHans Petter Selasky 		return (retval);
28154c7070dbSScott Long 	}
28164c7070dbSScott Long 
28176d49b41eSAndrew Gallatin 	/* pfil needs the vnet to be set */
28186d49b41eSAndrew Gallatin 	CURVNET_SET_QUIET(ifp->if_vnet);
28198b8d9093SMarius Strobl 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
28204c7070dbSScott Long 		if (__predict_false(!CTX_ACTIVE(ctx))) {
28214c7070dbSScott Long 			DBG_COUNTER_INC(rx_ctx_inactive);
28224c7070dbSScott Long 			break;
28234c7070dbSScott Long 		}
28244c7070dbSScott Long 		/*
28254c7070dbSScott Long 		 * Reset client set fields to their default values
28264c7070dbSScott Long 		 */
282795246abbSSean Bruno 		rxd_info_zero(&ri);
28284c7070dbSScott Long 		ri.iri_qsidx = rxq->ifr_id;
28294c7070dbSScott Long 		ri.iri_cidx = *cidxp;
283095246abbSSean Bruno 		ri.iri_ifp = ifp;
28314c7070dbSScott Long 		ri.iri_frags = rxq->ifr_frags;
28324c7070dbSScott Long 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
28334c7070dbSScott Long 
283495246abbSSean Bruno 		if (err)
283595246abbSSean Bruno 			goto err;
28366d49b41eSAndrew Gallatin 		rx_pkts += 1;
28376d49b41eSAndrew Gallatin 		rx_bytes += ri.iri_len;
283823ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
283923ac9029SStephen Hurd 			*cidxp = ri.iri_cidx;
284023ac9029SStephen Hurd 			/* Update our consumer index */
284195246abbSSean Bruno 			/* XXX NB: shurd - check if this is still safe */
28421722eeacSMarius Strobl 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0])
284323ac9029SStephen Hurd 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
28444c7070dbSScott Long 			/* was this only a completion queue message? */
28454c7070dbSScott Long 			if (__predict_false(ri.iri_nfrags == 0))
28464c7070dbSScott Long 				continue;
28474c7070dbSScott Long 		}
28484c7070dbSScott Long 		MPASS(ri.iri_nfrags != 0);
28494c7070dbSScott Long 		MPASS(ri.iri_len != 0);
28504c7070dbSScott Long 
28514c7070dbSScott Long 		/* will advance the cidx on the corresponding free lists */
28524c7070dbSScott Long 		m = iflib_rxd_pkt_get(rxq, &ri);
28538b8d9093SMarius Strobl 		avail--;
28548b8d9093SMarius Strobl 		budget_left--;
28554c7070dbSScott Long 		if (avail == 0 && budget_left)
285623ac9029SStephen Hurd 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
28574c7070dbSScott Long 
28586d49b41eSAndrew Gallatin 		if (__predict_false(m == NULL))
28594c7070dbSScott Long 			continue;
28606d49b41eSAndrew Gallatin 
28614c7070dbSScott Long 		/* imm_pkt: -- cxgb */
28624c7070dbSScott Long 		if (mh == NULL)
28634c7070dbSScott Long 			mh = mt = m;
28644c7070dbSScott Long 		else {
28654c7070dbSScott Long 			mt->m_nextpkt = m;
28664c7070dbSScott Long 			mt = m;
28674c7070dbSScott Long 		}
28684c7070dbSScott Long 	}
28696d49b41eSAndrew Gallatin 	CURVNET_RESTORE();
28704c7070dbSScott Long 	/* make sure that we can refill faster than drain */
28714c7070dbSScott Long 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
28723caff188SPatrick Kelsey 		retval |= __iflib_fl_refill_all(ctx, fl);
28734c7070dbSScott Long 
28744c7070dbSScott Long 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2875fe1bcadaSStephen Hurd 	if (lro_enabled)
2876fe1bcadaSStephen Hurd 		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
287720f63282SStephen Hurd 	mt = mf = NULL;
28784c7070dbSScott Long 	while (mh != NULL) {
28794c7070dbSScott Long 		m = mh;
28804c7070dbSScott Long 		mh = mh->m_nextpkt;
28814c7070dbSScott Long 		m->m_nextpkt = NULL;
288295246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
288395246abbSSean Bruno 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
288495246abbSSean Bruno 			continue;
288595246abbSSean Bruno #endif
28864c7070dbSScott Long 		rx_bytes += m->m_pkthdr.len;
28874c7070dbSScott Long 		rx_pkts++;
2888aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
288935e4e998SStephen Hurd 		if (lro_enabled) {
289035e4e998SStephen Hurd 			if (!lro_possible) {
2891fe1bcadaSStephen Hurd 				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
289235e4e998SStephen Hurd 				if (lro_possible && mf != NULL) {
289335e4e998SStephen Hurd 					ifp->if_input(ifp, mf);
289435e4e998SStephen Hurd 					DBG_COUNTER_INC(rx_if_input);
289535e4e998SStephen Hurd 					mt = mf = NULL;
289635e4e998SStephen Hurd 				}
289735e4e998SStephen Hurd 			}
289825ac1dd5SStephen Hurd 			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
289925ac1dd5SStephen Hurd 			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
290035e4e998SStephen Hurd 				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
29014c7070dbSScott Long 					continue;
290220f63282SStephen Hurd 			}
290325ac1dd5SStephen Hurd 		}
2904aaeb188aSBjoern A. Zeeb #endif
290535e4e998SStephen Hurd 		if (lro_possible) {
290635e4e998SStephen Hurd 			ifp->if_input(ifp, m);
290735e4e998SStephen Hurd 			DBG_COUNTER_INC(rx_if_input);
290835e4e998SStephen Hurd 			continue;
290935e4e998SStephen Hurd 		}
291035e4e998SStephen Hurd 
291135e4e998SStephen Hurd 		if (mf == NULL)
291235e4e998SStephen Hurd 			mf = m;
291320f63282SStephen Hurd 		if (mt != NULL)
291420f63282SStephen Hurd 			mt->m_nextpkt = m;
291520f63282SStephen Hurd 		mt = m;
291620f63282SStephen Hurd 	}
291720f63282SStephen Hurd 	if (mf != NULL) {
291820f63282SStephen Hurd 		ifp->if_input(ifp, mf);
29194c7070dbSScott Long 		DBG_COUNTER_INC(rx_if_input);
29204c7070dbSScott Long 	}
292123ac9029SStephen Hurd 
29224c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
29234c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
29244c7070dbSScott Long 
29254c7070dbSScott Long 	/*
29264c7070dbSScott Long 	 * Flush any outstanding LRO work
29274c7070dbSScott Long 	 */
2928aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
292923ac9029SStephen Hurd 	tcp_lro_flush_all(&rxq->ifr_lc);
2930aaeb188aSBjoern A. Zeeb #endif
2931fb1a29b4SHans Petter Selasky 	if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0)
2932fb1a29b4SHans Petter Selasky 		retval |= IFLIB_RXEOF_MORE;
2933fb1a29b4SHans Petter Selasky 	return (retval);
293495246abbSSean Bruno err:
29357b610b60SSean Bruno 	STATE_LOCK(ctx);
2936ab2e3f79SStephen Hurd 	ctx->ifc_flags |= IFC_DO_RESET;
2937940f62d6SEric Joyner 	iflib_admin_intr_deferred(ctx);
293846fa0c25SEric Joyner 	STATE_UNLOCK(ctx);
2939fb1a29b4SHans Petter Selasky 	return (0);
294095246abbSSean Bruno }
294195246abbSSean Bruno 
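/*
 * The helpers below scale how many TX descriptor updates may be batched
 * before the doorbell is rung (db) or completion status reporting is
 * requested (rs): the fuller the ring, the more updates are allowed to
 * accumulate.
 */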
294295246abbSSean Bruno #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
294395246abbSSean Bruno static inline qidx_t
294495246abbSSean Bruno txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
294595246abbSSean Bruno {
294695246abbSSean Bruno 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
294795246abbSSean Bruno 	qidx_t minthresh = txq->ift_size / 8;
294895246abbSSean Bruno 	if (in_use > 4*minthresh)
294995246abbSSean Bruno 		return (notify_count);
295095246abbSSean Bruno 	if (in_use > 2*minthresh)
295195246abbSSean Bruno 		return (notify_count >> 1);
295295246abbSSean Bruno 	if (in_use > minthresh)
295395246abbSSean Bruno 		return (notify_count >> 3);
295495246abbSSean Bruno 	return (0);
295595246abbSSean Bruno }
295695246abbSSean Bruno 
295795246abbSSean Bruno static inline qidx_t
295895246abbSSean Bruno txq_max_rs_deferred(iflib_txq_t txq)
295995246abbSSean Bruno {
296095246abbSSean Bruno 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
296195246abbSSean Bruno 	qidx_t minthresh = txq->ift_size / 8;
296295246abbSSean Bruno 	if (txq->ift_in_use > 4*minthresh)
296395246abbSSean Bruno 		return (notify_count);
296495246abbSSean Bruno 	if (txq->ift_in_use > 2*minthresh)
296595246abbSSean Bruno 		return (notify_count >> 1);
296695246abbSSean Bruno 	if (txq->ift_in_use > minthresh)
296795246abbSSean Bruno 		return (notify_count >> 2);
29682b2fc973SSean Bruno 	return (2);
29694c7070dbSScott Long }
29704c7070dbSScott Long 
29714c7070dbSScott Long #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
29724c7070dbSScott Long #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
297395246abbSSean Bruno 
297495246abbSSean Bruno #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
297595246abbSSean Bruno #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
297623ac9029SStephen Hurd #define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
29774c7070dbSScott Long 
297895246abbSSean Bruno /* forward compatibility for cxgb */
297995246abbSSean Bruno #define FIRST_QSET(ctx) 0
298095246abbSSean Bruno #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
298195246abbSSean Bruno #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
298295246abbSSean Bruno #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
298395246abbSSean Bruno #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
298495246abbSSean Bruno 
298595246abbSSean Bruno /* XXX we should be setting this to something other than zero */
298695246abbSSean Bruno #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
29877474544bSMarius Strobl #define	MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
29887474544bSMarius Strobl     (ctx)->ifc_softc_ctx.isc_tx_nsegments)
298995246abbSSean Bruno 
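/*
 * Ring the hardware doorbell (via isc_txd_flush) if explicitly requested or
 * once enough descriptor updates have been deferred; returns true if the
 * doorbell was rung.
 */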
299095246abbSSean Bruno static inline bool
299195246abbSSean Bruno iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
29924c7070dbSScott Long {
299395246abbSSean Bruno 	qidx_t dbval, max;
299495246abbSSean Bruno 	bool rang;
29954c7070dbSScott Long 
299695246abbSSean Bruno 	rang = false;
299795246abbSSean Bruno 	max = TXQ_MAX_DB_DEFERRED(txq, in_use);
299895246abbSSean Bruno 	if (ring || txq->ift_db_pending >= max) {
29994c7070dbSScott Long 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
300095dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
300195dcf343SMarius Strobl 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
30024c7070dbSScott Long 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
30034c7070dbSScott Long 		txq->ift_db_pending = txq->ift_npending = 0;
300495246abbSSean Bruno 		rang = true;
30054c7070dbSScott Long 	}
300695246abbSSean Bruno 	return (rang);
30074c7070dbSScott Long }
30084c7070dbSScott Long 
30094c7070dbSScott Long #ifdef PKT_DEBUG
30104c7070dbSScott Long static void
30114c7070dbSScott Long print_pkt(if_pkt_info_t pi)
30124c7070dbSScott Long {
30134c7070dbSScott Long 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
30144c7070dbSScott Long 	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
30154c7070dbSScott Long 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
30164c7070dbSScott Long 	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
30174c7070dbSScott Long 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
30184c7070dbSScott Long 	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
30194c7070dbSScott Long }
30204c7070dbSScott Long #endif
30214c7070dbSScott Long 
30224c7070dbSScott Long #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
3023a06424ddSEric Joyner #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
30244c7070dbSScott Long #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
3025a06424ddSEric Joyner #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
30264c7070dbSScott Long 
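/*
 * Parse the Ethernet/IP/TCP headers of an outgoing packet into the
 * if_pkt_info structure so the driver can program checksum and TSO offload;
 * may pull up or duplicate the mbuf chain as required.
 */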
30274c7070dbSScott Long static int
30284c7070dbSScott Long iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
30294c7070dbSScott Long {
3030ab2e3f79SStephen Hurd 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
30314c7070dbSScott Long 	struct ether_vlan_header *eh;
3032c9a49a4fSMarius Strobl 	struct mbuf *m;
30334c7070dbSScott Long 
30348b8d9093SMarius Strobl 	m = *mp;
3035ab2e3f79SStephen Hurd 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
3036ab2e3f79SStephen Hurd 	    M_WRITABLE(m) == 0) {
3037ab2e3f79SStephen Hurd 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
3038ab2e3f79SStephen Hurd 			return (ENOMEM);
3039ab2e3f79SStephen Hurd 		} else {
3040ab2e3f79SStephen Hurd 			m_freem(*mp);
304164e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_frees);
30428b8d9093SMarius Strobl 			*mp = m;
3043ab2e3f79SStephen Hurd 		}
3044ab2e3f79SStephen Hurd 	}
30451248952aSSean Bruno 
30464c7070dbSScott Long 	/*
30474c7070dbSScott Long 	 * Determine where frame payload starts.
30484c7070dbSScott Long 	 * Jump over vlan headers if already present,
30494c7070dbSScott Long 	 * helpful for QinQ too.
30504c7070dbSScott Long 	 */
30514c7070dbSScott Long 	if (__predict_false(m->m_len < sizeof(*eh))) {
30524c7070dbSScott Long 		txq->ift_pullups++;
30534c7070dbSScott Long 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
30544c7070dbSScott Long 			return (ENOMEM);
30554c7070dbSScott Long 	}
30564c7070dbSScott Long 	eh = mtod(m, struct ether_vlan_header *);
30574c7070dbSScott Long 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
30584c7070dbSScott Long 		pi->ipi_etype = ntohs(eh->evl_proto);
30594c7070dbSScott Long 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
30604c7070dbSScott Long 	} else {
30614c7070dbSScott Long 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
30624c7070dbSScott Long 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
30634c7070dbSScott Long 	}
30644c7070dbSScott Long 
30654c7070dbSScott Long 	switch (pi->ipi_etype) {
30664c7070dbSScott Long #ifdef INET
30674c7070dbSScott Long 	case ETHERTYPE_IP:
30684c7070dbSScott Long 	{
3069c9a49a4fSMarius Strobl 		struct mbuf *n;
30704c7070dbSScott Long 		struct ip *ip = NULL;
30714c7070dbSScott Long 		struct tcphdr *th = NULL;
30724c7070dbSScott Long 		int minthlen;
30734c7070dbSScott Long 
30744c7070dbSScott Long 		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
30754c7070dbSScott Long 		if (__predict_false(m->m_len < minthlen)) {
30764c7070dbSScott Long 			/*
30774c7070dbSScott Long 			 * if this code bloat is causing too much of a hit
30784c7070dbSScott Long 			 * move it to a separate function and mark it noinline
30794c7070dbSScott Long 			 */
30804c7070dbSScott Long 			if (m->m_len == pi->ipi_ehdrlen) {
30814c7070dbSScott Long 				n = m->m_next;
30824c7070dbSScott Long 				MPASS(n);
30834c7070dbSScott Long 				if (n->m_len >= sizeof(*ip))  {
30844c7070dbSScott Long 					ip = (struct ip *)n->m_data;
30854c7070dbSScott Long 					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
30864c7070dbSScott Long 						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
30874c7070dbSScott Long 				} else {
30884c7070dbSScott Long 					txq->ift_pullups++;
30894c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
30904c7070dbSScott Long 						return (ENOMEM);
30914c7070dbSScott Long 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
30924c7070dbSScott Long 				}
30934c7070dbSScott Long 			} else {
30944c7070dbSScott Long 				txq->ift_pullups++;
30954c7070dbSScott Long 				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
30964c7070dbSScott Long 					return (ENOMEM);
30974c7070dbSScott Long 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
30984c7070dbSScott Long 				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
30994c7070dbSScott Long 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
31004c7070dbSScott Long 			}
31014c7070dbSScott Long 		} else {
31024c7070dbSScott Long 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
31034c7070dbSScott Long 			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
31044c7070dbSScott Long 				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
31054c7070dbSScott Long 		}
31064c7070dbSScott Long 		pi->ipi_ip_hlen = ip->ip_hl << 2;
31074c7070dbSScott Long 		pi->ipi_ipproto = ip->ip_p;
31084c7070dbSScott Long 		pi->ipi_flags |= IPI_TX_IPV4;
31094c7070dbSScott Long 
3110a06424ddSEric Joyner 		/* TCP checksum offload may require TCP header length */
3111a06424ddSEric Joyner 		if (IS_TX_OFFLOAD4(pi)) {
3112a06424ddSEric Joyner 			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
31134c7070dbSScott Long 				if (__predict_false(th == NULL)) {
31144c7070dbSScott Long 					txq->ift_pullups++;
31154c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
31164c7070dbSScott Long 						return (ENOMEM);
31174c7070dbSScott Long 					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
31184c7070dbSScott Long 				}
31194c7070dbSScott Long 				pi->ipi_tcp_hflags = th->th_flags;
31204c7070dbSScott Long 				pi->ipi_tcp_hlen = th->th_off << 2;
31214c7070dbSScott Long 				pi->ipi_tcp_seq = th->th_seq;
31224c7070dbSScott Long 			}
3123a06424ddSEric Joyner 			if (IS_TSO4(pi)) {
31244c7070dbSScott Long 				if (__predict_false(ip->ip_p != IPPROTO_TCP))
31254c7070dbSScott Long 					return (ENXIO);
31268d4ceb9cSStephen Hurd 				/*
31278d4ceb9cSStephen Hurd 				 * TSO always requires hardware checksum offload.
31288d4ceb9cSStephen Hurd 				 */
31298d4ceb9cSStephen Hurd 				pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
31304c7070dbSScott Long 				th->th_sum = in_pseudo(ip->ip_src.s_addr,
31314c7070dbSScott Long 						       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
31324c7070dbSScott Long 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
31331248952aSSean Bruno 				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
31341248952aSSean Bruno 					ip->ip_sum = 0;
31351248952aSSean Bruno 					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
31361248952aSSean Bruno 				}
31374c7070dbSScott Long 			}
3138a06424ddSEric Joyner 		}
31398d4ceb9cSStephen Hurd 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
31408d4ceb9cSStephen Hurd 			ip->ip_sum = 0;
31418d4ceb9cSStephen Hurd 
31424c7070dbSScott Long 		break;
31434c7070dbSScott Long 	}
31444c7070dbSScott Long #endif
31454c7070dbSScott Long #ifdef INET6
31464c7070dbSScott Long 	case ETHERTYPE_IPV6:
31474c7070dbSScott Long 	{
31484c7070dbSScott Long 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
31494c7070dbSScott Long 		struct tcphdr *th;
31504c7070dbSScott Long 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
31514c7070dbSScott Long 
31524c7070dbSScott Long 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
315364e6fc13SStephen Hurd 			txq->ift_pullups++;
31544c7070dbSScott Long 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
31554c7070dbSScott Long 				return (ENOMEM);
31564c7070dbSScott Long 		}
31574c7070dbSScott Long 		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
31584c7070dbSScott Long 
31594c7070dbSScott Long 		/* XXX-BZ this will go badly in case of ext hdrs. */
31604c7070dbSScott Long 		pi->ipi_ipproto = ip6->ip6_nxt;
31614c7070dbSScott Long 		pi->ipi_flags |= IPI_TX_IPV6;
31624c7070dbSScott Long 
3163a06424ddSEric Joyner 		/* TCP checksum offload may require TCP header length */
3164a06424ddSEric Joyner 		if (IS_TX_OFFLOAD6(pi)) {
31654c7070dbSScott Long 			if (pi->ipi_ipproto == IPPROTO_TCP) {
31664c7070dbSScott Long 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3167a06424ddSEric Joyner 					txq->ift_pullups++;
31684c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
31694c7070dbSScott Long 						return (ENOMEM);
31704c7070dbSScott Long 				}
31714c7070dbSScott Long 				pi->ipi_tcp_hflags = th->th_flags;
31724c7070dbSScott Long 				pi->ipi_tcp_hlen = th->th_off << 2;
3173a06424ddSEric Joyner 				pi->ipi_tcp_seq = th->th_seq;
31744c7070dbSScott Long 			}
3175a06424ddSEric Joyner 			if (IS_TSO6(pi)) {
31764c7070dbSScott Long 				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
31774c7070dbSScott Long 					return (ENXIO);
31784c7070dbSScott Long 				/*
31798d4ceb9cSStephen Hurd 				 * TSO always requires hardware checksum offload.
31804c7070dbSScott Long 				 */
3181a06424ddSEric Joyner 				pi->ipi_csum_flags |= CSUM_IP6_TCP;
31824c7070dbSScott Long 				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
31834c7070dbSScott Long 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
31844c7070dbSScott Long 			}
3185a06424ddSEric Joyner 		}
31864c7070dbSScott Long 		break;
31874c7070dbSScott Long 	}
31884c7070dbSScott Long #endif
31894c7070dbSScott Long 	default:
31904c7070dbSScott Long 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
31914c7070dbSScott Long 		pi->ipi_ip_hlen = 0;
31924c7070dbSScott Long 		break;
31934c7070dbSScott Long 	}
31944c7070dbSScott Long 	*mp = m;
31951248952aSSean Bruno 
31964c7070dbSScott Long 	return (0);
31974c7070dbSScott Long }
31984c7070dbSScott Long 
31994c7070dbSScott Long /*
32004c7070dbSScott Long  * If dodgy hardware rejects the scatter gather chain we've handed it
320123ac9029SStephen Hurd  * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
320223ac9029SStephen Hurd  * m_defrag'd mbufs
32034c7070dbSScott Long  */
32044c7070dbSScott Long static __noinline struct mbuf *
320523ac9029SStephen Hurd iflib_remove_mbuf(iflib_txq_t txq)
32064c7070dbSScott Long {
3207fbec776dSAndrew Gallatin 	int ntxd, pidx;
3208fbec776dSAndrew Gallatin 	struct mbuf *m, **ifsd_m;
32094c7070dbSScott Long 
32104c7070dbSScott Long 	ifsd_m = txq->ift_sds.ifsd_m;
321123ac9029SStephen Hurd 	ntxd = txq->ift_size;
3212fbec776dSAndrew Gallatin 	pidx = txq->ift_pidx & (ntxd - 1);
3213fbec776dSAndrew Gallatin 	ifsd_m = txq->ift_sds.ifsd_m;
3214fbec776dSAndrew Gallatin 	m = ifsd_m[pidx];
32154c7070dbSScott Long 	ifsd_m[pidx] = NULL;
3216bfce461eSMarius Strobl 	bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
32178a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL)
3218bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_tso_buf_tag,
32198a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_tso_map[pidx]);
32204c7070dbSScott Long #if MEMORY_LOGGING
32214c7070dbSScott Long 	txq->ift_dequeued++;
32224c7070dbSScott Long #endif
3223fbec776dSAndrew Gallatin 	return (m);
32244c7070dbSScott Long }
32254c7070dbSScott Long 
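/*
 * TX-side analogue of calc_next_rxd(): return the cache line following the
 * descriptor at cidx in the given sub-queue, for prefetching, wrapping at
 * the end of the ring.
 */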
322695246abbSSean Bruno static inline caddr_t
322795246abbSSean Bruno calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
322895246abbSSean Bruno {
322995246abbSSean Bruno 	qidx_t size;
323095246abbSSean Bruno 	int ntxd;
323195246abbSSean Bruno 	caddr_t start, end, cur, next;
323295246abbSSean Bruno 
323395246abbSSean Bruno 	ntxd = txq->ift_size;
323495246abbSSean Bruno 	size = txq->ift_txd_size[qid];
323595246abbSSean Bruno 	start = txq->ift_ifdi[qid].idi_vaddr;
323695246abbSSean Bruno 
323795246abbSSean Bruno 	if (__predict_false(size == 0))
323895246abbSSean Bruno 		return (start);
323995246abbSSean Bruno 	cur = start + size*cidx;
324095246abbSSean Bruno 	end = start + size*ntxd;
324195246abbSSean Bruno 	next = CACHE_PTR_NEXT(cur);
324295246abbSSean Bruno 	return (next < end ? next : start);
324395246abbSSean Bruno }
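/*
 * Illustration: "cur" is the address of descriptor "cidx" and "end" is one
 * past the last descriptor; the CACHE_PTR_NEXT() target is returned unless
 * it would land at "end" or beyond, in which case the function wraps to
 * "start".  The result is only used as a prefetch() hint in iflib_encap().
 */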
324495246abbSSean Bruno 
3245d14c853bSStephen Hurd /*
3246d14c853bSStephen Hurd  * Pad an mbuf to ensure a minimum Ethernet frame size.
3247d14c853bSStephen Hurd  * min_frame_size is the frame size (less CRC) to pad the mbuf to.
3248d14c853bSStephen Hurd  */
3249d14c853bSStephen Hurd static __noinline int
3250a15fbbb8SStephen Hurd iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3251d14c853bSStephen Hurd {
3252d14c853bSStephen Hurd 	/*
3253d14c853bSStephen Hurd 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3254d14c853bSStephen Hurd 	 * an ARP message is the smallest common payload I can think of.
3255d14c853bSStephen Hurd 	 */
3256d14c853bSStephen Hurd 	static char pad[18];	/* just zeros */
3257d14c853bSStephen Hurd 	int n;
3258a15fbbb8SStephen Hurd 	struct mbuf *new_head;
3259d14c853bSStephen Hurd 
3260a15fbbb8SStephen Hurd 	if (!M_WRITABLE(*m_head)) {
3261a15fbbb8SStephen Hurd 		new_head = m_dup(*m_head, M_NOWAIT);
3262a15fbbb8SStephen Hurd 		if (new_head == NULL) {
326304993890SStephen Hurd 			m_freem(*m_head);
3264a15fbbb8SStephen Hurd 			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
326506c47d48SStephen Hurd 			DBG_COUNTER_INC(encap_pad_mbuf_fail);
326664e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_frees);
3267a15fbbb8SStephen Hurd 			return (ENOMEM);
3268a15fbbb8SStephen Hurd 		}
3269a15fbbb8SStephen Hurd 		m_freem(*m_head);
3270a15fbbb8SStephen Hurd 		*m_head = new_head;
3271a15fbbb8SStephen Hurd 	}
3272a15fbbb8SStephen Hurd 
3273a15fbbb8SStephen Hurd 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3274d14c853bSStephen Hurd 	     n > 0; n -= sizeof(pad))
3275a15fbbb8SStephen Hurd 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3276d14c853bSStephen Hurd 			break;
3277d14c853bSStephen Hurd 
3278d14c853bSStephen Hurd 	if (n > 0) {
3279a15fbbb8SStephen Hurd 		m_freem(*m_head);
3280d14c853bSStephen Hurd 		device_printf(dev, "cannot pad short frame\n");
3281d14c853bSStephen Hurd 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
328264e6fc13SStephen Hurd 		DBG_COUNTER_INC(tx_frees);
3283d14c853bSStephen Hurd 		return (ENOBUFS);
3284d14c853bSStephen Hurd 	}
3285d14c853bSStephen Hurd 
3286d14c853bSStephen Hurd 	return (0);
3287d14c853bSStephen Hurd }
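/*
 * Worked example (illustrative): padding a 42-byte ARP request up to a
 * min_frame_size of 60 (the 64-byte Ethernet minimum less the 4-byte CRC).
 * n starts at 60 - 42 = 18, a single m_append() of 18 zero bytes from pad[]
 * brings the packet to 60 bytes, and the loop exits with n <= 0.
 */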
3288d14c853bSStephen Hurd 
32894c7070dbSScott Long static int
32904c7070dbSScott Long iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
32914c7070dbSScott Long {
32924c7070dbSScott Long 	if_ctx_t		ctx;
32934c7070dbSScott Long 	if_shared_ctx_t		sctx;
32944c7070dbSScott Long 	if_softc_ctx_t		scctx;
3295bfce461eSMarius Strobl 	bus_dma_tag_t		buf_tag;
32964c7070dbSScott Long 	bus_dma_segment_t	*segs;
3297fbec776dSAndrew Gallatin 	struct mbuf		*m_head, **ifsd_m;
329895246abbSSean Bruno 	void			*next_txd;
32994c7070dbSScott Long 	bus_dmamap_t		map;
33004c7070dbSScott Long 	struct if_pkt_info	pi;
33014c7070dbSScott Long 	int remap = 0;
33024c7070dbSScott Long 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
33034c7070dbSScott Long 
33044c7070dbSScott Long 	ctx = txq->ift_ctx;
33054c7070dbSScott Long 	sctx = ctx->ifc_sctx;
33064c7070dbSScott Long 	scctx = &ctx->ifc_softc_ctx;
33074c7070dbSScott Long 	segs = txq->ift_segs;
330823ac9029SStephen Hurd 	ntxd = txq->ift_size;
33094c7070dbSScott Long 	m_head = *m_headp;
33104c7070dbSScott Long 	map = NULL;
33114c7070dbSScott Long 
33124c7070dbSScott Long 	/*
33134c7070dbSScott Long 	 * If we're doing TSO, the next descriptor to clean may be quite far ahead.
33144c7070dbSScott Long 	 */
33154c7070dbSScott Long 	cidx = txq->ift_cidx;
33164c7070dbSScott Long 	pidx = txq->ift_pidx;
331795246abbSSean Bruno 	if (ctx->ifc_flags & IFC_PREFETCH) {
33184c7070dbSScott Long 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
331995246abbSSean Bruno 		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
332095246abbSSean Bruno 			next_txd = calc_next_txd(txq, cidx, 0);
332195246abbSSean Bruno 			prefetch(next_txd);
332295246abbSSean Bruno 		}
33234c7070dbSScott Long 
33244c7070dbSScott Long 		/* prefetch the next cache line of mbuf pointers and flags */
33254c7070dbSScott Long 		prefetch(&txq->ift_sds.ifsd_m[next]);
33264c7070dbSScott Long 		prefetch(&txq->ift_sds.ifsd_map[next]);
33274c7070dbSScott Long 		next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
33284c7070dbSScott Long 	}
332995246abbSSean Bruno 	map = txq->ift_sds.ifsd_map[pidx];
3330fbec776dSAndrew Gallatin 	ifsd_m = txq->ift_sds.ifsd_m;
33314c7070dbSScott Long 
33324c7070dbSScott Long 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3333bfce461eSMarius Strobl 		buf_tag = txq->ift_tso_buf_tag;
33344c7070dbSScott Long 		max_segs = scctx->isc_tx_tso_segments_max;
33358a04b53dSKonstantin Belousov 		map = txq->ift_sds.ifsd_tso_map[pidx];
3336bfce461eSMarius Strobl 		MPASS(buf_tag != NULL);
33377f87c040SMarius Strobl 		MPASS(max_segs > 0);
33384c7070dbSScott Long 	} else {
3339bfce461eSMarius Strobl 		buf_tag = txq->ift_buf_tag;
33404c7070dbSScott Long 		max_segs = scctx->isc_tx_nsegments;
33418a04b53dSKonstantin Belousov 		map = txq->ift_sds.ifsd_map[pidx];
33424c7070dbSScott Long 	}
3343d14c853bSStephen Hurd 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3344d14c853bSStephen Hurd 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3345a15fbbb8SStephen Hurd 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
334664e6fc13SStephen Hurd 		if (err) {
334764e6fc13SStephen Hurd 			DBG_COUNTER_INC(encap_txd_encap_fail);
3348d14c853bSStephen Hurd 			return (err);
3349d14c853bSStephen Hurd 		}
335064e6fc13SStephen Hurd 	}
3351a15fbbb8SStephen Hurd 	m_head = *m_headp;
335295246abbSSean Bruno 
335395246abbSSean Bruno 	pkt_info_zero(&pi);
3354ab2e3f79SStephen Hurd 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
3355ab2e3f79SStephen Hurd 	pi.ipi_pidx = pidx;
3356ab2e3f79SStephen Hurd 	pi.ipi_qsidx = txq->ift_id;
33573429c02fSStephen Hurd 	pi.ipi_len = m_head->m_pkthdr.len;
33583429c02fSStephen Hurd 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
33591722eeacSMarius Strobl 	pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? m_head->m_pkthdr.ether_vtag : 0;
33604c7070dbSScott Long 
33614c7070dbSScott Long 	/* deliberate bitwise OR to make one condition */
33624c7070dbSScott Long 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
336364e6fc13SStephen Hurd 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) {
336464e6fc13SStephen Hurd 			DBG_COUNTER_INC(encap_txd_encap_fail);
33654c7070dbSScott Long 			return (err);
336664e6fc13SStephen Hurd 		}
33674c7070dbSScott Long 		m_head = *m_headp;
33684c7070dbSScott Long 	}
33694c7070dbSScott Long 
33704c7070dbSScott Long retry:
3371bfce461eSMarius Strobl 	err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
3372fbec776dSAndrew Gallatin 	    BUS_DMA_NOWAIT);
33734c7070dbSScott Long defrag:
33744c7070dbSScott Long 	if (__predict_false(err)) {
33754c7070dbSScott Long 		switch (err) {
33764c7070dbSScott Long 		case EFBIG:
33774c7070dbSScott Long 			/* try collapse once and defrag once */
3378f7594707SAndrew Gallatin 			if (remap == 0) {
33794c7070dbSScott Long 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3380f7594707SAndrew Gallatin 				/* try defrag if collapsing fails */
3381f7594707SAndrew Gallatin 				if (m_head == NULL)
3382f7594707SAndrew Gallatin 					remap++;
3383f7594707SAndrew Gallatin 			}
338464e6fc13SStephen Hurd 			if (remap == 1) {
338564e6fc13SStephen Hurd 				txq->ift_mbuf_defrag++;
33864c7070dbSScott Long 				m_head = m_defrag(*m_headp, M_NOWAIT);
338764e6fc13SStephen Hurd 			}
33883e8d1baeSEric Joyner 			/*
33893e8d1baeSEric Joyner 			 * remap should never be >1 unless bus_dmamap_load_mbuf_sg
33903e8d1baeSEric Joyner 			 * failed to map an mbuf that was run through m_defrag
33913e8d1baeSEric Joyner 			 */
33923e8d1baeSEric Joyner 			MPASS(remap <= 1);
33933e8d1baeSEric Joyner 			if (__predict_false(m_head == NULL || remap > 1))
33944c7070dbSScott Long 				goto defrag_failed;
33953e8d1baeSEric Joyner 			remap++;
33964c7070dbSScott Long 			*m_headp = m_head;
33974c7070dbSScott Long 			goto retry;
33984c7070dbSScott Long 			break;
33994c7070dbSScott Long 		case ENOMEM:
34004c7070dbSScott Long 			txq->ift_no_tx_dma_setup++;
34014c7070dbSScott Long 			break;
34024c7070dbSScott Long 		default:
34034c7070dbSScott Long 			txq->ift_no_tx_dma_setup++;
34044c7070dbSScott Long 			m_freem(*m_headp);
34054c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
34064c7070dbSScott Long 			*m_headp = NULL;
34074c7070dbSScott Long 			break;
34084c7070dbSScott Long 		}
34094c7070dbSScott Long 		txq->ift_map_failed++;
34104c7070dbSScott Long 		DBG_COUNTER_INC(encap_load_mbuf_fail);
341164e6fc13SStephen Hurd 		DBG_COUNTER_INC(encap_txd_encap_fail);
34124c7070dbSScott Long 		return (err);
34134c7070dbSScott Long 	}
3414fbec776dSAndrew Gallatin 	ifsd_m[pidx] = m_head;
34154c7070dbSScott Long 	/*
34164c7070dbSScott Long 	 * XXX assumes a 1 to 1 relationship between segments and
34174c7070dbSScott Long 	 *        descriptors - this does not hold true on all drivers, e.g.
34184c7070dbSScott Long 	 *        cxgb
34194c7070dbSScott Long 	 */
34204c7070dbSScott Long 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
34214c7070dbSScott Long 		txq->ift_no_desc_avail++;
3422bfce461eSMarius Strobl 		bus_dmamap_unload(buf_tag, map);
34234c7070dbSScott Long 		DBG_COUNTER_INC(encap_txq_avail_fail);
342464e6fc13SStephen Hurd 		DBG_COUNTER_INC(encap_txd_encap_fail);
342523ac9029SStephen Hurd 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
34264c7070dbSScott Long 			GROUPTASK_ENQUEUE(&txq->ift_task);
34274c7070dbSScott Long 		return (ENOBUFS);
34284c7070dbSScott Long 	}
342995246abbSSean Bruno 	/*
343095246abbSSean Bruno 	 * On Intel cards we can greatly reduce the number of TX interrupts
343195246abbSSean Bruno 	 * we see by only setting report status on every Nth descriptor.
343295246abbSSean Bruno 	 * However, this also means that the driver will need to keep track
343395246abbSSean Bruno 	 * of the descriptors that RS was set on to check them for the DD bit.
343495246abbSSean Bruno 	 */
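	/*
	 * Illustrative numbers only (the real limit comes from
	 * TXQ_MAX_RS_DEFERRED()): with a deferral limit of 64 descriptors
	 * and packets averaging 3 segments, ift_rs_pending grows by 4 per
	 * packet, so IPI_TX_INTR is requested on roughly every 16th packet
	 * rather than on every one.
	 */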
343595246abbSSean Bruno 	txq->ift_rs_pending += nsegs + 1;
343695246abbSSean Bruno 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
34371f7ce05dSAndrew Gallatin 	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
343895246abbSSean Bruno 		pi.ipi_flags |= IPI_TX_INTR;
343995246abbSSean Bruno 		txq->ift_rs_pending = 0;
344095246abbSSean Bruno 	}
344195246abbSSean Bruno 
34424c7070dbSScott Long 	pi.ipi_segs = segs;
34434c7070dbSScott Long 	pi.ipi_nsegs = nsegs;
34444c7070dbSScott Long 
344523ac9029SStephen Hurd 	MPASS(pidx >= 0 && pidx < txq->ift_size);
34464c7070dbSScott Long #ifdef PKT_DEBUG
34474c7070dbSScott Long 	print_pkt(&pi);
34484c7070dbSScott Long #endif
34494c7070dbSScott Long 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
345095dcf343SMarius Strobl 		bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
34514c7070dbSScott Long 		DBG_COUNTER_INC(tx_encap);
345295246abbSSean Bruno 		MPASS(pi.ipi_new_pidx < txq->ift_size);
34534c7070dbSScott Long 
34544c7070dbSScott Long 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
34554c7070dbSScott Long 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
345623ac9029SStephen Hurd 			ndesc += txq->ift_size;
34574c7070dbSScott Long 			txq->ift_gen = 1;
34584c7070dbSScott Long 		}
34591248952aSSean Bruno 		/*
34601248952aSSean Bruno 		 * drivers may need as many as
34611248952aSSean Bruno 		 * two sentinels
34621248952aSSean Bruno 		 */
34631248952aSSean Bruno 		MPASS(ndesc <= pi.ipi_nsegs + 2);
34644c7070dbSScott Long 		MPASS(pi.ipi_new_pidx != pidx);
34654c7070dbSScott Long 		MPASS(ndesc > 0);
34664c7070dbSScott Long 		txq->ift_in_use += ndesc;
346795246abbSSean Bruno 
34684c7070dbSScott Long 		/*
34694c7070dbSScott Long 		 * We update the last software descriptor again here because there may
34704c7070dbSScott Long 		 * be a sentinel and/or there may be more mbufs than segments
34714c7070dbSScott Long 		 */
34724c7070dbSScott Long 		txq->ift_pidx = pi.ipi_new_pidx;
34734c7070dbSScott Long 		txq->ift_npending += pi.ipi_ndescs;
3474f7594707SAndrew Gallatin 	} else {
347523ac9029SStephen Hurd 		*m_headp = m_head = iflib_remove_mbuf(txq);
3476f7594707SAndrew Gallatin 		if (err == EFBIG) {
34774c7070dbSScott Long 			txq->ift_txd_encap_efbig++;
3478f7594707SAndrew Gallatin 			if (remap < 2) {
3479f7594707SAndrew Gallatin 				remap = 1;
34804c7070dbSScott Long 				goto defrag;
3481f7594707SAndrew Gallatin 			}
3482f7594707SAndrew Gallatin 		}
3483f7594707SAndrew Gallatin 		goto defrag_failed;
3484f7594707SAndrew Gallatin 	}
348564e6fc13SStephen Hurd 	/*
348664e6fc13SStephen Hurd 	 * err can't possibly be non-zero here, so we don't need to test it
348764e6fc13SStephen Hurd 	 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
348864e6fc13SStephen Hurd 	 */
34894c7070dbSScott Long 	return (err);
34904c7070dbSScott Long 
34914c7070dbSScott Long defrag_failed:
34924c7070dbSScott Long 	txq->ift_mbuf_defrag_failed++;
34934c7070dbSScott Long 	txq->ift_map_failed++;
34944c7070dbSScott Long 	m_freem(*m_headp);
34954c7070dbSScott Long 	DBG_COUNTER_INC(tx_frees);
34964c7070dbSScott Long 	*m_headp = NULL;
349764e6fc13SStephen Hurd 	DBG_COUNTER_INC(encap_txd_encap_fail);
34984c7070dbSScott Long 	return (ENOMEM);
34994c7070dbSScott Long }
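/*
 * Sketch of the driver-side contract assumed above (simplified; the function
 * name and descriptor-writing details are hypothetical, but the fields match
 * those filled in by iflib_encap()): isc_txd_encap() consumes the
 * if_pkt_info and reports the resulting producer index through ipi_new_pidx.
 *
 *	static int
 *	drv_txd_encap(void *sc, if_pkt_info_t pi)
 *	{
 *		// Write pi->ipi_nsegs descriptors from pi->ipi_segs[],
 *		// starting at pi->ipi_pidx and honoring IPI_TX_INTR, then
 *		// record where the producer index ended up:
 *		pi->ipi_new_pidx = new_pidx;	// just past the last
 *						// descriptor written
 *		return (0);
 *	}
 */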
35004c7070dbSScott Long 
35014c7070dbSScott Long static void
35024c7070dbSScott Long iflib_tx_desc_free(iflib_txq_t txq, int n)
35034c7070dbSScott Long {
35044c7070dbSScott Long 	uint32_t qsize, cidx, mask, gen;
35054c7070dbSScott Long 	struct mbuf *m, **ifsd_m;
350695246abbSSean Bruno 	bool do_prefetch;
35074c7070dbSScott Long 
35084c7070dbSScott Long 	cidx = txq->ift_cidx;
35094c7070dbSScott Long 	gen = txq->ift_gen;
351023ac9029SStephen Hurd 	qsize = txq->ift_size;
35114c7070dbSScott Long 	mask = qsize-1;
35124c7070dbSScott Long 	ifsd_m = txq->ift_sds.ifsd_m;
351395246abbSSean Bruno 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
35144c7070dbSScott Long 
351594618825SMark Johnston 	while (n-- > 0) {
351695246abbSSean Bruno 		if (do_prefetch) {
35174c7070dbSScott Long 			prefetch(ifsd_m[(cidx + 3) & mask]);
35184c7070dbSScott Long 			prefetch(ifsd_m[(cidx + 4) & mask]);
351995246abbSSean Bruno 		}
35204c7070dbSScott Long 		if ((m = ifsd_m[cidx]) != NULL) {
3521fbec776dSAndrew Gallatin 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
35228a04b53dSKonstantin Belousov 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
3523bfce461eSMarius Strobl 				bus_dmamap_sync(txq->ift_tso_buf_tag,
35248a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_tso_map[cidx],
35258a04b53dSKonstantin Belousov 				    BUS_DMASYNC_POSTWRITE);
3526bfce461eSMarius Strobl 				bus_dmamap_unload(txq->ift_tso_buf_tag,
35278a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_tso_map[cidx]);
35288a04b53dSKonstantin Belousov 			} else {
3529bfce461eSMarius Strobl 				bus_dmamap_sync(txq->ift_buf_tag,
35308a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_map[cidx],
35318a04b53dSKonstantin Belousov 				    BUS_DMASYNC_POSTWRITE);
3532bfce461eSMarius Strobl 				bus_dmamap_unload(txq->ift_buf_tag,
35338a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_map[cidx]);
35348a04b53dSKonstantin Belousov 			}
35354c7070dbSScott Long 			/* XXX we don't support any drivers that batch packets yet */
35364c7070dbSScott Long 			MPASS(m->m_nextpkt == NULL);
35375c5ca36cSSean Bruno 			m_freem(m);
35384c7070dbSScott Long 			ifsd_m[cidx] = NULL;
35394c7070dbSScott Long #if MEMORY_LOGGING
35404c7070dbSScott Long 			txq->ift_dequeued++;
35414c7070dbSScott Long #endif
35424c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
35434c7070dbSScott Long 		}
35444c7070dbSScott Long 		if (__predict_false(++cidx == qsize)) {
35454c7070dbSScott Long 			cidx = 0;
35464c7070dbSScott Long 			gen = 0;
35474c7070dbSScott Long 		}
35484c7070dbSScott Long 	}
35494c7070dbSScott Long 	txq->ift_cidx = cidx;
35504c7070dbSScott Long 	txq->ift_gen = gen;
35514c7070dbSScott Long }
35524c7070dbSScott Long 
35534c7070dbSScott Long static __inline int
35544c7070dbSScott Long iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
35554c7070dbSScott Long {
35564c7070dbSScott Long 	int reclaim;
35574c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
35584c7070dbSScott Long 
35594c7070dbSScott Long 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
35604c7070dbSScott Long 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
35614c7070dbSScott Long 
35624c7070dbSScott Long 	/*
35634c7070dbSScott Long 	 * Need a rate-limiting check so that this isn't called every time
35644c7070dbSScott Long 	 */
35654c7070dbSScott Long 	iflib_tx_credits_update(ctx, txq);
35664c7070dbSScott Long 	reclaim = DESC_RECLAIMABLE(txq);
35674c7070dbSScott Long 
35684c7070dbSScott Long 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
35694c7070dbSScott Long #ifdef INVARIANTS
35704c7070dbSScott Long 		if (iflib_verbose_debug) {
35714c7070dbSScott Long 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
35724c7070dbSScott Long 			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
35734c7070dbSScott Long 			       reclaim, thresh);
35744c7070dbSScott Long 
35754c7070dbSScott Long 		}
35764c7070dbSScott Long #endif
35774c7070dbSScott Long 		return (0);
35784c7070dbSScott Long 	}
35794c7070dbSScott Long 	iflib_tx_desc_free(txq, reclaim);
35804c7070dbSScott Long 	txq->ift_cleaned += reclaim;
35814c7070dbSScott Long 	txq->ift_in_use -= reclaim;
35824c7070dbSScott Long 
35834c7070dbSScott Long 	return (reclaim);
35844c7070dbSScott Long }
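/*
 * Illustrative flow: iflib_txq_drain() below calls this with
 * RECLAIM_THRESH(ctx) before encapsulating new packets; when the number of
 * reclaimable descriptors is at or below thresh, nothing is freed and 0 is
 * returned, otherwise iflib_tx_desc_free() releases the completed slots and
 * ift_in_use shrinks by the same amount, opening room in the ring.
 */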
35854c7070dbSScott Long 
35864c7070dbSScott Long static struct mbuf **
358795246abbSSean Bruno _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
35884c7070dbSScott Long {
358995246abbSSean Bruno 	int next, size;
359095246abbSSean Bruno 	struct mbuf **items;
35914c7070dbSScott Long 
359295246abbSSean Bruno 	size = r->size;
359395246abbSSean Bruno 	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
359495246abbSSean Bruno 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
359595246abbSSean Bruno 
359695246abbSSean Bruno 	prefetch(items[(cidx + offset) & (size-1)]);
359795246abbSSean Bruno 	if (remaining > 1) {
35983429c02fSStephen Hurd 		prefetch2cachelines(&items[next]);
35993429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
36003429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
36013429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
360295246abbSSean Bruno 	}
360395246abbSSean Bruno 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
36044c7070dbSScott Long }
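/*
 * _ring_peek_one() returns the address of ring slot (cidx + offset) without
 * consuming it; the prefetch()/prefetch2cachelines() calls simply warm the
 * upcoming slots for the callers, iflib_txq_drain() and
 * iflib_txq_drain_free() below, which walk the ring item by item.
 */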
36054c7070dbSScott Long 
36064c7070dbSScott Long static void
36074c7070dbSScott Long iflib_txq_check_drain(iflib_txq_t txq, int budget)
36084c7070dbSScott Long {
36094c7070dbSScott Long 
361095246abbSSean Bruno 	ifmp_ring_check_drainage(txq->ift_br, budget);
36114c7070dbSScott Long }
36124c7070dbSScott Long 
36134c7070dbSScott Long static uint32_t
36144c7070dbSScott Long iflib_txq_can_drain(struct ifmp_ring *r)
36154c7070dbSScott Long {
36164c7070dbSScott Long 	iflib_txq_t txq = r->cookie;
36174c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
36184c7070dbSScott Long 
361995dcf343SMarius Strobl 	if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2)
362095dcf343SMarius Strobl 		return (1);
36218a04b53dSKonstantin Belousov 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
36228a04b53dSKonstantin Belousov 	    BUS_DMASYNC_POSTREAD);
362395dcf343SMarius Strobl 	return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
362495dcf343SMarius Strobl 	    false));
36254c7070dbSScott Long }
36264c7070dbSScott Long 
36274c7070dbSScott Long static uint32_t
36284c7070dbSScott Long iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
36294c7070dbSScott Long {
36304c7070dbSScott Long 	iflib_txq_t txq = r->cookie;
36314c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
36321722eeacSMarius Strobl 	if_t ifp = ctx->ifc_ifp;
3633c2c5d1e7SMarius Strobl 	struct mbuf *m, **mp;
3634c2c5d1e7SMarius Strobl 	int avail, bytes_sent, consumed, count, err, i, in_use_prev;
3635c2c5d1e7SMarius Strobl 	int mcast_sent, pkt_sent, reclaimed, txq_avail;
3636c2c5d1e7SMarius Strobl 	bool do_prefetch, rang, ring;
36374c7070dbSScott Long 
36384c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
36394c7070dbSScott Long 			    !LINK_ACTIVE(ctx))) {
36404c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_notready);
36414c7070dbSScott Long 		return (0);
36424c7070dbSScott Long 	}
364395246abbSSean Bruno 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
364495246abbSSean Bruno 	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
36454c7070dbSScott Long 	avail = IDXDIFF(pidx, cidx, r->size);
36464c7070dbSScott Long 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
36474c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_flushing);
36484c7070dbSScott Long 		for (i = 0; i < avail; i++) {
3649bc0e855bSStephen Hurd 			if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq))
365023ac9029SStephen Hurd 				m_free(r->items[(cidx + i) & (r->size-1)]);
36514c7070dbSScott Long 			r->items[(cidx + i) & (r->size-1)] = NULL;
36524c7070dbSScott Long 		}
36534c7070dbSScott Long 		return (avail);
36544c7070dbSScott Long 	}
365595246abbSSean Bruno 
36564c7070dbSScott Long 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
36574c7070dbSScott Long 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
36584c7070dbSScott Long 		CALLOUT_LOCK(txq);
36594c7070dbSScott Long 		callout_stop(&txq->ift_timer);
36604c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
36614c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_oactive);
36624c7070dbSScott Long 		return (0);
36634c7070dbSScott Long 	}
366495246abbSSean Bruno 	if (reclaimed)
366595246abbSSean Bruno 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
36664c7070dbSScott Long 	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
36674c7070dbSScott Long 	count = MIN(avail, TX_BATCH_SIZE);
3668da69b8f9SSean Bruno #ifdef INVARIANTS
3669da69b8f9SSean Bruno 	if (iflib_verbose_debug)
3670da69b8f9SSean Bruno 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3671da69b8f9SSean Bruno 		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3672da69b8f9SSean Bruno #endif
367395246abbSSean Bruno 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3674c2c5d1e7SMarius Strobl 	txq_avail = TXQ_AVAIL(txq);
36751ae4848cSMatt Macy 	err = 0;
3676c2c5d1e7SMarius Strobl 	for (i = 0; i < count && txq_avail > MAX_TX_DESC(ctx) + 2; i++) {
36771ae4848cSMatt Macy 		int rem = do_prefetch ? count - i : 0;
36784c7070dbSScott Long 
367995246abbSSean Bruno 		mp = _ring_peek_one(r, cidx, i, rem);
3680da69b8f9SSean Bruno 		MPASS(mp != NULL && *mp != NULL);
368195246abbSSean Bruno 		if (__predict_false(*mp == (struct mbuf *)txq)) {
368295246abbSSean Bruno 			consumed++;
368395246abbSSean Bruno 			continue;
368495246abbSSean Bruno 		}
36854c7070dbSScott Long 		in_use_prev = txq->ift_in_use;
368695246abbSSean Bruno 		err = iflib_encap(txq, mp);
368795246abbSSean Bruno 		if (__predict_false(err)) {
3688da69b8f9SSean Bruno 			/* no room - bail out */
368995246abbSSean Bruno 			if (err == ENOBUFS)
36904c7070dbSScott Long 				break;
36914c7070dbSScott Long 			consumed++;
3692da69b8f9SSean Bruno 			/* we can't send this packet - skip it */
36934c7070dbSScott Long 			continue;
3694da69b8f9SSean Bruno 		}
369595246abbSSean Bruno 		consumed++;
36964c7070dbSScott Long 		pkt_sent++;
36974c7070dbSScott Long 		m = *mp;
36984c7070dbSScott Long 		DBG_COUNTER_INC(tx_sent);
36994c7070dbSScott Long 		bytes_sent += m->m_pkthdr.len;
370095246abbSSean Bruno 		mcast_sent += !!(m->m_flags & M_MCAST);
3701c2c5d1e7SMarius Strobl 		txq_avail = TXQ_AVAIL(txq);
37024c7070dbSScott Long 
37034c7070dbSScott Long 		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
37044c7070dbSScott Long 		ETHER_BPF_MTAP(ifp, m);
370595246abbSSean Bruno 		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
37064c7070dbSScott Long 			break;
370795246abbSSean Bruno 		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
37084c7070dbSScott Long 	}
37094c7070dbSScott Long 
371095246abbSSean Bruno 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
371195246abbSSean Bruno 	ring = rang ? false  : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
371295246abbSSean Bruno 	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
37134c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
37144c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
37154c7070dbSScott Long 	if (mcast_sent)
37164c7070dbSScott Long 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3717da69b8f9SSean Bruno #ifdef INVARIANTS
3718da69b8f9SSean Bruno 	if (iflib_verbose_debug)
3719da69b8f9SSean Bruno 		printf("consumed=%d\n", consumed);
3720da69b8f9SSean Bruno #endif
37214c7070dbSScott Long 	return (consumed);
37224c7070dbSScott Long }
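/*
 * iflib_txq_drain() is the normal mp_ring drain callback for a TX queue
 * (iflib_ifmp_purge() below temporarily swaps it out and then restores it),
 * so it is called via ifmp_ring_enqueue()/ifmp_ring_check_drainage() with
 * the ring's consumer and producer indices; the return value is the number
 * of ring slots consumed.
 */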
37234c7070dbSScott Long 
3724da69b8f9SSean Bruno static uint32_t
3725da69b8f9SSean Bruno iflib_txq_drain_always(struct ifmp_ring *r)
3726da69b8f9SSean Bruno {
3727da69b8f9SSean Bruno 	return (1);
3728da69b8f9SSean Bruno }
3729da69b8f9SSean Bruno 
3730da69b8f9SSean Bruno static uint32_t
3731da69b8f9SSean Bruno iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3732da69b8f9SSean Bruno {
3733da69b8f9SSean Bruno 	int i, avail;
3734da69b8f9SSean Bruno 	struct mbuf **mp;
3735da69b8f9SSean Bruno 	iflib_txq_t txq;
3736da69b8f9SSean Bruno 
3737da69b8f9SSean Bruno 	txq = r->cookie;
3738da69b8f9SSean Bruno 
3739da69b8f9SSean Bruno 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3740da69b8f9SSean Bruno 	CALLOUT_LOCK(txq);
3741da69b8f9SSean Bruno 	callout_stop(&txq->ift_timer);
3742da69b8f9SSean Bruno 	CALLOUT_UNLOCK(txq);
3743da69b8f9SSean Bruno 
3744da69b8f9SSean Bruno 	avail = IDXDIFF(pidx, cidx, r->size);
3745da69b8f9SSean Bruno 	for (i = 0; i < avail; i++) {
374695246abbSSean Bruno 		mp = _ring_peek_one(r, cidx, i, avail - i);
374795246abbSSean Bruno 		if (__predict_false(*mp == (struct mbuf *)txq))
374895246abbSSean Bruno 			continue;
3749da69b8f9SSean Bruno 		m_freem(*mp);
375064e6fc13SStephen Hurd 		DBG_COUNTER_INC(tx_frees);
3751da69b8f9SSean Bruno 	}
3752da69b8f9SSean Bruno 	MPASS(ifmp_ring_is_stalled(r) == 0);
3753da69b8f9SSean Bruno 	return (avail);
3754da69b8f9SSean Bruno }
3755da69b8f9SSean Bruno 
3756da69b8f9SSean Bruno static void
3757da69b8f9SSean Bruno iflib_ifmp_purge(iflib_txq_t txq)
3758da69b8f9SSean Bruno {
3759da69b8f9SSean Bruno 	struct ifmp_ring *r;
3760da69b8f9SSean Bruno 
376195246abbSSean Bruno 	r = txq->ift_br;
3762da69b8f9SSean Bruno 	r->drain = iflib_txq_drain_free;
3763da69b8f9SSean Bruno 	r->can_drain = iflib_txq_drain_always;
3764da69b8f9SSean Bruno 
3765da69b8f9SSean Bruno 	ifmp_ring_check_drainage(r, r->size);
3766da69b8f9SSean Bruno 
3767da69b8f9SSean Bruno 	r->drain = iflib_txq_drain;
3768da69b8f9SSean Bruno 	r->can_drain = iflib_txq_can_drain;
3769da69b8f9SSean Bruno }
3770da69b8f9SSean Bruno 
37714c7070dbSScott Long static void
377223ac9029SStephen Hurd _task_fn_tx(void *context)
37734c7070dbSScott Long {
37744c7070dbSScott Long 	iflib_txq_t txq = context;
37754c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
3776a6611c93SMarius Strobl #if defined(ALTQ) || defined(DEV_NETMAP)
3777a6611c93SMarius Strobl 	if_t ifp = ctx->ifc_ifp;
3778a6611c93SMarius Strobl #endif
3779fe51d4cdSStephen Hurd 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
37804c7070dbSScott Long 
37811248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
37821248952aSSean Bruno 	txq->ift_cpu_exec_count[curcpu]++;
37831248952aSSean Bruno #endif
37844c7070dbSScott Long 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
37854c7070dbSScott Long 		return;
378695dcf343SMarius Strobl #ifdef DEV_NETMAP
3787a6611c93SMarius Strobl 	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
37888a04b53dSKonstantin Belousov 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
37898a04b53dSKonstantin Belousov 		    BUS_DMASYNC_POSTREAD);
379095246abbSSean Bruno 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
3791a6611c93SMarius Strobl 			netmap_tx_irq(ifp, txq->ift_id);
37923d10e9edSMarius Strobl 		if (ctx->ifc_flags & IFC_LEGACY)
37933d10e9edSMarius Strobl 			IFDI_INTR_ENABLE(ctx);
37943d10e9edSMarius Strobl 		else
379595246abbSSean Bruno 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
379695246abbSSean Bruno 		return;
379795246abbSSean Bruno 	}
379895dcf343SMarius Strobl #endif
3799b8ca4756SPatrick Kelsey #ifdef ALTQ
3800b8ca4756SPatrick Kelsey 	if (ALTQ_IS_ENABLED(&ifp->if_snd))
3801b8ca4756SPatrick Kelsey 		iflib_altq_if_start(ifp);
3802b8ca4756SPatrick Kelsey #endif
380395246abbSSean Bruno 	if (txq->ift_db_pending)
3804fe51d4cdSStephen Hurd 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
3805fe51d4cdSStephen Hurd 	else if (!abdicate)
3806fe51d4cdSStephen Hurd 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3807fe51d4cdSStephen Hurd 	/*
3808fe51d4cdSStephen Hurd 	 * When abdicating, we always need to check drainage, not just when we don't enqueue
3809fe51d4cdSStephen Hurd 	 */
3810fe51d4cdSStephen Hurd 	if (abdicate)
3811fe51d4cdSStephen Hurd 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
381295246abbSSean Bruno 	if (ctx->ifc_flags & IFC_LEGACY)
381395246abbSSean Bruno 		IFDI_INTR_ENABLE(ctx);
38143d10e9edSMarius Strobl 	else
38151ae4848cSMatt Macy 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
38164c7070dbSScott Long }
38174c7070dbSScott Long 
38184c7070dbSScott Long static void
381923ac9029SStephen Hurd _task_fn_rx(void *context)
38204c7070dbSScott Long {
38214c7070dbSScott Long 	iflib_rxq_t rxq = context;
38224c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
3823fb1a29b4SHans Petter Selasky 	uint8_t more;
3824f4d2154eSStephen Hurd 	uint16_t budget;
38254c7070dbSScott Long 
38261248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
38271248952aSSean Bruno 	rxq->ifr_cpu_exec_count[curcpu]++;
38281248952aSSean Bruno #endif
38294c7070dbSScott Long 	DBG_COUNTER_INC(task_fn_rxs);
38304c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
38314c7070dbSScott Long 		return;
3832d0d0ad0aSStephen Hurd #ifdef DEV_NETMAP
3833d0d0ad0aSStephen Hurd 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
3834d0d0ad0aSStephen Hurd 		u_int work = 0;
3835d0d0ad0aSStephen Hurd 		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
3836fb1a29b4SHans Petter Selasky 			more = 0;
3837fb1a29b4SHans Petter Selasky 			goto skip_rxeof;
3838d0d0ad0aSStephen Hurd 		}
3839d0d0ad0aSStephen Hurd 	}
3840d0d0ad0aSStephen Hurd #endif
3841f4d2154eSStephen Hurd 	budget = ctx->ifc_sysctl_rx_budget;
3842f4d2154eSStephen Hurd 	if (budget == 0)
3843f4d2154eSStephen Hurd 		budget = 16;	/* XXX */
3844fb1a29b4SHans Petter Selasky 	more = iflib_rxeof(rxq, budget);
3845fb1a29b4SHans Petter Selasky #ifdef DEV_NETMAP
3846fb1a29b4SHans Petter Selasky skip_rxeof:
3847fb1a29b4SHans Petter Selasky #endif
3848fb1a29b4SHans Petter Selasky 	if ((more & IFLIB_RXEOF_MORE) == 0) {
38494c7070dbSScott Long 		if (ctx->ifc_flags & IFC_LEGACY)
38504c7070dbSScott Long 			IFDI_INTR_ENABLE(ctx);
38513d10e9edSMarius Strobl 		else
38521ae4848cSMatt Macy 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
38531ae4848cSMatt Macy 		DBG_COUNTER_INC(rx_intr_enables);
38544c7070dbSScott Long 	}
38554c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
38564c7070dbSScott Long 		return;
3857fb1a29b4SHans Petter Selasky 
3858fb1a29b4SHans Petter Selasky 	if (more & IFLIB_RXEOF_MORE)
38594c7070dbSScott Long 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
3860fb1a29b4SHans Petter Selasky 	else if (more & IFLIB_RXEOF_EMPTY)
3861fb1a29b4SHans Petter Selasky 		callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq);
38624c7070dbSScott Long }
38634c7070dbSScott Long 
38644c7070dbSScott Long static void
386523ac9029SStephen Hurd _task_fn_admin(void *context)
38664c7070dbSScott Long {
38674c7070dbSScott Long 	if_ctx_t ctx = context;
38684c7070dbSScott Long 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
38694c7070dbSScott Long 	iflib_txq_t txq;
3870ab2e3f79SStephen Hurd 	int i;
387177c1fcecSEric Joyner 	bool oactive, running, do_reset, do_watchdog, in_detach;
3872dd7fbcf1SStephen Hurd 	uint32_t reset_on = hz / 2;
3873ab2e3f79SStephen Hurd 
38747b610b60SSean Bruno 	STATE_LOCK(ctx);
38757b610b60SSean Bruno 	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
38767b610b60SSean Bruno 	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
38777b610b60SSean Bruno 	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
38787b610b60SSean Bruno 	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
387977c1fcecSEric Joyner 	in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
38807b610b60SSean Bruno 	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
38817b610b60SSean Bruno 	STATE_UNLOCK(ctx);
38827b610b60SSean Bruno 
388377c1fcecSEric Joyner 	if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
388477c1fcecSEric Joyner 		return;
388577c1fcecSEric Joyner 	if (in_detach)
3886ab2e3f79SStephen Hurd 		return;
38874c7070dbSScott Long 
38884c7070dbSScott Long 	CTX_LOCK(ctx);
38894c7070dbSScott Long 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
38904c7070dbSScott Long 		CALLOUT_LOCK(txq);
38914c7070dbSScott Long 		callout_stop(&txq->ift_timer);
38924c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
38934c7070dbSScott Long 	}
38947b610b60SSean Bruno 	if (do_watchdog) {
38957b610b60SSean Bruno 		ctx->ifc_watchdog_events++;
38967b610b60SSean Bruno 		IFDI_WATCHDOG_RESET(ctx);
38977b610b60SSean Bruno 	}
3898d300df01SStephen Hurd 	IFDI_UPDATE_ADMIN_STATUS(ctx);
3899dd7fbcf1SStephen Hurd 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3900dd7fbcf1SStephen Hurd #ifdef DEV_NETMAP
3901dd7fbcf1SStephen Hurd 		reset_on = hz / 2;
3902dd7fbcf1SStephen Hurd 		if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
390395dcf343SMarius Strobl 			iflib_netmap_timer_adjust(ctx, txq, &reset_on);
3904dd7fbcf1SStephen Hurd #endif
3905dd7fbcf1SStephen Hurd 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
3906dd7fbcf1SStephen Hurd 	}
3907ab2e3f79SStephen Hurd 	IFDI_LINK_INTR_ENABLE(ctx);
39087b610b60SSean Bruno 	if (do_reset)
3909ab2e3f79SStephen Hurd 		iflib_if_init_locked(ctx);
39104c7070dbSScott Long 	CTX_UNLOCK(ctx);
39114c7070dbSScott Long 
3912ab2e3f79SStephen Hurd 	if (LINK_ACTIVE(ctx) == 0)
39134c7070dbSScott Long 		return;
39144c7070dbSScott Long 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
39154c7070dbSScott Long 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
39164c7070dbSScott Long }
39174c7070dbSScott Long 
39194c7070dbSScott Long static void
392023ac9029SStephen Hurd _task_fn_iov(void *context)
39214c7070dbSScott Long {
39224c7070dbSScott Long 	if_ctx_t ctx = context;
39234c7070dbSScott Long 
392477c1fcecSEric Joyner 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
392577c1fcecSEric Joyner 	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
39264c7070dbSScott Long 		return;
39274c7070dbSScott Long 
39284c7070dbSScott Long 	CTX_LOCK(ctx);
39294c7070dbSScott Long 	IFDI_VFLR_HANDLE(ctx);
39304c7070dbSScott Long 	CTX_UNLOCK(ctx);
39314c7070dbSScott Long }
39324c7070dbSScott Long 
39334c7070dbSScott Long static int
39344c7070dbSScott Long iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
39354c7070dbSScott Long {
39364c7070dbSScott Long 	int err;
39374c7070dbSScott Long 	if_int_delay_info_t info;
39384c7070dbSScott Long 	if_ctx_t ctx;
39394c7070dbSScott Long 
39404c7070dbSScott Long 	info = (if_int_delay_info_t)arg1;
39414c7070dbSScott Long 	ctx = info->iidi_ctx;
39424c7070dbSScott Long 	info->iidi_req = req;
39434c7070dbSScott Long 	info->iidi_oidp = oidp;
39444c7070dbSScott Long 	CTX_LOCK(ctx);
39454c7070dbSScott Long 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
39464c7070dbSScott Long 	CTX_UNLOCK(ctx);
39474c7070dbSScott Long 	return (err);
39484c7070dbSScott Long }
39494c7070dbSScott Long 
39504c7070dbSScott Long /*********************************************************************
39514c7070dbSScott Long  *
39524c7070dbSScott Long  *  IFNET FUNCTIONS
39534c7070dbSScott Long  *
39544c7070dbSScott Long  **********************************************************************/
39554c7070dbSScott Long 
39564c7070dbSScott Long static void
39574c7070dbSScott Long iflib_if_init_locked(if_ctx_t ctx)
39584c7070dbSScott Long {
39594c7070dbSScott Long 	iflib_stop(ctx);
39604c7070dbSScott Long 	iflib_init_locked(ctx);
39614c7070dbSScott Long }
39624c7070dbSScott Long 
39644c7070dbSScott Long static void
39654c7070dbSScott Long iflib_if_init(void *arg)
39664c7070dbSScott Long {
39674c7070dbSScott Long 	if_ctx_t ctx = arg;
39684c7070dbSScott Long 
39694c7070dbSScott Long 	CTX_LOCK(ctx);
39704c7070dbSScott Long 	iflib_if_init_locked(ctx);
39714c7070dbSScott Long 	CTX_UNLOCK(ctx);
39724c7070dbSScott Long }
39734c7070dbSScott Long 
39744c7070dbSScott Long static int
39754c7070dbSScott Long iflib_if_transmit(if_t ifp, struct mbuf *m)
39764c7070dbSScott Long {
39774c7070dbSScott Long 	if_ctx_t	ctx = if_getsoftc(ifp);
39784c7070dbSScott Long 
39794c7070dbSScott Long 	iflib_txq_t txq;
398023ac9029SStephen Hurd 	int err, qidx;
3981fe51d4cdSStephen Hurd 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
39824c7070dbSScott Long 
39834c7070dbSScott Long 	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
39844c7070dbSScott Long 		DBG_COUNTER_INC(tx_frees);
39854c7070dbSScott Long 		m_freem(m);
3986225eae1bSEric Joyner 		return (ENETDOWN);
39874c7070dbSScott Long 	}
39884c7070dbSScott Long 
398923ac9029SStephen Hurd 	MPASS(m->m_nextpkt == NULL);
3990b8ca4756SPatrick Kelsey 	/* ALTQ-enabled interfaces always use queue 0. */
39914c7070dbSScott Long 	qidx = 0;
3992b8ca4756SPatrick Kelsey 	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd))
39934c7070dbSScott Long 		qidx = QIDX(ctx, m);
39944c7070dbSScott Long 	/*
39954c7070dbSScott Long 	 * XXX calculate buf_ring based on flowid (divvy up bits?)
39964c7070dbSScott Long 	 */
39974c7070dbSScott Long 	txq = &ctx->ifc_txqs[qidx];
39984c7070dbSScott Long 
39994c7070dbSScott Long #ifdef DRIVER_BACKPRESSURE
40004c7070dbSScott Long 	if (txq->ift_closed) {
40014c7070dbSScott Long 		while (m != NULL) {
40024c7070dbSScott Long 			next = m->m_nextpkt;
40034c7070dbSScott Long 			m->m_nextpkt = NULL;
40044c7070dbSScott Long 			m_freem(m);
400564e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_frees);
40064c7070dbSScott Long 			m = next;
40074c7070dbSScott Long 		}
40084c7070dbSScott Long 		return (ENOBUFS);
40094c7070dbSScott Long 	}
40104c7070dbSScott Long #endif
401123ac9029SStephen Hurd #ifdef notyet
40124c7070dbSScott Long 	qidx = count = 0;
40134c7070dbSScott Long 	mp = marr;
40144c7070dbSScott Long 	next = m;
40154c7070dbSScott Long 	do {
40164c7070dbSScott Long 		count++;
40174c7070dbSScott Long 		next = next->m_nextpkt;
40184c7070dbSScott Long 	} while (next != NULL);
40194c7070dbSScott Long 
402016fb86abSConrad Meyer 	if (count > nitems(marr))
40214c7070dbSScott Long 		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
40224c7070dbSScott Long 			/* XXX check nextpkt */
40234c7070dbSScott Long 			m_freem(m);
40244c7070dbSScott Long 			/* XXX simplify for now */
40254c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
40264c7070dbSScott Long 			return (ENOBUFS);
40274c7070dbSScott Long 		}
40284c7070dbSScott Long 	for (next = m, i = 0; next != NULL; i++) {
40294c7070dbSScott Long 		mp[i] = next;
40304c7070dbSScott Long 		next = next->m_nextpkt;
40314c7070dbSScott Long 		mp[i]->m_nextpkt = NULL;
40324c7070dbSScott Long 	}
403323ac9029SStephen Hurd #endif
40344c7070dbSScott Long 	DBG_COUNTER_INC(tx_seen);
4035fe51d4cdSStephen Hurd 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
40364c7070dbSScott Long 
4037fe51d4cdSStephen Hurd 	if (abdicate)
4038ab2e3f79SStephen Hurd 		GROUPTASK_ENQUEUE(&txq->ift_task);
40391225d9daSStephen Hurd 	if (err) {
4040fe51d4cdSStephen Hurd 		if (!abdicate)
4041fe51d4cdSStephen Hurd 			GROUPTASK_ENQUEUE(&txq->ift_task);
40424c7070dbSScott Long 		/* support forthcoming later */
40434c7070dbSScott Long #ifdef DRIVER_BACKPRESSURE
40444c7070dbSScott Long 		txq->ift_closed = TRUE;
40454c7070dbSScott Long #endif
404695246abbSSean Bruno 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
404723ac9029SStephen Hurd 		m_freem(m);
404864e6fc13SStephen Hurd 		DBG_COUNTER_INC(tx_frees);
40494c7070dbSScott Long 	}
40504c7070dbSScott Long 
40514c7070dbSScott Long 	return (err);
40524c7070dbSScott Long }
40534c7070dbSScott Long 
4054b8ca4756SPatrick Kelsey #ifdef ALTQ
4055b8ca4756SPatrick Kelsey /*
4056b8ca4756SPatrick Kelsey  * The overall approach to integrating iflib with ALTQ is to continue to use
4057b8ca4756SPatrick Kelsey  * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
4058b8ca4756SPatrick Kelsey  * ring.  Technically, when using ALTQ, queueing to an intermediate mp_ring
4059b8ca4756SPatrick Kelsey  * is redundant/unnecessary, but doing so minimizes the amount of
4060b8ca4756SPatrick Kelsey  * ALTQ-specific code required in iflib.  It is assumed that the overhead of
4061b8ca4756SPatrick Kelsey  * redundantly queueing to an intermediate mp_ring is swamped by the
4062b8ca4756SPatrick Kelsey  * performance limitations inherent in using ALTQ.
4063b8ca4756SPatrick Kelsey  *
4064b8ca4756SPatrick Kelsey  * When ALTQ support is compiled in, all iflib drivers will use a transmit
4065b8ca4756SPatrick Kelsey  * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
4066b8ca4756SPatrick Kelsey  * given interface.  If ALTQ is enabled for an interface, then all
4067b8ca4756SPatrick Kelsey  * transmitted packets for that interface will be submitted to the ALTQ
4068b8ca4756SPatrick Kelsey  * subsystem via IFQ_ENQUEUE().  We don't use the legacy if_transmit()
4069b8ca4756SPatrick Kelsey  * implementation because it uses IFQ_HANDOFF(), which will duplicatively
4070b8ca4756SPatrick Kelsey  * update stats that the iflib machinery handles, and which is sensitve to
4071b8ca4756SPatrick Kelsey  * the disused IFF_DRV_OACTIVE flag.  Additionally, iflib_altq_if_start()
4072b8ca4756SPatrick Kelsey  * will be installed as the start routine for use by ALTQ facilities that
4073b8ca4756SPatrick Kelsey  * need to trigger queue drains on a scheduled basis.
4074b8ca4756SPatrick Kelsey  *
4075b8ca4756SPatrick Kelsey  */
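/*
 * A sketch of how these wrappers are expected to be wired up (the actual
 * attach-time code lives elsewhere in this file and may differ): when ALTQ
 * is compiled in, the ifnet transmit and start methods are pointed at the
 * ALTQ-aware routines instead of iflib_if_transmit():
 *
 *	if_settransmitfn(ifp, iflib_altq_if_transmit);
 *	if_setstartfn(ifp, iflib_altq_if_start);
 */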
4076b8ca4756SPatrick Kelsey static void
4077b8ca4756SPatrick Kelsey iflib_altq_if_start(if_t ifp)
4078b8ca4756SPatrick Kelsey {
4079b8ca4756SPatrick Kelsey 	struct ifaltq *ifq = &ifp->if_snd;
4080b8ca4756SPatrick Kelsey 	struct mbuf *m;
4081b8ca4756SPatrick Kelsey 
4082b8ca4756SPatrick Kelsey 	IFQ_LOCK(ifq);
4083b8ca4756SPatrick Kelsey 	IFQ_DEQUEUE_NOLOCK(ifq, m);
4084b8ca4756SPatrick Kelsey 	while (m != NULL) {
4085b8ca4756SPatrick Kelsey 		iflib_if_transmit(ifp, m);
4086b8ca4756SPatrick Kelsey 		IFQ_DEQUEUE_NOLOCK(ifq, m);
4087b8ca4756SPatrick Kelsey 	}
4088b8ca4756SPatrick Kelsey 	IFQ_UNLOCK(ifq);
4089b8ca4756SPatrick Kelsey }
4090b8ca4756SPatrick Kelsey 
4091b8ca4756SPatrick Kelsey static int
4092b8ca4756SPatrick Kelsey iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
4093b8ca4756SPatrick Kelsey {
4094b8ca4756SPatrick Kelsey 	int err;
4095b8ca4756SPatrick Kelsey 
4096b8ca4756SPatrick Kelsey 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
4097b8ca4756SPatrick Kelsey 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
4098b8ca4756SPatrick Kelsey 		if (err == 0)
4099b8ca4756SPatrick Kelsey 			iflib_altq_if_start(ifp);
4100b8ca4756SPatrick Kelsey 	} else
4101b8ca4756SPatrick Kelsey 		err = iflib_if_transmit(ifp, m);
4102b8ca4756SPatrick Kelsey 
4103b8ca4756SPatrick Kelsey 	return (err);
4104b8ca4756SPatrick Kelsey }
4105b8ca4756SPatrick Kelsey #endif /* ALTQ */
4106b8ca4756SPatrick Kelsey 
41074c7070dbSScott Long static void
41084c7070dbSScott Long iflib_if_qflush(if_t ifp)
41094c7070dbSScott Long {
41104c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
41114c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
41124c7070dbSScott Long 	int i;
41134c7070dbSScott Long 
41147b610b60SSean Bruno 	STATE_LOCK(ctx);
41154c7070dbSScott Long 	ctx->ifc_flags |= IFC_QFLUSH;
41167b610b60SSean Bruno 	STATE_UNLOCK(ctx);
41174c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
411895246abbSSean Bruno 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
41194c7070dbSScott Long 			iflib_txq_check_drain(txq, 0);
41207b610b60SSean Bruno 	STATE_LOCK(ctx);
41214c7070dbSScott Long 	ctx->ifc_flags &= ~IFC_QFLUSH;
41227b610b60SSean Bruno 	STATE_UNLOCK(ctx);
41234c7070dbSScott Long 
4124b8ca4756SPatrick Kelsey 	/*
4125b8ca4756SPatrick Kelsey 	 * When ALTQ is enabled, this will also take care of purging the
4126b8ca4756SPatrick Kelsey 	 * ALTQ queue(s).
4127b8ca4756SPatrick Kelsey 	 */
41284c7070dbSScott Long 	if_qflush(ifp);
41294c7070dbSScott Long }
41304c7070dbSScott Long 
41320c919c23SStephen Hurd #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
41330c919c23SStephen Hurd 		     IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
41340c919c23SStephen Hurd 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
41356554362cSAndrew Gallatin 		     IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM | IFCAP_NOMAP)
41364c7070dbSScott Long 
41374c7070dbSScott Long static int
41384c7070dbSScott Long iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
41394c7070dbSScott Long {
41404c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
41414c7070dbSScott Long 	struct ifreq	*ifr = (struct ifreq *)data;
41424c7070dbSScott Long #if defined(INET) || defined(INET6)
41434c7070dbSScott Long 	struct ifaddr	*ifa = (struct ifaddr *)data;
41444c7070dbSScott Long #endif
41451722eeacSMarius Strobl 	bool		avoid_reset = false;
41464c7070dbSScott Long 	int		err = 0, reinit = 0, bits;
41474c7070dbSScott Long 
41484c7070dbSScott Long 	switch (command) {
41494c7070dbSScott Long 	case SIOCSIFADDR:
41504c7070dbSScott Long #ifdef INET
41514c7070dbSScott Long 		if (ifa->ifa_addr->sa_family == AF_INET)
41521722eeacSMarius Strobl 			avoid_reset = true;
41534c7070dbSScott Long #endif
41544c7070dbSScott Long #ifdef INET6
41554c7070dbSScott Long 		if (ifa->ifa_addr->sa_family == AF_INET6)
41561722eeacSMarius Strobl 			avoid_reset = true;
41574c7070dbSScott Long #endif
41584c7070dbSScott Long 		/*
41594c7070dbSScott Long 		** Calling init results in link renegotiation,
41604c7070dbSScott Long 		** so we avoid doing it when possible.
41614c7070dbSScott Long 		*/
41624c7070dbSScott Long 		if (avoid_reset) {
41634c7070dbSScott Long 			if_setflagbits(ifp, IFF_UP,0);
41644c7070dbSScott Long 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
41654c7070dbSScott Long 				reinit = 1;
41664c7070dbSScott Long #ifdef INET
41674c7070dbSScott Long 			if (!(if_getflags(ifp) & IFF_NOARP))
41684c7070dbSScott Long 				arp_ifinit(ifp, ifa);
41694c7070dbSScott Long #endif
41704c7070dbSScott Long 		} else
41714c7070dbSScott Long 			err = ether_ioctl(ifp, command, data);
41724c7070dbSScott Long 		break;
41734c7070dbSScott Long 	case SIOCSIFMTU:
41744c7070dbSScott Long 		CTX_LOCK(ctx);
41754c7070dbSScott Long 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
41764c7070dbSScott Long 			CTX_UNLOCK(ctx);
41774c7070dbSScott Long 			break;
41784c7070dbSScott Long 		}
41794c7070dbSScott Long 		bits = if_getdrvflags(ifp);
41804c7070dbSScott Long 		/* stop the driver and free any clusters before proceeding */
41814c7070dbSScott Long 		iflib_stop(ctx);
41824c7070dbSScott Long 
41834c7070dbSScott Long 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
41847b610b60SSean Bruno 			STATE_LOCK(ctx);
41854c7070dbSScott Long 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
41864c7070dbSScott Long 				ctx->ifc_flags |= IFC_MULTISEG;
41874c7070dbSScott Long 			else
41884c7070dbSScott Long 				ctx->ifc_flags &= ~IFC_MULTISEG;
41897b610b60SSean Bruno 			STATE_UNLOCK(ctx);
41904c7070dbSScott Long 			err = if_setmtu(ifp, ifr->ifr_mtu);
41914c7070dbSScott Long 		}
41924c7070dbSScott Long 		iflib_init_locked(ctx);
41937b610b60SSean Bruno 		STATE_LOCK(ctx);
41944c7070dbSScott Long 		if_setdrvflags(ifp, bits);
41957b610b60SSean Bruno 		STATE_UNLOCK(ctx);
41964c7070dbSScott Long 		CTX_UNLOCK(ctx);
41974c7070dbSScott Long 		break;
41984c7070dbSScott Long 	case SIOCSIFFLAGS:
4199ab2e3f79SStephen Hurd 		CTX_LOCK(ctx);
4200ab2e3f79SStephen Hurd 		if (if_getflags(ifp) & IFF_UP) {
4201ab2e3f79SStephen Hurd 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4202ab2e3f79SStephen Hurd 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4203ab2e3f79SStephen Hurd 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4204ab2e3f79SStephen Hurd 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4205ab2e3f79SStephen Hurd 				}
4206ab2e3f79SStephen Hurd 			} else
4207ab2e3f79SStephen Hurd 				reinit = 1;
4208ab2e3f79SStephen Hurd 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4209ab2e3f79SStephen Hurd 			iflib_stop(ctx);
4210ab2e3f79SStephen Hurd 		}
4211ab2e3f79SStephen Hurd 		ctx->ifc_if_flags = if_getflags(ifp);
4212ab2e3f79SStephen Hurd 		CTX_UNLOCK(ctx);
42134c7070dbSScott Long 		break;
42144c7070dbSScott Long 	case SIOCADDMULTI:
42154c7070dbSScott Long 	case SIOCDELMULTI:
42164c7070dbSScott Long 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4217ab2e3f79SStephen Hurd 			CTX_LOCK(ctx);
4218ab2e3f79SStephen Hurd 			IFDI_INTR_DISABLE(ctx);
4219ab2e3f79SStephen Hurd 			IFDI_MULTI_SET(ctx);
4220ab2e3f79SStephen Hurd 			IFDI_INTR_ENABLE(ctx);
4221ab2e3f79SStephen Hurd 			CTX_UNLOCK(ctx);
42224c7070dbSScott Long 		}
42234c7070dbSScott Long 		break;
42244c7070dbSScott Long 	case SIOCSIFMEDIA:
42254c7070dbSScott Long 		CTX_LOCK(ctx);
42264c7070dbSScott Long 		IFDI_MEDIA_SET(ctx);
42274c7070dbSScott Long 		CTX_UNLOCK(ctx);
42281722eeacSMarius Strobl 		/* FALLTHROUGH */
42294c7070dbSScott Long 	case SIOCGIFMEDIA:
4230a027c8e9SStephen Hurd 	case SIOCGIFXMEDIA:
4231e2621d96SMatt Macy 		err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command);
42324c7070dbSScott Long 		break;
42334c7070dbSScott Long 	case SIOCGI2C:
42344c7070dbSScott Long 	{
42354c7070dbSScott Long 		struct ifi2creq i2c;
42364c7070dbSScott Long 
4237541d96aaSBrooks Davis 		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
42384c7070dbSScott Long 		if (err != 0)
42394c7070dbSScott Long 			break;
42404c7070dbSScott Long 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
42414c7070dbSScott Long 			err = EINVAL;
42424c7070dbSScott Long 			break;
42434c7070dbSScott Long 		}
42444c7070dbSScott Long 		if (i2c.len > sizeof(i2c.data)) {
42454c7070dbSScott Long 			err = EINVAL;
42464c7070dbSScott Long 			break;
42474c7070dbSScott Long 		}
42484c7070dbSScott Long 
42494c7070dbSScott Long 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4250541d96aaSBrooks Davis 			err = copyout(&i2c, ifr_data_get_ptr(ifr),
4251541d96aaSBrooks Davis 			    sizeof(i2c));
42524c7070dbSScott Long 		break;
42534c7070dbSScott Long 	}
42544c7070dbSScott Long 	case SIOCSIFCAP:
42554c7070dbSScott Long 	{
42560c919c23SStephen Hurd 		int mask, setmask, oldmask;
42574c7070dbSScott Long 
42580c919c23SStephen Hurd 		oldmask = if_getcapenable(ifp);
42590c919c23SStephen Hurd 		mask = ifr->ifr_reqcap ^ oldmask;
42606554362cSAndrew Gallatin 		mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_NOMAP;
42614c7070dbSScott Long 		setmask = 0;
42624c7070dbSScott Long #ifdef TCP_OFFLOAD
42634c7070dbSScott Long 		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
42644c7070dbSScott Long #endif
42654c7070dbSScott Long 		setmask |= (mask & IFCAP_FLAGS);
42660c919c23SStephen Hurd 		setmask |= (mask & IFCAP_WOL);
42674c7070dbSScott Long 
42680c919c23SStephen Hurd 		/*
4269a42546dfSStephen Hurd 		 * If any RX csum has changed, change all the ones that
4270a42546dfSStephen Hurd 		 * are supported by the driver.
42710c919c23SStephen Hurd 		 */
4272a42546dfSStephen Hurd 		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4273a42546dfSStephen Hurd 			setmask |= ctx->ifc_softc_ctx.isc_capabilities &
4274a42546dfSStephen Hurd 			    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4275a42546dfSStephen Hurd 		}
42760c919c23SStephen Hurd 
42774c7070dbSScott Long 		/*
42784c7070dbSScott Long 		 * We want to ensure that traffic has stopped before we change any of the flags.
42794c7070dbSScott Long 		 */
42804c7070dbSScott Long 		if (setmask) {
42814c7070dbSScott Long 			CTX_LOCK(ctx);
42824c7070dbSScott Long 			bits = if_getdrvflags(ifp);
42830c919c23SStephen Hurd 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
42844c7070dbSScott Long 				iflib_stop(ctx);
42857b610b60SSean Bruno 			STATE_LOCK(ctx);
42864c7070dbSScott Long 			if_togglecapenable(ifp, setmask);
42877b610b60SSean Bruno 			STATE_UNLOCK(ctx);
42880c919c23SStephen Hurd 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
42894c7070dbSScott Long 				iflib_init_locked(ctx);
42907b610b60SSean Bruno 			STATE_LOCK(ctx);
42914c7070dbSScott Long 			if_setdrvflags(ifp, bits);
42927b610b60SSean Bruno 			STATE_UNLOCK(ctx);
42934c7070dbSScott Long 			CTX_UNLOCK(ctx);
42944c7070dbSScott Long 		}
42950c919c23SStephen Hurd 		if_vlancap(ifp);
42964c7070dbSScott Long 		break;
42974c7070dbSScott Long 	}
42984c7070dbSScott Long 	case SIOCGPRIVATE_0:
42994c7070dbSScott Long 	case SIOCSDRVSPEC:
43004c7070dbSScott Long 	case SIOCGDRVSPEC:
43014c7070dbSScott Long 		CTX_LOCK(ctx);
43024c7070dbSScott Long 		err = IFDI_PRIV_IOCTL(ctx, command, data);
43034c7070dbSScott Long 		CTX_UNLOCK(ctx);
43044c7070dbSScott Long 		break;
43054c7070dbSScott Long 	default:
43064c7070dbSScott Long 		err = ether_ioctl(ifp, command, data);
43074c7070dbSScott Long 		break;
43084c7070dbSScott Long 	}
43094c7070dbSScott Long 	if (reinit)
43104c7070dbSScott Long 		iflib_if_init(ctx);
43114c7070dbSScott Long 	return (err);
43124c7070dbSScott Long }
43134c7070dbSScott Long 
43144c7070dbSScott Long static uint64_t
43154c7070dbSScott Long iflib_if_get_counter(if_t ifp, ift_counter cnt)
43164c7070dbSScott Long {
43174c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
43184c7070dbSScott Long 
43194c7070dbSScott Long 	return (IFDI_GET_COUNTER(ctx, cnt));
43204c7070dbSScott Long }
43214c7070dbSScott Long 
43224c7070dbSScott Long /*********************************************************************
43234c7070dbSScott Long  *
43244c7070dbSScott Long  *  OTHER FUNCTIONS EXPORTED TO THE STACK
43254c7070dbSScott Long  *
43264c7070dbSScott Long  **********************************************************************/
43274c7070dbSScott Long 
43284c7070dbSScott Long static void
43294c7070dbSScott Long iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
43304c7070dbSScott Long {
43314c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
43324c7070dbSScott Long 
43334c7070dbSScott Long 	if ((void *)ctx != arg)
43344c7070dbSScott Long 		return;
43354c7070dbSScott Long 
43364c7070dbSScott Long 	if ((vtag == 0) || (vtag > 4095))
43374c7070dbSScott Long 		return;
43384c7070dbSScott Long 
433953b5b9b0SEric Joyner 	if (iflib_in_detach(ctx))
434053b5b9b0SEric Joyner 		return;
434153b5b9b0SEric Joyner 
43424c7070dbSScott Long 	CTX_LOCK(ctx);
43434c7070dbSScott Long 	IFDI_VLAN_REGISTER(ctx, vtag);
43444c7070dbSScott Long 	/* Re-init to load the changes */
43454c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
434621e10b16SSean Bruno 		iflib_if_init_locked(ctx);
43474c7070dbSScott Long 	CTX_UNLOCK(ctx);
43484c7070dbSScott Long }
43494c7070dbSScott Long 
43504c7070dbSScott Long static void
43514c7070dbSScott Long iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
43524c7070dbSScott Long {
43534c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
43544c7070dbSScott Long 
43554c7070dbSScott Long 	if ((void *)ctx != arg)
43564c7070dbSScott Long 		return;
43574c7070dbSScott Long 
43584c7070dbSScott Long 	if ((vtag == 0) || (vtag > 4095))
43594c7070dbSScott Long 		return;
43604c7070dbSScott Long 
43614c7070dbSScott Long 	CTX_LOCK(ctx);
43624c7070dbSScott Long 	IFDI_VLAN_UNREGISTER(ctx, vtag);
43634c7070dbSScott Long 	/* Re-init to load the changes */
43644c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
436521e10b16SSean Bruno 		iflib_if_init_locked(ctx);
43664c7070dbSScott Long 	CTX_UNLOCK(ctx);
43674c7070dbSScott Long }
43684c7070dbSScott Long 
43694c7070dbSScott Long static void
43704c7070dbSScott Long iflib_led_func(void *arg, int onoff)
43714c7070dbSScott Long {
43724c7070dbSScott Long 	if_ctx_t ctx = arg;
43734c7070dbSScott Long 
43744c7070dbSScott Long 	CTX_LOCK(ctx);
43754c7070dbSScott Long 	IFDI_LED_FUNC(ctx, onoff);
43764c7070dbSScott Long 	CTX_UNLOCK(ctx);
43774c7070dbSScott Long }
43784c7070dbSScott Long 
43794c7070dbSScott Long /*********************************************************************
43804c7070dbSScott Long  *
43814c7070dbSScott Long  *  BUS FUNCTION DEFINITIONS
43824c7070dbSScott Long  *
43834c7070dbSScott Long  **********************************************************************/
43844c7070dbSScott Long 
43854c7070dbSScott Long int
43864c7070dbSScott Long iflib_device_probe(device_t dev)
43874c7070dbSScott Long {
4388d49e83eaSMarius Strobl 	const pci_vendor_info_t *ent;
43894c7070dbSScott Long 	if_shared_ctx_t sctx;
4390d49e83eaSMarius Strobl 	uint16_t pci_device_id, pci_rev_id, pci_subdevice_id, pci_subvendor_id;
4391d49e83eaSMarius Strobl 	uint16_t pci_vendor_id;
43924c7070dbSScott Long 
43934c7070dbSScott Long 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
43944c7070dbSScott Long 		return (ENOTSUP);
43954c7070dbSScott Long 
43964c7070dbSScott Long 	pci_vendor_id = pci_get_vendor(dev);
43974c7070dbSScott Long 	pci_device_id = pci_get_device(dev);
43984c7070dbSScott Long 	pci_subvendor_id = pci_get_subvendor(dev);
43994c7070dbSScott Long 	pci_subdevice_id = pci_get_subdevice(dev);
44004c7070dbSScott Long 	pci_rev_id = pci_get_revid(dev);
44014c7070dbSScott Long 	if (sctx->isc_parse_devinfo != NULL)
44024c7070dbSScott Long 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
44034c7070dbSScott Long 
44044c7070dbSScott Long 	ent = sctx->isc_vendor_info;
44054c7070dbSScott Long 	while (ent->pvi_vendor_id != 0) {
44064c7070dbSScott Long 		if (pci_vendor_id != ent->pvi_vendor_id) {
44074c7070dbSScott Long 			ent++;
44084c7070dbSScott Long 			continue;
44094c7070dbSScott Long 		}
44104c7070dbSScott Long 		if ((pci_device_id == ent->pvi_device_id) &&
44114c7070dbSScott Long 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
44124c7070dbSScott Long 		     (ent->pvi_subvendor_id == 0)) &&
44134c7070dbSScott Long 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
44144c7070dbSScott Long 		     (ent->pvi_subdevice_id == 0)) &&
44154c7070dbSScott Long 		    ((pci_rev_id == ent->pvi_rev_id) ||
44164c7070dbSScott Long 		     (ent->pvi_rev_id == 0))) {
44174c7070dbSScott Long 
44184c7070dbSScott Long 			device_set_desc_copy(dev, ent->pvi_name);
44194c7070dbSScott Long 			/* This needs to be changed to zero if the bus probing code
44204c7070dbSScott Long 			 * ever stops re-probing on best match, because the sctx
44214c7070dbSScott Long 			 * may have its values overwritten by register calls
44224c7070dbSScott Long 			 * in subsequent probes.
44234c7070dbSScott Long 			 */
44244c7070dbSScott Long 			return (BUS_PROBE_DEFAULT);
44254c7070dbSScott Long 		}
44264c7070dbSScott Long 		ent++;
44274c7070dbSScott Long 	}
44284c7070dbSScott Long 	return (ENXIO);
44294c7070dbSScott Long }
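
/*
 * Illustrative sketch (not part of iflib itself): the vendor table walked
 * above is typically supplied by the driver as an array terminated by an
 * all-zero entry, e.g. (assuming the PVID()/PVID_END helpers from iflib.h
 * and a hypothetical "foo" driver):
 *
 *	static const pci_vendor_info_t foo_vendor_info_array[] = {
 *		PVID(0x8086, 0x10d3, "Foo(R) example adapter"),
 *		PVID_END
 *	};
 *
 * DEVICE_REGISTER() returns the if_shared_ctx_t whose isc_vendor_info
 * points at such a table.
 */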
44304c7070dbSScott Long 
4431668d6dbbSEric Joyner int
4432668d6dbbSEric Joyner iflib_device_probe_vendor(device_t dev)
4433668d6dbbSEric Joyner {
4434668d6dbbSEric Joyner 	int probe;
4435668d6dbbSEric Joyner 
4436668d6dbbSEric Joyner 	probe = iflib_device_probe(dev);
4437668d6dbbSEric Joyner 	if (probe == BUS_PROBE_DEFAULT)
4438668d6dbbSEric Joyner 		return (BUS_PROBE_VENDOR);
4439668d6dbbSEric Joyner 	else
4440668d6dbbSEric Joyner 		return (probe);
4441668d6dbbSEric Joyner }
4442668d6dbbSEric Joyner 
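/*
 * Seed the softc context's queue and descriptor counts from the
 * administrator's overrides (the ifc_sysctl_* fields) when set, or from the
 * driver's defaults otherwise, then clamp each ring size to the driver's
 * min/max and fall back to the default when the value is not a power of 2.
 * The ifc_sysctl_* fields are filled in from the per-device tunables
 * registered by iflib_add_device_sysctl_pre().
 */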
444309f6ff4fSMatt Macy static void
444409f6ff4fSMatt Macy iflib_reset_qvalues(if_ctx_t ctx)
44454c7070dbSScott Long {
444609f6ff4fSMatt Macy 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
444709f6ff4fSMatt Macy 	if_shared_ctx_t sctx = ctx->ifc_sctx;
444809f6ff4fSMatt Macy 	device_t dev = ctx->ifc_dev;
444946d0f824SMatt Macy 	int i;
44504c7070dbSScott Long 
445123ac9029SStephen Hurd 	if (ctx->ifc_sysctl_ntxqs != 0)
445223ac9029SStephen Hurd 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
445323ac9029SStephen Hurd 	if (ctx->ifc_sysctl_nrxqs != 0)
445423ac9029SStephen Hurd 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
445523ac9029SStephen Hurd 
445623ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
445723ac9029SStephen Hurd 		if (ctx->ifc_sysctl_ntxds[i] != 0)
445823ac9029SStephen Hurd 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
445923ac9029SStephen Hurd 		else
446023ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
446123ac9029SStephen Hurd 	}
446223ac9029SStephen Hurd 
446323ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
446423ac9029SStephen Hurd 		if (ctx->ifc_sysctl_nrxds[i] != 0)
446523ac9029SStephen Hurd 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
446623ac9029SStephen Hurd 		else
446723ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
446823ac9029SStephen Hurd 	}
446923ac9029SStephen Hurd 
447023ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
447123ac9029SStephen Hurd 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
447223ac9029SStephen Hurd 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
447323ac9029SStephen Hurd 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
447423ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
447523ac9029SStephen Hurd 		}
447623ac9029SStephen Hurd 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
447723ac9029SStephen Hurd 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
447823ac9029SStephen Hurd 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
447923ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
448023ac9029SStephen Hurd 		}
4481afb77372SEric Joyner 		if (!powerof2(scctx->isc_nrxd[i])) {
4482afb77372SEric Joyner 			device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n",
4483afb77372SEric Joyner 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]);
4484afb77372SEric Joyner 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4485afb77372SEric Joyner 		}
448623ac9029SStephen Hurd 	}
448723ac9029SStephen Hurd 
448823ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
448923ac9029SStephen Hurd 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
449023ac9029SStephen Hurd 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
449123ac9029SStephen Hurd 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
449223ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
449323ac9029SStephen Hurd 		}
449423ac9029SStephen Hurd 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
449523ac9029SStephen Hurd 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
449623ac9029SStephen Hurd 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
449723ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
449823ac9029SStephen Hurd 		}
4499afb77372SEric Joyner 		if (!powerof2(scctx->isc_ntxd[i])) {
4500afb77372SEric Joyner 			device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n",
4501afb77372SEric Joyner 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]);
4502afb77372SEric Joyner 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4503afb77372SEric Joyner 		}
450423ac9029SStephen Hurd 	}
450509f6ff4fSMatt Macy }
4506ab2e3f79SStephen Hurd 
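/*
 * Register an Ethernet-type pfil(9) input head for this interface and hang
 * it off every RX queue so inbound packets can be filtered before they are
 * handed to the stack.
 */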
45076d49b41eSAndrew Gallatin static void
45086d49b41eSAndrew Gallatin iflib_add_pfil(if_ctx_t ctx)
45096d49b41eSAndrew Gallatin {
45106d49b41eSAndrew Gallatin 	struct pfil_head *pfil;
45116d49b41eSAndrew Gallatin 	struct pfil_head_args pa;
45126d49b41eSAndrew Gallatin 	iflib_rxq_t rxq;
45136d49b41eSAndrew Gallatin 	int i;
45146d49b41eSAndrew Gallatin 
45156d49b41eSAndrew Gallatin 	pa.pa_version = PFIL_VERSION;
45166d49b41eSAndrew Gallatin 	pa.pa_flags = PFIL_IN;
45176d49b41eSAndrew Gallatin 	pa.pa_type = PFIL_TYPE_ETHERNET;
45186d49b41eSAndrew Gallatin 	pa.pa_headname = ctx->ifc_ifp->if_xname;
45196d49b41eSAndrew Gallatin 	pfil = pfil_head_register(&pa);
45206d49b41eSAndrew Gallatin 
45216d49b41eSAndrew Gallatin 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
45226d49b41eSAndrew Gallatin 		rxq->pfil = pfil;
45236d49b41eSAndrew Gallatin 	}
45246d49b41eSAndrew Gallatin }
45256d49b41eSAndrew Gallatin 
45266d49b41eSAndrew Gallatin static void
45276d49b41eSAndrew Gallatin iflib_rem_pfil(if_ctx_t ctx)
45286d49b41eSAndrew Gallatin {
45296d49b41eSAndrew Gallatin 	struct pfil_head *pfil;
45306d49b41eSAndrew Gallatin 	iflib_rxq_t rxq;
45316d49b41eSAndrew Gallatin 	int i;
45326d49b41eSAndrew Gallatin 
45336d49b41eSAndrew Gallatin 	rxq = ctx->ifc_rxqs;
45346d49b41eSAndrew Gallatin 	pfil = rxq->pfil;
45356d49b41eSAndrew Gallatin 	for (i = 0; i < NRXQSETS(ctx); i++, rxq++) {
45366d49b41eSAndrew Gallatin 		rxq->pfil = NULL;
45376d49b41eSAndrew Gallatin 	}
45386d49b41eSAndrew Gallatin 	pfil_head_unregister(pfil);
45396d49b41eSAndrew Gallatin }
45406d49b41eSAndrew Gallatin 
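/*
 * Return the CPU offset at which this context should start binding its
 * queues.  Contexts sharing the same interrupt CPU set (ifc_cpus) share a
 * cpu_offset record; each caller advances the shared offset by its queue
 * count so successive interfaces are spread across different cores.
 */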
4541f154ece0SStephen Hurd static uint16_t
4542f154ece0SStephen Hurd get_ctx_core_offset(if_ctx_t ctx)
4543f154ece0SStephen Hurd {
4544f154ece0SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4545f154ece0SStephen Hurd 	struct cpu_offset *op;
4546f154ece0SStephen Hurd 	uint16_t qc;
4547f154ece0SStephen Hurd 	uint16_t ret = ctx->ifc_sysctl_core_offset;
4548f154ece0SStephen Hurd 
4549f154ece0SStephen Hurd 	if (ret != CORE_OFFSET_UNSPECIFIED)
4550f154ece0SStephen Hurd 		return (ret);
4551f154ece0SStephen Hurd 
4552f154ece0SStephen Hurd 	if (ctx->ifc_sysctl_separate_txrx)
4553f154ece0SStephen Hurd 		qc = scctx->isc_ntxqsets + scctx->isc_nrxqsets;
4554f154ece0SStephen Hurd 	else
4555f154ece0SStephen Hurd 		qc = max(scctx->isc_ntxqsets, scctx->isc_nrxqsets);
4556f154ece0SStephen Hurd 
4557f154ece0SStephen Hurd 	mtx_lock(&cpu_offset_mtx);
4558f154ece0SStephen Hurd 	SLIST_FOREACH(op, &cpu_offsets, entries) {
4559f154ece0SStephen Hurd 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
4560f154ece0SStephen Hurd 			ret = op->offset;
4561f154ece0SStephen Hurd 			op->offset += qc;
4562f154ece0SStephen Hurd 			MPASS(op->refcount < UINT_MAX);
4563f154ece0SStephen Hurd 			op->refcount++;
4564f154ece0SStephen Hurd 			break;
4565f154ece0SStephen Hurd 		}
4566f154ece0SStephen Hurd 	}
4567f154ece0SStephen Hurd 	if (ret == CORE_OFFSET_UNSPECIFIED) {
4568f154ece0SStephen Hurd 		ret = 0;
4569f154ece0SStephen Hurd 		op = malloc(sizeof(struct cpu_offset), M_IFLIB,
4570f154ece0SStephen Hurd 		    M_NOWAIT | M_ZERO);
4571f154ece0SStephen Hurd 		if (op == NULL) {
4572f154ece0SStephen Hurd 			device_printf(ctx->ifc_dev,
4573f154ece0SStephen Hurd 			    "allocation for cpu offset failed.\n");
4574f154ece0SStephen Hurd 		} else {
4575f154ece0SStephen Hurd 			op->offset = qc;
4576f154ece0SStephen Hurd 			op->refcount = 1;
4577f154ece0SStephen Hurd 			CPU_COPY(&ctx->ifc_cpus, &op->set);
4578f154ece0SStephen Hurd 			SLIST_INSERT_HEAD(&cpu_offsets, op, entries);
4579f154ece0SStephen Hurd 		}
4580f154ece0SStephen Hurd 	}
4581f154ece0SStephen Hurd 	mtx_unlock(&cpu_offset_mtx);
4582f154ece0SStephen Hurd 
4583f154ece0SStephen Hurd 	return (ret);
4584f154ece0SStephen Hurd }
4585f154ece0SStephen Hurd 
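/*
 * Drop this context's reference on its shared cpu_offset record, freeing
 * the record once the last interface using that CPU set has detached.
 */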
4586f154ece0SStephen Hurd static void
4587f154ece0SStephen Hurd unref_ctx_core_offset(if_ctx_t ctx)
4588f154ece0SStephen Hurd {
4589f154ece0SStephen Hurd 	struct cpu_offset *op, *top;
4590f154ece0SStephen Hurd 
4591f154ece0SStephen Hurd 	mtx_lock(&cpu_offset_mtx);
4592f154ece0SStephen Hurd 	SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) {
4593f154ece0SStephen Hurd 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
4594f154ece0SStephen Hurd 			MPASS(op->refcount > 0);
4595f154ece0SStephen Hurd 			op->refcount--;
4596f154ece0SStephen Hurd 			if (op->refcount == 0) {
4597f154ece0SStephen Hurd 				SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries);
4598f154ece0SStephen Hurd 				free(op, M_IFLIB);
4599f154ece0SStephen Hurd 			}
4600f154ece0SStephen Hurd 			break;
4601f154ece0SStephen Hurd 		}
4602f154ece0SStephen Hurd 	}
4603f154ece0SStephen Hurd 	mtx_unlock(&cpu_offset_mtx);
4604f154ece0SStephen Hurd }
4605f154ece0SStephen Hurd 
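/*
 * Core attach path shared by iflib PCI drivers.  In outline:
 *   - allocate the if_ctx (and, if needed, the driver softc) and register
 *     the ifnet and kobj methods via iflib_register()
 *   - call IFDI_ATTACH_PRE() and validate/clamp the queue parameters the
 *     driver filled into the softc context
 *   - set up MSI-X/MSI/legacy interrupts and allocate the queue sets
 *   - ether_ifattach(), IFDI_ATTACH_POST(), netmap attach, and the
 *     post-attach sysctls
 * Any failure unwinds through the fail_* labels at the bottom.
 */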
460609f6ff4fSMatt Macy int
460709f6ff4fSMatt Macy iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
460809f6ff4fSMatt Macy {
460909f6ff4fSMatt Macy 	if_ctx_t ctx;
461009f6ff4fSMatt Macy 	if_t ifp;
461109f6ff4fSMatt Macy 	if_softc_ctx_t scctx;
46123d10e9edSMarius Strobl 	kobjop_desc_t kobj_desc;
46133d10e9edSMarius Strobl 	kobj_method_t *kobj_method;
4614afb77372SEric Joyner 	int err, msix, rid;
46153d10e9edSMarius Strobl 	uint16_t main_rxq, main_txq;
461609f6ff4fSMatt Macy 
461709f6ff4fSMatt Macy 	ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
461809f6ff4fSMatt Macy 
461909f6ff4fSMatt Macy 	if (sc == NULL) {
462009f6ff4fSMatt Macy 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
462109f6ff4fSMatt Macy 		device_set_softc(dev, ctx);
462209f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
462309f6ff4fSMatt Macy 	}
462409f6ff4fSMatt Macy 
462509f6ff4fSMatt Macy 	ctx->ifc_sctx = sctx;
462609f6ff4fSMatt Macy 	ctx->ifc_dev = dev;
462709f6ff4fSMatt Macy 	ctx->ifc_softc = sc;
462809f6ff4fSMatt Macy 
462909f6ff4fSMatt Macy 	if ((err = iflib_register(ctx)) != 0) {
463009f6ff4fSMatt Macy 		device_printf(dev, "iflib_register failed %d\n", err);
46317f3eb9daSPatrick Kelsey 		goto fail_ctx_free;
463209f6ff4fSMatt Macy 	}
463309f6ff4fSMatt Macy 	iflib_add_device_sysctl_pre(ctx);
463409f6ff4fSMatt Macy 
463509f6ff4fSMatt Macy 	scctx = &ctx->ifc_softc_ctx;
463609f6ff4fSMatt Macy 	ifp = ctx->ifc_ifp;
463709f6ff4fSMatt Macy 
463809f6ff4fSMatt Macy 	iflib_reset_qvalues(ctx);
4639aa8a24d3SStephen Hurd 	CTX_LOCK(ctx);
4640ab2e3f79SStephen Hurd 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
46414c7070dbSScott Long 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
46427f3eb9daSPatrick Kelsey 		goto fail_unlock;
46434c7070dbSScott Long 	}
46441248952aSSean Bruno 	_iflib_pre_assert(scctx);
46451248952aSSean Bruno 	ctx->ifc_txrx = *scctx->isc_txrx;
46461248952aSSean Bruno 
4647e2621d96SMatt Macy 	if (sctx->isc_flags & IFLIB_DRIVER_MEDIA)
4648e2621d96SMatt Macy 		ctx->ifc_mediap = scctx->isc_media;
4649e2621d96SMatt Macy 
46501248952aSSean Bruno #ifdef INVARIANTS
46517f87c040SMarius Strobl 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
46521248952aSSean Bruno 		MPASS(scctx->isc_tx_csum_flags);
46531248952aSSean Bruno #endif
46541248952aSSean Bruno 
46556554362cSAndrew Gallatin 	if_setcapabilities(ifp,
46566554362cSAndrew Gallatin 	    scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_NOMAP);
46576554362cSAndrew Gallatin 	if_setcapenable(ifp,
46586554362cSAndrew Gallatin 	    scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_NOMAP);
46591248952aSSean Bruno 
46601248952aSSean Bruno 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
46611248952aSSean Bruno 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
46621248952aSSean Bruno 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
46631248952aSSean Bruno 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
466423ac9029SStephen Hurd 
466595246abbSSean Bruno 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
466695246abbSSean Bruno 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
466723ac9029SStephen Hurd 
466823ac9029SStephen Hurd 	/* XXX change for per-queue sizes */
46691722eeacSMarius Strobl 	device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
467023ac9029SStephen Hurd 	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
467123ac9029SStephen Hurd 
467223ac9029SStephen Hurd 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
467323ac9029SStephen Hurd 	    MAX_SINGLE_PACKET_FRACTION)
467423ac9029SStephen Hurd 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
467523ac9029SStephen Hurd 		    MAX_SINGLE_PACKET_FRACTION);
467623ac9029SStephen Hurd 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
467723ac9029SStephen Hurd 	    MAX_SINGLE_PACKET_FRACTION)
467823ac9029SStephen Hurd 		scctx->isc_tx_tso_segments_max = max(1,
467923ac9029SStephen Hurd 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
46804c7070dbSScott Long 
46814c7070dbSScott Long 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
46827f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
46837f87c040SMarius Strobl 		/*
46847f87c040SMarius Strobl 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
46857f87c040SMarius Strobl 		 * but some MACs do.
46867f87c040SMarius Strobl 		 */
46877f87c040SMarius Strobl 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
46887f87c040SMarius Strobl 		    IP_MAXPACKET));
46897f87c040SMarius Strobl 		/*
46907f87c040SMarius Strobl 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
46917f87c040SMarius Strobl 		 * into account.  In the worst case, each of these calls will
46927f87c040SMarius Strobl 		 * add another mbuf and, thus, the requirement for another DMA
46937f87c040SMarius Strobl 		 * segment.  So for best performance, it doesn't make sense to
46947f87c040SMarius Strobl 		 * advertise a maximum of TSO segments that typically will
46957f87c040SMarius Strobl 		 * require defragmentation in iflib_encap().
46967f87c040SMarius Strobl 		 */
46977f87c040SMarius Strobl 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
46987f87c040SMarius Strobl 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
46997f87c040SMarius Strobl 	}
47004c7070dbSScott Long 	if (scctx->isc_rss_table_size == 0)
47014c7070dbSScott Long 		scctx->isc_rss_table_size = 64;
470223ac9029SStephen Hurd 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4703da69b8f9SSean Bruno 
4704da69b8f9SSean Bruno 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4705da69b8f9SSean Bruno 	/* XXX format name */
4706f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
4707f855ec81SMarius Strobl 	    NULL, NULL, "admin");
4708e516b535SStephen Hurd 
4709772593dbSStephen Hurd 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
4710e516b535SStephen Hurd 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4711e516b535SStephen Hurd 		device_printf(dev, "Unable to fetch CPU list\n");
4712e516b535SStephen Hurd 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4713e516b535SStephen Hurd 	}
4714e516b535SStephen Hurd 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4715e516b535SStephen Hurd 
47164c7070dbSScott Long 	/*
4717b97de13aSMarius Strobl 	** Now set up MSI or MSI-X, should return us the number of supported
4718b97de13aSMarius Strobl 	** vectors (will be 1 for a legacy interrupt and MSI).
47194c7070dbSScott Long 	*/
47204c7070dbSScott Long 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
47214c7070dbSScott Long 		msix = scctx->isc_vectors;
47224c7070dbSScott Long 	} else if (scctx->isc_msix_bar != 0)
4723f7ae9a84SSean Bruno 	       /*
4724f7ae9a84SSean Bruno 		* The simple fact that isc_msix_bar is not 0 does not mean we
4725f7ae9a84SSean Bruno 		* have a good value there that is known to work.
4726f7ae9a84SSean Bruno 		*/
47274c7070dbSScott Long 		msix = iflib_msix_init(ctx);
47284c7070dbSScott Long 	else {
47294c7070dbSScott Long 		scctx->isc_vectors = 1;
47304c7070dbSScott Long 		scctx->isc_ntxqsets = 1;
47314c7070dbSScott Long 		scctx->isc_nrxqsets = 1;
47324c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_LEGACY;
47334c7070dbSScott Long 		msix = 0;
47344c7070dbSScott Long 	}
47354c7070dbSScott Long 	/* Get memory for the station queues */
47364c7070dbSScott Long 	if ((err = iflib_queues_alloc(ctx))) {
47374c7070dbSScott Long 		device_printf(dev, "Unable to allocate queue memory\n");
47387f3eb9daSPatrick Kelsey 		goto fail_intr_free;
47394c7070dbSScott Long 	}
47404c7070dbSScott Long 
4741ac88e6daSStephen Hurd 	if ((err = iflib_qset_structures_setup(ctx)))
47424c7070dbSScott Long 		goto fail_queues;
474369b7fc3eSSean Bruno 
4744bd84f700SSean Bruno 	/*
4745f154ece0SStephen Hurd 	 * Now that we know how many queues there are, get the core offset.
4746f154ece0SStephen Hurd 	 */
4747f154ece0SStephen Hurd 	ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx);
4748f154ece0SStephen Hurd 
4749f154ece0SStephen Hurd 	/*
4750bd84f700SSean Bruno 	 * Group taskqueues aren't properly set up until SMP is started,
4751bd84f700SSean Bruno 	 * so we disable interrupts until we can handle them post
4752bd84f700SSean Bruno 	 * SI_SUB_SMP.
4753bd84f700SSean Bruno 	 *
4754bd84f700SSean Bruno 	 * XXX: disabling interrupts doesn't actually work, at least for
4755bd84f700SSean Bruno 	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
4756bd84f700SSean Bruno 	 * we do null handling and depend on this not causing too large an
4757bd84f700SSean Bruno 	 * interrupt storm.
4758bd84f700SSean Bruno 	 */
47591248952aSSean Bruno 	IFDI_INTR_DISABLE(ctx);
47603d10e9edSMarius Strobl 
47613d10e9edSMarius Strobl 	if (msix > 1) {
47623d10e9edSMarius Strobl 		/*
47633d10e9edSMarius Strobl 		 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable
47643d10e9edSMarius Strobl 		 * aren't the default NULL implementation.
47653d10e9edSMarius Strobl 		 */
47663d10e9edSMarius Strobl 		kobj_desc = &ifdi_rx_queue_intr_enable_desc;
47673d10e9edSMarius Strobl 		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
47683d10e9edSMarius Strobl 		    kobj_desc);
47693d10e9edSMarius Strobl 		if (kobj_method == &kobj_desc->deflt) {
47703d10e9edSMarius Strobl 			device_printf(dev,
47713d10e9edSMarius Strobl 			    "MSI-X requires ifdi_rx_queue_intr_enable method\n");
47723d10e9edSMarius Strobl 			err = EOPNOTSUPP;
47737f3eb9daSPatrick Kelsey 			goto fail_queues;
47744c7070dbSScott Long 		}
47753d10e9edSMarius Strobl 		kobj_desc = &ifdi_tx_queue_intr_enable_desc;
47763d10e9edSMarius Strobl 		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
47773d10e9edSMarius Strobl 		    kobj_desc);
47783d10e9edSMarius Strobl 		if (kobj_method == &kobj_desc->deflt) {
47793d10e9edSMarius Strobl 			device_printf(dev,
47803d10e9edSMarius Strobl 			    "MSI-X requires ifdi_tx_queue_intr_enable method\n");
47813d10e9edSMarius Strobl 			err = EOPNOTSUPP;
47823d10e9edSMarius Strobl 			goto fail_queues;
47833d10e9edSMarius Strobl 		}
47843d10e9edSMarius Strobl 
47853d10e9edSMarius Strobl 		/*
47863d10e9edSMarius Strobl 		 * Assign the MSI-X vectors.
47873d10e9edSMarius Strobl 		 * Note that the default NULL ifdi_msix_intr_assign method will
47883d10e9edSMarius Strobl 		 * fail here, too.
47893d10e9edSMarius Strobl 		 */
47903d10e9edSMarius Strobl 		err = IFDI_MSIX_INTR_ASSIGN(ctx, msix);
47913d10e9edSMarius Strobl 		if (err != 0) {
47923d10e9edSMarius Strobl 			device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n",
47933d10e9edSMarius Strobl 			    err);
47943d10e9edSMarius Strobl 			goto fail_queues;
47953d10e9edSMarius Strobl 		}
4796197c6798SEric Joyner 	} else if (scctx->isc_intr != IFLIB_INTR_MSIX) {
47974c7070dbSScott Long 		rid = 0;
47984c7070dbSScott Long 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
47994c7070dbSScott Long 			MPASS(msix == 1);
48004c7070dbSScott Long 			rid = 1;
48014c7070dbSScott Long 		}
480223ac9029SStephen Hurd 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
48034c7070dbSScott Long 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
48047f3eb9daSPatrick Kelsey 			goto fail_queues;
48054c7070dbSScott Long 		}
4806197c6798SEric Joyner 	} else {
4807197c6798SEric Joyner 		device_printf(dev,
4808197c6798SEric Joyner 		    "Cannot use iflib with only 1 MSI-X interrupt!\n");
4809197c6798SEric Joyner 		err = ENODEV;
4810197c6798SEric Joyner 		goto fail_intr_free;
48114c7070dbSScott Long 	}
48127f87c040SMarius Strobl 
48131fd8c72cSKyle Evans 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
48147f87c040SMarius Strobl 
4815ab2e3f79SStephen Hurd 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
48164c7070dbSScott Long 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
48174c7070dbSScott Long 		goto fail_detach;
48184c7070dbSScott Long 	}
48197f87c040SMarius Strobl 
48207f87c040SMarius Strobl 	/*
48217f87c040SMarius Strobl 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
48227f87c040SMarius Strobl 	 * This must appear after the call to ether_ifattach() because
48237f87c040SMarius Strobl 	 * ether_ifattach() sets if_hdrlen to the default value.
48247f87c040SMarius Strobl 	 */
48257f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
48267f87c040SMarius Strobl 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
48277f87c040SMarius Strobl 
48284c7070dbSScott Long 	if ((err = iflib_netmap_attach(ctx))) {
48294c7070dbSScott Long 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
48304c7070dbSScott Long 		goto fail_detach;
48314c7070dbSScott Long 	}
48324c7070dbSScott Long 	*ctxp = ctx;
48334c7070dbSScott Long 
48347790c8c1SConrad Meyer 	DEBUGNET_SET(ctx->ifc_ifp, iflib);
483594618825SMark Johnston 
483623ac9029SStephen Hurd 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
48374c7070dbSScott Long 	iflib_add_device_sysctl_post(ctx);
48386d49b41eSAndrew Gallatin 	iflib_add_pfil(ctx);
48394ecb427aSSean Bruno 	ctx->ifc_flags |= IFC_INIT_DONE;
4840aa8a24d3SStephen Hurd 	CTX_UNLOCK(ctx);
48413d10e9edSMarius Strobl 
48424c7070dbSScott Long 	return (0);
484377c1fcecSEric Joyner 
48444c7070dbSScott Long fail_detach:
48454c7070dbSScott Long 	ether_ifdetach(ctx->ifc_ifp);
48464c7070dbSScott Long fail_intr_free:
48477f3eb9daSPatrick Kelsey 	iflib_free_intr_mem(ctx);
48484c7070dbSScott Long fail_queues:
48496108c013SStephen Hurd 	iflib_tx_structures_free(ctx);
48506108c013SStephen Hurd 	iflib_rx_structures_free(ctx);
4851197c6798SEric Joyner 	taskqgroup_detach(qgroup_if_config_tqg, &ctx->ifc_admin_task);
48524c7070dbSScott Long 	IFDI_DETACH(ctx);
48537f3eb9daSPatrick Kelsey fail_unlock:
4854aa8a24d3SStephen Hurd 	CTX_UNLOCK(ctx);
485556614414SEric Joyner 	iflib_deregister(ctx);
48567f3eb9daSPatrick Kelsey fail_ctx_free:
48577f3f6aadSEric Joyner 	device_set_softc(ctx->ifc_dev, NULL);
48587f3eb9daSPatrick Kelsey 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
48597f3eb9daSPatrick Kelsey 		free(ctx->ifc_softc, M_IFLIB);
48607f3eb9daSPatrick Kelsey 	free(ctx, M_IFLIB);
48614c7070dbSScott Long 	return (err);
48624c7070dbSScott Long }
48634c7070dbSScott Long 
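/*
 * Attach path for pseudo/cloned interfaces.  It mirrors
 * iflib_device_register() but uses IFDI_CLONEATTACH() instead of a PCI
 * probe and forces legacy (software) interrupt handling; for IFLIB_PSEUDO
 * devices the attach completes before any queue allocation.
 */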
48644c7070dbSScott Long int
486509f6ff4fSMatt Macy iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
486609f6ff4fSMatt Macy 					  struct iflib_cloneattach_ctx *clctx)
486709f6ff4fSMatt Macy {
486809f6ff4fSMatt Macy 	int err;
486909f6ff4fSMatt Macy 	if_ctx_t ctx;
487009f6ff4fSMatt Macy 	if_t ifp;
487109f6ff4fSMatt Macy 	if_softc_ctx_t scctx;
487209f6ff4fSMatt Macy 	int i;
487309f6ff4fSMatt Macy 	void *sc;
487409f6ff4fSMatt Macy 	uint16_t main_txq;
487509f6ff4fSMatt Macy 	uint16_t main_rxq;
487609f6ff4fSMatt Macy 
487709f6ff4fSMatt Macy 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
487809f6ff4fSMatt Macy 	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
487909f6ff4fSMatt Macy 	ctx->ifc_flags |= IFC_SC_ALLOCATED;
488009f6ff4fSMatt Macy 	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
488109f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_PSEUDO;
488209f6ff4fSMatt Macy 
488309f6ff4fSMatt Macy 	ctx->ifc_sctx = sctx;
488409f6ff4fSMatt Macy 	ctx->ifc_softc = sc;
488509f6ff4fSMatt Macy 	ctx->ifc_dev = dev;
488609f6ff4fSMatt Macy 
488709f6ff4fSMatt Macy 	if ((err = iflib_register(ctx)) != 0) {
488809f6ff4fSMatt Macy 		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
48897f3eb9daSPatrick Kelsey 		goto fail_ctx_free;
489009f6ff4fSMatt Macy 	}
489109f6ff4fSMatt Macy 	iflib_add_device_sysctl_pre(ctx);
489209f6ff4fSMatt Macy 
489309f6ff4fSMatt Macy 	scctx = &ctx->ifc_softc_ctx;
489409f6ff4fSMatt Macy 	ifp = ctx->ifc_ifp;
489509f6ff4fSMatt Macy 
489609f6ff4fSMatt Macy 	iflib_reset_qvalues(ctx);
4897aac9c817SEric Joyner 	CTX_LOCK(ctx);
489809f6ff4fSMatt Macy 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
489909f6ff4fSMatt Macy 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4900aac9c817SEric Joyner 		goto fail_unlock;
490109f6ff4fSMatt Macy 	}
490209f6ff4fSMatt Macy 	if (sctx->isc_flags & IFLIB_GEN_MAC)
49031fd8c72cSKyle Evans 		ether_gen_addr(ifp, &ctx->ifc_mac);
490409f6ff4fSMatt Macy 	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
490509f6ff4fSMatt Macy 								clctx->cc_params)) != 0) {
490609f6ff4fSMatt Macy 		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
49077f3eb9daSPatrick Kelsey 		goto fail_ctx_free;
490809f6ff4fSMatt Macy 	}
4909e2621d96SMatt Macy 	ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
4910e2621d96SMatt Macy 	ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO, 0, NULL);
4911e2621d96SMatt Macy 	ifmedia_set(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO);
491209f6ff4fSMatt Macy 
491309f6ff4fSMatt Macy #ifdef INVARIANTS
49147f87c040SMarius Strobl 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
491509f6ff4fSMatt Macy 		MPASS(scctx->isc_tx_csum_flags);
491609f6ff4fSMatt Macy #endif
491709f6ff4fSMatt Macy 
49187f87c040SMarius Strobl 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
491909f6ff4fSMatt Macy 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
492009f6ff4fSMatt Macy 
492109f6ff4fSMatt Macy 	ifp->if_flags |= IFF_NOGROUP;
492209f6ff4fSMatt Macy 	if (sctx->isc_flags & IFLIB_PSEUDO) {
49231fd8c72cSKyle Evans 		ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
492409f6ff4fSMatt Macy 
492509f6ff4fSMatt Macy 		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
492609f6ff4fSMatt Macy 			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
492709f6ff4fSMatt Macy 			goto fail_detach;
492809f6ff4fSMatt Macy 		}
492909f6ff4fSMatt Macy 		*ctxp = ctx;
493009f6ff4fSMatt Macy 
49317f87c040SMarius Strobl 		/*
49327f87c040SMarius Strobl 		 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
49337f87c040SMarius Strobl 		 * This must appear after the call to ether_ifattach() because
49347f87c040SMarius Strobl 		 * ether_ifattach() sets if_hdrlen to the default value.
49357f87c040SMarius Strobl 		 */
49367f87c040SMarius Strobl 		if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
49377f87c040SMarius Strobl 			if_setifheaderlen(ifp,
49387f87c040SMarius Strobl 			    sizeof(struct ether_vlan_header));
49397f87c040SMarius Strobl 
494009f6ff4fSMatt Macy 		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
494109f6ff4fSMatt Macy 		iflib_add_device_sysctl_post(ctx);
494209f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_INIT_DONE;
494309f6ff4fSMatt Macy 		return (0);
494409f6ff4fSMatt Macy 	}
494509f6ff4fSMatt Macy 	_iflib_pre_assert(scctx);
494609f6ff4fSMatt Macy 	ctx->ifc_txrx = *scctx->isc_txrx;
494709f6ff4fSMatt Macy 
494809f6ff4fSMatt Macy 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
494909f6ff4fSMatt Macy 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
495009f6ff4fSMatt Macy 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
495109f6ff4fSMatt Macy 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
495209f6ff4fSMatt Macy 
495309f6ff4fSMatt Macy 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
495409f6ff4fSMatt Macy 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
495509f6ff4fSMatt Macy 
495609f6ff4fSMatt Macy 	/* XXX change for per-queue sizes */
49571722eeacSMarius Strobl 	device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
495809f6ff4fSMatt Macy 	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
495909f6ff4fSMatt Macy 
496009f6ff4fSMatt Macy 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
496109f6ff4fSMatt Macy 	    MAX_SINGLE_PACKET_FRACTION)
496209f6ff4fSMatt Macy 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
496309f6ff4fSMatt Macy 		    MAX_SINGLE_PACKET_FRACTION);
496409f6ff4fSMatt Macy 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
496509f6ff4fSMatt Macy 	    MAX_SINGLE_PACKET_FRACTION)
496609f6ff4fSMatt Macy 		scctx->isc_tx_tso_segments_max = max(1,
496709f6ff4fSMatt Macy 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
496809f6ff4fSMatt Macy 
496909f6ff4fSMatt Macy 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
49707f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
49717f87c040SMarius Strobl 		/*
49727f87c040SMarius Strobl 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
49737f87c040SMarius Strobl 		 * but some MACs do.
49747f87c040SMarius Strobl 		 */
49757f87c040SMarius Strobl 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
49767f87c040SMarius Strobl 		    IP_MAXPACKET));
49777f87c040SMarius Strobl 		/*
49787f87c040SMarius Strobl 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
49797f87c040SMarius Strobl 		 * into account.  In the worst case, each of these calls will
49807f87c040SMarius Strobl 		 * add another mbuf and, thus, the requirement for another DMA
49817f87c040SMarius Strobl 		 * segment.  So for best performance, it doesn't make sense to
49827f87c040SMarius Strobl 		 * advertise a maximum of TSO segments that typically will
49837f87c040SMarius Strobl 		 * require defragmentation in iflib_encap().
49847f87c040SMarius Strobl 		 */
49857f87c040SMarius Strobl 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
49867f87c040SMarius Strobl 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
49877f87c040SMarius Strobl 	}
498809f6ff4fSMatt Macy 	if (scctx->isc_rss_table_size == 0)
498909f6ff4fSMatt Macy 		scctx->isc_rss_table_size = 64;
499009f6ff4fSMatt Macy 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
499109f6ff4fSMatt Macy 
499209f6ff4fSMatt Macy 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
499309f6ff4fSMatt Macy 	/* XXX format name */
4994f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
4995f855ec81SMarius Strobl 	    NULL, NULL, "admin");
499609f6ff4fSMatt Macy 
499709f6ff4fSMatt Macy 	/* XXX --- can support > 1 -- but keep it simple for now */
499809f6ff4fSMatt Macy 	scctx->isc_intr = IFLIB_INTR_LEGACY;
499909f6ff4fSMatt Macy 
500009f6ff4fSMatt Macy 	/* Get memory for the station queues */
500109f6ff4fSMatt Macy 	if ((err = iflib_queues_alloc(ctx))) {
500209f6ff4fSMatt Macy 		device_printf(dev, "Unable to allocate queue memory\n");
50037f3eb9daSPatrick Kelsey 		goto fail_iflib_detach;
500409f6ff4fSMatt Macy 	}
500509f6ff4fSMatt Macy 
500609f6ff4fSMatt Macy 	if ((err = iflib_qset_structures_setup(ctx))) {
500709f6ff4fSMatt Macy 		device_printf(dev, "qset structure setup failed %d\n", err);
500809f6ff4fSMatt Macy 		goto fail_queues;
500909f6ff4fSMatt Macy 	}
50107f87c040SMarius Strobl 
501109f6ff4fSMatt Macy 	/*
501209f6ff4fSMatt Macy 	 * XXX What if anything do we want to do about interrupts?
501309f6ff4fSMatt Macy 	 */
50141fd8c72cSKyle Evans 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
501509f6ff4fSMatt Macy 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
501609f6ff4fSMatt Macy 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
501709f6ff4fSMatt Macy 		goto fail_detach;
501809f6ff4fSMatt Macy 	}
50197f87c040SMarius Strobl 
50207f87c040SMarius Strobl 	/*
50217f87c040SMarius Strobl 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
50227f87c040SMarius Strobl 	 * This must appear after the call to ether_ifattach() because
50237f87c040SMarius Strobl 	 * ether_ifattach() sets if_hdrlen to the default value.
50247f87c040SMarius Strobl 	 */
50257f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
50267f87c040SMarius Strobl 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
50277f87c040SMarius Strobl 
502809f6ff4fSMatt Macy 	/* XXX handle more than one queue */
502909f6ff4fSMatt Macy 	for (i = 0; i < scctx->isc_nrxqsets; i++)
503009f6ff4fSMatt Macy 		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
503109f6ff4fSMatt Macy 
503209f6ff4fSMatt Macy 	*ctxp = ctx;
503309f6ff4fSMatt Macy 
503409f6ff4fSMatt Macy 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
503509f6ff4fSMatt Macy 	iflib_add_device_sysctl_post(ctx);
503609f6ff4fSMatt Macy 	ctx->ifc_flags |= IFC_INIT_DONE;
5037aac9c817SEric Joyner 	CTX_UNLOCK(ctx);
50383d10e9edSMarius Strobl 
503909f6ff4fSMatt Macy 	return (0);
504009f6ff4fSMatt Macy fail_detach:
504109f6ff4fSMatt Macy 	ether_ifdetach(ctx->ifc_ifp);
504209f6ff4fSMatt Macy fail_queues:
504309f6ff4fSMatt Macy 	iflib_tx_structures_free(ctx);
504409f6ff4fSMatt Macy 	iflib_rx_structures_free(ctx);
50457f3eb9daSPatrick Kelsey fail_iflib_detach:
504609f6ff4fSMatt Macy 	IFDI_DETACH(ctx);
5047aac9c817SEric Joyner fail_unlock:
5048aac9c817SEric Joyner 	CTX_UNLOCK(ctx);
504956614414SEric Joyner 	iflib_deregister(ctx);
50507f3eb9daSPatrick Kelsey fail_ctx_free:
50517f3eb9daSPatrick Kelsey 	free(ctx->ifc_softc, M_IFLIB);
50527f3eb9daSPatrick Kelsey 	free(ctx, M_IFLIB);
505309f6ff4fSMatt Macy 	return (err);
505409f6ff4fSMatt Macy }
505509f6ff4fSMatt Macy 
505609f6ff4fSMatt Macy int
505709f6ff4fSMatt Macy iflib_pseudo_deregister(if_ctx_t ctx)
505809f6ff4fSMatt Macy {
505909f6ff4fSMatt Macy 	if_t ifp = ctx->ifc_ifp;
506009f6ff4fSMatt Macy 	iflib_txq_t txq;
506109f6ff4fSMatt Macy 	iflib_rxq_t rxq;
506209f6ff4fSMatt Macy 	int i, j;
506309f6ff4fSMatt Macy 	struct taskqgroup *tqg;
506409f6ff4fSMatt Macy 	iflib_fl_t fl;
506509f6ff4fSMatt Macy 
50661558015eSEric Joyner 	/* Unregister VLAN event handlers early */
50671558015eSEric Joyner 	iflib_unregister_vlan_handlers(ctx);
50681558015eSEric Joyner 
506909f6ff4fSMatt Macy 	ether_ifdetach(ifp);
507009f6ff4fSMatt Macy 	/* XXX drain any dependent tasks */
507109f6ff4fSMatt Macy 	tqg = qgroup_if_io_tqg;
507209f6ff4fSMatt Macy 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
507309f6ff4fSMatt Macy 		callout_drain(&txq->ift_timer);
507409f6ff4fSMatt Macy 		if (txq->ift_task.gt_uniq != NULL)
507509f6ff4fSMatt Macy 			taskqgroup_detach(tqg, &txq->ift_task);
507609f6ff4fSMatt Macy 	}
507709f6ff4fSMatt Macy 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
5078fb1a29b4SHans Petter Selasky 		callout_drain(&rxq->ifr_watchdog);
507909f6ff4fSMatt Macy 		if (rxq->ifr_task.gt_uniq != NULL)
508009f6ff4fSMatt Macy 			taskqgroup_detach(tqg, &rxq->ifr_task);
508109f6ff4fSMatt Macy 
508209f6ff4fSMatt Macy 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
508309f6ff4fSMatt Macy 			free(fl->ifl_rx_bitmap, M_IFLIB);
508409f6ff4fSMatt Macy 	}
508509f6ff4fSMatt Macy 	tqg = qgroup_if_config_tqg;
508609f6ff4fSMatt Macy 	if (ctx->ifc_admin_task.gt_uniq != NULL)
508709f6ff4fSMatt Macy 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
508809f6ff4fSMatt Macy 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
508909f6ff4fSMatt Macy 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
509009f6ff4fSMatt Macy 
509109f6ff4fSMatt Macy 	iflib_tx_structures_free(ctx);
509209f6ff4fSMatt Macy 	iflib_rx_structures_free(ctx);
509356614414SEric Joyner 
509456614414SEric Joyner 	iflib_deregister(ctx);
509556614414SEric Joyner 
509609f6ff4fSMatt Macy 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
509709f6ff4fSMatt Macy 		free(ctx->ifc_softc, M_IFLIB);
509809f6ff4fSMatt Macy 	free(ctx, M_IFLIB);
509909f6ff4fSMatt Macy 	return (0);
510009f6ff4fSMatt Macy }
510109f6ff4fSMatt Macy 
510209f6ff4fSMatt Macy int
51034c7070dbSScott Long iflib_device_attach(device_t dev)
51044c7070dbSScott Long {
51054c7070dbSScott Long 	if_ctx_t ctx;
51064c7070dbSScott Long 	if_shared_ctx_t sctx;
51074c7070dbSScott Long 
51084c7070dbSScott Long 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
51094c7070dbSScott Long 		return (ENOTSUP);
51104c7070dbSScott Long 
51114c7070dbSScott Long 	pci_enable_busmaster(dev);
51124c7070dbSScott Long 
51134c7070dbSScott Long 	return (iflib_device_register(dev, NULL, sctx, &ctx));
51144c7070dbSScott Long }
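
/*
 * Illustrative sketch (assumptions, not taken from this file): a driver
 * built on iflib typically points its bus device methods directly at the
 * iflib_device_* functions defined here, e.g. for a hypothetical "foo"
 * driver:
 *
 *	static device_method_t foo_methods[] = {
 *		DEVMETHOD(device_register, foo_register),
 *		DEVMETHOD(device_probe, iflib_device_probe),
 *		DEVMETHOD(device_attach, iflib_device_attach),
 *		DEVMETHOD(device_detach, iflib_device_detach),
 *		DEVMETHOD(device_suspend, iflib_device_suspend),
 *		DEVMETHOD(device_resume, iflib_device_resume),
 *		DEVMETHOD(device_shutdown, iflib_device_shutdown),
 *		DEVMETHOD_END
 *	};
 *
 * where foo_register() is the driver's DEVICE_REGISTER implementation that
 * hands back its if_shared_ctx_t.
 */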
51154c7070dbSScott Long 
51164c7070dbSScott Long int
51174c7070dbSScott Long iflib_device_deregister(if_ctx_t ctx)
51184c7070dbSScott Long {
51194c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
51204c7070dbSScott Long 	iflib_txq_t txq;
51214c7070dbSScott Long 	iflib_rxq_t rxq;
51224c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
512387890dbaSSean Bruno 	int i, j;
51244c7070dbSScott Long 	struct taskqgroup *tqg;
512587890dbaSSean Bruno 	iflib_fl_t fl;
51264c7070dbSScott Long 
51274c7070dbSScott Long 	/* Make sure VLANS are not using driver */
51284c7070dbSScott Long 	if (if_vlantrunkinuse(ifp)) {
51294c7070dbSScott Long 		device_printf(dev, "VLAN in use, detach first\n");
51304c7070dbSScott Long 		return (EBUSY);
51314c7070dbSScott Long 	}
513277c1fcecSEric Joyner #ifdef PCI_IOV
513377c1fcecSEric Joyner 	if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
513477c1fcecSEric Joyner 		device_printf(dev, "SR-IOV in use; detach first.\n");
513577c1fcecSEric Joyner 		return (EBUSY);
513677c1fcecSEric Joyner 	}
513777c1fcecSEric Joyner #endif
513877c1fcecSEric Joyner 
513977c1fcecSEric Joyner 	STATE_LOCK(ctx);
514077c1fcecSEric Joyner 	ctx->ifc_flags |= IFC_IN_DETACH;
514177c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
51424c7070dbSScott Long 
51431558015eSEric Joyner 	/* Unregister VLAN handlers before calling iflib_stop() */
51441558015eSEric Joyner 	iflib_unregister_vlan_handlers(ctx);
51451558015eSEric Joyner 
51461558015eSEric Joyner 	iflib_netmap_detach(ifp);
51471558015eSEric Joyner 	ether_ifdetach(ifp);
51481558015eSEric Joyner 
51494c7070dbSScott Long 	CTX_LOCK(ctx);
51504c7070dbSScott Long 	iflib_stop(ctx);
51514c7070dbSScott Long 	CTX_UNLOCK(ctx);
51524c7070dbSScott Long 
51536d49b41eSAndrew Gallatin 	iflib_rem_pfil(ctx);
51544c7070dbSScott Long 	if (ctx->ifc_led_dev != NULL)
51554c7070dbSScott Long 		led_destroy(ctx->ifc_led_dev);
51564c7070dbSScott Long 	/* XXX drain any dependent tasks */
5157ab2e3f79SStephen Hurd 	tqg = qgroup_if_io_tqg;
515823ac9029SStephen Hurd 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
51594c7070dbSScott Long 		callout_drain(&txq->ift_timer);
51604c7070dbSScott Long 		if (txq->ift_task.gt_uniq != NULL)
51614c7070dbSScott Long 			taskqgroup_detach(tqg, &txq->ift_task);
51624c7070dbSScott Long 	}
51634c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
51644c7070dbSScott Long 		if (rxq->ifr_task.gt_uniq != NULL)
51654c7070dbSScott Long 			taskqgroup_detach(tqg, &rxq->ifr_task);
516687890dbaSSean Bruno 
516787890dbaSSean Bruno 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
516887890dbaSSean Bruno 			free(fl->ifl_rx_bitmap, M_IFLIB);
51694c7070dbSScott Long 	}
5170ab2e3f79SStephen Hurd 	tqg = qgroup_if_config_tqg;
51714c7070dbSScott Long 	if (ctx->ifc_admin_task.gt_uniq != NULL)
51724c7070dbSScott Long 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
51734c7070dbSScott Long 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
51744c7070dbSScott Long 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
51756c3c3194SMatt Macy 	CTX_LOCK(ctx);
51764c7070dbSScott Long 	IFDI_DETACH(ctx);
51776c3c3194SMatt Macy 	CTX_UNLOCK(ctx);
51786c3c3194SMatt Macy 
51796c3c3194SMatt Macy 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
518077c1fcecSEric Joyner 	iflib_free_intr_mem(ctx);
518177c1fcecSEric Joyner 
518277c1fcecSEric Joyner 	bus_generic_detach(dev);
518377c1fcecSEric Joyner 
518477c1fcecSEric Joyner 	iflib_tx_structures_free(ctx);
518577c1fcecSEric Joyner 	iflib_rx_structures_free(ctx);
518656614414SEric Joyner 
518756614414SEric Joyner 	iflib_deregister(ctx);
518856614414SEric Joyner 
518956614414SEric Joyner 	device_set_softc(ctx->ifc_dev, NULL);
519077c1fcecSEric Joyner 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
519177c1fcecSEric Joyner 		free(ctx->ifc_softc, M_IFLIB);
5192f154ece0SStephen Hurd 	unref_ctx_core_offset(ctx);
519377c1fcecSEric Joyner 	free(ctx, M_IFLIB);
519477c1fcecSEric Joyner 	return (0);
519577c1fcecSEric Joyner }
519677c1fcecSEric Joyner 
519777c1fcecSEric Joyner static void
519877c1fcecSEric Joyner iflib_free_intr_mem(if_ctx_t ctx)
519977c1fcecSEric Joyner {
520077c1fcecSEric Joyner 
52014c7070dbSScott Long 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
52024c7070dbSScott Long 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
52034c7070dbSScott Long 	}
5204b97de13aSMarius Strobl 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
5205b97de13aSMarius Strobl 		pci_release_msi(ctx->ifc_dev);
5206b97de13aSMarius Strobl 	}
52074c7070dbSScott Long 	if (ctx->ifc_msix_mem != NULL) {
52084c7070dbSScott Long 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
5209b97de13aSMarius Strobl 		    rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
52104c7070dbSScott Long 		ctx->ifc_msix_mem = NULL;
52114c7070dbSScott Long 	}
52124c7070dbSScott Long }
52134c7070dbSScott Long 
52144c7070dbSScott Long int
52154c7070dbSScott Long iflib_device_detach(device_t dev)
52164c7070dbSScott Long {
52174c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52184c7070dbSScott Long 
52194c7070dbSScott Long 	return (iflib_device_deregister(ctx));
52204c7070dbSScott Long }
52214c7070dbSScott Long 
52224c7070dbSScott Long int
52234c7070dbSScott Long iflib_device_suspend(device_t dev)
52244c7070dbSScott Long {
52254c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52264c7070dbSScott Long 
52274c7070dbSScott Long 	CTX_LOCK(ctx);
52284c7070dbSScott Long 	IFDI_SUSPEND(ctx);
52294c7070dbSScott Long 	CTX_UNLOCK(ctx);
52304c7070dbSScott Long 
52314c7070dbSScott Long 	return (bus_generic_suspend(dev));
52324c7070dbSScott Long }

52334c7070dbSScott Long int
52344c7070dbSScott Long iflib_device_shutdown(device_t dev)
52354c7070dbSScott Long {
52364c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52374c7070dbSScott Long 
52384c7070dbSScott Long 	CTX_LOCK(ctx);
52394c7070dbSScott Long 	IFDI_SHUTDOWN(ctx);
52404c7070dbSScott Long 	CTX_UNLOCK(ctx);
52414c7070dbSScott Long 
52424c7070dbSScott Long 	return (bus_generic_suspend(dev));
52434c7070dbSScott Long }
52444c7070dbSScott Long 
52464c7070dbSScott Long int
52474c7070dbSScott Long iflib_device_resume(device_t dev)
52484c7070dbSScott Long {
52494c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52504c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
52514c7070dbSScott Long 
52524c7070dbSScott Long 	CTX_LOCK(ctx);
52534c7070dbSScott Long 	IFDI_RESUME(ctx);
5254cd28ea92SStephen Hurd 	iflib_if_init_locked(ctx);
52554c7070dbSScott Long 	CTX_UNLOCK(ctx);
52564c7070dbSScott Long 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
52574c7070dbSScott Long 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
52584c7070dbSScott Long 
52594c7070dbSScott Long 	return (bus_generic_resume(dev));
52604c7070dbSScott Long }
52614c7070dbSScott Long 
52624c7070dbSScott Long int
52634c7070dbSScott Long iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
52644c7070dbSScott Long {
52654c7070dbSScott Long 	int error;
52664c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52674c7070dbSScott Long 
52684c7070dbSScott Long 	CTX_LOCK(ctx);
52694c7070dbSScott Long 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
52704c7070dbSScott Long 	CTX_UNLOCK(ctx);
52714c7070dbSScott Long 
52724c7070dbSScott Long 	return (error);
52734c7070dbSScott Long }
52744c7070dbSScott Long 
52754c7070dbSScott Long void
52764c7070dbSScott Long iflib_device_iov_uninit(device_t dev)
52774c7070dbSScott Long {
52784c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52794c7070dbSScott Long 
52804c7070dbSScott Long 	CTX_LOCK(ctx);
52814c7070dbSScott Long 	IFDI_IOV_UNINIT(ctx);
52824c7070dbSScott Long 	CTX_UNLOCK(ctx);
52834c7070dbSScott Long }
52844c7070dbSScott Long 
52854c7070dbSScott Long int
52864c7070dbSScott Long iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
52874c7070dbSScott Long {
52884c7070dbSScott Long 	int error;
52894c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52904c7070dbSScott Long 
52914c7070dbSScott Long 	CTX_LOCK(ctx);
52924c7070dbSScott Long 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
52934c7070dbSScott Long 	CTX_UNLOCK(ctx);
52944c7070dbSScott Long 
52954c7070dbSScott Long 	return (error);
52964c7070dbSScott Long }
52974c7070dbSScott Long 
52984c7070dbSScott Long /*********************************************************************
52994c7070dbSScott Long  *
53004c7070dbSScott Long  *  MODULE FUNCTION DEFINITIONS
53014c7070dbSScott Long  *
53024c7070dbSScott Long  **********************************************************************/
53034c7070dbSScott Long 
5304ab2e3f79SStephen Hurd /*
5305ab2e3f79SStephen Hurd  * - Start a fast taskqueue thread for each core
5306ab2e3f79SStephen Hurd  * - Start a taskqueue for control operations
5307ab2e3f79SStephen Hurd  */
53084c7070dbSScott Long static int
53094c7070dbSScott Long iflib_module_init(void)
53104c7070dbSScott Long {
53114c7070dbSScott Long 	return (0);
53124c7070dbSScott Long }
53134c7070dbSScott Long 
53144c7070dbSScott Long static int
53154c7070dbSScott Long iflib_module_event_handler(module_t mod, int what, void *arg)
53164c7070dbSScott Long {
53174c7070dbSScott Long 	int err;
53184c7070dbSScott Long 
53194c7070dbSScott Long 	switch (what) {
53204c7070dbSScott Long 	case MOD_LOAD:
53214c7070dbSScott Long 		if ((err = iflib_module_init()) != 0)
53224c7070dbSScott Long 			return (err);
53234c7070dbSScott Long 		break;
53244c7070dbSScott Long 	case MOD_UNLOAD:
53254c7070dbSScott Long 		return (EBUSY);
53264c7070dbSScott Long 	default:
53274c7070dbSScott Long 		return (EOPNOTSUPP);
53284c7070dbSScott Long 	}
53294c7070dbSScott Long 
53304c7070dbSScott Long 	return (0);
53314c7070dbSScott Long }
53324c7070dbSScott Long 
53334c7070dbSScott Long /*********************************************************************
53344c7070dbSScott Long  *
53354c7070dbSScott Long  *  PUBLIC FUNCTION DEFINITIONS
53364c7070dbSScott Long  *     ordered as in iflib.h
53374c7070dbSScott Long  *
53384c7070dbSScott Long  **********************************************************************/
53394c7070dbSScott Long 
53414c7070dbSScott Long static void
53424c7070dbSScott Long _iflib_assert(if_shared_ctx_t sctx)
53434c7070dbSScott Long {
5344afb77372SEric Joyner 	int i;
5345afb77372SEric Joyner 
53464c7070dbSScott Long 	MPASS(sctx->isc_tx_maxsize);
53474c7070dbSScott Long 	MPASS(sctx->isc_tx_maxsegsize);
53484c7070dbSScott Long 
53494c7070dbSScott Long 	MPASS(sctx->isc_rx_maxsize);
53504c7070dbSScott Long 	MPASS(sctx->isc_rx_nsegments);
53514c7070dbSScott Long 	MPASS(sctx->isc_rx_maxsegsize);
53524c7070dbSScott Long 
5353afb77372SEric Joyner 	MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8);
5354afb77372SEric Joyner 	for (i = 0; i < sctx->isc_nrxqs; i++) {
5355afb77372SEric Joyner 		MPASS(sctx->isc_nrxd_min[i]);
5356afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_nrxd_min[i]));
5357afb77372SEric Joyner 		MPASS(sctx->isc_nrxd_max[i]);
5358afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_nrxd_max[i]));
5359afb77372SEric Joyner 		MPASS(sctx->isc_nrxd_default[i]);
5360afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_nrxd_default[i]));
5361afb77372SEric Joyner 	}
5362afb77372SEric Joyner 
5363afb77372SEric Joyner 	MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8);
5364afb77372SEric Joyner 	for (i = 0; i < sctx->isc_ntxqs; i++) {
5365afb77372SEric Joyner 		MPASS(sctx->isc_ntxd_min[i]);
5366afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_ntxd_min[i]));
5367afb77372SEric Joyner 		MPASS(sctx->isc_ntxd_max[i]);
5368afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_ntxd_max[i]));
5369afb77372SEric Joyner 		MPASS(sctx->isc_ntxd_default[i]);
5370afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_ntxd_default[i]));
5371afb77372SEric Joyner 	}
53724c7070dbSScott Long }
53734c7070dbSScott Long 
53741248952aSSean Bruno static void
53751248952aSSean Bruno _iflib_pre_assert(if_softc_ctx_t scctx)
53761248952aSSean Bruno {
53771248952aSSean Bruno 
53781248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_encap);
53791248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_flush);
53801248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
53811248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_available);
53821248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
53831248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_refill);
53841248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_flush);
53851248952aSSean Bruno }
53862fe66646SSean Bruno 
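/*
 * Allocate and wire up the ifnet for this context: compile the driver's
 * kobj class, install the iflib ifnet method pointers (init, ioctl,
 * transmit, qflush), register the VLAN config/unconfig event handlers, and
 * initialize the default ifmedia unless the driver manages its own media.
 */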
53874c7070dbSScott Long static int
53884c7070dbSScott Long iflib_register(if_ctx_t ctx)
53894c7070dbSScott Long {
53904c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
53914c7070dbSScott Long 	driver_t *driver = sctx->isc_driver;
53924c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
53934c7070dbSScott Long 	if_t ifp;
53944c7070dbSScott Long 
53954c7070dbSScott Long 	_iflib_assert(sctx);
53964c7070dbSScott Long 
5397aa8a24d3SStephen Hurd 	CTX_LOCK_INIT(ctx);
53987b610b60SSean Bruno 	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
539977c1fcecSEric Joyner 	ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER);
54004c7070dbSScott Long 	if (ifp == NULL) {
54014c7070dbSScott Long 		device_printf(dev, "can not allocate ifnet structure\n");
54024c7070dbSScott Long 		return (ENOMEM);
54034c7070dbSScott Long 	}
54044c7070dbSScott Long 
54054c7070dbSScott Long 	/*
54064c7070dbSScott Long 	 * Initialize our context's device specific methods
54074c7070dbSScott Long 	 */
54084c7070dbSScott Long 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
54094c7070dbSScott Long 	kobj_class_compile((kobj_class_t) driver);
54104c7070dbSScott Long 
54114c7070dbSScott Long 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
54124c7070dbSScott Long 	if_setsoftc(ifp, ctx);
54134c7070dbSScott Long 	if_setdev(ifp, dev);
54144c7070dbSScott Long 	if_setinitfn(ifp, iflib_if_init);
54154c7070dbSScott Long 	if_setioctlfn(ifp, iflib_if_ioctl);
5416b8ca4756SPatrick Kelsey #ifdef ALTQ
5417b8ca4756SPatrick Kelsey 	if_setstartfn(ifp, iflib_altq_if_start);
5418b8ca4756SPatrick Kelsey 	if_settransmitfn(ifp, iflib_altq_if_transmit);
54198f410865SPatrick Kelsey 	if_setsendqready(ifp);
5420b8ca4756SPatrick Kelsey #else
54214c7070dbSScott Long 	if_settransmitfn(ifp, iflib_if_transmit);
5422b8ca4756SPatrick Kelsey #endif
54234c7070dbSScott Long 	if_setqflushfn(ifp, iflib_if_qflush);
5424e87c4940SGleb Smirnoff 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
5425e87c4940SGleb Smirnoff 	    IFF_KNOWSEPOCH);
54264c7070dbSScott Long 
54274c7070dbSScott Long 	ctx->ifc_vlan_attach_event =
54284c7070dbSScott Long 		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
54294c7070dbSScott Long 							  EVENTHANDLER_PRI_FIRST);
54304c7070dbSScott Long 	ctx->ifc_vlan_detach_event =
54314c7070dbSScott Long 		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
54324c7070dbSScott Long 							  EVENTHANDLER_PRI_FIRST);
54334c7070dbSScott Long 
5434e2621d96SMatt Macy 	if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) {
5435e2621d96SMatt Macy 		ctx->ifc_mediap = &ctx->ifc_media;
5436e2621d96SMatt Macy 		ifmedia_init(ctx->ifc_mediap, IFM_IMASK,
54374c7070dbSScott Long 		    iflib_media_change, iflib_media_status);
5438e2621d96SMatt Macy 	}
54394c7070dbSScott Long 	return (0);
54404c7070dbSScott Long }
54414c7070dbSScott Long 
544256614414SEric Joyner static void
54431558015eSEric Joyner iflib_unregister_vlan_handlers(if_ctx_t ctx)
544456614414SEric Joyner {
544556614414SEric Joyner 	/* Unregister VLAN events */
544656614414SEric Joyner 	if (ctx->ifc_vlan_attach_event != NULL) {
544756614414SEric Joyner 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
544856614414SEric Joyner 		ctx->ifc_vlan_attach_event = NULL;
544956614414SEric Joyner 	}
545056614414SEric Joyner 	if (ctx->ifc_vlan_detach_event != NULL) {
545156614414SEric Joyner 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
545256614414SEric Joyner 		ctx->ifc_vlan_detach_event = NULL;
545356614414SEric Joyner 	}
545456614414SEric Joyner 
54551558015eSEric Joyner }
54561558015eSEric Joyner 
54571558015eSEric Joyner static void
54581558015eSEric Joyner iflib_deregister(if_ctx_t ctx)
54591558015eSEric Joyner {
54601558015eSEric Joyner 	if_t ifp = ctx->ifc_ifp;
54611558015eSEric Joyner 
54621558015eSEric Joyner 	/* Remove all media */
54631558015eSEric Joyner 	ifmedia_removeall(&ctx->ifc_media);
54641558015eSEric Joyner 
54651558015eSEric Joyner 	/* Ensure that VLAN event handlers are unregistered */
54661558015eSEric Joyner 	iflib_unregister_vlan_handlers(ctx);
54671558015eSEric Joyner 
546856614414SEric Joyner 	/* Release kobject reference */
546956614414SEric Joyner 	kobj_delete((kobj_t) ctx, NULL);
547056614414SEric Joyner 
547156614414SEric Joyner 	/* Free the ifnet structure */
547256614414SEric Joyner 	if_free(ifp);
547356614414SEric Joyner 
547456614414SEric Joyner 	STATE_LOCK_DESTROY(ctx);
547556614414SEric Joyner 
547756614414SEric Joyner 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
547756614414SEric Joyner 	CTX_LOCK_DESTROY(ctx);
547856614414SEric Joyner }
547956614414SEric Joyner 
54804c7070dbSScott Long static int
54814c7070dbSScott Long iflib_queues_alloc(if_ctx_t ctx)
54824c7070dbSScott Long {
54834c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
548423ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
54854c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
548623ac9029SStephen Hurd 	int nrxqsets = scctx->isc_nrxqsets;
548723ac9029SStephen Hurd 	int ntxqsets = scctx->isc_ntxqsets;
54884c7070dbSScott Long 	iflib_txq_t txq;
54894c7070dbSScott Long 	iflib_rxq_t rxq;
54904c7070dbSScott Long 	iflib_fl_t fl = NULL;
549123ac9029SStephen Hurd 	int i, j, cpu, err, txconf, rxconf;
54924c7070dbSScott Long 	iflib_dma_info_t ifdip;
549323ac9029SStephen Hurd 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
549423ac9029SStephen Hurd 	uint32_t *txqsizes = scctx->isc_txqsizes;
54954c7070dbSScott Long 	uint8_t nrxqs = sctx->isc_nrxqs;
54964c7070dbSScott Long 	uint8_t ntxqs = sctx->isc_ntxqs;
54974c7070dbSScott Long 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
54984c7070dbSScott Long 	caddr_t *vaddrs;
54994c7070dbSScott Long 	uint64_t *paddrs;
55004c7070dbSScott Long 
550123ac9029SStephen Hurd 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
550223ac9029SStephen Hurd 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
55034c7070dbSScott Long 
55044c7070dbSScott Long 	/* Allocate the TX ring struct memory */
5505b89827a0SStephen Hurd 	if (!(ctx->ifc_txqs =
5506ac2fffa4SPedro F. Giffuni 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5507ac2fffa4SPedro F. Giffuni 	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
55084c7070dbSScott Long 		device_printf(dev, "Unable to allocate TX ring memory\n");
55094c7070dbSScott Long 		err = ENOMEM;
55104c7070dbSScott Long 		goto fail;
55114c7070dbSScott Long 	}
55124c7070dbSScott Long 
55134c7070dbSScott Long 	/* Now allocate the RX */
5514b89827a0SStephen Hurd 	if (!(ctx->ifc_rxqs =
5515ac2fffa4SPedro F. Giffuni 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5516ac2fffa4SPedro F. Giffuni 	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
55174c7070dbSScott Long 		device_printf(dev, "Unable to allocate RX ring memory\n");
55184c7070dbSScott Long 		err = ENOMEM;
55194c7070dbSScott Long 		goto rx_fail;
55204c7070dbSScott Long 	}
55214c7070dbSScott Long 
5522b89827a0SStephen Hurd 	txq = ctx->ifc_txqs;
5523b89827a0SStephen Hurd 	rxq = ctx->ifc_rxqs;
55244c7070dbSScott Long 
55254c7070dbSScott Long 	/*
55264c7070dbSScott Long 	 * XXX handle allocation failure
55274c7070dbSScott Long 	 */
552896c85efbSNathan Whitehorn 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
55294c7070dbSScott Long 		/* Set up some basics */
55304c7070dbSScott Long 
5531bfce461eSMarius Strobl 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
5532bfce461eSMarius Strobl 		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5533bfce461eSMarius Strobl 			device_printf(dev,
5534bfce461eSMarius Strobl 			    "Unable to allocate TX DMA info memory\n");
55354c7070dbSScott Long 			err = ENOMEM;
55360d0338afSConrad Meyer 			goto err_tx_desc;
55374c7070dbSScott Long 		}
55384c7070dbSScott Long 		txq->ift_ifdi = ifdip;
55394c7070dbSScott Long 		for (j = 0; j < ntxqs; j++, ifdip++) {
5540bfce461eSMarius Strobl 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
5541bfce461eSMarius Strobl 				device_printf(dev,
5542bfce461eSMarius Strobl 				    "Unable to allocate TX descriptors\n");
55434c7070dbSScott Long 				err = ENOMEM;
55444c7070dbSScott Long 				goto err_tx_desc;
55454c7070dbSScott Long 			}
554695246abbSSean Bruno 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
55474c7070dbSScott Long 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
55484c7070dbSScott Long 		}
55494c7070dbSScott Long 		txq->ift_ctx = ctx;
55504c7070dbSScott Long 		txq->ift_id = i;
555123ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
555223ac9029SStephen Hurd 			txq->ift_br_offset = 1;
555323ac9029SStephen Hurd 		} else {
555423ac9029SStephen Hurd 			txq->ift_br_offset = 0;
555523ac9029SStephen Hurd 		}
55564c7070dbSScott Long 		/* XXX fix this */
555796c85efbSNathan Whitehorn 		txq->ift_timer.c_cpu = cpu;
55584c7070dbSScott Long 
55594c7070dbSScott Long 		if (iflib_txsd_alloc(txq)) {
55604c7070dbSScott Long 			device_printf(dev, "Critical Failure setting up TX buffers\n");
55614c7070dbSScott Long 			err = ENOMEM;
55624c7070dbSScott Long 			goto err_tx_desc;
55634c7070dbSScott Long 		}
55644c7070dbSScott Long 
55654c7070dbSScott Long 		/* Initialize the TX lock */
55661722eeacSMarius Strobl 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout",
55674c7070dbSScott Long 		    device_get_nameunit(dev), txq->ift_id);
55684c7070dbSScott Long 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
55694c7070dbSScott Long 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
55704c7070dbSScott Long 
557195246abbSSean Bruno 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
55724c7070dbSScott Long 				      iflib_txq_can_drain, M_IFLIB, M_WAITOK);
55734c7070dbSScott Long 		if (err) {
55744c7070dbSScott Long 			/* XXX free any allocated rings */
55754c7070dbSScott Long 			device_printf(dev, "Unable to allocate buf_ring\n");
55760d0338afSConrad Meyer 			goto err_tx_desc;
55774c7070dbSScott Long 		}
55784c7070dbSScott Long 	}
55794c7070dbSScott Long 
55804c7070dbSScott Long 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
55814c7070dbSScott Long 		/* Set up some basics */
5582fb1a29b4SHans Petter Selasky 		callout_init(&rxq->ifr_watchdog, 1);
55834c7070dbSScott Long 
5584bfce461eSMarius Strobl 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
5585bfce461eSMarius Strobl 		   M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5586bfce461eSMarius Strobl 			device_printf(dev,
5587bfce461eSMarius Strobl 			    "Unable to allocate RX DMA info memory\n");
55884c7070dbSScott Long 			err = ENOMEM;
55890d0338afSConrad Meyer 			goto err_tx_desc;
55904c7070dbSScott Long 		}
55914c7070dbSScott Long 
55924c7070dbSScott Long 		rxq->ifr_ifdi = ifdip;
559395246abbSSean Bruno 		/* XXX this needs to be changed if #rx queues != #tx queues */
559495246abbSSean Bruno 		rxq->ifr_ntxqirq = 1;
559595246abbSSean Bruno 		rxq->ifr_txqid[0] = i;
55964c7070dbSScott Long 		for (j = 0; j < nrxqs; j++, ifdip++) {
5597bfce461eSMarius Strobl 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
5598bfce461eSMarius Strobl 				device_printf(dev,
5599bfce461eSMarius Strobl 				    "Unable to allocate RX descriptors\n");
56004c7070dbSScott Long 				err = ENOMEM;
56014c7070dbSScott Long 				goto err_tx_desc;
56024c7070dbSScott Long 			}
56034c7070dbSScott Long 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
56044c7070dbSScott Long 		}
56054c7070dbSScott Long 		rxq->ifr_ctx = ctx;
56064c7070dbSScott Long 		rxq->ifr_id = i;
560723ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
560823ac9029SStephen Hurd 			rxq->ifr_fl_offset = 1;
56094c7070dbSScott Long 		} else {
561023ac9029SStephen Hurd 			rxq->ifr_fl_offset = 0;
56114c7070dbSScott Long 		}
56124c7070dbSScott Long 		rxq->ifr_nfl = nfree_lists;
56134c7070dbSScott Long 		if (!(fl =
5614ac2fffa4SPedro F. Giffuni 			  (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
56154c7070dbSScott Long 			device_printf(dev, "Unable to allocate free list memory\n");
56164c7070dbSScott Long 			err = ENOMEM;
56170d0338afSConrad Meyer 			goto err_tx_desc;
56184c7070dbSScott Long 		}
56194c7070dbSScott Long 		rxq->ifr_fl = fl;
56204c7070dbSScott Long 		for (j = 0; j < nfree_lists; j++) {
562195246abbSSean Bruno 			fl[j].ifl_rxq = rxq;
562295246abbSSean Bruno 			fl[j].ifl_id = j;
562395246abbSSean Bruno 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
562495246abbSSean Bruno 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
56254c7070dbSScott Long 		}
56264c7070dbSScott Long 		/* Allocate receive buffers for the ring */
56274c7070dbSScott Long 		if (iflib_rxsd_alloc(rxq)) {
56284c7070dbSScott Long 			device_printf(dev,
56294c7070dbSScott Long 			    "Critical Failure setting up receive buffers\n");
56304c7070dbSScott Long 			err = ENOMEM;
56314c7070dbSScott Long 			goto err_rx_desc;
56324c7070dbSScott Long 		}
563387890dbaSSean Bruno 
563487890dbaSSean Bruno 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
56353db348b5SMarius Strobl 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
56363db348b5SMarius Strobl 			    M_WAITOK);
56374c7070dbSScott Long 	}
56384c7070dbSScott Long 
56394c7070dbSScott Long 	/* TXQs */
56404c7070dbSScott Long 	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
56414c7070dbSScott Long 	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
56424c7070dbSScott Long 	for (i = 0; i < ntxqsets; i++) {
56434c7070dbSScott Long 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
56444c7070dbSScott Long 
56454c7070dbSScott Long 		for (j = 0; j < ntxqs; j++, di++) {
56464c7070dbSScott Long 			vaddrs[i*ntxqs + j] = di->idi_vaddr;
56474c7070dbSScott Long 			paddrs[i*ntxqs + j] = di->idi_paddr;
56484c7070dbSScott Long 		}
56494c7070dbSScott Long 	}
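	/*
	 * The vaddrs/paddrs arrays are handed to the driver flattened:
	 * e.g. with ntxqsets = 2 and ntxqs = 2 they hold four entries in
	 * the order [qset0/q0, qset0/q1, qset1/q0, qset1/q1], i.e. entry
	 * index = qset * ntxqs + queue, matching the i*ntxqs + j indexing
	 * above.  The RX arrays below are laid out the same way with nrxqs.
	 */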
56504c7070dbSScott Long 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
5651bfce461eSMarius Strobl 		device_printf(ctx->ifc_dev,
5652bfce461eSMarius Strobl 		    "Unable to allocate device TX queue\n");
56534c7070dbSScott Long 		iflib_tx_structures_free(ctx);
56544c7070dbSScott Long 		free(vaddrs, M_IFLIB);
56554c7070dbSScott Long 		free(paddrs, M_IFLIB);
56564c7070dbSScott Long 		goto err_rx_desc;
56574c7070dbSScott Long 	}
56584c7070dbSScott Long 	free(vaddrs, M_IFLIB);
56594c7070dbSScott Long 	free(paddrs, M_IFLIB);
56604c7070dbSScott Long 
56614c7070dbSScott Long 	/* RXQs */
56624c7070dbSScott Long 	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
56634c7070dbSScott Long 	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
56644c7070dbSScott Long 	for (i = 0; i < nrxqsets; i++) {
56654c7070dbSScott Long 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
56664c7070dbSScott Long 
56674c7070dbSScott Long 		for (j = 0; j < nrxqs; j++, di++) {
56684c7070dbSScott Long 			vaddrs[i*nrxqs + j] = di->idi_vaddr;
56694c7070dbSScott Long 			paddrs[i*nrxqs + j] = di->idi_paddr;
56704c7070dbSScott Long 		}
56714c7070dbSScott Long 	}
56724c7070dbSScott Long 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
5673bfce461eSMarius Strobl 		device_printf(ctx->ifc_dev,
5674bfce461eSMarius Strobl 		    "Unable to allocate device RX queue\n");
56754c7070dbSScott Long 		iflib_tx_structures_free(ctx);
56764c7070dbSScott Long 		free(vaddrs, M_IFLIB);
56774c7070dbSScott Long 		free(paddrs, M_IFLIB);
56784c7070dbSScott Long 		goto err_rx_desc;
56794c7070dbSScott Long 	}
56804c7070dbSScott Long 	free(vaddrs, M_IFLIB);
56814c7070dbSScott Long 	free(paddrs, M_IFLIB);
56824c7070dbSScott Long 
56834c7070dbSScott Long 	return (0);
56844c7070dbSScott Long 
56854c7070dbSScott Long /* XXX handle allocation failure changes */
56864c7070dbSScott Long err_rx_desc:
56874c7070dbSScott Long err_tx_desc:
5688b89827a0SStephen Hurd rx_fail:
56894c7070dbSScott Long 	if (ctx->ifc_rxqs != NULL)
56904c7070dbSScott Long 		free(ctx->ifc_rxqs, M_IFLIB);
56914c7070dbSScott Long 	ctx->ifc_rxqs = NULL;
56924c7070dbSScott Long 	if (ctx->ifc_txqs != NULL)
56934c7070dbSScott Long 		free(ctx->ifc_txqs, M_IFLIB);
56944c7070dbSScott Long 	ctx->ifc_txqs = NULL;
56954c7070dbSScott Long fail:
56964c7070dbSScott Long 	return (err);
56974c7070dbSScott Long }
56984c7070dbSScott Long 
56994c7070dbSScott Long static int
57004c7070dbSScott Long iflib_tx_structures_setup(if_ctx_t ctx)
57014c7070dbSScott Long {
57024c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
57034c7070dbSScott Long 	int i;
57044c7070dbSScott Long 
57054c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
57064c7070dbSScott Long 		iflib_txq_setup(txq);
57074c7070dbSScott Long 
57084c7070dbSScott Long 	return (0);
57094c7070dbSScott Long }
57104c7070dbSScott Long 
57114c7070dbSScott Long static void
57124c7070dbSScott Long iflib_tx_structures_free(if_ctx_t ctx)
57134c7070dbSScott Long {
57144c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
57154d261ce2SStephen Hurd 	if_shared_ctx_t sctx = ctx->ifc_sctx;
57164c7070dbSScott Long 	int i, j;
57174c7070dbSScott Long 
57184c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
57194d261ce2SStephen Hurd 		for (j = 0; j < sctx->isc_ntxqs; j++)
57204c7070dbSScott Long 			iflib_dma_free(&txq->ift_ifdi[j]);
5721244e7cffSEric Joyner 		iflib_txq_destroy(txq);
57224c7070dbSScott Long 	}
57234c7070dbSScott Long 	free(ctx->ifc_txqs, M_IFLIB);
57244c7070dbSScott Long 	ctx->ifc_txqs = NULL;
57254c7070dbSScott Long 	IFDI_QUEUES_FREE(ctx);
57264c7070dbSScott Long }
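
/*
 * Note that IFDI_QUEUES_FREE() is issued here, after the TX DMA areas
 * and software descriptors have been released; iflib_rx_structures_free()
 * below handles only the RX side and does not repeat the driver callback.
 */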
57274c7070dbSScott Long 
57284c7070dbSScott Long /*********************************************************************
57294c7070dbSScott Long  *
57304c7070dbSScott Long  *  Initialize all receive rings.
57314c7070dbSScott Long  *
57324c7070dbSScott Long  **********************************************************************/
57334c7070dbSScott Long static int
57344c7070dbSScott Long iflib_rx_structures_setup(if_ctx_t ctx)
57354c7070dbSScott Long {
57364c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5737aaeb188aSBjoern A. Zeeb 	int q;
5738aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
57393d10e9edSMarius Strobl 	int err, i;
5740aaeb188aSBjoern A. Zeeb #endif
57414c7070dbSScott Long 
57424c7070dbSScott Long 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5743aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
57443d10e9edSMarius Strobl 		if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) {
57453d10e9edSMarius Strobl 			err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
574623ac9029SStephen Hurd 			    TCP_LRO_ENTRIES, min(1024,
57473d10e9edSMarius Strobl 			    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]));
57483d10e9edSMarius Strobl 			if (err != 0) {
57493d10e9edSMarius Strobl 				device_printf(ctx->ifc_dev,
57503d10e9edSMarius Strobl 				    "LRO Initialization failed!\n");
57514c7070dbSScott Long 				goto fail;
57524c7070dbSScott Long 			}
57533d10e9edSMarius Strobl 		}
5754aaeb188aSBjoern A. Zeeb #endif
57554c7070dbSScott Long 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
57564c7070dbSScott Long 	}
57574c7070dbSScott Long 	return (0);
5758aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
57594c7070dbSScott Long fail:
57604c7070dbSScott Long 	/*
57613d10e9edSMarius Strobl 	 * Free LRO resources allocated so far; we will only handle
57624c7070dbSScott Long 	 * the rings that completed, as the failing case will have
57634c7070dbSScott Long 	 * cleaned up for itself.  'q' failed, so it's the terminus.
57644c7070dbSScott Long 	 */
57654c7070dbSScott Long 	rxq = ctx->ifc_rxqs;
57664c7070dbSScott Long 	for (i = 0; i < q; ++i, rxq++) {
57673d10e9edSMarius Strobl 		if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO)
57683d10e9edSMarius Strobl 			tcp_lro_free(&rxq->ifr_lc);
57694c7070dbSScott Long 	}
57704c7070dbSScott Long 	return (err);
5771aaeb188aSBjoern A. Zeeb #endif
57724c7070dbSScott Long }
57734c7070dbSScott Long 
57744c7070dbSScott Long /*********************************************************************
57754c7070dbSScott Long  *
57764c7070dbSScott Long  *  Free all receive rings.
57774c7070dbSScott Long  *
57784c7070dbSScott Long  **********************************************************************/
57794c7070dbSScott Long static void
57804c7070dbSScott Long iflib_rx_structures_free(if_ctx_t ctx)
57814c7070dbSScott Long {
57824c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5783db8e8f1eSEric Joyner 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5784db8e8f1eSEric Joyner 	int i, j;
57854c7070dbSScott Long 
57863d10e9edSMarius Strobl 	for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
5787db8e8f1eSEric Joyner 		for (j = 0; j < sctx->isc_nrxqs; j++)
5788db8e8f1eSEric Joyner 			iflib_dma_free(&rxq->ifr_ifdi[j]);
57894c7070dbSScott Long 		iflib_rx_sds_free(rxq);
5790007b804fSMarius Strobl #if defined(INET6) || defined(INET)
57913d10e9edSMarius Strobl 		if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO)
57923d10e9edSMarius Strobl 			tcp_lro_free(&rxq->ifr_lc);
5793007b804fSMarius Strobl #endif
57944c7070dbSScott Long 	}
579577c1fcecSEric Joyner 	free(ctx->ifc_rxqs, M_IFLIB);
579677c1fcecSEric Joyner 	ctx->ifc_rxqs = NULL;
57974c7070dbSScott Long }
57984c7070dbSScott Long 
57994c7070dbSScott Long static int
58004c7070dbSScott Long iflib_qset_structures_setup(if_ctx_t ctx)
58014c7070dbSScott Long {
58024c7070dbSScott Long 	int err;
58034c7070dbSScott Long 
58046108c013SStephen Hurd 	/*
58056108c013SStephen Hurd 	 * It is expected that the caller takes care of freeing queues if this
58066108c013SStephen Hurd 	 * fails.
58076108c013SStephen Hurd 	 */
5808ac88e6daSStephen Hurd 	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
5809ac88e6daSStephen Hurd 		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
58104c7070dbSScott Long 		return (err);
5811ac88e6daSStephen Hurd 	}
58124c7070dbSScott Long 
58136108c013SStephen Hurd 	if ((err = iflib_rx_structures_setup(ctx)) != 0)
58144c7070dbSScott Long 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
58156108c013SStephen Hurd 
58164c7070dbSScott Long 	return (err);
58174c7070dbSScott Long }
58184c7070dbSScott Long 
58194c7070dbSScott Long int
58204c7070dbSScott Long iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
58213e0e6330SStephen Hurd 		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
58224c7070dbSScott Long {
58234c7070dbSScott Long 
58244c7070dbSScott Long 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
58254c7070dbSScott Long }
58264c7070dbSScott Long 
5827b103855eSStephen Hurd #ifdef SMP
5828aa3c5dd8SSean Bruno static int
5829b103855eSStephen Hurd find_nth(if_ctx_t ctx, int qid)
58304c7070dbSScott Long {
5831b103855eSStephen Hurd 	cpuset_t cpus;
5832aa3c5dd8SSean Bruno 	int i, cpuid, eqid, count;
58334c7070dbSScott Long 
5834b103855eSStephen Hurd 	CPU_COPY(&ctx->ifc_cpus, &cpus);
5835b103855eSStephen Hurd 	count = CPU_COUNT(&cpus);
5836aa3c5dd8SSean Bruno 	eqid = qid % count;
58374c7070dbSScott Long 	/* clear the first eqid (i.e. qid % count) set bits */
5838aa3c5dd8SSean Bruno 	for (i = 0; i < eqid; i++) {
5839b103855eSStephen Hurd 		cpuid = CPU_FFS(&cpus);
5840aa3c5dd8SSean Bruno 		MPASS(cpuid != 0);
5841b103855eSStephen Hurd 		CPU_CLR(cpuid-1, &cpus);
58424c7070dbSScott Long 	}
5843b103855eSStephen Hurd 	cpuid = CPU_FFS(&cpus);
5844aa3c5dd8SSean Bruno 	MPASS(cpuid != 0);
5845aa3c5dd8SSean Bruno 	return (cpuid-1);
58464c7070dbSScott Long }
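
/*
 * Example: with ifc_cpus = { 2, 3, 6, 7 } (count = 4), find_nth(ctx, 0)
 * returns 2, find_nth(ctx, 1) returns 3, and find_nth(ctx, 5) wraps to
 * eqid = 1 and returns 3 again.
 */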
58474c7070dbSScott Long 
5848b103855eSStephen Hurd #ifdef SCHED_ULE
5849b103855eSStephen Hurd extern struct cpu_group *cpu_top;              /* CPU topology */
5850b103855eSStephen Hurd 
5851b103855eSStephen Hurd static int
5852b103855eSStephen Hurd find_child_with_core(int cpu, struct cpu_group *grp)
5853b103855eSStephen Hurd {
5854b103855eSStephen Hurd 	int i;
5855b103855eSStephen Hurd 
5856b103855eSStephen Hurd 	if (grp->cg_children == 0)
5857b103855eSStephen Hurd 		return -1;
5858b103855eSStephen Hurd 
5859b103855eSStephen Hurd 	MPASS(grp->cg_child);
5860b103855eSStephen Hurd 	for (i = 0; i < grp->cg_children; i++) {
5861b103855eSStephen Hurd 		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
5862b103855eSStephen Hurd 			return i;
5863b103855eSStephen Hurd 	}
5864b103855eSStephen Hurd 
5865b103855eSStephen Hurd 	return -1;
5866b103855eSStephen Hurd }
5867b103855eSStephen Hurd 
5868b103855eSStephen Hurd /*
58690b75ac77SStephen Hurd  * Find the nth "close" core to the specified core.
58700b75ac77SStephen Hurd  * "close" is defined as the deepest level that shares
58710b75ac77SStephen Hurd  * at least an L2 cache.  With threads, this will be
5872f154ece0SStephen Hurd  * threads on the same core.  If the shared cache is L3
58730b75ac77SStephen Hurd  * or higher, simply returns the same core.
5874b103855eSStephen Hurd  */
5875b103855eSStephen Hurd static int
58760b75ac77SStephen Hurd find_close_core(int cpu, int core_offset)
5877b103855eSStephen Hurd {
5878b103855eSStephen Hurd 	struct cpu_group *grp;
5879b103855eSStephen Hurd 	int i;
58800b75ac77SStephen Hurd 	int fcpu;
5881b103855eSStephen Hurd 	cpuset_t cs;
5882b103855eSStephen Hurd 
5883b103855eSStephen Hurd 	grp = cpu_top;
5884b103855eSStephen Hurd 	if (grp == NULL)
5885b103855eSStephen Hurd 		return cpu;
5886b103855eSStephen Hurd 	i = 0;
5887b103855eSStephen Hurd 	while ((i = find_child_with_core(cpu, grp)) != -1) {
5888b103855eSStephen Hurd 		/* If the child only has one cpu, don't descend */
5889b103855eSStephen Hurd 		if (grp->cg_child[i].cg_count <= 1)
5890b103855eSStephen Hurd 			break;
5891b103855eSStephen Hurd 		grp = &grp->cg_child[i];
5892b103855eSStephen Hurd 	}
5893b103855eSStephen Hurd 
5894b103855eSStephen Hurd 	/* If they don't share at least an L2 cache, use the same CPU */
5895b103855eSStephen Hurd 	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
5896b103855eSStephen Hurd 		return cpu;
5897b103855eSStephen Hurd 
5898b103855eSStephen Hurd 	/* Now pick one */
5899b103855eSStephen Hurd 	CPU_COPY(&grp->cg_mask, &cs);
59000b75ac77SStephen Hurd 
59010b75ac77SStephen Hurd 	/* Add the selected CPU offset to core offset. */
59020b75ac77SStephen Hurd 	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
59030b75ac77SStephen Hurd 		if (fcpu - 1 == cpu)
59040b75ac77SStephen Hurd 			break;
59050b75ac77SStephen Hurd 		CPU_CLR(fcpu - 1, &cs);
59060b75ac77SStephen Hurd 	}
59070b75ac77SStephen Hurd 	MPASS(fcpu);
59080b75ac77SStephen Hurd 
59090b75ac77SStephen Hurd 	core_offset += i;
59100b75ac77SStephen Hurd 
59110b75ac77SStephen Hurd 	CPU_COPY(&grp->cg_mask, &cs);
59120b75ac77SStephen Hurd 	for (i = core_offset % grp->cg_count; i > 0; i--) {
5913b103855eSStephen Hurd 		MPASS(CPU_FFS(&cs));
5914b103855eSStephen Hurd 		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5915b103855eSStephen Hurd 	}
5916b103855eSStephen Hurd 	MPASS(CPU_FFS(&cs));
5917b103855eSStephen Hurd 	return CPU_FFS(&cs) - 1;
5918b103855eSStephen Hurd }
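
/*
 * Example: if CPUs 0 and 1 are SMT threads of the same core (a shared
 * L1/L2 group of size 2), find_close_core(0, 0) selects CPU 0,
 * find_close_core(0, 1) selects CPU 1, and offsets beyond the group
 * size wrap around within the group.
 */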
5919b103855eSStephen Hurd #else
5920b103855eSStephen Hurd static int
59210b75ac77SStephen Hurd find_close_core(int cpu, int core_offset __unused)
5922b103855eSStephen Hurd {
592397755e83SKonstantin Belousov 	return cpu;
5924b103855eSStephen Hurd }
5925b103855eSStephen Hurd #endif
5926b103855eSStephen Hurd 
5927b103855eSStephen Hurd static int
59280b75ac77SStephen Hurd get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5929b103855eSStephen Hurd {
5930b103855eSStephen Hurd 	switch (type) {
5931b103855eSStephen Hurd 	case IFLIB_INTR_TX:
59320b75ac77SStephen Hurd 		/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
59330b75ac77SStephen Hurd 		/* XXX handle multiple RX threads per core and more than two cores per L2 group */
5934b103855eSStephen Hurd 		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5935b103855eSStephen Hurd 	case IFLIB_INTR_RX:
5936b103855eSStephen Hurd 	case IFLIB_INTR_RXTX:
59370b75ac77SStephen Hurd 		/* RX queues get the specified core */
5938b103855eSStephen Hurd 		return qid / CPU_COUNT(&ctx->ifc_cpus);
5939b103855eSStephen Hurd 	default:
5940b103855eSStephen Hurd 		return -1;
5941b103855eSStephen Hurd 	}
5942b103855eSStephen Hurd }
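
/*
 * Example: with 4 CPUs in ifc_cpus, RX queue 0 gets core offset 0 while
 * the matching TX queue 0 gets offset 1, so find_close_core() places the
 * TX task on a neighbor of the RX CPU (typically the other SMT thread
 * of the same core).
 */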
5943b103855eSStephen Hurd #else
59440b75ac77SStephen Hurd #define get_core_offset(ctx, type, qid)	CPU_FIRST()
59450b75ac77SStephen Hurd #define find_close_core(cpuid, tid)	CPU_FIRST()
5946b103855eSStephen Hurd #define find_nth(ctx, gid)		CPU_FIRST()
5947b103855eSStephen Hurd #endif
5948b103855eSStephen Hurd 
5949b103855eSStephen Hurd /* Just to avoid copy/paste */
5950b103855eSStephen Hurd static inline int
5951f855ec81SMarius Strobl iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
5952f855ec81SMarius Strobl     int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
5953f855ec81SMarius Strobl     const char *name)
5954b103855eSStephen Hurd {
5955f855ec81SMarius Strobl 	device_t dev;
5956f154ece0SStephen Hurd 	int co, cpuid, err, tid;
5957b103855eSStephen Hurd 
5958f855ec81SMarius Strobl 	dev = ctx->ifc_dev;
5959f154ece0SStephen Hurd 	co = ctx->ifc_sysctl_core_offset;
5960f154ece0SStephen Hurd 	if (ctx->ifc_sysctl_separate_txrx && type == IFLIB_INTR_TX)
5961f154ece0SStephen Hurd 		co += ctx->ifc_softc_ctx.isc_nrxqsets;
5962f154ece0SStephen Hurd 	cpuid = find_nth(ctx, qid + co);
59630b75ac77SStephen Hurd 	tid = get_core_offset(ctx, type, qid);
59643d10e9edSMarius Strobl 	if (tid < 0) {
59653d10e9edSMarius Strobl 		device_printf(dev, "get_core_offset failed\n");
59663d10e9edSMarius Strobl 		return (EOPNOTSUPP);
59673d10e9edSMarius Strobl 	}
59680b75ac77SStephen Hurd 	cpuid = find_close_core(cpuid, tid);
5969f855ec81SMarius Strobl 	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev, irq->ii_res,
5970f855ec81SMarius Strobl 	    name);
5971b103855eSStephen Hurd 	if (err) {
5972f855ec81SMarius Strobl 		device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
5973b103855eSStephen Hurd 		return (err);
5974b103855eSStephen Hurd 	}
5975b103855eSStephen Hurd #ifdef notyet
5976b103855eSStephen Hurd 	if (cpuid > ctx->ifc_cpuid_highest)
5977b103855eSStephen Hurd 		ctx->ifc_cpuid_highest = cpuid;
5978b103855eSStephen Hurd #endif
59793d10e9edSMarius Strobl 	return (0);
5980b103855eSStephen Hurd }
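
/*
 * The CPU is chosen in two steps above: find_nth() picks the qid'th CPU
 * (plus any configured core offset) from the set iflib may use, and
 * find_close_core() then shifts TX interrupts onto a CPU sharing at
 * least an L2 cache with the RX queue of the same index.
 */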
5981b103855eSStephen Hurd 
59824c7070dbSScott Long int
59834c7070dbSScott Long iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
59844c7070dbSScott Long 			iflib_intr_type_t type, driver_filter_t *filter,
59853e0e6330SStephen Hurd 			void *filter_arg, int qid, const char *name)
59864c7070dbSScott Long {
5987f855ec81SMarius Strobl 	device_t dev;
59884c7070dbSScott Long 	struct grouptask *gtask;
59894c7070dbSScott Long 	struct taskqgroup *tqg;
59904c7070dbSScott Long 	iflib_filter_info_t info;
599123ac9029SStephen Hurd 	gtask_fn_t *fn;
5992b103855eSStephen Hurd 	int tqrid, err;
599395246abbSSean Bruno 	driver_filter_t *intr_fast;
59944c7070dbSScott Long 	void *q;
59954c7070dbSScott Long 
59964c7070dbSScott Long 	info = &ctx->ifc_filter_info;
5997add6f7d0SSean Bruno 	tqrid = rid;
59984c7070dbSScott Long 
59994c7070dbSScott Long 	switch (type) {
60004c7070dbSScott Long 	/* XXX merge tx/rx for netmap? */
60014c7070dbSScott Long 	case IFLIB_INTR_TX:
60024c7070dbSScott Long 		q = &ctx->ifc_txqs[qid];
60034c7070dbSScott Long 		info = &ctx->ifc_txqs[qid].ift_filter_info;
60044c7070dbSScott Long 		gtask = &ctx->ifc_txqs[qid].ift_task;
6005ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
60064c7070dbSScott Long 		fn = _task_fn_tx;
600795246abbSSean Bruno 		intr_fast = iflib_fast_intr;
6008da69b8f9SSean Bruno 		GROUPTASK_INIT(gtask, 0, fn, q);
60095ee36c68SStephen Hurd 		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
60104c7070dbSScott Long 		break;
60114c7070dbSScott Long 	case IFLIB_INTR_RX:
60124c7070dbSScott Long 		q = &ctx->ifc_rxqs[qid];
60134c7070dbSScott Long 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
60144c7070dbSScott Long 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6015ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
60164c7070dbSScott Long 		fn = _task_fn_rx;
6017ab2e3f79SStephen Hurd 		intr_fast = iflib_fast_intr;
60186c3e93cbSGleb Smirnoff 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
601995246abbSSean Bruno 		break;
602095246abbSSean Bruno 	case IFLIB_INTR_RXTX:
602195246abbSSean Bruno 		q = &ctx->ifc_rxqs[qid];
602295246abbSSean Bruno 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
602395246abbSSean Bruno 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6024ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
602595246abbSSean Bruno 		fn = _task_fn_rx;
602695246abbSSean Bruno 		intr_fast = iflib_fast_intr_rxtx;
60276c3e93cbSGleb Smirnoff 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
60284c7070dbSScott Long 		break;
60294c7070dbSScott Long 	case IFLIB_INTR_ADMIN:
60304c7070dbSScott Long 		q = ctx;
6031da69b8f9SSean Bruno 		tqrid = -1;
60324c7070dbSScott Long 		info = &ctx->ifc_filter_info;
60334c7070dbSScott Long 		gtask = &ctx->ifc_admin_task;
6034ab2e3f79SStephen Hurd 		tqg = qgroup_if_config_tqg;
60354c7070dbSScott Long 		fn = _task_fn_admin;
603695246abbSSean Bruno 		intr_fast = iflib_fast_intr_ctx;
60374c7070dbSScott Long 		break;
60384c7070dbSScott Long 	default:
60393d10e9edSMarius Strobl 		device_printf(ctx->ifc_dev, "%s: unknown net intr type\n",
60403d10e9edSMarius Strobl 		    __func__);
60413d10e9edSMarius Strobl 		return (EINVAL);
60424c7070dbSScott Long 	}
60434c7070dbSScott Long 
60444c7070dbSScott Long 	info->ifi_filter = filter;
60454c7070dbSScott Long 	info->ifi_filter_arg = filter_arg;
60464c7070dbSScott Long 	info->ifi_task = gtask;
604795246abbSSean Bruno 	info->ifi_ctx = q;
60484c7070dbSScott Long 
6049f855ec81SMarius Strobl 	dev = ctx->ifc_dev;
605095246abbSSean Bruno 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info,  name);
6051da69b8f9SSean Bruno 	if (err != 0) {
6052f855ec81SMarius Strobl 		device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
60534c7070dbSScott Long 		return (err);
6054da69b8f9SSean Bruno 	}
6055da69b8f9SSean Bruno 	if (type == IFLIB_INTR_ADMIN)
6056da69b8f9SSean Bruno 		return (0);
6057da69b8f9SSean Bruno 
60584c7070dbSScott Long 	if (tqrid != -1) {
6059f855ec81SMarius Strobl 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
6060f855ec81SMarius Strobl 		    q, name);
6061b103855eSStephen Hurd 		if (err)
6062b103855eSStephen Hurd 			return (err);
6063aa3c5dd8SSean Bruno 	} else {
6064f855ec81SMarius Strobl 		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
6065aa3c5dd8SSean Bruno 	}
60664c7070dbSScott Long 
60674c7070dbSScott Long 	return (0);
60684c7070dbSScott Long }
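
/*
 * Typical driver usage (sketch only; the callback and field names are
 * illustrative, not part of iflib): in its IFDI_MSIX_INTR_ASSIGN method
 * a driver registers one RXTX vector per RX queue plus an admin vector:
 *
 *	rid = vector + 1;
 *	err = iflib_irq_alloc_generic(ctx, &rxq->que_irq, rid,
 *	    IFLIB_INTR_RXTX, drv_msix_que, rxq, rxq->me, buf);
 *	...
 *	err = iflib_irq_alloc_generic(ctx, &sc->adm_irq, rid,
 *	    IFLIB_INTR_ADMIN, drv_msix_adminq, sc, 0, "aq");
 */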
60694c7070dbSScott Long 
60704c7070dbSScott Long void
60713e0e6330SStephen Hurd iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
60724c7070dbSScott Long {
60734c7070dbSScott Long 	struct grouptask *gtask;
60744c7070dbSScott Long 	struct taskqgroup *tqg;
607523ac9029SStephen Hurd 	gtask_fn_t *fn;
60764c7070dbSScott Long 	void *q;
6077b103855eSStephen Hurd 	int err;
60784c7070dbSScott Long 
60794c7070dbSScott Long 	switch (type) {
60804c7070dbSScott Long 	case IFLIB_INTR_TX:
60814c7070dbSScott Long 		q = &ctx->ifc_txqs[qid];
60824c7070dbSScott Long 		gtask = &ctx->ifc_txqs[qid].ift_task;
6083ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
60844c7070dbSScott Long 		fn = _task_fn_tx;
6085f98977b5SHans Petter Selasky 		GROUPTASK_INIT(gtask, 0, fn, q);
60864c7070dbSScott Long 		break;
60874c7070dbSScott Long 	case IFLIB_INTR_RX:
60884c7070dbSScott Long 		q = &ctx->ifc_rxqs[qid];
60894c7070dbSScott Long 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6090ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
60914c7070dbSScott Long 		fn = _task_fn_rx;
6092f98977b5SHans Petter Selasky 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
60934c7070dbSScott Long 		break;
60944c7070dbSScott Long 	case IFLIB_INTR_IOV:
60954c7070dbSScott Long 		q = ctx;
60964c7070dbSScott Long 		gtask = &ctx->ifc_vflr_task;
6097ab2e3f79SStephen Hurd 		tqg = qgroup_if_config_tqg;
60984c7070dbSScott Long 		fn = _task_fn_iov;
6099f98977b5SHans Petter Selasky 		GROUPTASK_INIT(gtask, 0, fn, q);
61004c7070dbSScott Long 		break;
61014c7070dbSScott Long 	default:
61024c7070dbSScott Long 		panic("unknown net intr type");
61034c7070dbSScott Long 	}
6104f855ec81SMarius Strobl 	if (irq != NULL) {
6105f855ec81SMarius Strobl 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
6106f855ec81SMarius Strobl 		    q, name);
6107b103855eSStephen Hurd 		if (err)
6108f855ec81SMarius Strobl 			taskqgroup_attach(tqg, gtask, q, ctx->ifc_dev,
6109f855ec81SMarius Strobl 			    irq->ii_res, name);
6110f855ec81SMarius Strobl 	} else {
6111f855ec81SMarius Strobl 		taskqgroup_attach(tqg, gtask, q, NULL, NULL, name);
6112b103855eSStephen Hurd 	}
6113b103855eSStephen Hurd }
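
/*
 * Unlike iflib_irq_alloc_generic(), no interrupt resource is allocated
 * here; only the deferred group task is set up.  When irq is NULL the
 * task is attached without any CPU affinity or interrupt binding.
 */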
61144c7070dbSScott Long 
61154c7070dbSScott Long void
61164c7070dbSScott Long iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
61174c7070dbSScott Long {
6118b97de13aSMarius Strobl 
61194c7070dbSScott Long 	if (irq->ii_tag)
61204c7070dbSScott Long 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
61214c7070dbSScott Long 
61224c7070dbSScott Long 	if (irq->ii_res)
6123b97de13aSMarius Strobl 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
6124b97de13aSMarius Strobl 		    rman_get_rid(irq->ii_res), irq->ii_res);
61254c7070dbSScott Long }
61264c7070dbSScott Long 
61274c7070dbSScott Long static int
61283e0e6330SStephen Hurd iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
61294c7070dbSScott Long {
61304c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
61314c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
61324c7070dbSScott Long 	if_irq_t irq = &ctx->ifc_legacy_irq;
61334c7070dbSScott Long 	iflib_filter_info_t info;
6134f855ec81SMarius Strobl 	device_t dev;
61354c7070dbSScott Long 	struct grouptask *gtask;
6136f855ec81SMarius Strobl 	struct resource *res;
61374c7070dbSScott Long 	struct taskqgroup *tqg;
61384c7070dbSScott Long 	void *q;
6139d49e83eaSMarius Strobl 	int err, tqrid;
614041669133SMark Johnston 	bool rx_only;
61414c7070dbSScott Long 
61424c7070dbSScott Long 	q = &ctx->ifc_rxqs[0];
61434c7070dbSScott Long 	info = &rxq[0].ifr_filter_info;
61444c7070dbSScott Long 	gtask = &rxq[0].ifr_task;
6145ab2e3f79SStephen Hurd 	tqg = qgroup_if_io_tqg;
6146d49e83eaSMarius Strobl 	tqrid = *rid;
614741669133SMark Johnston 	rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0;
61484c7070dbSScott Long 
61494c7070dbSScott Long 	ctx->ifc_flags |= IFC_LEGACY;
61504c7070dbSScott Long 	info->ifi_filter = filter;
61514c7070dbSScott Long 	info->ifi_filter_arg = filter_arg;
61524c7070dbSScott Long 	info->ifi_task = gtask;
615341669133SMark Johnston 	info->ifi_ctx = rx_only ? ctx : q;
61544c7070dbSScott Long 
6155f855ec81SMarius Strobl 	dev = ctx->ifc_dev;
61564c7070dbSScott Long 	/* We allocate a single interrupt resource */
615741669133SMark Johnston 	err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? iflib_fast_intr_ctx :
615841669133SMark Johnston 	    iflib_fast_intr_rxtx, NULL, info, name);
615941669133SMark Johnston 	if (err != 0)
61604c7070dbSScott Long 		return (err);
6161f98977b5SHans Petter Selasky 	NET_GROUPTASK_INIT(gtask, 0, _task_fn_rx, q);
6162f855ec81SMarius Strobl 	res = irq->ii_res;
6163f855ec81SMarius Strobl 	taskqgroup_attach(tqg, gtask, q, dev, res, name);
61644c7070dbSScott Long 
61654c7070dbSScott Long 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
6166f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
6167f855ec81SMarius Strobl 	    "tx");
61684c7070dbSScott Long 	return (0);
61694c7070dbSScott Long }
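
/*
 * In legacy/MSI mode a single interrupt resource drives everything: the
 * fast filter chosen depends on IFLIB_SINGLE_IRQ_RX_ONLY, and both the
 * RX group task of queue 0 and the TX group task are attached to that
 * one resource.
 */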
61704c7070dbSScott Long 
61714c7070dbSScott Long void
61724c7070dbSScott Long iflib_led_create(if_ctx_t ctx)
61734c7070dbSScott Long {
61744c7070dbSScott Long 
61754c7070dbSScott Long 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
61764c7070dbSScott Long 	    device_get_nameunit(ctx->ifc_dev));
61774c7070dbSScott Long }
61784c7070dbSScott Long 
61794c7070dbSScott Long void
61804c7070dbSScott Long iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
61814c7070dbSScott Long {
61824c7070dbSScott Long 
61834c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
61844c7070dbSScott Long }
61854c7070dbSScott Long 
61864c7070dbSScott Long void
61874c7070dbSScott Long iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
61884c7070dbSScott Long {
61894c7070dbSScott Long 
61904c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
61914c7070dbSScott Long }
61924c7070dbSScott Long 
61934c7070dbSScott Long void
61944c7070dbSScott Long iflib_admin_intr_deferred(if_ctx_t ctx)
61954c7070dbSScott Long {
619646fa0c25SEric Joyner 
6197*ed6611ccSEd Maste 	MPASS(ctx->ifc_admin_task.gt_taskqueue != NULL);
61984c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
61994c7070dbSScott Long }
62004c7070dbSScott Long 
62014c7070dbSScott Long void
62024c7070dbSScott Long iflib_iov_intr_deferred(if_ctx_t ctx)
62034c7070dbSScott Long {
62044c7070dbSScott Long 
62054c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
62064c7070dbSScott Long }
62074c7070dbSScott Long 
62084c7070dbSScott Long void
6209d49e83eaSMarius Strobl iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, const char *name)
62104c7070dbSScott Long {
62114c7070dbSScott Long 
6212f855ec81SMarius Strobl 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
6213f855ec81SMarius Strobl 	    name);
62144c7070dbSScott Long }
62154c7070dbSScott Long 
62164c7070dbSScott Long void
6217aa8a24d3SStephen Hurd iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
6218aa8a24d3SStephen Hurd 	const char *name)
62194c7070dbSScott Long {
62204c7070dbSScott Long 
62214c7070dbSScott Long 	GROUPTASK_INIT(gtask, 0, fn, ctx);
6222f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL,
6223f855ec81SMarius Strobl 	    name);
62244c7070dbSScott Long }
62254c7070dbSScott Long 
62264c7070dbSScott Long void
622723ac9029SStephen Hurd iflib_config_gtask_deinit(struct grouptask *gtask)
622823ac9029SStephen Hurd {
622923ac9029SStephen Hurd 
6230ab2e3f79SStephen Hurd 	taskqgroup_detach(qgroup_if_config_tqg, gtask);
623123ac9029SStephen Hurd }
623223ac9029SStephen Hurd 
623323ac9029SStephen Hurd void
623423ac9029SStephen Hurd iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
62354c7070dbSScott Long {
62364c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
62374c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
62384c7070dbSScott Long 
62394c7070dbSScott Long 	if_setbaudrate(ifp, baudrate);
62407b610b60SSean Bruno 	if (baudrate >= IF_Gbps(10)) {
62417b610b60SSean Bruno 		STATE_LOCK(ctx);
624295246abbSSean Bruno 		ctx->ifc_flags |= IFC_PREFETCH;
62437b610b60SSean Bruno 		STATE_UNLOCK(ctx);
62447b610b60SSean Bruno 	}
62454c7070dbSScott Long 	/* If link down, disable watchdog */
62464c7070dbSScott Long 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
62474c7070dbSScott Long 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
62484c7070dbSScott Long 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
62494c7070dbSScott Long 	}
62504c7070dbSScott Long 	ctx->ifc_link_state = link_state;
62514c7070dbSScott Long 	if_link_state_change(ifp, link_state);
62524c7070dbSScott Long }
62534c7070dbSScott Long 
62544c7070dbSScott Long static int
62554c7070dbSScott Long iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
62564c7070dbSScott Long {
62574c7070dbSScott Long 	int credits;
62581248952aSSean Bruno #ifdef INVARIANTS
62591248952aSSean Bruno 	int credits_pre = txq->ift_cidx_processed;
62601248952aSSean Bruno #endif
62614c7070dbSScott Long 
62628a04b53dSKonstantin Belousov 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
62638a04b53dSKonstantin Belousov 	    BUS_DMASYNC_POSTREAD);
626495246abbSSean Bruno 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
62654c7070dbSScott Long 		return (0);
62664c7070dbSScott Long 
62674c7070dbSScott Long 	txq->ift_processed += credits;
62684c7070dbSScott Long 	txq->ift_cidx_processed += credits;
62694c7070dbSScott Long 
62701248952aSSean Bruno 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
62714c7070dbSScott Long 	if (txq->ift_cidx_processed >= txq->ift_size)
62724c7070dbSScott Long 		txq->ift_cidx_processed -= txq->ift_size;
62734c7070dbSScott Long 	return (credits);
62744c7070dbSScott Long }
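
/*
 * ift_cidx_processed is a ring index and wraps at ift_size, e.g. with
 * ift_size = 1024 and ift_cidx_processed = 1020 a credit update of 8
 * leaves it at 4.
 */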
62754c7070dbSScott Long 
62764c7070dbSScott Long static int
627795246abbSSean Bruno iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
62784c7070dbSScott Long {
627995dcf343SMarius Strobl 	iflib_fl_t fl;
628095dcf343SMarius Strobl 	u_int i;
62814c7070dbSScott Long 
628295dcf343SMarius Strobl 	for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
628395dcf343SMarius Strobl 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
628495dcf343SMarius Strobl 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
628523ac9029SStephen Hurd 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
628623ac9029SStephen Hurd 	    budget));
62874c7070dbSScott Long }
62884c7070dbSScott Long 
62894c7070dbSScott Long void
62904c7070dbSScott Long iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
62914c7070dbSScott Long 	const char *description, if_int_delay_info_t info,
62924c7070dbSScott Long 	int offset, int value)
62934c7070dbSScott Long {
62944c7070dbSScott Long 	info->iidi_ctx = ctx;
62954c7070dbSScott Long 	info->iidi_offset = offset;
62964c7070dbSScott Long 	info->iidi_value = value;
62974c7070dbSScott Long 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
62984c7070dbSScott Long 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
62997029da5cSPawel Biernacki 	    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
63004c7070dbSScott Long 	    info, 0, iflib_sysctl_int_delay, "I", description);
63014c7070dbSScott Long }
63024c7070dbSScott Long 
6303aa8a24d3SStephen Hurd struct sx *
63044c7070dbSScott Long iflib_ctx_lock_get(if_ctx_t ctx)
63054c7070dbSScott Long {
63064c7070dbSScott Long 
6307aa8a24d3SStephen Hurd 	return (&ctx->ifc_ctx_sx);
63084c7070dbSScott Long }
63094c7070dbSScott Long 
63104c7070dbSScott Long static int
63114c7070dbSScott Long iflib_msix_init(if_ctx_t ctx)
63124c7070dbSScott Long {
63134c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
63144c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
63154c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
63163d10e9edSMarius Strobl 	int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues;
63173d10e9edSMarius Strobl 	int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors;
63184c7070dbSScott Long 
6319d2735264SStephen Hurd 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
6320d2735264SStephen Hurd 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
632123ac9029SStephen Hurd 
6322b97de13aSMarius Strobl 	if (bootverbose)
6323b97de13aSMarius Strobl 		device_printf(dev, "msix_init qsets capped at %d\n",
6324b97de13aSMarius Strobl 		    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
63251248952aSSean Bruno 
63264c7070dbSScott Long 	/* Override by tuneable */
6327ea351d3fSSean Bruno 	if (scctx->isc_disable_msix)
63284c7070dbSScott Long 		goto msi;
63294c7070dbSScott Long 
6330b97de13aSMarius Strobl 	/* First try MSI-X */
6331b97de13aSMarius Strobl 	if ((msgs = pci_msix_count(dev)) == 0) {
6332b97de13aSMarius Strobl 		if (bootverbose)
6333b97de13aSMarius Strobl 			device_printf(dev, "MSI-X not supported or disabled\n");
6334b97de13aSMarius Strobl 		goto msi;
6335b97de13aSMarius Strobl 	}
63363d10e9edSMarius Strobl 
63373d10e9edSMarius Strobl 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
63384c7070dbSScott Long 	/*
63394c7070dbSScott Long 	 * bar == -1 => "trust me I know what I'm doing"
63404c7070dbSScott Long 	 * Some drivers are for hardware that is so shoddily
63414c7070dbSScott Long 	 * documented that no one knows which bars are which
63424c7070dbSScott Long 	 * so the developer has to map all bars. This hack
6343b97de13aSMarius Strobl 	 * allows shoddy garbage to use MSI-X in this framework.
63444c7070dbSScott Long 	 */
63454c7070dbSScott Long 	if (bar != -1) {
63464c7070dbSScott Long 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
63474c7070dbSScott Long 	            SYS_RES_MEMORY, &bar, RF_ACTIVE);
63484c7070dbSScott Long 		if (ctx->ifc_msix_mem == NULL) {
6349b97de13aSMarius Strobl 			device_printf(dev, "Unable to map MSI-X table\n");
63504c7070dbSScott Long 			goto msi;
63514c7070dbSScott Long 		}
63524c7070dbSScott Long 	}
63533d10e9edSMarius Strobl 
63543d10e9edSMarius Strobl 	admincnt = sctx->isc_admin_intrcnt;
63554c7070dbSScott Long #if IFLIB_DEBUG
63564c7070dbSScott Long 	/* use only 1 qset in debug mode */
63574c7070dbSScott Long 	queuemsgs = min(msgs - admincnt, 1);
63584c7070dbSScott Long #else
63594c7070dbSScott Long 	queuemsgs = msgs - admincnt;
63604c7070dbSScott Long #endif
63614c7070dbSScott Long #ifdef RSS
63624c7070dbSScott Long 	queues = imin(queuemsgs, rss_getnumbuckets());
63634c7070dbSScott Long #else
63644c7070dbSScott Long 	queues = queuemsgs;
63654c7070dbSScott Long #endif
63664c7070dbSScott Long 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6367b97de13aSMarius Strobl 	if (bootverbose)
6368b97de13aSMarius Strobl 		device_printf(dev,
6369b97de13aSMarius Strobl 		    "intr CPUs: %d queue msgs: %d admincnt: %d\n",
63704c7070dbSScott Long 		    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
63714c7070dbSScott Long #ifdef  RSS
63724c7070dbSScott Long 	/* If we're doing RSS, clamp at the number of RSS buckets */
63734c7070dbSScott Long 	if (queues > rss_getnumbuckets())
63744c7070dbSScott Long 		queues = rss_getnumbuckets();
63754c7070dbSScott Long #endif
637623ac9029SStephen Hurd 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
637723ac9029SStephen Hurd 		rx_queues = iflib_num_rx_queues;
63784c7070dbSScott Long 	else
63794c7070dbSScott Long 		rx_queues = queues;
6380d2735264SStephen Hurd 
6381d2735264SStephen Hurd 	if (rx_queues > scctx->isc_nrxqsets)
6382d2735264SStephen Hurd 		rx_queues = scctx->isc_nrxqsets;
6383d2735264SStephen Hurd 
638423ac9029SStephen Hurd 	/*
638523ac9029SStephen Hurd 	 * We want this to be all logical CPUs by default
638623ac9029SStephen Hurd 	 */
63874c7070dbSScott Long 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
63884c7070dbSScott Long 		tx_queues = iflib_num_tx_queues;
63894c7070dbSScott Long 	else
639023ac9029SStephen Hurd 		tx_queues = mp_ncpus;
639123ac9029SStephen Hurd 
6392d2735264SStephen Hurd 	if (tx_queues > scctx->isc_ntxqsets)
6393d2735264SStephen Hurd 		tx_queues = scctx->isc_ntxqsets;
6394d2735264SStephen Hurd 
639523ac9029SStephen Hurd 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
639623ac9029SStephen Hurd #ifdef INVARIANTS
639723ac9029SStephen Hurd 		if (tx_queues != rx_queues)
639877c1fcecSEric Joyner 			device_printf(dev,
639977c1fcecSEric Joyner 			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
640023ac9029SStephen Hurd 			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
640123ac9029SStephen Hurd #endif
640223ac9029SStephen Hurd 		tx_queues = min(rx_queues, tx_queues);
640323ac9029SStephen Hurd 		rx_queues = min(rx_queues, tx_queues);
640423ac9029SStephen Hurd 	}
64054c7070dbSScott Long 
64063d10e9edSMarius Strobl 	vectors = rx_queues + admincnt;
64073d10e9edSMarius Strobl 	if (msgs < vectors) {
64083d10e9edSMarius Strobl 		device_printf(dev,
64093d10e9edSMarius Strobl 		    "insufficient number of MSI-X vectors "
64103d10e9edSMarius Strobl 		    "(supported %d, need %d)\n", msgs, vectors);
64113d10e9edSMarius Strobl 		goto msi;
64123d10e9edSMarius Strobl 	}
64133d10e9edSMarius Strobl 
64141722eeacSMarius Strobl 	device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues,
64151722eeacSMarius Strobl 	    tx_queues);
64163d10e9edSMarius Strobl 	msgs = vectors;
64174c7070dbSScott Long 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
64183d10e9edSMarius Strobl 		if (vectors != msgs) {
64193d10e9edSMarius Strobl 			device_printf(dev,
64203d10e9edSMarius Strobl 			    "Unable to allocate sufficient MSI-X vectors "
64213d10e9edSMarius Strobl 			    "(got %d, need %d)\n", vectors, msgs);
64223d10e9edSMarius Strobl 			pci_release_msi(dev);
64233d10e9edSMarius Strobl 			if (bar != -1) {
64243d10e9edSMarius Strobl 				bus_release_resource(dev, SYS_RES_MEMORY, bar,
64253d10e9edSMarius Strobl 				    ctx->ifc_msix_mem);
64263d10e9edSMarius Strobl 				ctx->ifc_msix_mem = NULL;
64273d10e9edSMarius Strobl 			}
64283d10e9edSMarius Strobl 			goto msi;
64293d10e9edSMarius Strobl 		}
6430b97de13aSMarius Strobl 		device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
6431b97de13aSMarius Strobl 		    vectors);
64324c7070dbSScott Long 		scctx->isc_vectors = vectors;
64334c7070dbSScott Long 		scctx->isc_nrxqsets = rx_queues;
64344c7070dbSScott Long 		scctx->isc_ntxqsets = tx_queues;
64354c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_MSIX;
643623ac9029SStephen Hurd 
64374c7070dbSScott Long 		return (vectors);
64384c7070dbSScott Long 	} else {
643977c1fcecSEric Joyner 		device_printf(dev,
64403d10e9edSMarius Strobl 		    "failed to allocate %d MSI-X vectors, err: %d\n", vectors,
64413d10e9edSMarius Strobl 		    err);
64423d10e9edSMarius Strobl 		if (bar != -1) {
6443e4defe55SMarius Strobl 			bus_release_resource(dev, SYS_RES_MEMORY, bar,
6444e4defe55SMarius Strobl 			    ctx->ifc_msix_mem);
6445e4defe55SMarius Strobl 			ctx->ifc_msix_mem = NULL;
64464c7070dbSScott Long 		}
64473d10e9edSMarius Strobl 	}
64483d10e9edSMarius Strobl 
64494c7070dbSScott Long msi:
64504c7070dbSScott Long 	vectors = pci_msi_count(dev);
64514c7070dbSScott Long 	scctx->isc_nrxqsets = 1;
64524c7070dbSScott Long 	scctx->isc_ntxqsets = 1;
64534c7070dbSScott Long 	scctx->isc_vectors = vectors;
64544c7070dbSScott Long 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
64554c7070dbSScott Long 		device_printf(dev, "Using an MSI interrupt\n");
64564c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_MSI;
64574c7070dbSScott Long 	} else {
6458e4defe55SMarius Strobl 		scctx->isc_vectors = 1;
64594c7070dbSScott Long 		device_printf(dev, "Using a Legacy interrupt\n");
64604c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_LEGACY;
64614c7070dbSScott Long 	}
64624c7070dbSScott Long 
64634c7070dbSScott Long 	return (vectors);
64644c7070dbSScott Long }
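
/*
 * Worked example of the vector accounting above: on an 8-CPU system
 * with 16 MSI-X messages and isc_admin_intrcnt = 1, queuemsgs = 15 and
 * queues = min(8, 15) = 8, so (absent tunables, RSS or driver caps)
 * rx_queues = 8 and vectors = rx_queues + admincnt = 9; TX queues do
 * not consume extra vectors and default to one per logical CPU.
 */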
64654c7070dbSScott Long 
6466e4defe55SMarius Strobl static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
64674c7070dbSScott Long 
64684c7070dbSScott Long static int
64694c7070dbSScott Long mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
64704c7070dbSScott Long {
64714c7070dbSScott Long 	int rc;
64724c7070dbSScott Long 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
64734c7070dbSScott Long 	struct sbuf *sb;
6474e4defe55SMarius Strobl 	const char *ring_state = "UNKNOWN";
64754c7070dbSScott Long 
64764c7070dbSScott Long 	/* XXX needed ? */
64774c7070dbSScott Long 	rc = sysctl_wire_old_buffer(req, 0);
64784c7070dbSScott Long 	MPASS(rc == 0);
64794c7070dbSScott Long 	if (rc != 0)
64804c7070dbSScott Long 		return (rc);
64814c7070dbSScott Long 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
64824c7070dbSScott Long 	MPASS(sb != NULL);
64834c7070dbSScott Long 	if (sb == NULL)
64844c7070dbSScott Long 		return (ENOMEM);
64854c7070dbSScott Long 	if (state[3] <= 3)
64864c7070dbSScott Long 		ring_state = ring_states[state[3]];
64874c7070dbSScott Long 
64884c7070dbSScott Long 	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
64894c7070dbSScott Long 		    state[0], state[1], state[2], ring_state);
64904c7070dbSScott Long 	rc = sbuf_finish(sb);
64914c7070dbSScott Long 	sbuf_delete(sb);
64924c7070dbSScott Long 	return (rc);
64934c7070dbSScott Long }
64944c7070dbSScott Long 
649523ac9029SStephen Hurd enum iflib_ndesc_handler {
649623ac9029SStephen Hurd 	IFLIB_NTXD_HANDLER,
649723ac9029SStephen Hurd 	IFLIB_NRXD_HANDLER,
649823ac9029SStephen Hurd };
64994c7070dbSScott Long 
650023ac9029SStephen Hurd static int
650123ac9029SStephen Hurd mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
650223ac9029SStephen Hurd {
650323ac9029SStephen Hurd 	if_ctx_t ctx = (void *)arg1;
650423ac9029SStephen Hurd 	enum iflib_ndesc_handler type = arg2;
650523ac9029SStephen Hurd 	char buf[256] = {0};
650695246abbSSean Bruno 	qidx_t *ndesc;
650723ac9029SStephen Hurd 	char *p, *next;
650823ac9029SStephen Hurd 	int nqs, rc, i;
650923ac9029SStephen Hurd 
651023ac9029SStephen Hurd 	nqs = 8;
651123ac9029SStephen Hurd 	switch(type) {
651223ac9029SStephen Hurd 	case IFLIB_NTXD_HANDLER:
651323ac9029SStephen Hurd 		ndesc = ctx->ifc_sysctl_ntxds;
651423ac9029SStephen Hurd 		if (ctx->ifc_sctx)
651523ac9029SStephen Hurd 			nqs = ctx->ifc_sctx->isc_ntxqs;
651623ac9029SStephen Hurd 		break;
651723ac9029SStephen Hurd 	case IFLIB_NRXD_HANDLER:
651823ac9029SStephen Hurd 		ndesc = ctx->ifc_sysctl_nrxds;
651923ac9029SStephen Hurd 		if (ctx->ifc_sctx)
652023ac9029SStephen Hurd 			nqs = ctx->ifc_sctx->isc_nrxqs;
652123ac9029SStephen Hurd 		break;
65221ae4848cSMatt Macy 	default:
65233d10e9edSMarius Strobl 		printf("%s: unhandled type\n", __func__);
65243d10e9edSMarius Strobl 		return (EINVAL);
652523ac9029SStephen Hurd 	}
652623ac9029SStephen Hurd 	if (nqs == 0)
652723ac9029SStephen Hurd 		nqs = 8;
652823ac9029SStephen Hurd 
652923ac9029SStephen Hurd 	for (i = 0; i < 8; i++) {
653023ac9029SStephen Hurd 		if (i >= nqs)
653123ac9029SStephen Hurd 			break;
653223ac9029SStephen Hurd 		if (i)
653323ac9029SStephen Hurd 			strcat(buf, ",");
653423ac9029SStephen Hurd 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
653523ac9029SStephen Hurd 	}
653623ac9029SStephen Hurd 
653723ac9029SStephen Hurd 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
653823ac9029SStephen Hurd 	if (rc || req->newptr == NULL)
653923ac9029SStephen Hurd 		return rc;
654023ac9029SStephen Hurd 
654123ac9029SStephen Hurd 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
654223ac9029SStephen Hurd 	    i++, p = strsep(&next, " ,")) {
654323ac9029SStephen Hurd 		ndesc[i] = strtoul(p, NULL, 10);
654423ac9029SStephen Hurd 	}
654523ac9029SStephen Hurd 
654623ac9029SStephen Hurd 	return (rc);
654723ac9029SStephen Hurd }
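
/*
 * The handler accepts a comma- or space-separated list with one entry
 * per queue within a qset, e.g. setting the tunable
 *	dev.<driver>.<unit>.iflib.override_ntxds="512,1024"
 * supplies descriptor counts for the first two TX queues of each qset.
 */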
65484c7070dbSScott Long 
65494c7070dbSScott Long #define NAME_BUFLEN 32
65504c7070dbSScott Long static void
65514c7070dbSScott Long iflib_add_device_sysctl_pre(if_ctx_t ctx)
65524c7070dbSScott Long {
65534c7070dbSScott Long         device_t dev = iflib_get_dev(ctx);
65544c7070dbSScott Long 	struct sysctl_oid_list *child, *oid_list;
65554c7070dbSScott Long 	struct sysctl_ctx_list *ctx_list;
65564c7070dbSScott Long 	struct sysctl_oid *node;
65574c7070dbSScott Long 
65584c7070dbSScott Long 	ctx_list = device_get_sysctl_ctx(dev);
65594c7070dbSScott Long 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
65604c7070dbSScott Long 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
65617029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IFLIB fields");
65624c7070dbSScott Long 	oid_list = SYSCTL_CHILDREN(node);
65634c7070dbSScott Long 
656410a1e981SEric Joyner 	SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
656510a1e981SEric Joyner 		       CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version,
656623ac9029SStephen Hurd 		       "driver version");
656723ac9029SStephen Hurd 
65684c7070dbSScott Long 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
65694c7070dbSScott Long 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
65704c7070dbSScott Long 			"# of txqs to use, 0 => use default #");
65714c7070dbSScott Long 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
657223ac9029SStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
657323ac9029SStephen Hurd 			"# of rxqs to use, 0 => use default #");
657423ac9029SStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
657523ac9029SStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
657623ac9029SStephen Hurd                        "permit #txq != #rxq");
6577ea351d3fSSean Bruno 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6578ea351d3fSSean Bruno                       CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6579b97de13aSMarius Strobl                       "disable MSI-X (default 0)");
6580f4d2154eSStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6581f4d2154eSStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
65821722eeacSMarius Strobl 		       "set the RX budget");
6583fe51d4cdSStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6584fe51d4cdSStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
65851722eeacSMarius Strobl 		       "cause TX to abdicate instead of running to completion");
6586f154ece0SStephen Hurd 	ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
6587f154ece0SStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset",
6588f154ece0SStephen Hurd 		       CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
6589f154ece0SStephen Hurd 		       "offset to start using cores at");
6590f154ece0SStephen Hurd 	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx",
6591f154ece0SStephen Hurd 		       CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
6592f154ece0SStephen Hurd 		       "use separate cores for TX and RX");
65934c7070dbSScott Long 
659423ac9029SStephen Hurd 	/* XXX change for per-queue sizes */
659523ac9029SStephen Hurd 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
65967029da5cSPawel Biernacki 	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
65977029da5cSPawel Biernacki 	    IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A",
65981722eeacSMarius Strobl 	    "list of # of TX descriptors to use, 0 = use default #");
659923ac9029SStephen Hurd 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
66007029da5cSPawel Biernacki 	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
66017029da5cSPawel Biernacki 	    IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A",
66021722eeacSMarius Strobl 	    "list of # of RX descriptors to use, 0 = use default #");
66034c7070dbSScott Long }
66044c7070dbSScott Long 
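/*
 * Attach the per-queue statistics nodes (txq%d, rxq%d and their free
 * lists) under the "iflib" sysctl node once the queue structures exist.
 */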
66054c7070dbSScott Long static void
66064c7070dbSScott Long iflib_add_device_sysctl_post(if_ctx_t ctx)
66074c7070dbSScott Long {
66084c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
66094c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
66104c7070dbSScott Long 	device_t dev = iflib_get_dev(ctx);
66114c7070dbSScott Long 	struct sysctl_oid_list *child;
66124c7070dbSScott Long 	struct sysctl_ctx_list *ctx_list;
66134c7070dbSScott Long 	iflib_fl_t fl;
66144c7070dbSScott Long 	iflib_txq_t txq;
66154c7070dbSScott Long 	iflib_rxq_t rxq;
66164c7070dbSScott Long 	int i, j;
66174c7070dbSScott Long 	char namebuf[NAME_BUFLEN];
66184c7070dbSScott Long 	char *qfmt;
66194c7070dbSScott Long 	struct sysctl_oid *queue_node, *fl_node, *node;
66204c7070dbSScott Long 	struct sysctl_oid_list *queue_list, *fl_list;
66214c7070dbSScott Long 	ctx_list = device_get_sysctl_ctx(dev);
66224c7070dbSScott Long 
66234c7070dbSScott Long 	node = ctx->ifc_sysctl_node;
66244c7070dbSScott Long 	child = SYSCTL_CHILDREN(node);
66254c7070dbSScott Long 
66264c7070dbSScott Long 	if (scctx->isc_ntxqsets > 100)
66274c7070dbSScott Long 		qfmt = "txq%03d";
66284c7070dbSScott Long 	else if (scctx->isc_ntxqsets > 10)
66294c7070dbSScott Long 		qfmt = "txq%02d";
66304c7070dbSScott Long 	else
66314c7070dbSScott Long 		qfmt = "txq%d";
66324c7070dbSScott Long 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
66334c7070dbSScott Long 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
66344c7070dbSScott Long 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
66357029da5cSPawel Biernacki 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
66364c7070dbSScott Long 		queue_list = SYSCTL_CHILDREN(queue_node);
66374c7070dbSScott Long #if MEMORY_LOGGING
66384c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
66394c7070dbSScott Long 				CTLFLAG_RD,
66404c7070dbSScott Long 				&txq->ift_dequeued, "total mbufs freed");
66414c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
66424c7070dbSScott Long 				CTLFLAG_RD,
66434c7070dbSScott Long 				&txq->ift_enqueued, "total mbufs enqueued");
66444c7070dbSScott Long #endif
66454c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
66464c7070dbSScott Long 				   CTLFLAG_RD,
66474c7070dbSScott Long 				   &txq->ift_mbuf_defrag, "# of times m_defrag was called");
66484c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
66494c7070dbSScott Long 				   CTLFLAG_RD,
66504c7070dbSScott Long 				   &txq->ift_pullups, "# of times m_pullup was called");
66514c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
66524c7070dbSScott Long 				   CTLFLAG_RD,
66534c7070dbSScott Long 				   &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
66544c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
66554c7070dbSScott Long 				   CTLFLAG_RD,
665623ac9029SStephen Hurd 				   &txq->ift_no_desc_avail, "# of times no descriptors were available");
66574c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
66584c7070dbSScott Long 				   CTLFLAG_RD,
66591722eeacSMarius Strobl 				   &txq->ift_map_failed, "# of times DMA map failed");
66604c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
66614c7070dbSScott Long 				   CTLFLAG_RD,
66624c7070dbSScott Long 				   &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
66634c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
66644c7070dbSScott Long 				   CTLFLAG_RD,
66654c7070dbSScott Long 				   &txq->ift_no_tx_dma_setup, "# of times DMA map failed for a reason other than EFBIG");
66664c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
66674c7070dbSScott Long 				   CTLFLAG_RD,
66684c7070dbSScott Long 				   &txq->ift_pidx, 1, "Producer Index");
66694c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
66704c7070dbSScott Long 				   CTLFLAG_RD,
66714c7070dbSScott Long 				   &txq->ift_cidx, 1, "Consumer Index");
66724c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
66734c7070dbSScott Long 				   CTLFLAG_RD,
66744c7070dbSScott Long 				   &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
66754c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
66764c7070dbSScott Long 				   CTLFLAG_RD,
66774c7070dbSScott Long 				   &txq->ift_in_use, 1, "descriptors in use");
66784c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
66794c7070dbSScott Long 				   CTLFLAG_RD,
66804c7070dbSScott Long 				   &txq->ift_processed, "descriptors processed for clean");
66814c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
66824c7070dbSScott Long 				   CTLFLAG_RD,
66834c7070dbSScott Long 				   &txq->ift_cleaned, "total cleaned");
66844c7070dbSScott Long 		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
66857029da5cSPawel Biernacki 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
66867029da5cSPawel Biernacki 		    __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
66877029da5cSPawel Biernacki 		    mp_ring_state_handler, "A", "soft ring state");
66884c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
668995246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->enqueues,
66904c7070dbSScott Long 				       "# of enqueues to the mp_ring for this queue");
66914c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
669295246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->drops,
66934c7070dbSScott Long 				       "# of drops in the mp_ring for this queue");
66944c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
669595246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->starts,
66964c7070dbSScott Long 				       "# of normal consumer starts in the mp_ring for this queue");
66974c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
669895246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->stalls,
66994c7070dbSScott Long 					       "# of consumer stalls in the mp_ring for this queue");
67004c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
670195246abbSSean Bruno 			       CTLFLAG_RD, &txq->ift_br->restarts,
67024c7070dbSScott Long 				       "# of consumer restarts in the mp_ring for this queue");
67034c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
670495246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->abdications,
67054c7070dbSScott Long 				       "# of consumer abdications in the mp_ring for this queue");
67064c7070dbSScott Long 	}
67074c7070dbSScott Long 
67084c7070dbSScott Long 	if (scctx->isc_nrxqsets > 100)
67094c7070dbSScott Long 		qfmt = "rxq%03d";
67104c7070dbSScott Long 	else if (scctx->isc_nrxqsets > 10)
67114c7070dbSScott Long 		qfmt = "rxq%02d";
67124c7070dbSScott Long 	else
67134c7070dbSScott Long 		qfmt = "rxq%d";
67144c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
67154c7070dbSScott Long 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
67164c7070dbSScott Long 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
67177029da5cSPawel Biernacki 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
67184c7070dbSScott Long 		queue_list = SYSCTL_CHILDREN(queue_node);
671923ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
67204c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
67214c7070dbSScott Long 				       CTLFLAG_RD,
67224c7070dbSScott Long 				       &rxq->ifr_cq_cidx, 1, "Consumer Index");
67234c7070dbSScott Long 		}
6724da69b8f9SSean Bruno 
67254c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
67264c7070dbSScott Long 			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
67274c7070dbSScott Long 			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
67287029da5cSPawel Biernacki 			    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist Name");
67294c7070dbSScott Long 			fl_list = SYSCTL_CHILDREN(fl_node);
67304c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
67314c7070dbSScott Long 				       CTLFLAG_RD,
67324c7070dbSScott Long 				       &fl->ifl_pidx, 1, "Producer Index");
67334c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
67344c7070dbSScott Long 				       CTLFLAG_RD,
67354c7070dbSScott Long 				       &fl->ifl_cidx, 1, "Consumer Index");
67364c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
67374c7070dbSScott Long 				       CTLFLAG_RD,
67384c7070dbSScott Long 				       &fl->ifl_credits, 1, "credits available");
6739b3813609SPatrick Kelsey 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "buf_size",
6740b3813609SPatrick Kelsey 				       CTLFLAG_RD,
6741b3813609SPatrick Kelsey 				       &fl->ifl_buf_size, 1, "buffer size");
67424c7070dbSScott Long #if MEMORY_LOGGING
67434c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
67444c7070dbSScott Long 					CTLFLAG_RD,
67454c7070dbSScott Long 					&fl->ifl_m_enqueued, "mbufs allocated");
67464c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
67474c7070dbSScott Long 					CTLFLAG_RD,
67484c7070dbSScott Long 					&fl->ifl_m_dequeued, "mbufs freed");
67494c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
67504c7070dbSScott Long 					CTLFLAG_RD,
67514c7070dbSScott Long 					&fl->ifl_cl_enqueued, "clusters allocated");
67524c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
67534c7070dbSScott Long 					CTLFLAG_RD,
67544c7070dbSScott Long 					&fl->ifl_cl_dequeued, "clusters freed");
67554c7070dbSScott Long #endif
67564c7070dbSScott Long 
67574c7070dbSScott Long 		}
67584c7070dbSScott Long 	}
67594c7070dbSScott Long 
67604c7070dbSScott Long }
676195246abbSSean Bruno 
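/*
 * Flag the context for re-initialization; the IFC_DO_RESET request is
 * serviced asynchronously by iflib's admin task.
 */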
676277c1fcecSEric Joyner void
676377c1fcecSEric Joyner iflib_request_reset(if_ctx_t ctx)
676477c1fcecSEric Joyner {
676577c1fcecSEric Joyner 
676677c1fcecSEric Joyner 	STATE_LOCK(ctx);
676777c1fcecSEric Joyner 	ctx->ifc_flags |= IFC_DO_RESET;
676877c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
676977c1fcecSEric Joyner }
677077c1fcecSEric Joyner 
677195246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
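/*
 * On strict-alignment architectures the 14-byte Ethernet header leaves the
 * IP header misaligned.  Shift the header forward within the mbuf, or move
 * it into a freshly allocated header mbuf, so the payload that follows is
 * properly aligned.
 */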
677295246abbSSean Bruno static struct mbuf *
677395246abbSSean Bruno iflib_fixup_rx(struct mbuf *m)
677495246abbSSean Bruno {
677595246abbSSean Bruno 	struct mbuf *n;
677695246abbSSean Bruno 
677795246abbSSean Bruno 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
677895246abbSSean Bruno 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
677995246abbSSean Bruno 		m->m_data += ETHER_HDR_LEN;
678095246abbSSean Bruno 		n = m;
678195246abbSSean Bruno 	} else {
678295246abbSSean Bruno 		MGETHDR(n, M_NOWAIT, MT_DATA);
678395246abbSSean Bruno 		if (n == NULL) {
678495246abbSSean Bruno 			m_freem(m);
678595246abbSSean Bruno 			return (NULL);
678695246abbSSean Bruno 		}
678795246abbSSean Bruno 		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
678895246abbSSean Bruno 		m->m_data += ETHER_HDR_LEN;
678995246abbSSean Bruno 		m->m_len -= ETHER_HDR_LEN;
679095246abbSSean Bruno 		n->m_len = ETHER_HDR_LEN;
679195246abbSSean Bruno 		M_MOVE_PKTHDR(n, m);
679295246abbSSean Bruno 		n->m_next = m;
679395246abbSSean Bruno 	}
679495246abbSSean Bruno 	return (n);
679595246abbSSean Bruno }
679695246abbSSean Bruno #endif
679794618825SMark Johnston 
67987790c8c1SConrad Meyer #ifdef DEBUGNET
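/*
 * Debugnet (netdump/netgdb) support: report the RX queue count, free-list
 * size, and cluster size so the debugnet client can size its buffer pools.
 */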
679994618825SMark Johnston static void
68007790c8c1SConrad Meyer iflib_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
680194618825SMark Johnston {
680294618825SMark Johnston 	if_ctx_t ctx;
680394618825SMark Johnston 
680494618825SMark Johnston 	ctx = if_getsoftc(ifp);
680594618825SMark Johnston 	CTX_LOCK(ctx);
680694618825SMark Johnston 	*nrxr = NRXQSETS(ctx);
680794618825SMark Johnston 	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
680894618825SMark Johnston 	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
680994618825SMark Johnston 	CTX_UNLOCK(ctx);
681094618825SMark Johnston }
681194618825SMark Johnston 
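/*
 * On DEBUGNET_START, record each free list's cluster zone and disable TX
 * batching so frames are pushed out individually while dumping.
 */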
681294618825SMark Johnston static void
68137790c8c1SConrad Meyer iflib_debugnet_event(if_t ifp, enum debugnet_ev event)
681494618825SMark Johnston {
681594618825SMark Johnston 	if_ctx_t ctx;
681694618825SMark Johnston 	if_softc_ctx_t scctx;
681794618825SMark Johnston 	iflib_fl_t fl;
681894618825SMark Johnston 	iflib_rxq_t rxq;
681994618825SMark Johnston 	int i, j;
682094618825SMark Johnston 
682194618825SMark Johnston 	ctx = if_getsoftc(ifp);
682294618825SMark Johnston 	scctx = &ctx->ifc_softc_ctx;
682394618825SMark Johnston 
682494618825SMark Johnston 	switch (event) {
68257790c8c1SConrad Meyer 	case DEBUGNET_START:
682694618825SMark Johnston 		for (i = 0; i < scctx->isc_nrxqsets; i++) {
682794618825SMark Johnston 			rxq = &ctx->ifc_rxqs[i];
682894618825SMark Johnston 			for (j = 0; j < rxq->ifr_nfl; j++) {
682994618825SMark Johnston 				fl = &rxq->ifr_fl[j];
683094618825SMark Johnston 				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
683194618825SMark Johnston 			}
683294618825SMark Johnston 		}
683394618825SMark Johnston 		iflib_no_tx_batch = 1;
683494618825SMark Johnston 		break;
683594618825SMark Johnston 	default:
683694618825SMark Johnston 		break;
683794618825SMark Johnston 	}
683894618825SMark Johnston }
683994618825SMark Johnston 
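/*
 * Transmit a single debugnet frame directly on TX queue 0, bypassing the
 * mp_ring, and ring the doorbell immediately.
 */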
684094618825SMark Johnston static int
68417790c8c1SConrad Meyer iflib_debugnet_transmit(if_t ifp, struct mbuf *m)
684294618825SMark Johnston {
684394618825SMark Johnston 	if_ctx_t ctx;
684494618825SMark Johnston 	iflib_txq_t txq;
684594618825SMark Johnston 	int error;
684694618825SMark Johnston 
684794618825SMark Johnston 	ctx = if_getsoftc(ifp);
684894618825SMark Johnston 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
684994618825SMark Johnston 	    IFF_DRV_RUNNING)
685094618825SMark Johnston 		return (EBUSY);
685194618825SMark Johnston 
685294618825SMark Johnston 	txq = &ctx->ifc_txqs[0];
685394618825SMark Johnston 	error = iflib_encap(txq, &m);
685494618825SMark Johnston 	if (error == 0)
685594618825SMark Johnston 		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
685694618825SMark Johnston 	return (error);
685794618825SMark Johnston }
685894618825SMark Johnston 
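/*
 * Poll on behalf of debugnet: reclaim completed TX descriptors on queue 0
 * and run the RX path on every queue inside a network epoch section.
 */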
685994618825SMark Johnston static int
68607790c8c1SConrad Meyer iflib_debugnet_poll(if_t ifp, int count)
686194618825SMark Johnston {
68620b8df657SGleb Smirnoff 	struct epoch_tracker et;
686394618825SMark Johnston 	if_ctx_t ctx;
686494618825SMark Johnston 	if_softc_ctx_t scctx;
686594618825SMark Johnston 	iflib_txq_t txq;
686694618825SMark Johnston 	int i;
686794618825SMark Johnston 
686894618825SMark Johnston 	ctx = if_getsoftc(ifp);
686994618825SMark Johnston 	scctx = &ctx->ifc_softc_ctx;
687094618825SMark Johnston 
687194618825SMark Johnston 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
687294618825SMark Johnston 	    IFF_DRV_RUNNING)
687394618825SMark Johnston 		return (EBUSY);
687494618825SMark Johnston 
687594618825SMark Johnston 	txq = &ctx->ifc_txqs[0];
687694618825SMark Johnston 	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
687794618825SMark Johnston 
68780b8df657SGleb Smirnoff 	NET_EPOCH_ENTER(et);
687994618825SMark Johnston 	for (i = 0; i < scctx->isc_nrxqsets; i++)
688094618825SMark Johnston 		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
68810b8df657SGleb Smirnoff 	NET_EPOCH_EXIT(et);
688294618825SMark Johnston 	return (0);
688394618825SMark Johnston }
68847790c8c1SConrad Meyer #endif /* DEBUGNET */
6885