xref: /freebsd/sys/net/iflib.c (revision 8b8d90931d8089fc8329f9c67883b7ccd9266513)
14c7070dbSScott Long /*-
27b610b60SSean Bruno  * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
34c7070dbSScott Long  * All rights reserved.
44c7070dbSScott Long  *
54c7070dbSScott Long  * Redistribution and use in source and binary forms, with or without
64c7070dbSScott Long  * modification, are permitted provided that the following conditions are met:
74c7070dbSScott Long  *
84c7070dbSScott Long  *  1. Redistributions of source code must retain the above copyright notice,
94c7070dbSScott Long  *     this list of conditions and the following disclaimer.
104c7070dbSScott Long  *
114c7070dbSScott Long  *  2. Neither the name of Matthew Macy nor the names of its
124c7070dbSScott Long  *     contributors may be used to endorse or promote products derived from
134c7070dbSScott Long  *     this software without specific prior written permission.
144c7070dbSScott Long  *
154c7070dbSScott Long  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
164c7070dbSScott Long  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
174c7070dbSScott Long  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
184c7070dbSScott Long  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
194c7070dbSScott Long  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
204c7070dbSScott Long  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
214c7070dbSScott Long  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
224c7070dbSScott Long  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
234c7070dbSScott Long  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
244c7070dbSScott Long  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
254c7070dbSScott Long  * POSSIBILITY OF SUCH DAMAGE.
264c7070dbSScott Long  */
274c7070dbSScott Long 
284c7070dbSScott Long #include <sys/cdefs.h>
294c7070dbSScott Long __FBSDID("$FreeBSD$");
304c7070dbSScott Long 
31aaeb188aSBjoern A. Zeeb #include "opt_inet.h"
32aaeb188aSBjoern A. Zeeb #include "opt_inet6.h"
33aaeb188aSBjoern A. Zeeb #include "opt_acpi.h"
34b103855eSStephen Hurd #include "opt_sched.h"
35aaeb188aSBjoern A. Zeeb 
364c7070dbSScott Long #include <sys/param.h>
374c7070dbSScott Long #include <sys/types.h>
384c7070dbSScott Long #include <sys/bus.h>
394c7070dbSScott Long #include <sys/eventhandler.h>
4009f6ff4fSMatt Macy #include <sys/jail.h>
414c7070dbSScott Long #include <sys/kernel.h>
424c7070dbSScott Long #include <sys/lock.h>
4309f6ff4fSMatt Macy #include <sys/md5.h>
444c7070dbSScott Long #include <sys/mutex.h>
454c7070dbSScott Long #include <sys/module.h>
464c7070dbSScott Long #include <sys/kobj.h>
474c7070dbSScott Long #include <sys/rman.h>
4809f6ff4fSMatt Macy #include <sys/proc.h>
494c7070dbSScott Long #include <sys/sbuf.h>
504c7070dbSScott Long #include <sys/smp.h>
514c7070dbSScott Long #include <sys/socket.h>
5209f6ff4fSMatt Macy #include <sys/sockio.h>
534c7070dbSScott Long #include <sys/sysctl.h>
544c7070dbSScott Long #include <sys/syslog.h>
554c7070dbSScott Long #include <sys/taskqueue.h>
5623ac9029SStephen Hurd #include <sys/limits.h>
574c7070dbSScott Long 
584c7070dbSScott Long #include <net/if.h>
594c7070dbSScott Long #include <net/if_var.h>
604c7070dbSScott Long #include <net/if_types.h>
614c7070dbSScott Long #include <net/if_media.h>
624c7070dbSScott Long #include <net/bpf.h>
634c7070dbSScott Long #include <net/ethernet.h>
644c7070dbSScott Long #include <net/mp_ring.h>
6535e4e998SStephen Hurd #include <net/vnet.h>
664c7070dbSScott Long 
674c7070dbSScott Long #include <netinet/in.h>
684c7070dbSScott Long #include <netinet/in_pcb.h>
694c7070dbSScott Long #include <netinet/tcp_lro.h>
704c7070dbSScott Long #include <netinet/in_systm.h>
714c7070dbSScott Long #include <netinet/if_ether.h>
724c7070dbSScott Long #include <netinet/ip.h>
734c7070dbSScott Long #include <netinet/ip6.h>
744c7070dbSScott Long #include <netinet/tcp.h>
7535e4e998SStephen Hurd #include <netinet/ip_var.h>
7694618825SMark Johnston #include <netinet/netdump/netdump.h>
7735e4e998SStephen Hurd #include <netinet6/ip6_var.h>
784c7070dbSScott Long 
794c7070dbSScott Long #include <machine/bus.h>
804c7070dbSScott Long #include <machine/in_cksum.h>
814c7070dbSScott Long 
824c7070dbSScott Long #include <vm/vm.h>
834c7070dbSScott Long #include <vm/pmap.h>
844c7070dbSScott Long 
854c7070dbSScott Long #include <dev/led/led.h>
864c7070dbSScott Long #include <dev/pci/pcireg.h>
874c7070dbSScott Long #include <dev/pci/pcivar.h>
884c7070dbSScott Long #include <dev/pci/pci_private.h>
894c7070dbSScott Long 
904c7070dbSScott Long #include <net/iflib.h>
9109f6ff4fSMatt Macy #include <net/iflib_private.h>
924c7070dbSScott Long 
934c7070dbSScott Long #include "ifdi_if.h"
944c7070dbSScott Long 
954c7070dbSScott Long #if defined(__i386__) || defined(__amd64__)
964c7070dbSScott Long #include <sys/memdesc.h>
974c7070dbSScott Long #include <machine/bus.h>
984c7070dbSScott Long #include <machine/md_var.h>
994c7070dbSScott Long #include <machine/specialreg.h>
1004c7070dbSScott Long #include <x86/include/busdma_impl.h>
1014c7070dbSScott Long #include <x86/iommu/busdma_dmar.h>
1024c7070dbSScott Long #endif
1034c7070dbSScott Long 
10487890dbaSSean Bruno #include <sys/bitstring.h>
1054c7070dbSScott Long /*
10695246abbSSean Bruno  * enable accounting of every mbuf as it comes in to and goes out of
10795246abbSSean Bruno  * iflib's software descriptor references
1084c7070dbSScott Long  */
1094c7070dbSScott Long #define MEMORY_LOGGING 0
1104c7070dbSScott Long /*
1114c7070dbSScott Long  * Enable mbuf vectors for compressing long mbuf chains
1124c7070dbSScott Long  */
1134c7070dbSScott Long 
1144c7070dbSScott Long /*
1154c7070dbSScott Long  * NB:
1164c7070dbSScott Long  * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
1174c7070dbSScott Long  *   we prefetch needs to be determined by the time spent in m_free vis a vis
1184c7070dbSScott Long  *   the cost of a prefetch. This will of course vary based on the workload:
1194c7070dbSScott Long  *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
1204c7070dbSScott Long  *        is quite expensive, thus suggesting very little prefetch.
1214c7070dbSScott Long  *      - small packet forwarding which is just returning a single mbuf to
1224c7070dbSScott Long  *        UMA will typically be very fast vis a vis the cost of a memory
1234c7070dbSScott Long  *        access.
1244c7070dbSScott Long  */
1254c7070dbSScott Long 
1264c7070dbSScott Long 
1274c7070dbSScott Long /*
1284c7070dbSScott Long  * File organization:
1294c7070dbSScott Long  *  - private structures
1304c7070dbSScott Long  *  - iflib private utility functions
1314c7070dbSScott Long  *  - ifnet functions
1324c7070dbSScott Long  *  - vlan registry and other exported functions
1334c7070dbSScott Long  *  - iflib public core functions
1344c7070dbSScott Long  *
1354c7070dbSScott Long  *
1364c7070dbSScott Long  */
13709f6ff4fSMatt Macy MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
1384c7070dbSScott Long 
1394c7070dbSScott Long struct iflib_txq;
1404c7070dbSScott Long typedef struct iflib_txq *iflib_txq_t;
1414c7070dbSScott Long struct iflib_rxq;
1424c7070dbSScott Long typedef struct iflib_rxq *iflib_rxq_t;
1434c7070dbSScott Long struct iflib_fl;
1444c7070dbSScott Long typedef struct iflib_fl *iflib_fl_t;
1454c7070dbSScott Long 
1464ecb427aSSean Bruno struct iflib_ctx;
1474ecb427aSSean Bruno 
1482d873474SStephen Hurd static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
149dd7fbcf1SStephen Hurd static void iflib_timer(void *arg);
1502d873474SStephen Hurd 
1514c7070dbSScott Long typedef struct iflib_filter_info {
1524c7070dbSScott Long 	driver_filter_t *ifi_filter;
1534c7070dbSScott Long 	void *ifi_filter_arg;
1544c7070dbSScott Long 	struct grouptask *ifi_task;
15595246abbSSean Bruno 	void *ifi_ctx;
1564c7070dbSScott Long } *iflib_filter_info_t;
1574c7070dbSScott Long 
1584c7070dbSScott Long struct iflib_ctx {
1594c7070dbSScott Long 	KOBJ_FIELDS;
1604c7070dbSScott Long 	/*
1614c7070dbSScott Long 	 * Pointer to hardware driver's softc
1624c7070dbSScott Long 	 */
1634c7070dbSScott Long 	void *ifc_softc;
1644c7070dbSScott Long 	device_t ifc_dev;
1654c7070dbSScott Long 	if_t ifc_ifp;
1664c7070dbSScott Long 
1674c7070dbSScott Long 	cpuset_t ifc_cpus;
1684c7070dbSScott Long 	if_shared_ctx_t ifc_sctx;
1694c7070dbSScott Long 	struct if_softc_ctx ifc_softc_ctx;
1704c7070dbSScott Long 
171aa8a24d3SStephen Hurd 	struct sx ifc_ctx_sx;
1727b610b60SSean Bruno 	struct mtx ifc_state_mtx;
1734c7070dbSScott Long 
1744c7070dbSScott Long 	uint16_t ifc_nhwtxqs;
1754c7070dbSScott Long 
1764c7070dbSScott Long 	iflib_txq_t ifc_txqs;
1774c7070dbSScott Long 	iflib_rxq_t ifc_rxqs;
1784c7070dbSScott Long 	uint32_t ifc_if_flags;
1794c7070dbSScott Long 	uint32_t ifc_flags;
1804c7070dbSScott Long 	uint32_t ifc_max_fl_buf_size;
1814c7070dbSScott Long 	int ifc_in_detach;
1824c7070dbSScott Long 
1834c7070dbSScott Long 	int ifc_link_state;
1844c7070dbSScott Long 	int ifc_link_irq;
1854c7070dbSScott Long 	int ifc_watchdog_events;
1864c7070dbSScott Long 	struct cdev *ifc_led_dev;
1874c7070dbSScott Long 	struct resource *ifc_msix_mem;
1884c7070dbSScott Long 
1894c7070dbSScott Long 	struct if_irq ifc_legacy_irq;
1904c7070dbSScott Long 	struct grouptask ifc_admin_task;
1914c7070dbSScott Long 	struct grouptask ifc_vflr_task;
1924c7070dbSScott Long 	struct iflib_filter_info ifc_filter_info;
1934c7070dbSScott Long 	struct ifmedia	ifc_media;
1944c7070dbSScott Long 
1954c7070dbSScott Long 	struct sysctl_oid *ifc_sysctl_node;
1964c7070dbSScott Long 	uint16_t ifc_sysctl_ntxqs;
1974c7070dbSScott Long 	uint16_t ifc_sysctl_nrxqs;
19823ac9029SStephen Hurd 	uint16_t ifc_sysctl_qs_eq_override;
199f4d2154eSStephen Hurd 	uint16_t ifc_sysctl_rx_budget;
200fe51d4cdSStephen Hurd 	uint16_t ifc_sysctl_tx_abdicate;
20123ac9029SStephen Hurd 
20295246abbSSean Bruno 	qidx_t ifc_sysctl_ntxds[8];
20395246abbSSean Bruno 	qidx_t ifc_sysctl_nrxds[8];
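	/* driver-supplied descriptor manipulation methods, aliased below */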
2044c7070dbSScott Long 	struct if_txrx ifc_txrx;
2054c7070dbSScott Long #define isc_txd_encap  ifc_txrx.ift_txd_encap
2064c7070dbSScott Long #define isc_txd_flush  ifc_txrx.ift_txd_flush
2074c7070dbSScott Long #define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
2084c7070dbSScott Long #define isc_rxd_available ifc_txrx.ift_rxd_available
2094c7070dbSScott Long #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
2104c7070dbSScott Long #define isc_rxd_refill ifc_txrx.ift_rxd_refill
2114c7070dbSScott Long #define isc_rxd_flush ifc_txrx.ift_rxd_flush
2144c7070dbSScott Long #define isc_legacy_intr ifc_txrx.ift_legacy_intr
2154c7070dbSScott Long 	eventhandler_tag ifc_vlan_attach_event;
2164c7070dbSScott Long 	eventhandler_tag ifc_vlan_detach_event;
2174c7070dbSScott Long 	uint8_t ifc_mac[ETHER_ADDR_LEN];
2184c7070dbSScott Long 	char ifc_mtx_name[16];
2194c7070dbSScott Long };
2204c7070dbSScott Long 
2214c7070dbSScott Long 
2224c7070dbSScott Long void *
2234c7070dbSScott Long iflib_get_softc(if_ctx_t ctx)
2244c7070dbSScott Long {
2254c7070dbSScott Long 
2264c7070dbSScott Long 	return (ctx->ifc_softc);
2274c7070dbSScott Long }
2284c7070dbSScott Long 
2294c7070dbSScott Long device_t
2304c7070dbSScott Long iflib_get_dev(if_ctx_t ctx)
2314c7070dbSScott Long {
2324c7070dbSScott Long 
2334c7070dbSScott Long 	return (ctx->ifc_dev);
2344c7070dbSScott Long }
2354c7070dbSScott Long 
2364c7070dbSScott Long if_t
2374c7070dbSScott Long iflib_get_ifp(if_ctx_t ctx)
2384c7070dbSScott Long {
2394c7070dbSScott Long 
2404c7070dbSScott Long 	return (ctx->ifc_ifp);
2414c7070dbSScott Long }
2424c7070dbSScott Long 
2434c7070dbSScott Long struct ifmedia *
2444c7070dbSScott Long iflib_get_media(if_ctx_t ctx)
2454c7070dbSScott Long {
2464c7070dbSScott Long 
2474c7070dbSScott Long 	return (&ctx->ifc_media);
2484c7070dbSScott Long }
2494c7070dbSScott Long 
25009f6ff4fSMatt Macy uint32_t
25109f6ff4fSMatt Macy iflib_get_flags(if_ctx_t ctx)
25209f6ff4fSMatt Macy {
25309f6ff4fSMatt Macy 	return (ctx->ifc_flags);
25409f6ff4fSMatt Macy }
25509f6ff4fSMatt Macy 
25609f6ff4fSMatt Macy void
25709f6ff4fSMatt Macy iflib_set_detach(if_ctx_t ctx)
25809f6ff4fSMatt Macy {
25909f6ff4fSMatt Macy 	ctx->ifc_in_detach = 1;
26009f6ff4fSMatt Macy }
26109f6ff4fSMatt Macy 
2624c7070dbSScott Long void
2634c7070dbSScott Long iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
2644c7070dbSScott Long {
2654c7070dbSScott Long 
2664c7070dbSScott Long 	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
2674c7070dbSScott Long }
2684c7070dbSScott Long 
2694c7070dbSScott Long if_softc_ctx_t
2704c7070dbSScott Long iflib_get_softc_ctx(if_ctx_t ctx)
2714c7070dbSScott Long {
2724c7070dbSScott Long 
2734c7070dbSScott Long 	return (&ctx->ifc_softc_ctx);
2744c7070dbSScott Long }
2754c7070dbSScott Long 
2764c7070dbSScott Long if_shared_ctx_t
2774c7070dbSScott Long iflib_get_sctx(if_ctx_t ctx)
2784c7070dbSScott Long {
2794c7070dbSScott Long 
2804c7070dbSScott Long 	return (ctx->ifc_sctx);
2814c7070dbSScott Long }
2824c7070dbSScott Long 
28395246abbSSean Bruno #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
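/*
 * CACHE_PTR_INCREMENT is the number of pointer-sized slots in a cache line;
 * CACHE_PTR_NEXT rounds a pointer up to the next cache-line boundary.
 */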
2844c7070dbSScott Long #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
2855e888388SSean Bruno #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
2864c7070dbSScott Long 
2874c7070dbSScott Long #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
2884c7070dbSScott Long #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
2894c7070dbSScott Long 
2904c7070dbSScott Long #define RX_SW_DESC_MAP_CREATED	(1 << 0)
291ab2e3f79SStephen Hurd #define TX_SW_DESC_MAP_CREATED	(1 << 1)
292ab2e3f79SStephen Hurd #define RX_SW_DESC_INUSE        (1 << 3)
293ab2e3f79SStephen Hurd #define TX_SW_DESC_MAPPED       (1 << 4)
2944c7070dbSScott Long 
2952cc3b2eeSGleb Smirnoff #define	M_TOOBIG		M_PROTO1
2965c5ca36cSSean Bruno 
297e035717eSSean Bruno typedef struct iflib_sw_rx_desc_array {
298e035717eSSean Bruno 	bus_dmamap_t	*ifsd_map;         /* bus_dma maps for packet */
299e035717eSSean Bruno 	struct mbuf	**ifsd_m;           /* pkthdr mbufs */
300e035717eSSean Bruno 	caddr_t		*ifsd_cl;          /* direct cluster pointer for rx */
301e035717eSSean Bruno 	uint8_t		*ifsd_flags;
302e035717eSSean Bruno } iflib_rxsd_array_t;
3034c7070dbSScott Long 
3044c7070dbSScott Long typedef struct iflib_sw_tx_desc_array {
3054c7070dbSScott Long 	bus_dmamap_t    *ifsd_map;         /* bus_dma maps for packet */
3064c7070dbSScott Long 	struct mbuf    **ifsd_m;           /* pkthdr mbufs */
3074c7070dbSScott Long 	uint8_t		*ifsd_flags;
30895246abbSSean Bruno } if_txsd_vec_t;
3094c7070dbSScott Long 
3104c7070dbSScott Long 
3114c7070dbSScott Long /* magic number that should be high enough for any hardware */
3124c7070dbSScott Long #define IFLIB_MAX_TX_SEGS		128
31309b57b7fSStephen Hurd /* bnxt supports 64 with hardware LRO enabled */
31409b57b7fSStephen Hurd #define IFLIB_MAX_RX_SEGS		64
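/*
 * Received data up to about this size may be copied into a fresh mbuf
 * rather than handing up the rx cluster.
 */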
31595246abbSSean Bruno #define IFLIB_RX_COPY_THRESH		128
3164c7070dbSScott Long #define IFLIB_MAX_RX_REFRESH		32
31795246abbSSean Bruno /* The minimum descriptors per second before we start coalescing */
31895246abbSSean Bruno #define IFLIB_MIN_DESC_SEC		16384
31995246abbSSean Bruno #define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
3204c7070dbSScott Long #define IFLIB_QUEUE_IDLE		0
3214c7070dbSScott Long #define IFLIB_QUEUE_HUNG		1
3224c7070dbSScott Long #define IFLIB_QUEUE_WORKING		2
32395246abbSSean Bruno /* maximum number of txqs that can share an rx interrupt */
32495246abbSSean Bruno #define IFLIB_MAX_TX_SHARED_INTR	4
3254c7070dbSScott Long 
32695246abbSSean Bruno /* this should really scale with ring size - this is a fairly arbitrary value */
32795246abbSSean Bruno #define TX_BATCH_SIZE			32
3284c7070dbSScott Long 
3294c7070dbSScott Long #define IFLIB_RESTART_BUDGET		8
3304c7070dbSScott Long 
3314c7070dbSScott Long 
3324c7070dbSScott Long #define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
3334c7070dbSScott Long 				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
3344c7070dbSScott Long 				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
3354c7070dbSScott Long struct iflib_txq {
33695246abbSSean Bruno 	qidx_t		ift_in_use;
33795246abbSSean Bruno 	qidx_t		ift_cidx;
33895246abbSSean Bruno 	qidx_t		ift_cidx_processed;
33995246abbSSean Bruno 	qidx_t		ift_pidx;
3404c7070dbSScott Long 	uint8_t		ift_gen;
34123ac9029SStephen Hurd 	uint8_t		ift_br_offset;
34295246abbSSean Bruno 	uint16_t	ift_npending;
34395246abbSSean Bruno 	uint16_t	ift_db_pending;
34495246abbSSean Bruno 	uint16_t	ift_rs_pending;
3454c7070dbSScott Long 	/* implicit pad */
34695246abbSSean Bruno 	uint8_t		ift_txd_size[8];
3474c7070dbSScott Long 	uint64_t	ift_processed;
3484c7070dbSScott Long 	uint64_t	ift_cleaned;
34995246abbSSean Bruno 	uint64_t	ift_cleaned_prev;
3504c7070dbSScott Long #if MEMORY_LOGGING
3514c7070dbSScott Long 	uint64_t	ift_enqueued;
3524c7070dbSScott Long 	uint64_t	ift_dequeued;
3534c7070dbSScott Long #endif
3544c7070dbSScott Long 	uint64_t	ift_no_tx_dma_setup;
3554c7070dbSScott Long 	uint64_t	ift_no_desc_avail;
3564c7070dbSScott Long 	uint64_t	ift_mbuf_defrag_failed;
3574c7070dbSScott Long 	uint64_t	ift_mbuf_defrag;
3584c7070dbSScott Long 	uint64_t	ift_map_failed;
3594c7070dbSScott Long 	uint64_t	ift_txd_encap_efbig;
3604c7070dbSScott Long 	uint64_t	ift_pullups;
361dd7fbcf1SStephen Hurd 	uint64_t	ift_last_timer_tick;
3624c7070dbSScott Long 
3634c7070dbSScott Long 	struct mtx	ift_mtx;
3644c7070dbSScott Long 	struct mtx	ift_db_mtx;
3654c7070dbSScott Long 
3664c7070dbSScott Long 	/* constant values */
3674c7070dbSScott Long 	if_ctx_t	ift_ctx;
36895246abbSSean Bruno 	struct ifmp_ring        *ift_br;
3694c7070dbSScott Long 	struct grouptask	ift_task;
37095246abbSSean Bruno 	qidx_t		ift_size;
3714c7070dbSScott Long 	uint16_t	ift_id;
3724c7070dbSScott Long 	struct callout	ift_timer;
3734c7070dbSScott Long 
37495246abbSSean Bruno 	if_txsd_vec_t	ift_sds;
3754c7070dbSScott Long 	uint8_t		ift_qstatus;
3764c7070dbSScott Long 	uint8_t		ift_closed;
37795246abbSSean Bruno 	uint8_t		ift_update_freq;
3784c7070dbSScott Long 	struct iflib_filter_info ift_filter_info;
3794c7070dbSScott Long 	bus_dma_tag_t		ift_desc_tag;
3804c7070dbSScott Long 	bus_dma_tag_t		ift_tso_desc_tag;
3814c7070dbSScott Long 	iflib_dma_info_t	ift_ifdi;
3824c7070dbSScott Long #define MTX_NAME_LEN 16
3834c7070dbSScott Long 	char                    ift_mtx_name[MTX_NAME_LEN];
3844c7070dbSScott Long 	char                    ift_db_mtx_name[MTX_NAME_LEN];
3854c7070dbSScott Long 	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
3861248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
3871248952aSSean Bruno 	uint64_t ift_cpu_exec_count[256];
3881248952aSSean Bruno #endif
3894c7070dbSScott Long } __aligned(CACHE_LINE_SIZE);
3904c7070dbSScott Long 
3914c7070dbSScott Long struct iflib_fl {
39295246abbSSean Bruno 	qidx_t		ifl_cidx;
39395246abbSSean Bruno 	qidx_t		ifl_pidx;
39495246abbSSean Bruno 	qidx_t		ifl_credits;
3954c7070dbSScott Long 	uint8_t		ifl_gen;
39695246abbSSean Bruno 	uint8_t		ifl_rxd_size;
3974c7070dbSScott Long #if MEMORY_LOGGING
3984c7070dbSScott Long 	uint64_t	ifl_m_enqueued;
3994c7070dbSScott Long 	uint64_t	ifl_m_dequeued;
4004c7070dbSScott Long 	uint64_t	ifl_cl_enqueued;
4014c7070dbSScott Long 	uint64_t	ifl_cl_dequeued;
4024c7070dbSScott Long #endif
4034c7070dbSScott Long 	/* implicit pad */
4044c7070dbSScott Long 
40587890dbaSSean Bruno 	bitstr_t 	*ifl_rx_bitmap;
40687890dbaSSean Bruno 	qidx_t		ifl_fragidx;
4074c7070dbSScott Long 	/* constant */
40895246abbSSean Bruno 	qidx_t		ifl_size;
4094c7070dbSScott Long 	uint16_t	ifl_buf_size;
4104c7070dbSScott Long 	uint16_t	ifl_cltype;
4114c7070dbSScott Long 	uma_zone_t	ifl_zone;
412e035717eSSean Bruno 	iflib_rxsd_array_t	ifl_sds;
4134c7070dbSScott Long 	iflib_rxq_t	ifl_rxq;
4144c7070dbSScott Long 	uint8_t		ifl_id;
4154c7070dbSScott Long 	bus_dma_tag_t           ifl_desc_tag;
4164c7070dbSScott Long 	iflib_dma_info_t	ifl_ifdi;
4174c7070dbSScott Long 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
4184c7070dbSScott Long 	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
41995246abbSSean Bruno 	qidx_t	ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
4204c7070dbSScott Long }  __aligned(CACHE_LINE_SIZE);
4214c7070dbSScott Long 
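/*
 * Return the number of descriptors in use in a ring of the given size,
 * based on the consumer and producer indexes; the generation bit
 * distinguishes the completely empty case from the completely full one
 * when pidx == cidx.
 */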
42295246abbSSean Bruno static inline qidx_t
42395246abbSSean Bruno get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
4244c7070dbSScott Long {
42595246abbSSean Bruno 	qidx_t used;
4264c7070dbSScott Long 
4274c7070dbSScott Long 	if (pidx > cidx)
4284c7070dbSScott Long 		used = pidx - cidx;
4294c7070dbSScott Long 	else if (pidx < cidx)
4304c7070dbSScott Long 		used = size - cidx + pidx;
4314c7070dbSScott Long 	else if (gen == 0 && pidx == cidx)
4324c7070dbSScott Long 		used = 0;
4334c7070dbSScott Long 	else if (gen == 1 && pidx == cidx)
4344c7070dbSScott Long 		used = size;
4354c7070dbSScott Long 	else
4364c7070dbSScott Long 		panic("bad state");
4374c7070dbSScott Long 
4384c7070dbSScott Long 	return (used);
4394c7070dbSScott Long }
4404c7070dbSScott Long 
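/* Number of free descriptors remaining in the tx ring. */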
4414c7070dbSScott Long #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
4424c7070dbSScott Long 
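/*
 * Distance from tail to head in a ring of size wrap, accounting for
 * index wraparound.
 */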
4434c7070dbSScott Long #define IDXDIFF(head, tail, wrap) \
4444c7070dbSScott Long 	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
4454c7070dbSScott Long 
4464c7070dbSScott Long struct iflib_rxq {
4474c7070dbSScott Long 	/* If there is a separate completion queue -
4484c7070dbSScott Long 	 * these are the cq cidx and pidx. Otherwise
4494c7070dbSScott Long 	 * these are unused.
4504c7070dbSScott Long 	 */
45195246abbSSean Bruno 	qidx_t		ifr_size;
45295246abbSSean Bruno 	qidx_t		ifr_cq_cidx;
45395246abbSSean Bruno 	qidx_t		ifr_cq_pidx;
4544c7070dbSScott Long 	uint8_t		ifr_cq_gen;
45523ac9029SStephen Hurd 	uint8_t		ifr_fl_offset;
4564c7070dbSScott Long 
4574c7070dbSScott Long 	if_ctx_t	ifr_ctx;
4584c7070dbSScott Long 	iflib_fl_t	ifr_fl;
4594c7070dbSScott Long 	uint64_t	ifr_rx_irq;
4604c7070dbSScott Long 	uint16_t	ifr_id;
4614c7070dbSScott Long 	uint8_t		ifr_lro_enabled;
4624c7070dbSScott Long 	uint8_t		ifr_nfl;
46395246abbSSean Bruno 	uint8_t		ifr_ntxqirq;
46495246abbSSean Bruno 	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
4654c7070dbSScott Long 	struct lro_ctrl			ifr_lc;
4664c7070dbSScott Long 	struct grouptask        ifr_task;
4674c7070dbSScott Long 	struct iflib_filter_info ifr_filter_info;
4684c7070dbSScott Long 	iflib_dma_info_t		ifr_ifdi;
469ab2e3f79SStephen Hurd 
4704c7070dbSScott Long 	/* dynamically allocate if any drivers need a value substantially larger than this */
4714c7070dbSScott Long 	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
4721248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
4731248952aSSean Bruno 	uint64_t ifr_cpu_exec_count[256];
4741248952aSSean Bruno #endif
4754c7070dbSScott Long }  __aligned(CACHE_LINE_SIZE);
4764c7070dbSScott Long 
47795246abbSSean Bruno typedef struct if_rxsd {
47895246abbSSean Bruno 	caddr_t *ifsd_cl;
47995246abbSSean Bruno 	struct mbuf **ifsd_m;
48095246abbSSean Bruno 	iflib_fl_t ifsd_fl;
48195246abbSSean Bruno 	qidx_t ifsd_cidx;
48295246abbSSean Bruno } *if_rxsd_t;
48395246abbSSean Bruno 
48495246abbSSean Bruno /* multiple of word size */
48595246abbSSean Bruno #ifdef __LP64__
486ab2e3f79SStephen Hurd #define PKT_INFO_SIZE	6
48795246abbSSean Bruno #define RXD_INFO_SIZE	5
48895246abbSSean Bruno #define PKT_TYPE uint64_t
48995246abbSSean Bruno #else
490ab2e3f79SStephen Hurd #define PKT_INFO_SIZE	11
49195246abbSSean Bruno #define RXD_INFO_SIZE	8
49295246abbSSean Bruno #define PKT_TYPE uint32_t
49395246abbSSean Bruno #endif
49495246abbSSean Bruno #define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
49595246abbSSean Bruno #define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
49695246abbSSean Bruno 
49795246abbSSean Bruno typedef struct if_pkt_info_pad {
49895246abbSSean Bruno 	PKT_TYPE pkt_val[PKT_INFO_SIZE];
49995246abbSSean Bruno } *if_pkt_info_pad_t;
50095246abbSSean Bruno typedef struct if_rxd_info_pad {
50195246abbSSean Bruno 	PKT_TYPE rxd_val[RXD_INFO_SIZE];
50295246abbSSean Bruno } *if_rxd_info_pad_t;
50395246abbSSean Bruno 
50495246abbSSean Bruno CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
50595246abbSSean Bruno CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
50695246abbSSean Bruno 
50795246abbSSean Bruno 
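/*
 * pkt_info_zero() and rxd_info_zero() (below) clear their structures one
 * machine word at a time through the *_pad overlays; the CTASSERTs above
 * guarantee that the overlays exactly cover the real structures.
 */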
50895246abbSSean Bruno static inline void
50995246abbSSean Bruno pkt_info_zero(if_pkt_info_t pi)
51095246abbSSean Bruno {
51195246abbSSean Bruno 	if_pkt_info_pad_t pi_pad;
51295246abbSSean Bruno 
51395246abbSSean Bruno 	pi_pad = (if_pkt_info_pad_t)pi;
51495246abbSSean Bruno 	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
51595246abbSSean Bruno 	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
51695246abbSSean Bruno #ifndef __LP64__
517ab2e3f79SStephen Hurd 	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
518ab2e3f79SStephen Hurd 	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
51995246abbSSean Bruno #endif
52095246abbSSean Bruno }
52195246abbSSean Bruno 
52209f6ff4fSMatt Macy static device_method_t iflib_pseudo_methods[] = {
52309f6ff4fSMatt Macy 	DEVMETHOD(device_attach, noop_attach),
52409f6ff4fSMatt Macy 	DEVMETHOD(device_detach, iflib_pseudo_detach),
52509f6ff4fSMatt Macy 	DEVMETHOD_END
52609f6ff4fSMatt Macy };
52709f6ff4fSMatt Macy 
52809f6ff4fSMatt Macy driver_t iflib_pseudodriver = {
52909f6ff4fSMatt Macy 	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
53009f6ff4fSMatt Macy };
53109f6ff4fSMatt Macy 
53295246abbSSean Bruno static inline void
53395246abbSSean Bruno rxd_info_zero(if_rxd_info_t ri)
53495246abbSSean Bruno {
53595246abbSSean Bruno 	if_rxd_info_pad_t ri_pad;
53695246abbSSean Bruno 	int i;
53795246abbSSean Bruno 
53895246abbSSean Bruno 	ri_pad = (if_rxd_info_pad_t)ri;
53995246abbSSean Bruno 	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
54095246abbSSean Bruno 		ri_pad->rxd_val[i] = 0;
54195246abbSSean Bruno 		ri_pad->rxd_val[i+1] = 0;
54295246abbSSean Bruno 		ri_pad->rxd_val[i+2] = 0;
54395246abbSSean Bruno 		ri_pad->rxd_val[i+3] = 0;
54495246abbSSean Bruno 	}
54595246abbSSean Bruno #ifdef __LP64__
54695246abbSSean Bruno 	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
54795246abbSSean Bruno #endif
54895246abbSSean Bruno }
54995246abbSSean Bruno 
5504c7070dbSScott Long /*
5514c7070dbSScott Long  * Only allow a single packet to take up most 1/nth of the tx ring
5524c7070dbSScott Long  */
5534c7070dbSScott Long #define MAX_SINGLE_PACKET_FRACTION 12
5544c7070dbSScott Long #define IF_BAD_DMA (bus_addr_t)-1
5554c7070dbSScott Long 
5564c7070dbSScott Long #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
5574c7070dbSScott Long 
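/*
 * The context lock is a sleepable sx lock; the state lock further below
 * is a non-sleeping mutex intended for short state updates.
 */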
558aa8a24d3SStephen Hurd #define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
559aa8a24d3SStephen Hurd #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
560aa8a24d3SStephen Hurd #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
561aa8a24d3SStephen Hurd #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
5624c7070dbSScott Long 
5637b610b60SSean Bruno 
5647b610b60SSean Bruno #define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
5657b610b60SSean Bruno #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
5667b610b60SSean Bruno #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
5677b610b60SSean Bruno #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
5687b610b60SSean Bruno 
569ab2e3f79SStephen Hurd 
5704c7070dbSScott Long 
5714c7070dbSScott Long #define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
5724c7070dbSScott Long #define CALLOUT_UNLOCK(txq) 	mtx_unlock(&txq->ift_mtx)
5734c7070dbSScott Long 
5744c7070dbSScott Long 
5754c7070dbSScott Long /* Our boot-time initialization hook */
5764c7070dbSScott Long static int	iflib_module_event_handler(module_t, int, void *);
5774c7070dbSScott Long 
5784c7070dbSScott Long static moduledata_t iflib_moduledata = {
5794c7070dbSScott Long 	"iflib",
5804c7070dbSScott Long 	iflib_module_event_handler,
5814c7070dbSScott Long 	NULL
5824c7070dbSScott Long };
5834c7070dbSScott Long 
5844c7070dbSScott Long DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
5854c7070dbSScott Long MODULE_VERSION(iflib, 1);
5864c7070dbSScott Long 
5874c7070dbSScott Long MODULE_DEPEND(iflib, pci, 1, 1, 1);
5884c7070dbSScott Long MODULE_DEPEND(iflib, ether, 1, 1, 1);
5894c7070dbSScott Long 
590ab2e3f79SStephen Hurd TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
591ab2e3f79SStephen Hurd TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
592ab2e3f79SStephen Hurd 
5934c7070dbSScott Long #ifndef IFLIB_DEBUG_COUNTERS
5944c7070dbSScott Long #ifdef INVARIANTS
5954c7070dbSScott Long #define IFLIB_DEBUG_COUNTERS 1
5964c7070dbSScott Long #else
5974c7070dbSScott Long #define IFLIB_DEBUG_COUNTERS 0
5984c7070dbSScott Long #endif /* !INVARIANTS */
5994c7070dbSScott Long #endif
6004c7070dbSScott Long 
601ab2e3f79SStephen Hurd static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
602ab2e3f79SStephen Hurd                    "iflib driver parameters");
603ab2e3f79SStephen Hurd 
6044c7070dbSScott Long /*
6054c7070dbSScott Long  * XXX need to ensure that this can't accidentally cause the head to be moved backwards
6064c7070dbSScott Long  */
6074c7070dbSScott Long static int iflib_min_tx_latency = 0;
6084c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
609da69b8f9SSean Bruno 		   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
61095246abbSSean Bruno static int iflib_no_tx_batch = 0;
61195246abbSSean Bruno SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
61295246abbSSean Bruno 		   &iflib_no_tx_batch, 0, "process transmits immediately instead of batching them, at the possible expense of throughput");
6134c7070dbSScott Long 
6144c7070dbSScott Long 
6154c7070dbSScott Long #if IFLIB_DEBUG_COUNTERS
6164c7070dbSScott Long 
6174c7070dbSScott Long static int iflib_tx_seen;
6184c7070dbSScott Long static int iflib_tx_sent;
6194c7070dbSScott Long static int iflib_tx_encap;
6204c7070dbSScott Long static int iflib_rx_allocs;
6214c7070dbSScott Long static int iflib_fl_refills;
6224c7070dbSScott Long static int iflib_fl_refills_large;
6234c7070dbSScott Long static int iflib_tx_frees;
6244c7070dbSScott Long 
6254c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
6264c7070dbSScott Long 		   &iflib_tx_seen, 0, "# tx mbufs seen");
6274c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
6284c7070dbSScott Long 		   &iflib_tx_sent, 0, "# tx mbufs sent");
6294c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
6304c7070dbSScott Long 		   &iflib_tx_encap, 0, "# tx mbufs encapped");
6314c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
6324c7070dbSScott Long 		   &iflib_tx_frees, 0, "# tx frees");
6334c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
6344c7070dbSScott Long 		   &iflib_rx_allocs, 0, "# rx allocations");
6354c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
6364c7070dbSScott Long 		   &iflib_fl_refills, 0, "# refills");
6374c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
6384c7070dbSScott Long 		   &iflib_fl_refills_large, 0, "# large refills");
6394c7070dbSScott Long 
6404c7070dbSScott Long 
6414c7070dbSScott Long static int iflib_txq_drain_flushing;
6424c7070dbSScott Long static int iflib_txq_drain_oactive;
6434c7070dbSScott Long static int iflib_txq_drain_notready;
6444c7070dbSScott Long static int iflib_txq_drain_encapfail;
6454c7070dbSScott Long 
6464c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
6474c7070dbSScott Long 		   &iflib_txq_drain_flushing, 0, "# drain flushes");
6484c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
6494c7070dbSScott Long 		   &iflib_txq_drain_oactive, 0, "# drain oactives");
6504c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
6514c7070dbSScott Long 		   &iflib_txq_drain_notready, 0, "# drain notready");
6524c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
6534c7070dbSScott Long 		   &iflib_txq_drain_encapfail, 0, "# drain encap fails");
6544c7070dbSScott Long 
6554c7070dbSScott Long 
6564c7070dbSScott Long static int iflib_encap_load_mbuf_fail;
657d14c853bSStephen Hurd static int iflib_encap_pad_mbuf_fail;
6584c7070dbSScott Long static int iflib_encap_txq_avail_fail;
6594c7070dbSScott Long static int iflib_encap_txd_encap_fail;
6604c7070dbSScott Long 
6614c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
6624c7070dbSScott Long 		   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
663d14c853bSStephen Hurd SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
664d14c853bSStephen Hurd 		   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
6654c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
6664c7070dbSScott Long 		   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
6674c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
6684c7070dbSScott Long 		   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
6694c7070dbSScott Long 
6704c7070dbSScott Long static int iflib_task_fn_rxs;
6714c7070dbSScott Long static int iflib_rx_intr_enables;
6724c7070dbSScott Long static int iflib_fast_intrs;
6734c7070dbSScott Long static int iflib_intr_link;
6744c7070dbSScott Long static int iflib_intr_msix;
6754c7070dbSScott Long static int iflib_rx_unavail;
6764c7070dbSScott Long static int iflib_rx_ctx_inactive;
6774c7070dbSScott Long static int iflib_rx_zero_len;
6784c7070dbSScott Long static int iflib_rx_if_input;
6794c7070dbSScott Long static int iflib_rx_mbuf_null;
6804c7070dbSScott Long static int iflib_rxd_flush;
6814c7070dbSScott Long 
6824c7070dbSScott Long static int iflib_verbose_debug;
6834c7070dbSScott Long 
6844c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
6854c7070dbSScott Long 		   &iflib_intr_link, 0, "# intr link calls");
6864c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
6874c7070dbSScott Long 		   &iflib_intr_msix, 0, "# intr msix calls");
6884c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
6894c7070dbSScott Long 		   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
6904c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
6914c7070dbSScott Long 		   &iflib_rx_intr_enables, 0, "# rx intr enables");
6924c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
6934c7070dbSScott Long 		   &iflib_fast_intrs, 0, "# fast_intr calls");
6944c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
6954c7070dbSScott Long 		   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
6964c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
6974c7070dbSScott Long 		   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
6984c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
6994c7070dbSScott Long 		   &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
7004c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
7014c7070dbSScott Long 		   &iflib_rx_if_input, 0, "# times rxeof called if_input");
7024c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
7034c7070dbSScott Long 		   &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
7044c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
7054c7070dbSScott Long 	         &iflib_rxd_flush, 0, "# times rxd_flush called");
7064c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
7074c7070dbSScott Long 		   &iflib_verbose_debug, 0, "enable verbose debugging");
7084c7070dbSScott Long 
7094c7070dbSScott Long #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
710da69b8f9SSean Bruno static void
711da69b8f9SSean Bruno iflib_debug_reset(void)
712da69b8f9SSean Bruno {
713da69b8f9SSean Bruno 	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
714da69b8f9SSean Bruno 		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
715da69b8f9SSean Bruno 		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
716da69b8f9SSean Bruno 		iflib_txq_drain_notready = iflib_txq_drain_encapfail =
717d14c853bSStephen Hurd 		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
718d14c853bSStephen Hurd 		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
719d14c853bSStephen Hurd 		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
720d14c853bSStephen Hurd 		iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
721da69b8f9SSean Bruno 		iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
722da69b8f9SSean Bruno 		iflib_rx_mbuf_null = iflib_rxd_flush = 0;
723da69b8f9SSean Bruno }
7244c7070dbSScott Long 
7254c7070dbSScott Long #else
7264c7070dbSScott Long #define DBG_COUNTER_INC(name)
727da69b8f9SSean Bruno static void iflib_debug_reset(void) {}
7284c7070dbSScott Long #endif
7294c7070dbSScott Long 
7304c7070dbSScott Long #define IFLIB_DEBUG 0
7314c7070dbSScott Long 
7324c7070dbSScott Long static void iflib_tx_structures_free(if_ctx_t ctx);
7334c7070dbSScott Long static void iflib_rx_structures_free(if_ctx_t ctx);
7344c7070dbSScott Long static int iflib_queues_alloc(if_ctx_t ctx);
7354c7070dbSScott Long static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
73695246abbSSean Bruno static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
7374c7070dbSScott Long static int iflib_qset_structures_setup(if_ctx_t ctx);
7384c7070dbSScott Long static int iflib_msix_init(if_ctx_t ctx);
7393e0e6330SStephen Hurd static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
7404c7070dbSScott Long static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
7414c7070dbSScott Long static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
7424c7070dbSScott Long static int iflib_register(if_ctx_t);
7434c7070dbSScott Long static void iflib_init_locked(if_ctx_t ctx);
7444c7070dbSScott Long static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
7454c7070dbSScott Long static void iflib_add_device_sysctl_post(if_ctx_t ctx);
746da69b8f9SSean Bruno static void iflib_ifmp_purge(iflib_txq_t txq);
7471248952aSSean Bruno static void _iflib_pre_assert(if_softc_ctx_t scctx);
74895246abbSSean Bruno static void iflib_if_init_locked(if_ctx_t ctx);
74995246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
75095246abbSSean Bruno static struct mbuf * iflib_fixup_rx(struct mbuf *m);
75195246abbSSean Bruno #endif
7524c7070dbSScott Long 
75394618825SMark Johnston NETDUMP_DEFINE(iflib);
75494618825SMark Johnston 
7554c7070dbSScott Long #ifdef DEV_NETMAP
7564c7070dbSScott Long #include <sys/selinfo.h>
7574c7070dbSScott Long #include <net/netmap.h>
7584c7070dbSScott Long #include <dev/netmap/netmap_kern.h>
7594c7070dbSScott Long 
7604c7070dbSScott Long MODULE_DEPEND(iflib, netmap, 1, 1, 1);
7614c7070dbSScott Long 
7622d873474SStephen Hurd static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);
7632d873474SStephen Hurd 
7644c7070dbSScott Long /*
7654c7070dbSScott Long  * device-specific sysctl variables:
7664c7070dbSScott Long  *
76791d546a0SConrad Meyer  * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
7684c7070dbSScott Long  *	During regular operations the CRC is stripped, but on some
7694c7070dbSScott Long  *	hardware reception of frames not multiple of 64 is slower,
7704c7070dbSScott Long  *	so using crcstrip=0 helps in benchmarks.
7714c7070dbSScott Long  *
77291d546a0SConrad Meyer  * iflib_rx_miss, iflib_rx_miss_bufs:
7734c7070dbSScott Long  *	count packets that might be missed due to lost interrupts.
7744c7070dbSScott Long  */
7754c7070dbSScott Long SYSCTL_DECL(_dev_netmap);
7764c7070dbSScott Long /*
7774c7070dbSScott Long  * The xl driver by default strips CRCs and we do not override it.
7784c7070dbSScott Long  */
7794c7070dbSScott Long 
7804c7070dbSScott Long int iflib_crcstrip = 1;
7814c7070dbSScott Long SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
7824c7070dbSScott Long     CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");
7834c7070dbSScott Long 
7844c7070dbSScott Long int iflib_rx_miss, iflib_rx_miss_bufs;
7854c7070dbSScott Long SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
7864c7070dbSScott Long     CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
78791d546a0SConrad Meyer SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
7884c7070dbSScott Long     CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");
7894c7070dbSScott Long 
7904c7070dbSScott Long /*
7914c7070dbSScott Long  * Register/unregister. We are already under netmap lock.
7924c7070dbSScott Long  * Only called on the first register or the last unregister.
7934c7070dbSScott Long  */
7944c7070dbSScott Long static int
7954c7070dbSScott Long iflib_netmap_register(struct netmap_adapter *na, int onoff)
7964c7070dbSScott Long {
7974c7070dbSScott Long 	struct ifnet *ifp = na->ifp;
7984c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
79995246abbSSean Bruno 	int status;
8004c7070dbSScott Long 
8014c7070dbSScott Long 	CTX_LOCK(ctx);
8024c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
8034c7070dbSScott Long 
8044c7070dbSScott Long 	/* Tell the stack that the interface is no longer active */
8054c7070dbSScott Long 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
8064c7070dbSScott Long 
8074c7070dbSScott Long 	if (!CTX_IS_VF(ctx))
8081248952aSSean Bruno 		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
8094c7070dbSScott Long 
8104c7070dbSScott Long 	/* enable or disable flags and callbacks in na and ifp */
8114c7070dbSScott Long 	if (onoff) {
8124c7070dbSScott Long 		nm_set_native_flags(na);
8134c7070dbSScott Long 	} else {
8144c7070dbSScott Long 		nm_clear_native_flags(na);
8154c7070dbSScott Long 	}
81695246abbSSean Bruno 	iflib_stop(ctx);
81795246abbSSean Bruno 	iflib_init_locked(ctx);
8181248952aSSean Bruno 	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* XXX why twice? */
81995246abbSSean Bruno 	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
82095246abbSSean Bruno 	if (status)
82195246abbSSean Bruno 		nm_clear_native_flags(na);
8224c7070dbSScott Long 	CTX_UNLOCK(ctx);
82395246abbSSean Bruno 	return (status);
8244c7070dbSScott Long }
8254c7070dbSScott Long 
8262d873474SStephen Hurd static int
8272d873474SStephen Hurd netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
8282d873474SStephen Hurd {
8292d873474SStephen Hurd 	struct netmap_adapter *na = kring->na;
8302d873474SStephen Hurd 	u_int const lim = kring->nkr_num_slots - 1;
8312d873474SStephen Hurd 	u_int head = kring->rhead;
8322d873474SStephen Hurd 	struct netmap_ring *ring = kring->ring;
8332d873474SStephen Hurd 	bus_dmamap_t *map;
8342d873474SStephen Hurd 	struct if_rxd_update iru;
8352d873474SStephen Hurd 	if_ctx_t ctx = rxq->ifr_ctx;
8362d873474SStephen Hurd 	iflib_fl_t fl = &rxq->ifr_fl[0];
8372d873474SStephen Hurd 	uint32_t refill_pidx, nic_i;
8382d873474SStephen Hurd 
8392d873474SStephen Hurd 	if (nm_i == head && __predict_true(!init))
8402d873474SStephen Hurd 		return (0);
8412d873474SStephen Hurd 	iru_init(&iru, rxq, 0 /* flid */);
8422d873474SStephen Hurd 	map = fl->ifl_sds.ifsd_map;
8432d873474SStephen Hurd 	refill_pidx = netmap_idx_k2n(kring, nm_i);
8442d873474SStephen Hurd 	/*
8452d873474SStephen Hurd 	 * IMPORTANT: we must leave one free slot in the ring,
8462d873474SStephen Hurd 	 * so move head back by one unit
8472d873474SStephen Hurd 	 */
8482d873474SStephen Hurd 	head = nm_prev(head, lim);
8491ae4848cSMatt Macy 	nic_i = UINT_MAX;
8502d873474SStephen Hurd 	while (nm_i != head) {
8512d873474SStephen Hurd 		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
8522d873474SStephen Hurd 			struct netmap_slot *slot = &ring->slot[nm_i];
8532d873474SStephen Hurd 			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
8542d873474SStephen Hurd 			uint32_t nic_i_dma = refill_pidx;
8552d873474SStephen Hurd 			nic_i = netmap_idx_k2n(kring, nm_i);
8562d873474SStephen Hurd 
8572d873474SStephen Hurd 			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
8582d873474SStephen Hurd 
8592d873474SStephen Hurd 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
8602d873474SStephen Hurd 			        return netmap_ring_reinit(kring);
8612d873474SStephen Hurd 
8622d873474SStephen Hurd 			fl->ifl_vm_addrs[tmp_pidx] = addr;
8632d873474SStephen Hurd 			if (__predict_false(init) && map) {
8642d873474SStephen Hurd 				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
8652d873474SStephen Hurd 			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
8662d873474SStephen Hurd 				/* buffer has changed, reload map */
8672d873474SStephen Hurd 				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
8682d873474SStephen Hurd 			}
8692d873474SStephen Hurd 			slot->flags &= ~NS_BUF_CHANGED;
8702d873474SStephen Hurd 
8712d873474SStephen Hurd 			nm_i = nm_next(nm_i, lim);
8722d873474SStephen Hurd 			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
8732d873474SStephen Hurd 			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
8742d873474SStephen Hurd 				continue;
8752d873474SStephen Hurd 
8762d873474SStephen Hurd 			iru.iru_pidx = refill_pidx;
8772d873474SStephen Hurd 			iru.iru_count = tmp_pidx+1;
8782d873474SStephen Hurd 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
8792d873474SStephen Hurd 
8802d873474SStephen Hurd 			refill_pidx = nic_i;
8812d873474SStephen Hurd 			if (map == NULL)
8822d873474SStephen Hurd 				continue;
8832d873474SStephen Hurd 
8842d873474SStephen Hurd 			for (int n = 0; n < iru.iru_count; n++) {
8852d873474SStephen Hurd 				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
8862d873474SStephen Hurd 						BUS_DMASYNC_PREREAD);
8872d873474SStephen Hurd 				/* XXX - change this to not use the netmap func*/
8882d873474SStephen Hurd 				nic_i_dma = nm_next(nic_i_dma, lim);
8892d873474SStephen Hurd 			}
8902d873474SStephen Hurd 		}
8912d873474SStephen Hurd 	}
8922d873474SStephen Hurd 	kring->nr_hwcur = head;
8932d873474SStephen Hurd 
8942d873474SStephen Hurd 	if (map)
8952d873474SStephen Hurd 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
8962d873474SStephen Hurd 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8971ae4848cSMatt Macy 	if (__predict_true(nic_i != UINT_MAX))
8982d873474SStephen Hurd 		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
8992d873474SStephen Hurd 	return (0);
9002d873474SStephen Hurd }
9012d873474SStephen Hurd 
9024c7070dbSScott Long /*
9034c7070dbSScott Long  * Reconcile kernel and user view of the transmit ring.
9044c7070dbSScott Long  *
9054c7070dbSScott Long  * All information is in the kring.
9064c7070dbSScott Long  * Userspace wants to send packets up to the one before kring->rhead,
9074c7070dbSScott Long  * kernel knows kring->nr_hwcur is the first unsent packet.
9084c7070dbSScott Long  *
9094c7070dbSScott Long  * Here we push packets out (as many as possible), and possibly
9104c7070dbSScott Long  * reclaim buffers from previously completed transmission.
9114c7070dbSScott Long  *
9124c7070dbSScott Long  * The caller (netmap) guarantees that there is only one instance
9134c7070dbSScott Long  * running at any time. Any interference with other driver
9144c7070dbSScott Long  * methods should be handled by the individual drivers.
9154c7070dbSScott Long  */
9164c7070dbSScott Long static int
9174c7070dbSScott Long iflib_netmap_txsync(struct netmap_kring *kring, int flags)
9184c7070dbSScott Long {
9194c7070dbSScott Long 	struct netmap_adapter *na = kring->na;
9204c7070dbSScott Long 	struct ifnet *ifp = na->ifp;
9214c7070dbSScott Long 	struct netmap_ring *ring = kring->ring;
922dd7fbcf1SStephen Hurd 	u_int nm_i;	/* index into the netmap kring */
9234c7070dbSScott Long 	u_int nic_i;	/* index into the NIC ring */
9244c7070dbSScott Long 	u_int n;
9254c7070dbSScott Long 	u_int const lim = kring->nkr_num_slots - 1;
9264c7070dbSScott Long 	u_int const head = kring->rhead;
9274c7070dbSScott Long 	struct if_pkt_info pi;
9284c7070dbSScott Long 
9294c7070dbSScott Long 	/*
9304c7070dbSScott Long 	 * interrupts on every tx packet are expensive so request
9314c7070dbSScott Long 	 * them every half ring, or where NS_REPORT is set
9324c7070dbSScott Long 	 */
9334c7070dbSScott Long 	u_int report_frequency = kring->nkr_num_slots >> 1;
9344c7070dbSScott Long 	/* device-specific */
9354c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
9364c7070dbSScott Long 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
9374c7070dbSScott Long 
93895246abbSSean Bruno 	if (txq->ift_sds.ifsd_map)
9394c7070dbSScott Long 		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
9404c7070dbSScott Long 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9414c7070dbSScott Long 
9424c7070dbSScott Long 
9434c7070dbSScott Long 	/*
9444c7070dbSScott Long 	 * First part: process new packets to send.
945dd7fbcf1SStephen Hurd 	 * nm_i is the current index in the netmap kring,
9464c7070dbSScott Long 	 * nic_i is the corresponding index in the NIC ring.
9474c7070dbSScott Long 	 *
9484c7070dbSScott Long 	 * If we have packets to send (nm_i != head)
9494c7070dbSScott Long 	 * iterate over the netmap ring, fetch length and update
9504c7070dbSScott Long 	 * the corresponding slot in the NIC ring. Some drivers also
9514c7070dbSScott Long 	 * need to update the buffer's physical address in the NIC slot
9524c7070dbSScott Long 	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
9534c7070dbSScott Long 	 *
9544c7070dbSScott Long 	 * The netmap_reload_map() calls is especially expensive,
9554c7070dbSScott Long 	 * even when (as in this case) the tag is 0, so do only
9564c7070dbSScott Long 	 * when the buffer has actually changed.
9574c7070dbSScott Long 	 *
9584c7070dbSScott Long 	 * If possible do not set the report/intr bit on all slots,
9594c7070dbSScott Long 	 * but only a few times per ring or when NS_REPORT is set.
9604c7070dbSScott Long 	 *
9614c7070dbSScott Long 	 * Finally, on 10G and faster drivers, it might be useful
9624c7070dbSScott Long 	 * to prefetch the next slot and txr entry.
9634c7070dbSScott Long 	 */
9644c7070dbSScott Long 
965dd7fbcf1SStephen Hurd 	nm_i = kring->nr_hwcur;
9665ee36c68SStephen Hurd 	if (nm_i != head) {	/* we have new packets to send */
96795246abbSSean Bruno 		pkt_info_zero(&pi);
96895246abbSSean Bruno 		pi.ipi_segs = txq->ift_segs;
96995246abbSSean Bruno 		pi.ipi_qsidx = kring->ring_id;
9704c7070dbSScott Long 		nic_i = netmap_idx_k2n(kring, nm_i);
9714c7070dbSScott Long 
9724c7070dbSScott Long 		__builtin_prefetch(&ring->slot[nm_i]);
9734c7070dbSScott Long 		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
97495246abbSSean Bruno 		if (txq->ift_sds.ifsd_map)
9754c7070dbSScott Long 			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
9764c7070dbSScott Long 
9774c7070dbSScott Long 		for (n = 0; nm_i != head; n++) {
9784c7070dbSScott Long 			struct netmap_slot *slot = &ring->slot[nm_i];
9794c7070dbSScott Long 			u_int len = slot->len;
9800a1b74a3SSean Bruno 			uint64_t paddr;
9814c7070dbSScott Long 			void *addr = PNMB(na, slot, &paddr);
9824c7070dbSScott Long 			int flags = (slot->flags & NS_REPORT ||
9834c7070dbSScott Long 				nic_i == 0 || nic_i == report_frequency) ?
9844c7070dbSScott Long 				IPI_TX_INTR : 0;
9854c7070dbSScott Long 
9864c7070dbSScott Long 			/* device-specific */
98795246abbSSean Bruno 			pi.ipi_len = len;
98895246abbSSean Bruno 			pi.ipi_segs[0].ds_addr = paddr;
98995246abbSSean Bruno 			pi.ipi_segs[0].ds_len = len;
99095246abbSSean Bruno 			pi.ipi_nsegs = 1;
99195246abbSSean Bruno 			pi.ipi_ndescs = 0;
9924c7070dbSScott Long 			pi.ipi_pidx = nic_i;
9934c7070dbSScott Long 			pi.ipi_flags = flags;
9944c7070dbSScott Long 
9954c7070dbSScott Long 			/* Fill the slot in the NIC ring. */
9964c7070dbSScott Long 			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
9974c7070dbSScott Long 
9984c7070dbSScott Long 			/* prefetch for next round */
9994c7070dbSScott Long 			__builtin_prefetch(&ring->slot[nm_i + 1]);
10004c7070dbSScott Long 			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
100195246abbSSean Bruno 			if (txq->ift_sds.ifsd_map) {
10024c7070dbSScott Long 				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
10034c7070dbSScott Long 
10044c7070dbSScott Long 				NM_CHECK_ADDR_LEN(na, addr, len);
10054c7070dbSScott Long 
10064c7070dbSScott Long 				if (slot->flags & NS_BUF_CHANGED) {
10074c7070dbSScott Long 					/* buffer has changed, reload map */
10084c7070dbSScott Long 					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
10094c7070dbSScott Long 				}
10104c7070dbSScott Long 				/* make sure changes to the buffer are synced */
10114c7070dbSScott Long 				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
10124c7070dbSScott Long 						BUS_DMASYNC_PREWRITE);
101395246abbSSean Bruno 			}
101495246abbSSean Bruno 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
10154c7070dbSScott Long 			nm_i = nm_next(nm_i, lim);
10164c7070dbSScott Long 			nic_i = nm_next(nic_i, lim);
10174c7070dbSScott Long 		}
1018dd7fbcf1SStephen Hurd 		kring->nr_hwcur = nm_i;
10194c7070dbSScott Long 
10204c7070dbSScott Long 		/* synchronize the NIC ring */
102195246abbSSean Bruno 		if (txq->ift_sds.ifsd_map)
10224c7070dbSScott Long 			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
10234c7070dbSScott Long 						BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10244c7070dbSScott Long 
10254c7070dbSScott Long 		/* (re)start the tx unit up to slot nic_i (excluded) */
10264c7070dbSScott Long 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
10274c7070dbSScott Long 	}
10284c7070dbSScott Long 
10294c7070dbSScott Long 	/*
10304c7070dbSScott Long 	 * Second part: reclaim buffers for completed transmissions.
10315ee36c68SStephen Hurd 	 *
10325ee36c68SStephen Hurd 	 * If there are unclaimed buffers, attempt to reclaim them.
10335ee36c68SStephen Hurd 	 * If none are reclaimed, and TX IRQs are not in use, do an initial
10345ee36c68SStephen Hurd 	 * minimal delay, then trigger the tx handler which will spin in the
10355ee36c68SStephen Hurd 	 * group task queue.
10364c7070dbSScott Long 	 */
1037dd7fbcf1SStephen Hurd 	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
10384c7070dbSScott Long 		if (iflib_tx_credits_update(ctx, txq)) {
10394c7070dbSScott Long 			/* some tx completed, increment avail */
10404c7070dbSScott Long 			nic_i = txq->ift_cidx_processed;
10414c7070dbSScott Long 			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
10424c7070dbSScott Long 		}
10435ee36c68SStephen Hurd 	}
1044dd7fbcf1SStephen Hurd 	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
1045dd7fbcf1SStephen Hurd 		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1046dd7fbcf1SStephen Hurd 			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
1047dd7fbcf1SStephen Hurd 			    iflib_timer, txq, txq->ift_timer.c_cpu);
10485ee36c68SStephen Hurd 		}
10494c7070dbSScott Long 	return (0);
10504c7070dbSScott Long }
10514c7070dbSScott Long 
10524c7070dbSScott Long /*
10534c7070dbSScott Long  * Reconcile kernel and user view of the receive ring.
10544c7070dbSScott Long  * Same as for the txsync, this routine must be efficient.
10554c7070dbSScott Long  * The caller guarantees a single invocation, but races against
10564c7070dbSScott Long  * the rest of the driver should be handled here.
10574c7070dbSScott Long  *
10584c7070dbSScott Long  * On call, kring->rhead is the first packet that userspace wants
10594c7070dbSScott Long  * to keep, and kring->rcur is the wakeup point.
10604c7070dbSScott Long  * The kernel has previously reported packets up to kring->rtail.
10614c7070dbSScott Long  *
10624c7070dbSScott Long  * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
10634c7070dbSScott Long  * of whether or not we received an interrupt.
10644c7070dbSScott Long  */
10654c7070dbSScott Long static int
10664c7070dbSScott Long iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
10674c7070dbSScott Long {
10684c7070dbSScott Long 	struct netmap_adapter *na = kring->na;
10694c7070dbSScott Long 	struct netmap_ring *ring = kring->ring;
107095246abbSSean Bruno 	uint32_t nm_i;	/* index into the netmap ring */
10712d873474SStephen Hurd 	uint32_t nic_i;	/* index into the NIC ring */
10724c7070dbSScott Long 	u_int i, n;
10734c7070dbSScott Long 	u_int const lim = kring->nkr_num_slots - 1;
1074dd7fbcf1SStephen Hurd 	u_int const head = kring->rhead;
10754c7070dbSScott Long 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1076ab2e3f79SStephen Hurd 	struct if_rxd_info ri;
107795246abbSSean Bruno 
107895246abbSSean Bruno 	struct ifnet *ifp = na->ifp;
10794c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
10804c7070dbSScott Long 	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
10814c7070dbSScott Long 	iflib_fl_t fl = rxq->ifr_fl;
10824c7070dbSScott Long 	if (head > lim)
10834c7070dbSScott Long 		return netmap_ring_reinit(kring);
10844c7070dbSScott Long 
10854c7070dbSScott Long 	/* XXX check sync modes */
108695246abbSSean Bruno 	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
108795246abbSSean Bruno 		if (fl->ifl_sds.ifsd_map == NULL)
108895246abbSSean Bruno 			continue;
10894c7070dbSScott Long 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
10904c7070dbSScott Long 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
109195246abbSSean Bruno 	}
10924c7070dbSScott Long 	/*
10934c7070dbSScott Long 	 * First part: import newly received packets.
10944c7070dbSScott Long 	 *
10954c7070dbSScott Long 	 * nm_i is the index of the next free slot in the netmap ring,
10964c7070dbSScott Long 	 * nic_i is the index of the next received packet in the NIC ring,
10974c7070dbSScott Long 	 * and they may differ in case if_init() has been called while
10984c7070dbSScott Long 	 * in netmap mode. For the receive ring we have
10994c7070dbSScott Long 	 *
11004c7070dbSScott Long 	 *	nic_i = fl->ifl_cidx;
11014c7070dbSScott Long 	 *	nm_i = kring->nr_hwtail (previous)
11024c7070dbSScott Long 	 * and
11034c7070dbSScott Long 	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
11044c7070dbSScott Long 	 *
11054c7070dbSScott Long 	 * fl->ifl_cidx is set to 0 on a ring reinit
11064c7070dbSScott Long 	 */
11074c7070dbSScott Long 	if (netmap_no_pendintr || force_update) {
11084c7070dbSScott Long 		int crclen = iflib_crcstrip ? 0 : 4;
11094c7070dbSScott Long 		int error, avail;
11104c7070dbSScott Long 
11112d873474SStephen Hurd 		for (i = 0; i < rxq->ifr_nfl; i++) {
11122d873474SStephen Hurd 			fl = &rxq->ifr_fl[i];
11134c7070dbSScott Long 			nic_i = fl->ifl_cidx;
11144c7070dbSScott Long 			nm_i = netmap_idx_n2k(kring, nic_i);
111595246abbSSean Bruno 			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
11164c7070dbSScott Long 			for (n = 0; avail > 0; n++, avail--) {
1117ab2e3f79SStephen Hurd 				rxd_info_zero(&ri);
1118ab2e3f79SStephen Hurd 				ri.iri_frags = rxq->ifr_frags;
1119ab2e3f79SStephen Hurd 				ri.iri_qsidx = kring->ring_id;
1120ab2e3f79SStephen Hurd 				ri.iri_ifp = ctx->ifc_ifp;
1121ab2e3f79SStephen Hurd 				ri.iri_cidx = nic_i;
112295246abbSSean Bruno 
1123ab2e3f79SStephen Hurd 				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1124ab2e3f79SStephen Hurd 				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
11257cb7c6e3SNavdeep Parhar 				ring->slot[nm_i].flags = 0;
112695246abbSSean Bruno 				if (fl->ifl_sds.ifsd_map)
11274c7070dbSScott Long 					bus_dmamap_sync(fl->ifl_desc_tag,
1128e035717eSSean Bruno 							fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
11294c7070dbSScott Long 				nm_i = nm_next(nm_i, lim);
11304c7070dbSScott Long 				nic_i = nm_next(nic_i, lim);
11314c7070dbSScott Long 			}
11324c7070dbSScott Long 			if (n) { /* update the state variables */
11334c7070dbSScott Long 				if (netmap_no_pendintr && !force_update) {
11344c7070dbSScott Long 					/* diagnostics */
11354c7070dbSScott Long 					iflib_rx_miss++;
11364c7070dbSScott Long 					iflib_rx_miss_bufs += n;
11374c7070dbSScott Long 				}
11384c7070dbSScott Long 				fl->ifl_cidx = nic_i;
1139dd7fbcf1SStephen Hurd 				kring->nr_hwtail = nm_i;
11404c7070dbSScott Long 			}
11414c7070dbSScott Long 			kring->nr_kflags &= ~NKR_PENDINTR;
11424c7070dbSScott Long 		}
11434c7070dbSScott Long 	}
11444c7070dbSScott Long 	/*
11454c7070dbSScott Long 	 * Second part: skip past packets that userspace has released.
11464c7070dbSScott Long 	 * (kring->nr_hwcur to head excluded),
11474c7070dbSScott Long 	 * and make the buffers available for reception.
11484c7070dbSScott Long 	 * As usual nm_i is the index in the netmap ring,
11494c7070dbSScott Long 	 * nic_i is the index in the NIC ring, and
11504c7070dbSScott Long 	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
11514c7070dbSScott Long 	 */
11524c7070dbSScott Long 	/* XXX not sure how this will work with multiple free lists */
1153dd7fbcf1SStephen Hurd 	nm_i = kring->nr_hwcur;
115495246abbSSean Bruno 
11552d873474SStephen Hurd 	return (netmap_fl_refill(rxq, kring, nm_i, false));
11564c7070dbSScott Long }
11574c7070dbSScott Long 
115895246abbSSean Bruno static void
115995246abbSSean Bruno iflib_netmap_intr(struct netmap_adapter *na, int onoff)
116095246abbSSean Bruno {
116195246abbSSean Bruno 	struct ifnet *ifp = na->ifp;
116295246abbSSean Bruno 	if_ctx_t ctx = ifp->if_softc;
116395246abbSSean Bruno 
1164ab2e3f79SStephen Hurd 	CTX_LOCK(ctx);
116595246abbSSean Bruno 	if (onoff) {
116695246abbSSean Bruno 		IFDI_INTR_ENABLE(ctx);
116795246abbSSean Bruno 	} else {
116895246abbSSean Bruno 		IFDI_INTR_DISABLE(ctx);
116995246abbSSean Bruno 	}
1170ab2e3f79SStephen Hurd 	CTX_UNLOCK(ctx);
117195246abbSSean Bruno }
117295246abbSSean Bruno 
117395246abbSSean Bruno 
11744c7070dbSScott Long static int
11754c7070dbSScott Long iflib_netmap_attach(if_ctx_t ctx)
11764c7070dbSScott Long {
11774c7070dbSScott Long 	struct netmap_adapter na;
117823ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
11794c7070dbSScott Long 
11804c7070dbSScott Long 	bzero(&na, sizeof(na));
11814c7070dbSScott Long 
11824c7070dbSScott Long 	na.ifp = ctx->ifc_ifp;
11834c7070dbSScott Long 	na.na_flags = NAF_BDG_MAYSLEEP;
11844c7070dbSScott Long 	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
11854c7070dbSScott Long 	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
11864c7070dbSScott Long 
118723ac9029SStephen Hurd 	na.num_tx_desc = scctx->isc_ntxd[0];
118823ac9029SStephen Hurd 	na.num_rx_desc = scctx->isc_nrxd[0];
11894c7070dbSScott Long 	na.nm_txsync = iflib_netmap_txsync;
11904c7070dbSScott Long 	na.nm_rxsync = iflib_netmap_rxsync;
11914c7070dbSScott Long 	na.nm_register = iflib_netmap_register;
119295246abbSSean Bruno 	na.nm_intr = iflib_netmap_intr;
11934c7070dbSScott Long 	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
11944c7070dbSScott Long 	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
11954c7070dbSScott Long 	return (netmap_attach(&na));
11964c7070dbSScott Long }
11974c7070dbSScott Long 
11984c7070dbSScott Long static void
11994c7070dbSScott Long iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
12004c7070dbSScott Long {
12014c7070dbSScott Long 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
12024c7070dbSScott Long 	struct netmap_slot *slot;
12034c7070dbSScott Long 
12044c7070dbSScott Long 	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1205e099b90bSPedro F. Giffuni 	if (slot == NULL)
12064c7070dbSScott Long 		return;
120795246abbSSean Bruno 	if (txq->ift_sds.ifsd_map == NULL)
120895246abbSSean Bruno 		return;
12094c7070dbSScott Long 
121023ac9029SStephen Hurd 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
12114c7070dbSScott Long 
12124c7070dbSScott Long 		/*
12134c7070dbSScott Long 		 * In netmap mode, set the map for the packet buffer.
12144c7070dbSScott Long 		 * NOTE: Some drivers (not this one) also need to set
12154c7070dbSScott Long 		 * the physical buffer address in the NIC ring.
12164c7070dbSScott Long 		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
12174c7070dbSScott Long 		 * netmap slot index, si
12184c7070dbSScott Long 		 */
12192ff91c17SVincenzo Maffione 		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
12204c7070dbSScott Long 		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
12214c7070dbSScott Long 	}
12224c7070dbSScott Long }
12232d873474SStephen Hurd 
12244c7070dbSScott Long static void
12254c7070dbSScott Long iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
12264c7070dbSScott Long {
12274c7070dbSScott Long 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
12282ff91c17SVincenzo Maffione 	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
12294c7070dbSScott Long 	struct netmap_slot *slot;
12302d873474SStephen Hurd 	uint32_t nm_i;
12314c7070dbSScott Long 
12324c7070dbSScott Long 	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1233e099b90bSPedro F. Giffuni 	if (slot == NULL)
12344c7070dbSScott Long 		return;
12352d873474SStephen Hurd 	nm_i = netmap_idx_n2k(kring, 0);
12362d873474SStephen Hurd 	netmap_fl_refill(rxq, kring, nm_i, true);
12374c7070dbSScott Long }
12384c7070dbSScott Long 
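/*
 * Called from iflib_timer() when the interface is in netmap mode: if the
 * TX kring still has outstanding slots, harvest any completed descriptors
 * (raising a netmap TX "interrupt") and, when the driver does not provide
 * real TX interrupts, ask the caller to re-arm its timer at roughly 1 ms.
 */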
1239dd7fbcf1SStephen Hurd static void
1240dd7fbcf1SStephen Hurd iflib_netmap_timer_adjust(if_ctx_t ctx, uint16_t txqid, uint32_t *reset_on)
1241dd7fbcf1SStephen Hurd {
1242dd7fbcf1SStephen Hurd 	struct netmap_kring *kring;
1243dd7fbcf1SStephen Hurd 
1244dd7fbcf1SStephen Hurd 	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];
1245dd7fbcf1SStephen Hurd 
1246dd7fbcf1SStephen Hurd 	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
1247dd7fbcf1SStephen Hurd 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
1248dd7fbcf1SStephen Hurd 			netmap_tx_irq(ctx->ifc_ifp, txqid);
1249dd7fbcf1SStephen Hurd 		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
1250dd7fbcf1SStephen Hurd 			if (hz < 2000)
1251dd7fbcf1SStephen Hurd 				*reset_on = 1;
1252dd7fbcf1SStephen Hurd 			else
1253dd7fbcf1SStephen Hurd 				*reset_on = hz / 1000;
1254dd7fbcf1SStephen Hurd 		}
1255dd7fbcf1SStephen Hurd 	}
1256dd7fbcf1SStephen Hurd }
1257dd7fbcf1SStephen Hurd 
12584c7070dbSScott Long #define iflib_netmap_detach(ifp) netmap_detach(ifp)
12594c7070dbSScott Long 
12604c7070dbSScott Long #else
12614c7070dbSScott Long #define iflib_netmap_txq_init(ctx, txq)
12624c7070dbSScott Long #define iflib_netmap_rxq_init(ctx, rxq)
12634c7070dbSScott Long #define iflib_netmap_detach(ifp)
12644c7070dbSScott Long 
12654c7070dbSScott Long #define iflib_netmap_attach(ctx) (0)
12664c7070dbSScott Long #define netmap_rx_irq(ifp, qid, budget) (0)
126795246abbSSean Bruno #define netmap_tx_irq(ifp, qid) do {} while (0)
1268dd7fbcf1SStephen Hurd #define iflib_netmap_timer_adjust(ctx, txqid, reset_on)
12694c7070dbSScott Long 
12704c7070dbSScott Long #endif
12714c7070dbSScott Long 
12724c7070dbSScott Long #if defined(__i386__) || defined(__amd64__)
12734c7070dbSScott Long static __inline void
12744c7070dbSScott Long prefetch(void *x)
12754c7070dbSScott Long {
12764c7070dbSScott Long 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
12774c7070dbSScott Long }
12783429c02fSStephen Hurd static __inline void
12793429c02fSStephen Hurd prefetch2cachelines(void *x)
12803429c02fSStephen Hurd {
12813429c02fSStephen Hurd 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
12823429c02fSStephen Hurd #if (CACHE_LINE_SIZE < 128)
12833429c02fSStephen Hurd 	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
12843429c02fSStephen Hurd #endif
12853429c02fSStephen Hurd }
12864c7070dbSScott Long #else
12874c7070dbSScott Long #define prefetch(x)
12883429c02fSStephen Hurd #define prefetch2cachelines(x)
12894c7070dbSScott Long #endif
12904c7070dbSScott Long 
12914c7070dbSScott Long static void
129209f6ff4fSMatt Macy iflib_gen_mac(if_ctx_t ctx)
129309f6ff4fSMatt Macy {
129409f6ff4fSMatt Macy 	struct thread *td;
129509f6ff4fSMatt Macy 	MD5_CTX mdctx;
129609f6ff4fSMatt Macy 	char uuid[HOSTUUIDLEN+1];
129709f6ff4fSMatt Macy 	char buf[HOSTUUIDLEN+16];
129809f6ff4fSMatt Macy 	uint8_t *mac;
129909f6ff4fSMatt Macy 	unsigned char digest[16];
130009f6ff4fSMatt Macy 
130109f6ff4fSMatt Macy 	td = curthread;
130209f6ff4fSMatt Macy 	mac = ctx->ifc_mac;
130309f6ff4fSMatt Macy 	uuid[HOSTUUIDLEN] = 0;
130409f6ff4fSMatt Macy 	bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
130509f6ff4fSMatt Macy 	snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
130609f6ff4fSMatt Macy 	/*
130709f6ff4fSMatt Macy 	 * Generate a pseudo-random, deterministic MAC
130809f6ff4fSMatt Macy 	 * address based on the UUID and unit number.
130909f6ff4fSMatt Macy 	 * The FreeBSD Foundation OUI of 58-9C-FC is used.
131009f6ff4fSMatt Macy 	 */
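	/*
	 * The result is 58:9c:fc:xx:yy:zz, where xx/yy/zz are the first
	 * three bytes of MD5("<hostuuid>-<nameunit>"), so the same
	 * jail/device pairing always yields the same address.
	 */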
131109f6ff4fSMatt Macy 	MD5Init(&mdctx);
131209f6ff4fSMatt Macy 	MD5Update(&mdctx, buf, strlen(buf));
131309f6ff4fSMatt Macy 	MD5Final(digest, &mdctx);
131409f6ff4fSMatt Macy 
131509f6ff4fSMatt Macy 	mac[0] = 0x58;
131609f6ff4fSMatt Macy 	mac[1] = 0x9C;
131709f6ff4fSMatt Macy 	mac[2] = 0xFC;
131809f6ff4fSMatt Macy 	mac[3] = digest[0];
131909f6ff4fSMatt Macy 	mac[4] = digest[1];
132009f6ff4fSMatt Macy 	mac[5] = digest[2];
132109f6ff4fSMatt Macy }
132209f6ff4fSMatt Macy 
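/*
 * Point an if_rxd_update at the per-free-list refill arrays so that a batch
 * of (index, bus address, vaddr) triples can be handed to the driver's
 * isc_rxd_refill callback in a single call.
 */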
132309f6ff4fSMatt Macy static void
132410e0d938SStephen Hurd iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
132510e0d938SStephen Hurd {
132610e0d938SStephen Hurd 	iflib_fl_t fl;
132710e0d938SStephen Hurd 
132810e0d938SStephen Hurd 	fl = &rxq->ifr_fl[flid];
132910e0d938SStephen Hurd 	iru->iru_paddrs = fl->ifl_bus_addrs;
133010e0d938SStephen Hurd 	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
133110e0d938SStephen Hurd 	iru->iru_idxs = fl->ifl_rxd_idxs;
133210e0d938SStephen Hurd 	iru->iru_qsidx = rxq->ifr_id;
133310e0d938SStephen Hurd 	iru->iru_buf_size = fl->ifl_buf_size;
133410e0d938SStephen Hurd 	iru->iru_flidx = fl->ifl_id;
133510e0d938SStephen Hurd }
133610e0d938SStephen Hurd 
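/*
 * bus_dmamap_load() callback used by iflib_dma_alloc(): the allocations made
 * there always use a single segment, so just hand the segment's bus address
 * back to the caller through the supplied pointer.
 */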
133710e0d938SStephen Hurd static void
13384c7070dbSScott Long _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
13394c7070dbSScott Long {
13404c7070dbSScott Long 	if (err)
13414c7070dbSScott Long 		return;
13424c7070dbSScott Long 	*(bus_addr_t *) arg = segs[0].ds_addr;
13434c7070dbSScott Long }
13444c7070dbSScott Long 
13454c7070dbSScott Long int
13464c7070dbSScott Long iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
13474c7070dbSScott Long {
13484c7070dbSScott Long 	int err;
13494c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
13504c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
13514c7070dbSScott Long 
13524c7070dbSScott Long 	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
13534c7070dbSScott Long 
13544c7070dbSScott Long 	err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
13554c7070dbSScott Long 				sctx->isc_q_align, 0,	/* alignment, bounds */
13564c7070dbSScott Long 				BUS_SPACE_MAXADDR,	/* lowaddr */
13574c7070dbSScott Long 				BUS_SPACE_MAXADDR,	/* highaddr */
13584c7070dbSScott Long 				NULL, NULL,		/* filter, filterarg */
13594c7070dbSScott Long 				size,			/* maxsize */
13604c7070dbSScott Long 				1,			/* nsegments */
13614c7070dbSScott Long 				size,			/* maxsegsize */
13624c7070dbSScott Long 				BUS_DMA_ALLOCNOW,	/* flags */
13634c7070dbSScott Long 				NULL,			/* lockfunc */
13644c7070dbSScott Long 				NULL,			/* lockarg */
13654c7070dbSScott Long 				&dma->idi_tag);
13664c7070dbSScott Long 	if (err) {
13674c7070dbSScott Long 		device_printf(dev,
13684c7070dbSScott Long 		    "%s: bus_dma_tag_create failed: %d\n",
13694c7070dbSScott Long 		    __func__, err);
13704c7070dbSScott Long 		goto fail_0;
13714c7070dbSScott Long 	}
13724c7070dbSScott Long 
13734c7070dbSScott Long 	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
13744c7070dbSScott Long 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
13754c7070dbSScott Long 	if (err) {
13764c7070dbSScott Long 		device_printf(dev,
13774c7070dbSScott Long 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
13784c7070dbSScott Long 		    __func__, (uintmax_t)size, err);
13794c7070dbSScott Long 		goto fail_1;
13804c7070dbSScott Long 	}
13814c7070dbSScott Long 
13824c7070dbSScott Long 	dma->idi_paddr = IF_BAD_DMA;
13834c7070dbSScott Long 	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
13844c7070dbSScott Long 	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
13854c7070dbSScott Long 	if (err || dma->idi_paddr == IF_BAD_DMA) {
13864c7070dbSScott Long 		device_printf(dev,
13874c7070dbSScott Long 		    "%s: bus_dmamap_load failed: %d\n",
13884c7070dbSScott Long 		    __func__, err);
13894c7070dbSScott Long 		goto fail_2;
13904c7070dbSScott Long 	}
13914c7070dbSScott Long 
13924c7070dbSScott Long 	dma->idi_size = size;
13934c7070dbSScott Long 	return (0);
13944c7070dbSScott Long 
13954c7070dbSScott Long fail_2:
13964c7070dbSScott Long 	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
13974c7070dbSScott Long fail_1:
13984c7070dbSScott Long 	bus_dma_tag_destroy(dma->idi_tag);
13994c7070dbSScott Long fail_0:
14004c7070dbSScott Long 	dma->idi_tag = NULL;
14014c7070dbSScott Long 
14024c7070dbSScott Long 	return (err);
14034c7070dbSScott Long }
14044c7070dbSScott Long 
14054c7070dbSScott Long int
14064c7070dbSScott Long iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
14074c7070dbSScott Long {
14084c7070dbSScott Long 	int i, err;
14094c7070dbSScott Long 	iflib_dma_info_t *dmaiter;
14104c7070dbSScott Long 
14114c7070dbSScott Long 	dmaiter = dmalist;
14124c7070dbSScott Long 	for (i = 0; i < count; i++, dmaiter++) {
14134c7070dbSScott Long 		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
14144c7070dbSScott Long 			break;
14154c7070dbSScott Long 	}
14164c7070dbSScott Long 	if (err)
14174c7070dbSScott Long 		iflib_dma_free_multi(dmalist, i);
14184c7070dbSScott Long 	return (err);
14194c7070dbSScott Long }
14204c7070dbSScott Long 
14214c7070dbSScott Long void
14224c7070dbSScott Long iflib_dma_free(iflib_dma_info_t dma)
14234c7070dbSScott Long {
14244c7070dbSScott Long 	if (dma->idi_tag == NULL)
14254c7070dbSScott Long 		return;
14264c7070dbSScott Long 	if (dma->idi_paddr != IF_BAD_DMA) {
14274c7070dbSScott Long 		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
14284c7070dbSScott Long 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
14294c7070dbSScott Long 		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
14304c7070dbSScott Long 		dma->idi_paddr = IF_BAD_DMA;
14314c7070dbSScott Long 	}
14324c7070dbSScott Long 	if (dma->idi_vaddr != NULL) {
14334c7070dbSScott Long 		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
14344c7070dbSScott Long 		dma->idi_vaddr = NULL;
14354c7070dbSScott Long 	}
14364c7070dbSScott Long 	bus_dma_tag_destroy(dma->idi_tag);
14374c7070dbSScott Long 	dma->idi_tag = NULL;
14384c7070dbSScott Long }
14394c7070dbSScott Long 
14404c7070dbSScott Long void
14414c7070dbSScott Long iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
14424c7070dbSScott Long {
14434c7070dbSScott Long 	int i;
14444c7070dbSScott Long 	iflib_dma_info_t *dmaiter = dmalist;
14454c7070dbSScott Long 
14464c7070dbSScott Long 	for (i = 0; i < count; i++, dmaiter++)
14474c7070dbSScott Long 		iflib_dma_free(*dmaiter);
14484c7070dbSScott Long }
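
/*
 * Illustrative only -- not part of iflib.  A minimal sketch of how a driver
 * might pair iflib_dma_alloc()/iflib_dma_free() for a small device-visible
 * region; "foo_softc" and "foo_stats" are hypothetical names:
 *
 *	struct foo_softc {
 *		struct iflib_dma_info	stats_dma;
 *		struct foo_stats	*stats;
 *	};
 *
 *	if (iflib_dma_alloc(ctx, sizeof(struct foo_stats),
 *	    &sc->stats_dma, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	sc->stats = (struct foo_stats *)sc->stats_dma.idi_vaddr;
 *	// program sc->stats_dma.idi_paddr into the device here
 *	...
 *	iflib_dma_free(&sc->stats_dma);		// at detach time
 *
 * iflib_dma_alloc() creates the tag, allocates zeroed wired memory and loads
 * the map in one shot, so on failure there is nothing further for the caller
 * to release.
 */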
14494c7070dbSScott Long 
1450bd84f700SSean Bruno #ifdef EARLY_AP_STARTUP
1451bd84f700SSean Bruno static const int iflib_started = 1;
1452bd84f700SSean Bruno #else
1453bd84f700SSean Bruno /*
1454bd84f700SSean Bruno  * We used to abuse the smp_started flag to decide if the queues have been
1455bd84f700SSean Bruno  * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
1456bd84f700SSean Bruno  * That gave bad races, since the SYSINIT() runs strictly after smp_started
1457bd84f700SSean Bruno  * is set.  Run a SYSINIT() strictly after that to just set a usable
1458bd84f700SSean Bruno  * completion flag.
1459bd84f700SSean Bruno  */
1460bd84f700SSean Bruno 
1461bd84f700SSean Bruno static int iflib_started;
1462bd84f700SSean Bruno 
1463bd84f700SSean Bruno static void
1464bd84f700SSean Bruno iflib_record_started(void *arg)
1465bd84f700SSean Bruno {
1466bd84f700SSean Bruno 	iflib_started = 1;
1467bd84f700SSean Bruno }
1468bd84f700SSean Bruno 
1469bd84f700SSean Bruno SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
1470bd84f700SSean Bruno 	iflib_record_started, NULL);
1471bd84f700SSean Bruno #endif
1472bd84f700SSean Bruno 
14734c7070dbSScott Long static int
14744c7070dbSScott Long iflib_fast_intr(void *arg)
14754c7070dbSScott Long {
14764c7070dbSScott Long 	iflib_filter_info_t info = arg;
14774c7070dbSScott Long 	struct grouptask *gtask = info->ifi_task;
147895246abbSSean Bruno 	if (!iflib_started)
147995246abbSSean Bruno 		return (FILTER_HANDLED);
148095246abbSSean Bruno 
148195246abbSSean Bruno 	DBG_COUNTER_INC(fast_intrs);
148295246abbSSean Bruno 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
148395246abbSSean Bruno 		return (FILTER_HANDLED);
148495246abbSSean Bruno 
148595246abbSSean Bruno 	GROUPTASK_ENQUEUE(gtask);
148695246abbSSean Bruno 	return (FILTER_HANDLED);
148795246abbSSean Bruno }
148895246abbSSean Bruno 
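/*
 * Interrupt filter for queues whose TX and RX share a vector: harvest TX
 * credits for each TX queue tied to this RX queue (kicking the TX task when
 * work completed, or re-enabling that TX interrupt otherwise), then either
 * enqueue the RX task if descriptors are available or re-enable the RX
 * interrupt.
 */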
148995246abbSSean Bruno static int
149095246abbSSean Bruno iflib_fast_intr_rxtx(void *arg)
149195246abbSSean Bruno {
149295246abbSSean Bruno 	iflib_filter_info_t info = arg;
149395246abbSSean Bruno 	struct grouptask *gtask = info->ifi_task;
149495246abbSSean Bruno 	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
14951ae4848cSMatt Macy 	if_ctx_t ctx = rxq->ifr_ctx;
149695246abbSSean Bruno 	int i, cidx;
149795246abbSSean Bruno 
149895246abbSSean Bruno 	if (!iflib_started)
149995246abbSSean Bruno 		return (FILTER_HANDLED);
150095246abbSSean Bruno 
150195246abbSSean Bruno 	DBG_COUNTER_INC(fast_intrs);
150295246abbSSean Bruno 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
150395246abbSSean Bruno 		return (FILTER_HANDLED);
150495246abbSSean Bruno 
15051ae4848cSMatt Macy 	MPASS(rxq->ifr_ntxqirq);
150695246abbSSean Bruno 	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
150795246abbSSean Bruno 		qidx_t txqid = rxq->ifr_txqid[i];
150895246abbSSean Bruno 
151195246abbSSean Bruno 		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
151295246abbSSean Bruno 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
151395246abbSSean Bruno 			continue;
151495246abbSSean Bruno 		}
151595246abbSSean Bruno 		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
151695246abbSSean Bruno 	}
151795246abbSSean Bruno 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
151895246abbSSean Bruno 		cidx = rxq->ifr_cq_cidx;
151995246abbSSean Bruno 	else
152095246abbSSean Bruno 		cidx = rxq->ifr_fl[0].ifl_cidx;
152195246abbSSean Bruno 	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
152295246abbSSean Bruno 		GROUPTASK_ENQUEUE(gtask);
152395246abbSSean Bruno 	else
152495246abbSSean Bruno 		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
152595246abbSSean Bruno 	return (FILTER_HANDLED);
152695246abbSSean Bruno }
152795246abbSSean Bruno 
152895246abbSSean Bruno 
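/*
 * Interrupt filter for per-context (admin/link or legacy) vectors: run the
 * driver filter, if any, and hand the rest of the work to the group task.
 */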
152995246abbSSean Bruno static int
153095246abbSSean Bruno iflib_fast_intr_ctx(void *arg)
153195246abbSSean Bruno {
153295246abbSSean Bruno 	iflib_filter_info_t info = arg;
153395246abbSSean Bruno 	struct grouptask *gtask = info->ifi_task;
15344c7070dbSScott Long 
1535bd84f700SSean Bruno 	if (!iflib_started)
15361248952aSSean Bruno 		return (FILTER_HANDLED);
15371248952aSSean Bruno 
15384c7070dbSScott Long 	DBG_COUNTER_INC(fast_intrs);
15394c7070dbSScott Long 	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
15404c7070dbSScott Long 		return (FILTER_HANDLED);
15414c7070dbSScott Long 
15424c7070dbSScott Long 	GROUPTASK_ENQUEUE(gtask);
15434c7070dbSScott Long 	return (FILTER_HANDLED);
15444c7070dbSScott Long }
15454c7070dbSScott Long 
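/*
 * Allocate an IRQ resource for @rid (shareable when running with a legacy
 * interrupt) and wire up either a filter or a handler for it; at most one
 * of the two may be supplied.
 */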
15464c7070dbSScott Long static int
15474c7070dbSScott Long _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
15484c7070dbSScott Long 		 driver_filter_t filter, driver_intr_t handler, void *arg,
15493e0e6330SStephen Hurd 		 const char *name)
15504c7070dbSScott Long {
15512b2fc973SSean Bruno 	int rc, flags;
15524c7070dbSScott Long 	struct resource *res;
15532b2fc973SSean Bruno 	void *tag = NULL;
15544c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
15554c7070dbSScott Long 
15562b2fc973SSean Bruno 	flags = RF_ACTIVE;
15572b2fc973SSean Bruno 	if (ctx->ifc_flags & IFC_LEGACY)
15582b2fc973SSean Bruno 		flags |= RF_SHAREABLE;
15594c7070dbSScott Long 	MPASS(rid < 512);
15604c7070dbSScott Long 	irq->ii_rid = rid;
15612b2fc973SSean Bruno 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
15624c7070dbSScott Long 	if (res == NULL) {
15634c7070dbSScott Long 		device_printf(dev,
15644c7070dbSScott Long 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
15654c7070dbSScott Long 		return (ENOMEM);
15664c7070dbSScott Long 	}
15674c7070dbSScott Long 	irq->ii_res = res;
15684c7070dbSScott Long 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
15694c7070dbSScott Long 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
15704c7070dbSScott Long 						filter, handler, arg, &tag);
15714c7070dbSScott Long 	if (rc != 0) {
15724c7070dbSScott Long 		device_printf(dev,
15734c7070dbSScott Long 		    "failed to setup interrupt for rid %d, name %s: %d\n",
15744c7070dbSScott Long 					  rid, name ? name : "unknown", rc);
15754c7070dbSScott Long 		return (rc);
15764c7070dbSScott Long 	} else if (name)
1577f454e7ebSJohn Baldwin 		bus_describe_intr(dev, res, tag, "%s", name);
15784c7070dbSScott Long 
15794c7070dbSScott Long 	irq->ii_tag = tag;
15804c7070dbSScott Long 	return (0);
15814c7070dbSScott Long }
15824c7070dbSScott Long 
15834c7070dbSScott Long 
15844c7070dbSScott Long /*********************************************************************
15854c7070dbSScott Long  *
15864c7070dbSScott Long  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
15874c7070dbSScott Long  *  the information needed to transmit a packet on the wire. This is
15884c7070dbSScott Long  *  called only once at attach; setup is done on every reset.
15894c7070dbSScott Long  *
15904c7070dbSScott Long  **********************************************************************/
15914c7070dbSScott Long 
15924c7070dbSScott Long static int
15934c7070dbSScott Long iflib_txsd_alloc(iflib_txq_t txq)
15944c7070dbSScott Long {
15954c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
15964c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
15974c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
15984c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
15997f87c040SMarius Strobl 	bus_size_t tsomaxsize;
16004c7070dbSScott Long 	int err, nsegments, ntsosegments;
16014c7070dbSScott Long 
16024c7070dbSScott Long 	nsegments = scctx->isc_tx_nsegments;
16034c7070dbSScott Long 	ntsosegments = scctx->isc_tx_tso_segments_max;
16047f87c040SMarius Strobl 	tsomaxsize = scctx->isc_tx_tso_size_max;
16057f87c040SMarius Strobl 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
16067f87c040SMarius Strobl 		tsomaxsize += sizeof(struct ether_vlan_header);
160723ac9029SStephen Hurd 	MPASS(scctx->isc_ntxd[0] > 0);
160823ac9029SStephen Hurd 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
16094c7070dbSScott Long 	MPASS(nsegments > 0);
16107f87c040SMarius Strobl 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
16114c7070dbSScott Long 		MPASS(ntsosegments > 0);
16127f87c040SMarius Strobl 		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
16137f87c040SMarius Strobl 	}
16147f87c040SMarius Strobl 
16154c7070dbSScott Long 	/*
16164c7070dbSScott Long 	 * Setup DMA descriptor areas.
16174c7070dbSScott Long 	 */
16184c7070dbSScott Long 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
16194c7070dbSScott Long 			       1, 0,			/* alignment, bounds */
16204c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* lowaddr */
16214c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* highaddr */
16224c7070dbSScott Long 			       NULL, NULL,		/* filter, filterarg */
16234c7070dbSScott Long 			       sctx->isc_tx_maxsize,		/* maxsize */
16244c7070dbSScott Long 			       nsegments,	/* nsegments */
16254c7070dbSScott Long 			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
16264c7070dbSScott Long 			       0,			/* flags */
16274c7070dbSScott Long 			       NULL,			/* lockfunc */
16284c7070dbSScott Long 			       NULL,			/* lockfuncarg */
16294c7070dbSScott Long 			       &txq->ift_desc_tag))) {
16304c7070dbSScott Long 		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
16319d0a88deSDimitry Andric 		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
16329d0a88deSDimitry Andric 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
16334c7070dbSScott Long 		goto fail;
16344c7070dbSScott Long 	}
16357f87c040SMarius Strobl 	if ((if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) &&
16367f87c040SMarius Strobl 	    (err = bus_dma_tag_create(bus_get_dma_tag(dev),
16374c7070dbSScott Long 			       1, 0,			/* alignment, bounds */
16384c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* lowaddr */
16394c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* highaddr */
16404c7070dbSScott Long 			       NULL, NULL,		/* filter, filterarg */
16417f87c040SMarius Strobl 			       tsomaxsize,		/* maxsize */
16424c7070dbSScott Long 			       ntsosegments,	/* nsegments */
16437f87c040SMarius Strobl 			       sctx->isc_tso_maxsegsize,/* maxsegsize */
16444c7070dbSScott Long 			       0,			/* flags */
16454c7070dbSScott Long 			       NULL,			/* lockfunc */
16464c7070dbSScott Long 			       NULL,			/* lockfuncarg */
16474c7070dbSScott Long 			       &txq->ift_tso_desc_tag))) {
16484c7070dbSScott Long 		device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);
16494c7070dbSScott Long 
16504c7070dbSScott Long 		goto fail;
16514c7070dbSScott Long 	}
16524c7070dbSScott Long 	if (!(txq->ift_sds.ifsd_flags =
1653ac2fffa4SPedro F. Giffuni 	    (uint8_t *) malloc(sizeof(uint8_t) *
1654ac2fffa4SPedro F. Giffuni 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
16554c7070dbSScott Long 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
16564c7070dbSScott Long 		err = ENOMEM;
16574c7070dbSScott Long 		goto fail;
16584c7070dbSScott Long 	}
16594c7070dbSScott Long 	if (!(txq->ift_sds.ifsd_m =
1660ac2fffa4SPedro F. Giffuni 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1661ac2fffa4SPedro F. Giffuni 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
16624c7070dbSScott Long 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
16634c7070dbSScott Long 		err = ENOMEM;
16644c7070dbSScott Long 		goto fail;
16654c7070dbSScott Long 	}
16664c7070dbSScott Long 
16674c7070dbSScott Long 	/* Create the descriptor buffer dma maps */
166895246abbSSean Bruno #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
16694c7070dbSScott Long 	if ((ctx->ifc_flags & IFC_DMAR) == 0)
16704c7070dbSScott Long 		return (0);
16714c7070dbSScott Long 
16724c7070dbSScott Long 	if (!(txq->ift_sds.ifsd_map =
1673ac2fffa4SPedro F. Giffuni 	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
16744c7070dbSScott Long 		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
16754c7070dbSScott Long 		err = ENOMEM;
16764c7070dbSScott Long 		goto fail;
16774c7070dbSScott Long 	}
16784c7070dbSScott Long 
167923ac9029SStephen Hurd 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
16804c7070dbSScott Long 		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
16814c7070dbSScott Long 		if (err != 0) {
16824c7070dbSScott Long 			device_printf(dev, "Unable to create TX DMA map\n");
16834c7070dbSScott Long 			goto fail;
16844c7070dbSScott Long 		}
16854c7070dbSScott Long 	}
16864c7070dbSScott Long #endif
16874c7070dbSScott Long 	return (0);
16884c7070dbSScott Long fail:
16894c7070dbSScott Long 	/* We free all, it handles case where we are in the middle */
16904c7070dbSScott Long 	iflib_tx_structures_free(ctx);
16914c7070dbSScott Long 	return (err);
16924c7070dbSScott Long }
16934c7070dbSScott Long 
16944c7070dbSScott Long static void
16954c7070dbSScott Long iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
16964c7070dbSScott Long {
16974c7070dbSScott Long 	bus_dmamap_t map;
16984c7070dbSScott Long 
16994c7070dbSScott Long 	map = NULL;
17004c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL)
17014c7070dbSScott Long 		map = txq->ift_sds.ifsd_map[i];
17024c7070dbSScott Long 	if (map != NULL) {
17034c7070dbSScott Long 		bus_dmamap_unload(txq->ift_desc_tag, map);
17044c7070dbSScott Long 		bus_dmamap_destroy(txq->ift_desc_tag, map);
17054c7070dbSScott Long 		txq->ift_sds.ifsd_map[i] = NULL;
17064c7070dbSScott Long 	}
17074c7070dbSScott Long }
17084c7070dbSScott Long 
17094c7070dbSScott Long static void
17104c7070dbSScott Long iflib_txq_destroy(iflib_txq_t txq)
17114c7070dbSScott Long {
17124c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
17134c7070dbSScott Long 
171423ac9029SStephen Hurd 	for (int i = 0; i < txq->ift_size; i++)
17154c7070dbSScott Long 		iflib_txsd_destroy(ctx, txq, i);
17164c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL) {
17174c7070dbSScott Long 		free(txq->ift_sds.ifsd_map, M_IFLIB);
17184c7070dbSScott Long 		txq->ift_sds.ifsd_map = NULL;
17194c7070dbSScott Long 	}
17204c7070dbSScott Long 	if (txq->ift_sds.ifsd_m != NULL) {
17214c7070dbSScott Long 		free(txq->ift_sds.ifsd_m, M_IFLIB);
17224c7070dbSScott Long 		txq->ift_sds.ifsd_m = NULL;
17234c7070dbSScott Long 	}
17244c7070dbSScott Long 	if (txq->ift_sds.ifsd_flags != NULL) {
17254c7070dbSScott Long 		free(txq->ift_sds.ifsd_flags, M_IFLIB);
17264c7070dbSScott Long 		txq->ift_sds.ifsd_flags = NULL;
17274c7070dbSScott Long 	}
17284c7070dbSScott Long 	if (txq->ift_desc_tag != NULL) {
17294c7070dbSScott Long 		bus_dma_tag_destroy(txq->ift_desc_tag);
17304c7070dbSScott Long 		txq->ift_desc_tag = NULL;
17314c7070dbSScott Long 	}
17324c7070dbSScott Long 	if (txq->ift_tso_desc_tag != NULL) {
17334c7070dbSScott Long 		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
17344c7070dbSScott Long 		txq->ift_tso_desc_tag = NULL;
17354c7070dbSScott Long 	}
17364c7070dbSScott Long }
17374c7070dbSScott Long 
17384c7070dbSScott Long static void
17394c7070dbSScott Long iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
17404c7070dbSScott Long {
17414c7070dbSScott Long 	struct mbuf **mp;
17424c7070dbSScott Long 
17434c7070dbSScott Long 	mp = &txq->ift_sds.ifsd_m[i];
17444c7070dbSScott Long 	if (*mp == NULL)
17454c7070dbSScott Long 		return;
17464c7070dbSScott Long 
17474c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL) {
17484c7070dbSScott Long 		bus_dmamap_sync(txq->ift_desc_tag,
17494c7070dbSScott Long 				txq->ift_sds.ifsd_map[i],
17504c7070dbSScott Long 				BUS_DMASYNC_POSTWRITE);
17514c7070dbSScott Long 		bus_dmamap_unload(txq->ift_desc_tag,
17524c7070dbSScott Long 				  txq->ift_sds.ifsd_map[i]);
17534c7070dbSScott Long 	}
175423ac9029SStephen Hurd 	m_free(*mp);
17554c7070dbSScott Long 	DBG_COUNTER_INC(tx_frees);
17564c7070dbSScott Long 	*mp = NULL;
17574c7070dbSScott Long }
17584c7070dbSScott Long 
17594c7070dbSScott Long static int
17604c7070dbSScott Long iflib_txq_setup(iflib_txq_t txq)
17614c7070dbSScott Long {
17624c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
176323ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
17644c7070dbSScott Long 	iflib_dma_info_t di;
17654c7070dbSScott Long 	int i;
17664c7070dbSScott Long 
17674c7070dbSScott Long 	/* Set number of descriptors available */
17684c7070dbSScott Long 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
176995246abbSSean Bruno 	/* XXX make configurable */
177095246abbSSean Bruno 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
17714c7070dbSScott Long 
17724c7070dbSScott Long 	/* Reset indices */
177395246abbSSean Bruno 	txq->ift_cidx_processed = 0;
177495246abbSSean Bruno 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
177523ac9029SStephen Hurd 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
17764c7070dbSScott Long 
17774c7070dbSScott Long 	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
17784c7070dbSScott Long 		bzero((void *)di->idi_vaddr, di->idi_size);
17794c7070dbSScott Long 
17804c7070dbSScott Long 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
17814c7070dbSScott Long 	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
17824c7070dbSScott Long 		bus_dmamap_sync(di->idi_tag, di->idi_map,
17834c7070dbSScott Long 						BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
17844c7070dbSScott Long 	return (0);
17854c7070dbSScott Long }
17864c7070dbSScott Long 
17874c7070dbSScott Long /*********************************************************************
17884c7070dbSScott Long  *
17894c7070dbSScott Long  *  Allocate memory for rx_buffer structures. Since we use one
17904c7070dbSScott Long  *  rx_buffer per received packet, the maximum number of rx_buffer's
17914c7070dbSScott Long  *  rx_buffer per received packet, the maximum number of rx_buffers
17924c7070dbSScott Long  *  that we've allocated.
17934c7070dbSScott Long  *
17944c7070dbSScott Long  **********************************************************************/
17954c7070dbSScott Long static int
17964c7070dbSScott Long iflib_rxsd_alloc(iflib_rxq_t rxq)
17974c7070dbSScott Long {
17984c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
17994c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
180023ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
18014c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
18024c7070dbSScott Long 	iflib_fl_t fl;
18034c7070dbSScott Long 	int			err;
18044c7070dbSScott Long 
180523ac9029SStephen Hurd 	MPASS(scctx->isc_nrxd[0] > 0);
180623ac9029SStephen Hurd 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
18074c7070dbSScott Long 
18084c7070dbSScott Long 	fl = rxq->ifr_fl;
18094c7070dbSScott Long 	for (int i = 0; i <  rxq->ifr_nfl; i++, fl++) {
181023ac9029SStephen Hurd 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
18114c7070dbSScott Long 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
18124c7070dbSScott Long 					 1, 0,			/* alignment, bounds */
18134c7070dbSScott Long 					 BUS_SPACE_MAXADDR,	/* lowaddr */
18144c7070dbSScott Long 					 BUS_SPACE_MAXADDR,	/* highaddr */
18154c7070dbSScott Long 					 NULL, NULL,		/* filter, filterarg */
18164c7070dbSScott Long 					 sctx->isc_rx_maxsize,	/* maxsize */
18174c7070dbSScott Long 					 sctx->isc_rx_nsegments,	/* nsegments */
18184c7070dbSScott Long 					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
18194c7070dbSScott Long 					 0,			/* flags */
18204c7070dbSScott Long 					 NULL,			/* lockfunc */
18214c7070dbSScott Long 					 NULL,			/* lockarg */
18224c7070dbSScott Long 					 &fl->ifl_desc_tag);
18234c7070dbSScott Long 		if (err) {
18244c7070dbSScott Long 			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
18254c7070dbSScott Long 				__func__, err);
18264c7070dbSScott Long 			goto fail;
18274c7070dbSScott Long 		}
1828e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_flags =
1829ac2fffa4SPedro F. Giffuni 		      (uint8_t *) malloc(sizeof(uint8_t) *
1830ac2fffa4SPedro F. Giffuni 					 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1831e035717eSSean Bruno 			device_printf(dev, "Unable to allocate rx_buffer memory\n");
1832e035717eSSean Bruno 			err = ENOMEM;
1833e035717eSSean Bruno 			goto fail;
1834e035717eSSean Bruno 		}
1835e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_m =
1836ac2fffa4SPedro F. Giffuni 		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
1837ac2fffa4SPedro F. Giffuni 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1838e035717eSSean Bruno 			device_printf(dev, "Unable to allocate rx_buffer memory\n");
1839e035717eSSean Bruno 			err = ENOMEM;
1840e035717eSSean Bruno 			goto fail;
1841e035717eSSean Bruno 		}
1842e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_cl =
1843ac2fffa4SPedro F. Giffuni 		      (caddr_t *) malloc(sizeof(caddr_t) *
1844ac2fffa4SPedro F. Giffuni 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1845e035717eSSean Bruno 			device_printf(dev, "Unable to allocate rx_buffer memory\n");
1846e035717eSSean Bruno 			err = ENOMEM;
1847e035717eSSean Bruno 			goto fail;
1848e035717eSSean Bruno 		}
18494c7070dbSScott Long 
1850e035717eSSean Bruno 		/* Create the descriptor buffer dma maps */
185195246abbSSean Bruno #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
1852e035717eSSean Bruno 		if ((ctx->ifc_flags & IFC_DMAR) == 0)
1853e035717eSSean Bruno 			continue;
1854e035717eSSean Bruno 
1855e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_map =
1856ac2fffa4SPedro F. Giffuni 		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1857e035717eSSean Bruno 			device_printf(dev, "Unable to allocate rx_buffer map memory\n");
1858e035717eSSean Bruno 			err = ENOMEM;
1859e035717eSSean Bruno 			goto fail;
1860e035717eSSean Bruno 		}
1861e035717eSSean Bruno 
1862e035717eSSean Bruno 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
1863e035717eSSean Bruno 			err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
1864e035717eSSean Bruno 			if (err != 0) {
186595246abbSSean Bruno 				device_printf(dev, "Unable to create RX buffer DMA map\n");
18664c7070dbSScott Long 				goto fail;
18674c7070dbSScott Long 			}
18684c7070dbSScott Long 		}
1869e035717eSSean Bruno #endif
1870835809f9SSean Bruno 	}
18714c7070dbSScott Long 	return (0);
18724c7070dbSScott Long 
18734c7070dbSScott Long fail:
18744c7070dbSScott Long 	iflib_rx_structures_free(ctx);
18754c7070dbSScott Long 	return (err);
18764c7070dbSScott Long }
18774c7070dbSScott Long 
18784c7070dbSScott Long 
18794c7070dbSScott Long /*
18804c7070dbSScott Long  * Internal service routines
18814c7070dbSScott Long  */
18824c7070dbSScott Long 
18834c7070dbSScott Long struct rxq_refill_cb_arg {
18844c7070dbSScott Long 	int               error;
18854c7070dbSScott Long 	bus_dma_segment_t seg;
18864c7070dbSScott Long 	int               nseg;
18874c7070dbSScott Long };
18884c7070dbSScott Long 
18894c7070dbSScott Long static void
18904c7070dbSScott Long _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
18914c7070dbSScott Long {
18924c7070dbSScott Long 	struct rxq_refill_cb_arg *cb_arg = arg;
18934c7070dbSScott Long 
18944c7070dbSScott Long 	cb_arg->error = error;
18954c7070dbSScott Long 	cb_arg->seg = segs[0];
18964c7070dbSScott Long 	cb_arg->nseg = nseg;
18974c7070dbSScott Long }
18984c7070dbSScott Long 
18994c7070dbSScott Long 
19004c7070dbSScott Long #ifdef ACPI_DMAR
19014c7070dbSScott Long #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR)
19024c7070dbSScott Long #else
19034c7070dbSScott Long #define IS_DMAR(ctx) (0)
19044c7070dbSScott Long #endif
19054c7070dbSScott Long 
19064c7070dbSScott Long /**
19074c7070dbSScott Long  *	_iflib_fl_refill - refill an rxq free-buffer list
19084c7070dbSScott Long  *	@ctx: the iflib context
19094c7070dbSScott Long  *	@fl: the free list to refill
19104c7070dbSScott Long  *	@count: the number of new buffers to allocate
19114c7070dbSScott Long  *
19124c7070dbSScott Long  *	(Re)populate an rxq free-buffer list with up to @count new packet buffers.
19134c7070dbSScott Long  *	The caller must ensure that @count does not exceed the queue's capacity.
19144c7070dbSScott Long  */
19154c7070dbSScott Long static void
19164c7070dbSScott Long _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
19174c7070dbSScott Long {
19184c7070dbSScott Long 	struct mbuf *m;
191987890dbaSSean Bruno 	int idx, frag_idx = fl->ifl_fragidx;
192087890dbaSSean Bruno 	int pidx = fl->ifl_pidx;
1921e035717eSSean Bruno 	caddr_t cl, *sd_cl;
1922e035717eSSean Bruno 	struct mbuf **sd_m;
1923e035717eSSean Bruno 	uint8_t *sd_flags;
192495246abbSSean Bruno 	struct if_rxd_update iru;
1925e035717eSSean Bruno 	bus_dmamap_t *sd_map;
19264c7070dbSScott Long 	int n, i = 0;
19274c7070dbSScott Long 	uint64_t bus_addr;
19284c7070dbSScott Long 	int err;
1929a1b799caSStephen Hurd 	qidx_t credits;
19304c7070dbSScott Long 
1931e035717eSSean Bruno 	sd_m = fl->ifl_sds.ifsd_m;
1932e035717eSSean Bruno 	sd_map = fl->ifl_sds.ifsd_map;
1933e035717eSSean Bruno 	sd_cl = fl->ifl_sds.ifsd_cl;
1934e035717eSSean Bruno 	sd_flags = fl->ifl_sds.ifsd_flags;
1935e035717eSSean Bruno 	idx = pidx;
1936a1b799caSStephen Hurd 	credits = fl->ifl_credits;
1937e035717eSSean Bruno 
19384c7070dbSScott Long 	n  = count;
19394c7070dbSScott Long 	MPASS(n > 0);
1940a1b799caSStephen Hurd 	MPASS(credits + n <= fl->ifl_size);
19414c7070dbSScott Long 
19424c7070dbSScott Long 	if (pidx < fl->ifl_cidx)
19434c7070dbSScott Long 		MPASS(pidx + n <= fl->ifl_cidx);
1944a1b799caSStephen Hurd 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
19454c7070dbSScott Long 		MPASS(fl->ifl_gen == 0);
19464c7070dbSScott Long 	if (pidx > fl->ifl_cidx)
19474c7070dbSScott Long 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
19484c7070dbSScott Long 
19494c7070dbSScott Long 	DBG_COUNTER_INC(fl_refills);
19504c7070dbSScott Long 	if (n > 8)
19514c7070dbSScott Long 		DBG_COUNTER_INC(fl_refills_large);
19522d873474SStephen Hurd 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
19534c7070dbSScott Long 	while (n--) {
19544c7070dbSScott Long 		/*
19554c7070dbSScott Long 		 * We allocate an uninitialized mbuf + cluster, mbuf is
19564c7070dbSScott Long 		 * initialized after rx.
19574c7070dbSScott Long 		 *
19584c7070dbSScott Long 		 * If the cluster is still set then we know a minimum sized packet was received
19594c7070dbSScott Long 		 */
196087890dbaSSean Bruno 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,  &frag_idx);
196187890dbaSSean Bruno 		if ((frag_idx < 0) || (frag_idx >= fl->ifl_size))
196287890dbaSSean Bruno 			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
196387890dbaSSean Bruno 		if ((cl = sd_cl[frag_idx]) == NULL) {
196487890dbaSSean Bruno 			if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
19654c7070dbSScott Long 				break;
19664c7070dbSScott Long #if MEMORY_LOGGING
19674c7070dbSScott Long 			fl->ifl_cl_enqueued++;
19684c7070dbSScott Long #endif
19694c7070dbSScott Long 		}
19704c7070dbSScott Long 		if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
19714c7070dbSScott Long 			break;
19724c7070dbSScott Long 		}
19734c7070dbSScott Long #if MEMORY_LOGGING
19744c7070dbSScott Long 		fl->ifl_m_enqueued++;
19754c7070dbSScott Long #endif
19764c7070dbSScott Long 
19774c7070dbSScott Long 		DBG_COUNTER_INC(rx_allocs);
19784c7070dbSScott Long #if defined(__i386__) || defined(__amd64__)
19794c7070dbSScott Long 		if (!IS_DMAR(ctx)) {
19804c7070dbSScott Long 			bus_addr = pmap_kextract((vm_offset_t)cl);
19814c7070dbSScott Long 		} else
19824c7070dbSScott Long #endif
19834c7070dbSScott Long 		{
19844c7070dbSScott Long 			struct rxq_refill_cb_arg cb_arg;
19854c7070dbSScott Long 
19864c7070dbSScott Long 			cb_arg.error = 0;
198795246abbSSean Bruno 			MPASS(sd_map != NULL);
198887890dbaSSean Bruno 			MPASS(sd_map[frag_idx] != NULL);
198987890dbaSSean Bruno 			err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
19904c7070dbSScott Long 		         cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
199187890dbaSSean Bruno 			bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
199287890dbaSSean Bruno 					BUS_DMASYNC_PREREAD);
19934c7070dbSScott Long 
19944c7070dbSScott Long 			if (err != 0 || cb_arg.error) {
19954c7070dbSScott Long 				/*
19964c7070dbSScott Long 				 * !zone_pack ?
19974c7070dbSScott Long 				 */
19984c7070dbSScott Long 				if (fl->ifl_zone == zone_pack)
19994c7070dbSScott Long 					uma_zfree(fl->ifl_zone, cl);
20004c7070dbSScott Long 				m_free(m);
20014c7070dbSScott Long 				n = 0;
20024c7070dbSScott Long 				goto done;
20034c7070dbSScott Long 			}
20044c7070dbSScott Long 			bus_addr = cb_arg.seg.ds_addr;
20054c7070dbSScott Long 		}
200687890dbaSSean Bruno 		bit_set(fl->ifl_rx_bitmap, frag_idx);
200787890dbaSSean Bruno 		sd_flags[frag_idx] |= RX_SW_DESC_INUSE;
20084c7070dbSScott Long 
200987890dbaSSean Bruno 		MPASS(sd_m[frag_idx] == NULL);
201087890dbaSSean Bruno 		sd_cl[frag_idx] = cl;
201187890dbaSSean Bruno 		sd_m[frag_idx] = m;
201287890dbaSSean Bruno 		fl->ifl_rxd_idxs[i] = frag_idx;
20134c7070dbSScott Long 		fl->ifl_bus_addrs[i] = bus_addr;
20144c7070dbSScott Long 		fl->ifl_vm_addrs[i] = cl;
2015a1b799caSStephen Hurd 		credits++;
20164c7070dbSScott Long 		i++;
2017a1b799caSStephen Hurd 		MPASS(credits <= fl->ifl_size);
2018e035717eSSean Bruno 		if (++idx == fl->ifl_size) {
20194c7070dbSScott Long 			fl->ifl_gen = 1;
2020e035717eSSean Bruno 			idx = 0;
20214c7070dbSScott Long 		}
20224c7070dbSScott Long 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
202395246abbSSean Bruno 			iru.iru_pidx = pidx;
202495246abbSSean Bruno 			iru.iru_count = i;
202595246abbSSean Bruno 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
20264c7070dbSScott Long 			i = 0;
2027e035717eSSean Bruno 			pidx = idx;
2028fa5416a8SSean Bruno 			fl->ifl_pidx = idx;
2029a1b799caSStephen Hurd 			fl->ifl_credits = credits;
203087890dbaSSean Bruno 		}
2031e035717eSSean Bruno 
20324c7070dbSScott Long 	}
20334c7070dbSScott Long done:
2034a1b799caSStephen Hurd 	if (i) {
2035a1b799caSStephen Hurd 		iru.iru_pidx = pidx;
2036a1b799caSStephen Hurd 		iru.iru_count = i;
2037a1b799caSStephen Hurd 		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2038a1b799caSStephen Hurd 		fl->ifl_pidx = idx;
2039a1b799caSStephen Hurd 		fl->ifl_credits = credits;
2040a1b799caSStephen Hurd 	}
20414c7070dbSScott Long 	DBG_COUNTER_INC(rxd_flush);
20424c7070dbSScott Long 	if (fl->ifl_pidx == 0)
20434c7070dbSScott Long 		pidx = fl->ifl_size - 1;
20444c7070dbSScott Long 	else
20454c7070dbSScott Long 		pidx = fl->ifl_pidx - 1;
204695246abbSSean Bruno 
204795246abbSSean Bruno 	if (sd_map)
204895246abbSSean Bruno 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
204995246abbSSean Bruno 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
20504c7070dbSScott Long 	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
205187890dbaSSean Bruno 	fl->ifl_fragidx = frag_idx;
20524c7070dbSScott Long }
20534c7070dbSScott Long 
20544c7070dbSScott Long static __inline void
20554c7070dbSScott Long __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
20564c7070dbSScott Long {
20574c7070dbSScott Long 	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
20584c7070dbSScott Long 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
20594c7070dbSScott Long #ifdef INVARIANTS
20604c7070dbSScott Long 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
20614c7070dbSScott Long #endif
20624c7070dbSScott Long 
20634c7070dbSScott Long 	MPASS(fl->ifl_credits <= fl->ifl_size);
20644c7070dbSScott Long 	MPASS(reclaimable == delta);
20654c7070dbSScott Long 
20664c7070dbSScott Long 	if (reclaimable > 0)
20674c7070dbSScott Long 		_iflib_fl_refill(ctx, fl, min(max, reclaimable));
20684c7070dbSScott Long }
20694c7070dbSScott Long 
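/*
 * Release every mbuf and cluster currently attached to a free list,
 * unloading (and, during detach, destroying) the associated DMA maps and
 * clearing the software descriptor state.
 */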
20704c7070dbSScott Long static void
20714c7070dbSScott Long iflib_fl_bufs_free(iflib_fl_t fl)
20724c7070dbSScott Long {
20734c7070dbSScott Long 	iflib_dma_info_t idi = fl->ifl_ifdi;
20744c7070dbSScott Long 	uint32_t i;
20754c7070dbSScott Long 
20764c7070dbSScott Long 	for (i = 0; i < fl->ifl_size; i++) {
2077e035717eSSean Bruno 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2078e035717eSSean Bruno 		uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i];
2079e035717eSSean Bruno 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
20804c7070dbSScott Long 
2081e035717eSSean Bruno 		if (*sd_flags & RX_SW_DESC_INUSE) {
2082e035717eSSean Bruno 			if (fl->ifl_sds.ifsd_map != NULL) {
2083e035717eSSean Bruno 				bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i];
2084e035717eSSean Bruno 				bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
2085a4e59607SStephen Hurd 				if (fl->ifl_rxq->ifr_ctx->ifc_in_detach)
2086e035717eSSean Bruno 					bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
20874c7070dbSScott Long 			}
2088e035717eSSean Bruno 			if (*sd_m != NULL) {
2089e035717eSSean Bruno 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2090e035717eSSean Bruno 				uma_zfree(zone_mbuf, *sd_m);
2091e035717eSSean Bruno 			}
2092e035717eSSean Bruno 			if (*sd_cl != NULL)
2093e035717eSSean Bruno 				uma_zfree(fl->ifl_zone, *sd_cl);
2094e035717eSSean Bruno 			*sd_flags = 0;
20954c7070dbSScott Long 		} else {
2096e035717eSSean Bruno 			MPASS(*sd_cl == NULL);
2097e035717eSSean Bruno 			MPASS(*sd_m == NULL);
20984c7070dbSScott Long 		}
20994c7070dbSScott Long #if MEMORY_LOGGING
21004c7070dbSScott Long 		fl->ifl_m_dequeued++;
21014c7070dbSScott Long 		fl->ifl_cl_dequeued++;
21024c7070dbSScott Long #endif
2103e035717eSSean Bruno 		*sd_cl = NULL;
2104e035717eSSean Bruno 		*sd_m = NULL;
21054c7070dbSScott Long 	}
210695246abbSSean Bruno #ifdef INVARIANTS
210795246abbSSean Bruno 	for (i = 0; i < fl->ifl_size; i++) {
2108ab2e3f79SStephen Hurd 		MPASS(fl->ifl_sds.ifsd_flags[i] == 0);
210995246abbSSean Bruno 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
211095246abbSSean Bruno 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
211195246abbSSean Bruno 	}
211295246abbSSean Bruno #endif
21134c7070dbSScott Long 	/*
21144c7070dbSScott Long 	 * Reset free list values
21154c7070dbSScott Long 	 */
211687890dbaSSean Bruno 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
21174c7070dbSScott Long 	bzero(idi->idi_vaddr, idi->idi_size);
21184c7070dbSScott Long }
21194c7070dbSScott Long 
21204c7070dbSScott Long /*********************************************************************
21214c7070dbSScott Long  *
21224c7070dbSScott Long  *  Initialize a receive ring and its buffers.
21234c7070dbSScott Long  *
21244c7070dbSScott Long  **********************************************************************/
21254c7070dbSScott Long static int
21264c7070dbSScott Long iflib_fl_setup(iflib_fl_t fl)
21274c7070dbSScott Long {
21284c7070dbSScott Long 	iflib_rxq_t rxq = fl->ifl_rxq;
21294c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
21304c7070dbSScott Long 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
21314c7070dbSScott Long 
21327274b2f6SStephen Hurd 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
21334c7070dbSScott Long 	/*
21344c7070dbSScott Long 	** Free current RX buffer structs and their mbufs
21354c7070dbSScott Long 	*/
21364c7070dbSScott Long 	iflib_fl_bufs_free(fl);
21374c7070dbSScott Long 	/* Now replenish the mbufs */
21384c7070dbSScott Long 	MPASS(fl->ifl_credits == 0);
21394c7070dbSScott Long 	/*
21404c7070dbSScott Long 	 * XXX don't set the max_frame_size to larger
21414c7070dbSScott Long 	 * than the hardware can handle
21424c7070dbSScott Long 	 */
21434c7070dbSScott Long 	if (sctx->isc_max_frame_size <= 2048)
21444c7070dbSScott Long 		fl->ifl_buf_size = MCLBYTES;
214595246abbSSean Bruno #ifndef CONTIGMALLOC_WORKS
214695246abbSSean Bruno 	else
214795246abbSSean Bruno 		fl->ifl_buf_size = MJUMPAGESIZE;
214895246abbSSean Bruno #else
21494c7070dbSScott Long 	else if (sctx->isc_max_frame_size <= 4096)
21504c7070dbSScott Long 		fl->ifl_buf_size = MJUMPAGESIZE;
21514c7070dbSScott Long 	else if (sctx->isc_max_frame_size <= 9216)
21524c7070dbSScott Long 		fl->ifl_buf_size = MJUM9BYTES;
21534c7070dbSScott Long 	else
21544c7070dbSScott Long 		fl->ifl_buf_size = MJUM16BYTES;
215595246abbSSean Bruno #endif
21564c7070dbSScott Long 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
21574c7070dbSScott Long 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
21584c7070dbSScott Long 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
21594c7070dbSScott Long 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
21604c7070dbSScott Long 
21614c7070dbSScott Long 
21624c7070dbSScott Long 	/* avoid pre-allocating zillions of clusters to an idle card
21634c7070dbSScott Long 	 * potentially speeding up attach
21644c7070dbSScott Long 	 */
21654c7070dbSScott Long 	_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
21664c7070dbSScott Long 	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
21674c7070dbSScott Long 	if (min(128, fl->ifl_size) != fl->ifl_credits)
21684c7070dbSScott Long 		return (ENOBUFS);
21694c7070dbSScott Long 	/*
21704c7070dbSScott Long 	 * handle failure
21714c7070dbSScott Long 	 */
21724c7070dbSScott Long 	MPASS(rxq != NULL);
21734c7070dbSScott Long 	MPASS(fl->ifl_ifdi != NULL);
21744c7070dbSScott Long 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
21754c7070dbSScott Long 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
21764c7070dbSScott Long 	return (0);
21774c7070dbSScott Long }
21784c7070dbSScott Long 
21794c7070dbSScott Long /*********************************************************************
21804c7070dbSScott Long  *
21814c7070dbSScott Long  *  Free receive ring data structures
21824c7070dbSScott Long  *
21834c7070dbSScott Long  **********************************************************************/
21844c7070dbSScott Long static void
21854c7070dbSScott Long iflib_rx_sds_free(iflib_rxq_t rxq)
21864c7070dbSScott Long {
21874c7070dbSScott Long 	iflib_fl_t fl;
21884c7070dbSScott Long 	int i;
21894c7070dbSScott Long 
21904c7070dbSScott Long 	if (rxq->ifr_fl != NULL) {
21914c7070dbSScott Long 		for (i = 0; i < rxq->ifr_nfl; i++) {
21924c7070dbSScott Long 			fl = &rxq->ifr_fl[i];
21934c7070dbSScott Long 			if (fl->ifl_desc_tag != NULL) {
21944c7070dbSScott Long 				bus_dma_tag_destroy(fl->ifl_desc_tag);
21954c7070dbSScott Long 				fl->ifl_desc_tag = NULL;
21964c7070dbSScott Long 			}
2197e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2198e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2199e035717eSSean Bruno 			/* XXX destroy maps first */
2200e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2201e035717eSSean Bruno 			fl->ifl_sds.ifsd_m = NULL;
2202e035717eSSean Bruno 			fl->ifl_sds.ifsd_cl = NULL;
2203e035717eSSean Bruno 			fl->ifl_sds.ifsd_map = NULL;
22044c7070dbSScott Long 		}
22054c7070dbSScott Long 		free(rxq->ifr_fl, M_IFLIB);
22064c7070dbSScott Long 		rxq->ifr_fl = NULL;
22074c7070dbSScott Long 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
22084c7070dbSScott Long 	}
22094c7070dbSScott Long }
22104c7070dbSScott Long 
22114c7070dbSScott Long /*
22124c7070dbSScott Long  * Machine-independent (MI) logic
22134c7070dbSScott Long  */
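/*
 * Per-TX-queue watchdog callout, rescheduled (nominally every hz/2 ticks)
 * while the interface is running.  It drives IFDI_TIMER, marks a queue HUNG
 * when its mp_ring stalls, and once a HUNG queue shows no further progress
 * (or the stall is not explained by pause frames) it marks the interface
 * down and asks the admin task for a watchdog reset.  Descriptors still
 * awaiting a doorbell get the TX task enqueued as well.
 */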
22154c7070dbSScott Long static void
22164c7070dbSScott Long iflib_timer(void *arg)
22174c7070dbSScott Long {
2218ab2e3f79SStephen Hurd 	iflib_txq_t txq = arg;
22194c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
2220ab2e3f79SStephen Hurd 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2221dd7fbcf1SStephen Hurd 	uint64_t this_tick = ticks;
2222dd7fbcf1SStephen Hurd 	uint32_t reset_on = hz / 2;
22234c7070dbSScott Long 
22244c7070dbSScott Long 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
22254c7070dbSScott Long 		return;
22264c7070dbSScott Long 	/*
22274c7070dbSScott Long 	** Check on the state of the TX queue(s); this
22284c7070dbSScott Long 	** can be done without the lock because it's RO
22294c7070dbSScott Long 	** and the HUNG state will be static if set.
22304c7070dbSScott Long 	*/
2231dd7fbcf1SStephen Hurd 	if (this_tick - txq->ift_last_timer_tick >= hz / 2) {
2232dd7fbcf1SStephen Hurd 		txq->ift_last_timer_tick = this_tick;
2233ab2e3f79SStephen Hurd 		IFDI_TIMER(ctx, txq->ift_id);
2234ab2e3f79SStephen Hurd 		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2235ab2e3f79SStephen Hurd 		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2236ab2e3f79SStephen Hurd 		     (sctx->isc_pause_frames == 0)))
2237ab2e3f79SStephen Hurd 			goto hung;
2238a9693502SSean Bruno 
2239ab2e3f79SStephen Hurd 		if (ifmp_ring_is_stalled(txq->ift_br))
2240ab2e3f79SStephen Hurd 			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2241ab2e3f79SStephen Hurd 		txq->ift_cleaned_prev = txq->ift_cleaned;
2242dd7fbcf1SStephen Hurd 	}
2243dd7fbcf1SStephen Hurd #ifdef DEV_NETMAP
2244dd7fbcf1SStephen Hurd 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
2245dd7fbcf1SStephen Hurd 		iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
2246dd7fbcf1SStephen Hurd #endif
2247ab2e3f79SStephen Hurd 	/* handle any laggards */
2248ab2e3f79SStephen Hurd 	if (txq->ift_db_pending)
2249ab2e3f79SStephen Hurd 		GROUPTASK_ENQUEUE(&txq->ift_task);
2250a9693502SSean Bruno 
2251ab2e3f79SStephen Hurd 	sctx->isc_pause_frames = 0;
2252d300df01SStephen Hurd 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2253dd7fbcf1SStephen Hurd 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
2254ab2e3f79SStephen Hurd 	return;
2255ab2e3f79SStephen Hurd  hung:
2256ab2e3f79SStephen Hurd 	device_printf(ctx->ifc_dev,  "TX(%d) desc avail = %d, pidx = %d\n",
2257ab2e3f79SStephen Hurd 				  txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
22587b610b60SSean Bruno 	STATE_LOCK(ctx);
22597b610b60SSean Bruno 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
22607b610b60SSean Bruno 	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
2261ab2e3f79SStephen Hurd 	iflib_admin_intr_deferred(ctx);
22627b610b60SSean Bruno 	STATE_UNLOCK(ctx);
22634c7070dbSScott Long }
22644c7070dbSScott Long 
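/*
 * Bring the interface up: mark it busy, disable interrupts, program the
 * hardware-assist bits from the enabled capabilities, quiesce the TX
 * timers, run the driver's IFDI_INIT, initialize netmap rings or set up the
 * free lists, then re-enable interrupts and restart the per-queue watchdog
 * callouts.
 */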
22654c7070dbSScott Long static void
22664c7070dbSScott Long iflib_init_locked(if_ctx_t ctx)
22674c7070dbSScott Long {
22684c7070dbSScott Long 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
22691248952aSSean Bruno 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
22704c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
22714c7070dbSScott Long 	iflib_fl_t fl;
22724c7070dbSScott Long 	iflib_txq_t txq;
22734c7070dbSScott Long 	iflib_rxq_t rxq;
2274ab2e3f79SStephen Hurd 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
22754c7070dbSScott Long 
22774c7070dbSScott Long 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
22784c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
22794c7070dbSScott Long 
22801248952aSSean Bruno 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
22811248952aSSean Bruno 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
22824c7070dbSScott Long 	/* Set hardware offload abilities */
22834c7070dbSScott Long 	if_clearhwassist(ifp);
22844c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
22851248952aSSean Bruno 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
22864c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
22871248952aSSean Bruno 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
22884c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TSO4)
22894c7070dbSScott Long 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
22904c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TSO6)
22914c7070dbSScott Long 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
22924c7070dbSScott Long 
22934c7070dbSScott Long 	for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
22944c7070dbSScott Long 		CALLOUT_LOCK(txq);
22954c7070dbSScott Long 		callout_stop(&txq->ift_timer);
22964c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
22974c7070dbSScott Long 		iflib_netmap_txq_init(ctx, txq);
22984c7070dbSScott Long 	}
229923ac9029SStephen Hurd #ifdef INVARIANTS
230023ac9029SStephen Hurd 	i = if_getdrvflags(ifp);
230123ac9029SStephen Hurd #endif
23024c7070dbSScott Long 	IFDI_INIT(ctx);
230323ac9029SStephen Hurd 	MPASS(if_getdrvflags(ifp) == i);
23044c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
230595246abbSSean Bruno 		/* XXX this should really be done on a per-queue basis */
2306d0d0ad0aSStephen Hurd 		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
2307d0d0ad0aSStephen Hurd 			MPASS(rxq->ifr_id == i);
2308d0d0ad0aSStephen Hurd 			iflib_netmap_rxq_init(ctx, rxq);
230995246abbSSean Bruno 			continue;
2310d0d0ad0aSStephen Hurd 		}
23114c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
23124c7070dbSScott Long 			if (iflib_fl_setup(fl)) {
23134c7070dbSScott Long 				device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n");
23144c7070dbSScott Long 				goto done;
23154c7070dbSScott Long 			}
23164c7070dbSScott Long 		}
23174c7070dbSScott Long 	}
23184c7070dbSScott Long done:
23194c7070dbSScott Long 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
23204c7070dbSScott Long 	IFDI_INTR_ENABLE(ctx);
23214c7070dbSScott Long 	txq = ctx->ifc_txqs;
23224c7070dbSScott Long 	for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
2323ab2e3f79SStephen Hurd 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
2324ab2e3f79SStephen Hurd 			txq->ift_timer.c_cpu);
23254c7070dbSScott Long }
23264c7070dbSScott Long 
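/*
 * ifmedia handlers: forward the request to the driver under the ctx lock;
 * a successful media change re-runs iflib_init_locked() to apply it.
 */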
23274c7070dbSScott Long static int
23284c7070dbSScott Long iflib_media_change(if_t ifp)
23294c7070dbSScott Long {
23304c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
23314c7070dbSScott Long 	int err;
23324c7070dbSScott Long 
23334c7070dbSScott Long 	CTX_LOCK(ctx);
23344c7070dbSScott Long 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
23354c7070dbSScott Long 		iflib_init_locked(ctx);
23364c7070dbSScott Long 	CTX_UNLOCK(ctx);
23374c7070dbSScott Long 	return (err);
23384c7070dbSScott Long }
23394c7070dbSScott Long 
23404c7070dbSScott Long static void
23414c7070dbSScott Long iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
23424c7070dbSScott Long {
23434c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
23444c7070dbSScott Long 
23454c7070dbSScott Long 	CTX_LOCK(ctx);
2346ab2e3f79SStephen Hurd 	IFDI_UPDATE_ADMIN_STATUS(ctx);
23474c7070dbSScott Long 	IFDI_MEDIA_STATUS(ctx, ifmr);
23484c7070dbSScott Long 	CTX_UNLOCK(ctx);
23494c7070dbSScott Long }
23504c7070dbSScott Long 
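/*
 * Quiesce the interface: mark it inactive, disable interrupts, and invoke
 * the driver's IFDI_STOP; then drain every TX queue (stop its timer, purge
 * the mp_ring, free buffers, zero descriptors and statistics) and free the
 * RX buffers so the free lists restart from a clean state.
 */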
235109f6ff4fSMatt Macy void
23524c7070dbSScott Long iflib_stop(if_ctx_t ctx)
23534c7070dbSScott Long {
23544c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
23554c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
23564c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
23574c7070dbSScott Long 	iflib_dma_info_t di;
23584c7070dbSScott Long 	iflib_fl_t fl;
23594c7070dbSScott Long 	int i, j;
23604c7070dbSScott Long 
23614c7070dbSScott Long 	/* Tell the stack that the interface is no longer active */
23624c7070dbSScott Long 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
23634c7070dbSScott Long 
23644c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
2365ab2e3f79SStephen Hurd 	DELAY(1000);
2366da69b8f9SSean Bruno 	IFDI_STOP(ctx);
2367ab2e3f79SStephen Hurd 	DELAY(1000);
23684c7070dbSScott Long 
2369da69b8f9SSean Bruno 	iflib_debug_reset();
23704c7070dbSScott Long 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
23714c7070dbSScott Long 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
23724c7070dbSScott Long 		/* make sure all transmitters have completed before proceeding XXX */
23734c7070dbSScott Long 
2374226fb85dSStephen Hurd 		CALLOUT_LOCK(txq);
2375226fb85dSStephen Hurd 		callout_stop(&txq->ift_timer);
2376226fb85dSStephen Hurd 		CALLOUT_UNLOCK(txq);
2377226fb85dSStephen Hurd 
23784c7070dbSScott Long 		/* clean any enqueued buffers */
2379da69b8f9SSean Bruno 		iflib_ifmp_purge(txq);
23804c7070dbSScott Long 		/* Free any existing tx buffers. */
238123ac9029SStephen Hurd 		for (j = 0; j < txq->ift_size; j++) {
23824c7070dbSScott Long 			iflib_txsd_free(ctx, txq, j);
23834c7070dbSScott Long 		}
2384ab2e3f79SStephen Hurd 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2385ab2e3f79SStephen Hurd 		txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
23864c7070dbSScott Long 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
23874c7070dbSScott Long 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2388ab2e3f79SStephen Hurd 		txq->ift_pullups = 0;
238995246abbSSean Bruno 		ifmp_ring_reset_stats(txq->ift_br);
23904c7070dbSScott Long 		for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++)
23914c7070dbSScott Long 			bzero((void *)di->idi_vaddr, di->idi_size);
23924c7070dbSScott Long 	}
23934c7070dbSScott Long 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
23944c7070dbSScott Long 		/* make sure all receive processing has completed before proceeding XXX */
23954c7070dbSScott Long 
239666def526SMateusz Guzik 		for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++)
23974c7070dbSScott Long 			bzero((void *)di->idi_vaddr, di->idi_size);
23984c7070dbSScott Long 		/* also resets the free lists' pidx/cidx */
23994c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
24004c7070dbSScott Long 			iflib_fl_bufs_free(fl);
24014c7070dbSScott Long 	}
24024c7070dbSScott Long }
24034c7070dbSScott Long 
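/*
 * Return the address of the RX descriptor on the next cache line after
 * 'cidx', wrapping to the start of the ring; used only as a prefetch hint.
 */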
240495246abbSSean Bruno static inline caddr_t
240595246abbSSean Bruno calc_next_rxd(iflib_fl_t fl, int cidx)
240695246abbSSean Bruno {
240795246abbSSean Bruno 	qidx_t size;
240895246abbSSean Bruno 	int nrxd;
240995246abbSSean Bruno 	caddr_t start, end, cur, next;
241095246abbSSean Bruno 
241195246abbSSean Bruno 	nrxd = fl->ifl_size;
241295246abbSSean Bruno 	size = fl->ifl_rxd_size;
241395246abbSSean Bruno 	start = fl->ifl_ifdi->idi_vaddr;
241495246abbSSean Bruno 
241595246abbSSean Bruno 	if (__predict_false(size == 0))
241695246abbSSean Bruno 		return (start);
241795246abbSSean Bruno 	cur = start + size*cidx;
241895246abbSSean Bruno 	end = start + size*nrxd;
241995246abbSSean Bruno 	next = CACHE_PTR_NEXT(cur);
242095246abbSSean Bruno 	return (next < end ? next : start);
242195246abbSSean Bruno }
242295246abbSSean Bruno 
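/*
 * Warm the caches ahead of packet assembly: prefetch the next several mbuf
 * and cluster pointers on the free list as well as the upcoming RX
 * descriptor.
 */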
2423e035717eSSean Bruno static inline void
2424e035717eSSean Bruno prefetch_pkts(iflib_fl_t fl, int cidx)
2425e035717eSSean Bruno {
2426e035717eSSean Bruno 	int nextptr;
2427e035717eSSean Bruno 	int nrxd = fl->ifl_size;
242895246abbSSean Bruno 	caddr_t next_rxd;
242995246abbSSean Bruno 
2431e035717eSSean Bruno 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2432e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2433e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
243495246abbSSean Bruno 	next_rxd = calc_next_rxd(fl, cidx);
243595246abbSSean Bruno 	prefetch(next_rxd);
2436e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2437e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2438e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2439e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2440e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2441e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2442e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2443e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
2444e035717eSSean Bruno }
2445e035717eSSean Bruno 
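/*
 * Translate one receive fragment (free list id + index) into an if_rxsd:
 * point sd at the free list's mbuf and cluster slots, consume a credit,
 * prefetch upcoming entries, sync the descriptor ring and optionally unload
 * the buffer's DMA map, then advance the free list cidx.
 */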
2446e035717eSSean Bruno static void
244795246abbSSean Bruno rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
24484c7070dbSScott Long {
24494c7070dbSScott Long 	int flid, cidx;
2450e035717eSSean Bruno 	bus_dmamap_t map;
24514c7070dbSScott Long 	iflib_fl_t fl;
24524c7070dbSScott Long 	iflib_dma_info_t di;
2453e035717eSSean Bruno 	int next;
24544c7070dbSScott Long 
245595246abbSSean Bruno 	map = NULL;
24564c7070dbSScott Long 	flid = irf->irf_flid;
24574c7070dbSScott Long 	cidx = irf->irf_idx;
24584c7070dbSScott Long 	fl = &rxq->ifr_fl[flid];
245995246abbSSean Bruno 	sd->ifsd_fl = fl;
246095246abbSSean Bruno 	sd->ifsd_cidx = cidx;
246195246abbSSean Bruno 	sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
246295246abbSSean Bruno 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
24634c7070dbSScott Long 	fl->ifl_credits--;
24644c7070dbSScott Long #if MEMORY_LOGGING
24654c7070dbSScott Long 	fl->ifl_m_dequeued++;
24664c7070dbSScott Long #endif
246795246abbSSean Bruno 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2468e035717eSSean Bruno 		prefetch_pkts(fl, cidx);
2469e035717eSSean Bruno 	if (fl->ifl_sds.ifsd_map != NULL) {
2470e035717eSSean Bruno 		next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2471e035717eSSean Bruno 		prefetch(&fl->ifl_sds.ifsd_map[next]);
2472e035717eSSean Bruno 		map = fl->ifl_sds.ifsd_map[cidx];
24734c7070dbSScott Long 		di = fl->ifl_ifdi;
2474e035717eSSean Bruno 		next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
2475e035717eSSean Bruno 		prefetch(&fl->ifl_sds.ifsd_flags[next]);
24764c7070dbSScott Long 		bus_dmamap_sync(di->idi_tag, di->idi_map,
24774c7070dbSScott Long 				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
24784c7070dbSScott Long 
24794c7070dbSScott Long 		/* not a valid assert if bxe really does SGE from non-contiguous elements */
24804c7070dbSScott Long 		MPASS(fl->ifl_cidx == cidx);
24814c7070dbSScott Long 		if (unload)
2482e035717eSSean Bruno 			bus_dmamap_unload(fl->ifl_desc_tag, map);
2483e035717eSSean Bruno 	}
248495246abbSSean Bruno 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
248595246abbSSean Bruno 	if (__predict_false(fl->ifl_cidx == 0))
24864c7070dbSScott Long 		fl->ifl_gen = 0;
248795246abbSSean Bruno 	if (map != NULL)
248895246abbSSean Bruno 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
248995246abbSSean Bruno 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
249087890dbaSSean Bruno         bit_clear(fl->ifl_rx_bitmap, cidx);
24914c7070dbSScott Long }
24924c7070dbSScott Long 
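/*
 * Build the mbuf chain for a multi-fragment packet: the first non-empty
 * fragment becomes the packet header, later fragments are linked through
 * m_next, and each mbuf is re-initialized and attached to its cluster with
 * m_cljset().  Zero-length fragments are freed and skipped.
 */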
24934c7070dbSScott Long static struct mbuf *
249495246abbSSean Bruno assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
24954c7070dbSScott Long {
249695246abbSSean Bruno 	int i, padlen, flags;
249795246abbSSean Bruno 	struct mbuf *m, *mh, *mt;
249895246abbSSean Bruno 	caddr_t cl;
24994c7070dbSScott Long 
25004c7070dbSScott Long 	i = 0;
250123ac9029SStephen Hurd 	mh = NULL;
25024c7070dbSScott Long 	do {
250395246abbSSean Bruno 		rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd);
25044c7070dbSScott Long 
250595246abbSSean Bruno 		MPASS(*sd->ifsd_cl != NULL);
250695246abbSSean Bruno 		MPASS(*sd->ifsd_m != NULL);
250723ac9029SStephen Hurd 
250823ac9029SStephen Hurd 		/* Don't include zero-length frags */
250923ac9029SStephen Hurd 		if (ri->iri_frags[i].irf_len == 0) {
251023ac9029SStephen Hurd 			/* XXX we can save the cluster here, but not the mbuf */
251195246abbSSean Bruno 			m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
251295246abbSSean Bruno 			m_free(*sd->ifsd_m);
251395246abbSSean Bruno 			*sd->ifsd_m = NULL;
251423ac9029SStephen Hurd 			continue;
251523ac9029SStephen Hurd 		}
251695246abbSSean Bruno 		m = *sd->ifsd_m;
251795246abbSSean Bruno 		*sd->ifsd_m = NULL;
251823ac9029SStephen Hurd 		if (mh == NULL) {
25194c7070dbSScott Long 			flags = M_PKTHDR|M_EXT;
25204c7070dbSScott Long 			mh = mt = m;
25214c7070dbSScott Long 			padlen = ri->iri_pad;
25224c7070dbSScott Long 		} else {
25234c7070dbSScott Long 			flags = M_EXT;
25244c7070dbSScott Long 			mt->m_next = m;
25254c7070dbSScott Long 			mt = m;
25264c7070dbSScott Long 			/* assuming padding is only on the first fragment */
25274c7070dbSScott Long 			padlen = 0;
25284c7070dbSScott Long 		}
252995246abbSSean Bruno 		cl = *sd->ifsd_cl;
253095246abbSSean Bruno 		*sd->ifsd_cl = NULL;
25314c7070dbSScott Long 
25324c7070dbSScott Long 		/* Can these two be made one? */
25334c7070dbSScott Long 		m_init(m, M_NOWAIT, MT_DATA, flags);
253495246abbSSean Bruno 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
25354c7070dbSScott Long 		/*
25364c7070dbSScott Long 		 * These must follow m_init and m_cljset
25374c7070dbSScott Long 		 */
25384c7070dbSScott Long 		m->m_data += padlen;
25394c7070dbSScott Long 		ri->iri_len -= padlen;
254023ac9029SStephen Hurd 		m->m_len = ri->iri_frags[i].irf_len;
25414c7070dbSScott Long 	} while (++i < ri->iri_nfrags);
25424c7070dbSScott Long 
25434c7070dbSScott Long 	return (mh);
25444c7070dbSScott Long }
25454c7070dbSScott Long 
25464c7070dbSScott Long /*
25474c7070dbSScott Long  * Process one software descriptor
25484c7070dbSScott Long  */
25494c7070dbSScott Long static struct mbuf *
25504c7070dbSScott Long iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
25514c7070dbSScott Long {
255295246abbSSean Bruno 	struct if_rxsd sd;
25534c7070dbSScott Long 	struct mbuf *m;
25544c7070dbSScott Long 
25554c7070dbSScott Long 	/* should I merge this back in now that the two paths are basically duplicated? */
255623ac9029SStephen Hurd 	if (ri->iri_nfrags == 1 &&
255718628b74SMark Johnston 	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
255895246abbSSean Bruno 		rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
255995246abbSSean Bruno 		m = *sd.ifsd_m;
256095246abbSSean Bruno 		*sd.ifsd_m = NULL;
25614c7070dbSScott Long 		m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
256295246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
256395246abbSSean Bruno 		if (!IP_ALIGNED(m))
256495246abbSSean Bruno 			m->m_data += 2;
256595246abbSSean Bruno #endif
256695246abbSSean Bruno 		memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
256723ac9029SStephen Hurd 		m->m_len = ri->iri_frags[0].irf_len;
25684c7070dbSScott Long 	} else {
256995246abbSSean Bruno 		m = assemble_segments(rxq, ri, &sd);
25704c7070dbSScott Long 	}
25714c7070dbSScott Long 	m->m_pkthdr.len = ri->iri_len;
25724c7070dbSScott Long 	m->m_pkthdr.rcvif = ri->iri_ifp;
25734c7070dbSScott Long 	m->m_flags |= ri->iri_flags;
25744c7070dbSScott Long 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
25754c7070dbSScott Long 	m->m_pkthdr.flowid = ri->iri_flowid;
25764c7070dbSScott Long 	M_HASHTYPE_SET(m, ri->iri_rsstype);
25774c7070dbSScott Long 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
25784c7070dbSScott Long 	m->m_pkthdr.csum_data = ri->iri_csum_data;
25794c7070dbSScott Long 	return (m);
25804c7070dbSScott Long }
25814c7070dbSScott Long 
258235e4e998SStephen Hurd #if defined(INET6) || defined(INET)
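/*
 * Snapshot the vnet's IPv4/IPv6 forwarding state; traffic that may be
 * forwarded should not be LROed, so this feeds iflib_check_lro_possible().
 */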
2583fe1bcadaSStephen Hurd static void
2584fe1bcadaSStephen Hurd iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
2585fe1bcadaSStephen Hurd {
2586fe1bcadaSStephen Hurd 	CURVNET_SET(lc->ifp->if_vnet);
2587fe1bcadaSStephen Hurd #if defined(INET6)
2588fe1bcadaSStephen Hurd 	*v6 = VNET(ip6_forwarding);
2589fe1bcadaSStephen Hurd #endif
2590fe1bcadaSStephen Hurd #if defined(INET)
2591fe1bcadaSStephen Hurd 	*v4 = VNET(ipforwarding);
2592fe1bcadaSStephen Hurd #endif
2593fe1bcadaSStephen Hurd 	CURVNET_RESTORE();
2594fe1bcadaSStephen Hurd }
2595fe1bcadaSStephen Hurd 
259635e4e998SStephen Hurd /*
259735e4e998SStephen Hurd  * Returns true if it's possible this packet could be LROed.
259835e4e998SStephen Hurd  * If it returns false, it is guaranteed that tcp_lro_rx()
259935e4e998SStephen Hurd  * would not return zero (i.e. the packet cannot be merged).
260035e4e998SStephen Hurd  */
260135e4e998SStephen Hurd static bool
2602fe1bcadaSStephen Hurd iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
260335e4e998SStephen Hurd {
260435e4e998SStephen Hurd 	struct ether_header *eh;
260535e4e998SStephen Hurd 	uint16_t eh_type;
260635e4e998SStephen Hurd 
260735e4e998SStephen Hurd 	eh = mtod(m, struct ether_header *);
260835e4e998SStephen Hurd 	eh_type = ntohs(eh->ether_type);
260935e4e998SStephen Hurd 	switch (eh_type) {
2610abec4724SSean Bruno #if defined(INET6)
261135e4e998SStephen Hurd 		case ETHERTYPE_IPV6:
2612fe1bcadaSStephen Hurd 			return !v6_forwarding;
2613abec4724SSean Bruno #endif
2614abec4724SSean Bruno #if defined (INET)
261535e4e998SStephen Hurd 		case ETHERTYPE_IP:
2616fe1bcadaSStephen Hurd 			return !v4_forwarding;
2617abec4724SSean Bruno #endif
261835e4e998SStephen Hurd 	}
261935e4e998SStephen Hurd 
262035e4e998SStephen Hurd 	return false;
262135e4e998SStephen Hurd }
2622fe1bcadaSStephen Hurd #else
2623fe1bcadaSStephen Hurd static void
2624fe1bcadaSStephen Hurd iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
2625fe1bcadaSStephen Hurd {
2626fe1bcadaSStephen Hurd }
262735e4e998SStephen Hurd #endif
262835e4e998SStephen Hurd 
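/*
 * Main receive loop: pull up to 'budget' packets from the hardware via
 * isc_rxd_pkt_get, convert them to mbufs, keep the free lists replenished,
 * and pass the results to LRO when eligible or straight to if_input().
 * Returns true if descriptors remain to be processed.
 */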
26294c7070dbSScott Long static bool
263095246abbSSean Bruno iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
26314c7070dbSScott Long {
26324c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
26334c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
263423ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
26354c7070dbSScott Long 	int avail, i;
263695246abbSSean Bruno 	qidx_t *cidxp;
26374c7070dbSScott Long 	struct if_rxd_info ri;
26384c7070dbSScott Long 	int err, budget_left, rx_bytes, rx_pkts;
26394c7070dbSScott Long 	iflib_fl_t fl;
26404c7070dbSScott Long 	struct ifnet *ifp;
26414c7070dbSScott Long 	int lro_enabled;
2642f6cb0deaSMatt Macy 	bool v4_forwarding, v6_forwarding, lro_possible;
264395246abbSSean Bruno 
26444c7070dbSScott Long 	/*
26454c7070dbSScott Long 	 * XXX early demux data packets so that if_input processing only handles
26464c7070dbSScott Long 	 * acks in interrupt context
26474c7070dbSScott Long 	 */
264820f63282SStephen Hurd 	struct mbuf *m, *mh, *mt, *mf;
26494c7070dbSScott Long 
2650f6cb0deaSMatt Macy 	lro_possible = v4_forwarding = v6_forwarding = false;
265195246abbSSean Bruno 	ifp = ctx->ifc_ifp;
26524c7070dbSScott Long 	mh = mt = NULL;
26534c7070dbSScott Long 	MPASS(budget > 0);
26544c7070dbSScott Long 	rx_pkts	= rx_bytes = 0;
265523ac9029SStephen Hurd 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
26564c7070dbSScott Long 		cidxp = &rxq->ifr_cq_cidx;
26574c7070dbSScott Long 	else
26584c7070dbSScott Long 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
265923ac9029SStephen Hurd 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
26604c7070dbSScott Long 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
26614c7070dbSScott Long 			__iflib_fl_refill_lt(ctx, fl, budget + 8);
26624c7070dbSScott Long 		DBG_COUNTER_INC(rx_unavail);
26634c7070dbSScott Long 		return (false);
26644c7070dbSScott Long 	}
26654c7070dbSScott Long 
2666*8b8d9093SMarius Strobl 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
26674c7070dbSScott Long 		if (__predict_false(!CTX_ACTIVE(ctx))) {
26684c7070dbSScott Long 			DBG_COUNTER_INC(rx_ctx_inactive);
26694c7070dbSScott Long 			break;
26704c7070dbSScott Long 		}
26714c7070dbSScott Long 		/*
26724c7070dbSScott Long 		 * Reset client-set fields to their default values
26734c7070dbSScott Long 		 */
267495246abbSSean Bruno 		rxd_info_zero(&ri);
26754c7070dbSScott Long 		ri.iri_qsidx = rxq->ifr_id;
26764c7070dbSScott Long 		ri.iri_cidx = *cidxp;
267795246abbSSean Bruno 		ri.iri_ifp = ifp;
26784c7070dbSScott Long 		ri.iri_frags = rxq->ifr_frags;
26794c7070dbSScott Long 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
26804c7070dbSScott Long 
268195246abbSSean Bruno 		if (err)
268295246abbSSean Bruno 			goto err;
268323ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
268423ac9029SStephen Hurd 			*cidxp = ri.iri_cidx;
268523ac9029SStephen Hurd 			/* Update our consumer index */
268695246abbSSean Bruno 			/* XXX NB: shurd - check if this is still safe */
268723ac9029SStephen Hurd 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
268823ac9029SStephen Hurd 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
26894c7070dbSScott Long 				rxq->ifr_cq_gen = 0;
26904c7070dbSScott Long 			}
26914c7070dbSScott Long 			/* was this only a completion queue message? */
26924c7070dbSScott Long 			if (__predict_false(ri.iri_nfrags == 0))
26934c7070dbSScott Long 				continue;
26944c7070dbSScott Long 		}
26954c7070dbSScott Long 		MPASS(ri.iri_nfrags != 0);
26964c7070dbSScott Long 		MPASS(ri.iri_len != 0);
26974c7070dbSScott Long 
26984c7070dbSScott Long 		/* will advance the cidx on the corresponding free lists */
26994c7070dbSScott Long 		m = iflib_rxd_pkt_get(rxq, &ri);
2700*8b8d9093SMarius Strobl 		avail--;
2701*8b8d9093SMarius Strobl 		budget_left--;
27024c7070dbSScott Long 		if (avail == 0 && budget_left)
270323ac9029SStephen Hurd 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
27044c7070dbSScott Long 
27054c7070dbSScott Long 		if (__predict_false(m == NULL)) {
27064c7070dbSScott Long 			DBG_COUNTER_INC(rx_mbuf_null);
27074c7070dbSScott Long 			continue;
27084c7070dbSScott Long 		}
27094c7070dbSScott Long 		/* imm_pkt: -- cxgb */
27104c7070dbSScott Long 		if (mh == NULL)
27114c7070dbSScott Long 			mh = mt = m;
27124c7070dbSScott Long 		else {
27134c7070dbSScott Long 			mt->m_nextpkt = m;
27144c7070dbSScott Long 			mt = m;
27154c7070dbSScott Long 		}
27164c7070dbSScott Long 	}
27174c7070dbSScott Long 	/* make sure that we can refill faster than drain */
27184c7070dbSScott Long 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2719ab2e3f79SStephen Hurd 		__iflib_fl_refill_lt(ctx, fl, budget + 8);
27204c7070dbSScott Long 
27214c7070dbSScott Long 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2722fe1bcadaSStephen Hurd 	if (lro_enabled)
2723fe1bcadaSStephen Hurd 		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
272420f63282SStephen Hurd 	mt = mf = NULL;
27254c7070dbSScott Long 	while (mh != NULL) {
27264c7070dbSScott Long 		m = mh;
27274c7070dbSScott Long 		mh = mh->m_nextpkt;
27284c7070dbSScott Long 		m->m_nextpkt = NULL;
272995246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
273095246abbSSean Bruno 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
273195246abbSSean Bruno 			continue;
273295246abbSSean Bruno #endif
27334c7070dbSScott Long 		rx_bytes += m->m_pkthdr.len;
27344c7070dbSScott Long 		rx_pkts++;
2735aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
273635e4e998SStephen Hurd 		if (lro_enabled) {
273735e4e998SStephen Hurd 			if (!lro_possible) {
2738fe1bcadaSStephen Hurd 				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
273935e4e998SStephen Hurd 				if (lro_possible && mf != NULL) {
274035e4e998SStephen Hurd 					ifp->if_input(ifp, mf);
274135e4e998SStephen Hurd 					DBG_COUNTER_INC(rx_if_input);
274235e4e998SStephen Hurd 					mt = mf = NULL;
274335e4e998SStephen Hurd 				}
274435e4e998SStephen Hurd 			}
274525ac1dd5SStephen Hurd 			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
274625ac1dd5SStephen Hurd 			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
274735e4e998SStephen Hurd 				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
27484c7070dbSScott Long 					continue;
274920f63282SStephen Hurd 			}
275025ac1dd5SStephen Hurd 		}
2751aaeb188aSBjoern A. Zeeb #endif
275235e4e998SStephen Hurd 		if (lro_possible) {
275335e4e998SStephen Hurd 			ifp->if_input(ifp, m);
275435e4e998SStephen Hurd 			DBG_COUNTER_INC(rx_if_input);
275535e4e998SStephen Hurd 			continue;
275635e4e998SStephen Hurd 		}
275735e4e998SStephen Hurd 
275835e4e998SStephen Hurd 		if (mf == NULL)
275935e4e998SStephen Hurd 			mf = m;
276020f63282SStephen Hurd 		if (mt != NULL)
276120f63282SStephen Hurd 			mt->m_nextpkt = m;
276220f63282SStephen Hurd 		mt = m;
276320f63282SStephen Hurd 	}
276420f63282SStephen Hurd 	if (mf != NULL) {
276520f63282SStephen Hurd 		ifp->if_input(ifp, mf);
27664c7070dbSScott Long 		DBG_COUNTER_INC(rx_if_input);
27674c7070dbSScott Long 	}
276823ac9029SStephen Hurd 
27694c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
27704c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
27714c7070dbSScott Long 
27724c7070dbSScott Long 	/*
27734c7070dbSScott Long 	 * Flush any outstanding LRO work
27744c7070dbSScott Long 	 */
2775aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
277623ac9029SStephen Hurd 	tcp_lro_flush_all(&rxq->ifr_lc);
2777aaeb188aSBjoern A. Zeeb #endif
2778ab2e3f79SStephen Hurd 	if (avail)
2779ab2e3f79SStephen Hurd 		return true;
2780ab2e3f79SStephen Hurd 	return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
278195246abbSSean Bruno err:
27827b610b60SSean Bruno 	STATE_LOCK(ctx);
2783ab2e3f79SStephen Hurd 	ctx->ifc_flags |= IFC_DO_RESET;
2784ab2e3f79SStephen Hurd 	iflib_admin_intr_deferred(ctx);
27857b610b60SSean Bruno 	STATE_UNLOCK(ctx);
278695246abbSSean Bruno 	return (false);
278795246abbSSean Bruno }
278895246abbSSean Bruno 
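/*
 * Doorbell/report-status pacing: the busier the queue, the more descriptor
 * updates are batched before notifying the hardware; a lightly used queue
 * notifies almost immediately.
 */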
278995246abbSSean Bruno #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
279095246abbSSean Bruno static inline qidx_t
279195246abbSSean Bruno txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
279295246abbSSean Bruno {
279395246abbSSean Bruno 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
279495246abbSSean Bruno 	qidx_t minthresh = txq->ift_size / 8;
279595246abbSSean Bruno 	if (in_use > 4*minthresh)
279695246abbSSean Bruno 		return (notify_count);
279795246abbSSean Bruno 	if (in_use > 2*minthresh)
279895246abbSSean Bruno 		return (notify_count >> 1);
279995246abbSSean Bruno 	if (in_use > minthresh)
280095246abbSSean Bruno 		return (notify_count >> 3);
280195246abbSSean Bruno 	return (0);
280295246abbSSean Bruno }
280395246abbSSean Bruno 
280495246abbSSean Bruno static inline qidx_t
280595246abbSSean Bruno txq_max_rs_deferred(iflib_txq_t txq)
280695246abbSSean Bruno {
280795246abbSSean Bruno 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
280895246abbSSean Bruno 	qidx_t minthresh = txq->ift_size / 8;
280995246abbSSean Bruno 	if (txq->ift_in_use > 4*minthresh)
281095246abbSSean Bruno 		return (notify_count);
281195246abbSSean Bruno 	if (txq->ift_in_use > 2*minthresh)
281295246abbSSean Bruno 		return (notify_count >> 1);
281395246abbSSean Bruno 	if (txq->ift_in_use > minthresh)
281495246abbSSean Bruno 		return (notify_count >> 2);
28152b2fc973SSean Bruno 	return (2);
28164c7070dbSScott Long }
28174c7070dbSScott Long 
28184c7070dbSScott Long #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
28194c7070dbSScott Long #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
282095246abbSSean Bruno 
282195246abbSSean Bruno #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
282295246abbSSean Bruno #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
282323ac9029SStephen Hurd #define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
28244c7070dbSScott Long 
282595246abbSSean Bruno /* forward compatibility for cxgb */
282695246abbSSean Bruno #define FIRST_QSET(ctx) 0
282795246abbSSean Bruno #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
282895246abbSSean Bruno #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
282995246abbSSean Bruno #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
283095246abbSSean Bruno #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
283195246abbSSean Bruno 
283295246abbSSean Bruno /* XXX we should be setting this to something other than zero */
283395246abbSSean Bruno #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
283495246abbSSean Bruno #define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max)
283595246abbSSean Bruno 
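/*
 * Ring the TX doorbell if 'ring' forces it or if enough descriptor updates
 * have been deferred for the current queue depth; returns whether the
 * doorbell was written.
 */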
283695246abbSSean Bruno static inline bool
283795246abbSSean Bruno iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
28384c7070dbSScott Long {
283995246abbSSean Bruno 	qidx_t dbval, max;
284095246abbSSean Bruno 	bool rang;
28414c7070dbSScott Long 
284295246abbSSean Bruno 	rang = false;
284395246abbSSean Bruno 	max = TXQ_MAX_DB_DEFERRED(txq, in_use);
284495246abbSSean Bruno 	if (ring || txq->ift_db_pending >= max) {
28454c7070dbSScott Long 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
28464c7070dbSScott Long 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
28474c7070dbSScott Long 		txq->ift_db_pending = txq->ift_npending = 0;
284895246abbSSean Bruno 		rang = true;
28494c7070dbSScott Long 	}
285095246abbSSean Bruno 	return (rang);
28514c7070dbSScott Long }
28524c7070dbSScott Long 
28534c7070dbSScott Long #ifdef PKT_DEBUG
28544c7070dbSScott Long static void
28554c7070dbSScott Long print_pkt(if_pkt_info_t pi)
28564c7070dbSScott Long {
28574c7070dbSScott Long 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
28584c7070dbSScott Long 	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
28594c7070dbSScott Long 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
28604c7070dbSScott Long 	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
28614c7070dbSScott Long 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
28624c7070dbSScott Long 	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
28634c7070dbSScott Long }
28644c7070dbSScott Long #endif
28654c7070dbSScott Long 
28664c7070dbSScott Long #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
2867a06424ddSEric Joyner #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
28684c7070dbSScott Long #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
2869a06424ddSEric Joyner #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
28704c7070dbSScott Long 
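/*
 * Parse the outgoing packet's Ethernet/VLAN, IPv4/IPv6 and TCP headers and
 * fill in the if_pkt_info fields the driver needs for checksum and TSO
 * offload.  May replace *mp: the mbuf is duplicated when the driver needs a
 * writable copy (IFLIB_NEED_SCRATCH) and pulled up as headers are accessed.
 */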
28714c7070dbSScott Long static int
28724c7070dbSScott Long iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
28734c7070dbSScott Long {
2874ab2e3f79SStephen Hurd 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
28754c7070dbSScott Long 	struct ether_vlan_header *eh;
287623ac9029SStephen Hurd 	struct mbuf *m, *n;
28774c7070dbSScott Long 
2878*8b8d9093SMarius Strobl 	m = *mp;
2879ab2e3f79SStephen Hurd 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
2880ab2e3f79SStephen Hurd 	    M_WRITABLE(m) == 0) {
2881ab2e3f79SStephen Hurd 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
2882ab2e3f79SStephen Hurd 			return (ENOMEM);
2883ab2e3f79SStephen Hurd 		} else {
2884ab2e3f79SStephen Hurd 			m_freem(*mp);
2885*8b8d9093SMarius Strobl 			*mp = m;
2886ab2e3f79SStephen Hurd 		}
2887ab2e3f79SStephen Hurd 	}
28881248952aSSean Bruno 
28894c7070dbSScott Long 	/*
28904c7070dbSScott Long 	 * Determine where frame payload starts.
28914c7070dbSScott Long 	 * Jump over vlan headers if already present,
28924c7070dbSScott Long 	 * helpful for QinQ too.
28934c7070dbSScott Long 	 */
28944c7070dbSScott Long 	if (__predict_false(m->m_len < sizeof(*eh))) {
28954c7070dbSScott Long 		txq->ift_pullups++;
28964c7070dbSScott Long 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
28974c7070dbSScott Long 			return (ENOMEM);
28984c7070dbSScott Long 	}
28994c7070dbSScott Long 	eh = mtod(m, struct ether_vlan_header *);
29004c7070dbSScott Long 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
29014c7070dbSScott Long 		pi->ipi_etype = ntohs(eh->evl_proto);
29024c7070dbSScott Long 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
29034c7070dbSScott Long 	} else {
29044c7070dbSScott Long 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
29054c7070dbSScott Long 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
29064c7070dbSScott Long 	}
29074c7070dbSScott Long 
29084c7070dbSScott Long 	switch (pi->ipi_etype) {
29094c7070dbSScott Long #ifdef INET
29104c7070dbSScott Long 	case ETHERTYPE_IP:
29114c7070dbSScott Long 	{
29124c7070dbSScott Long 		struct ip *ip = NULL;
29134c7070dbSScott Long 		struct tcphdr *th = NULL;
29144c7070dbSScott Long 		int minthlen;
29154c7070dbSScott Long 
29164c7070dbSScott Long 		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
29174c7070dbSScott Long 		if (__predict_false(m->m_len < minthlen)) {
29184c7070dbSScott Long 			/*
29194c7070dbSScott Long 			 * If this code bloat is causing too much of a hit,
29204c7070dbSScott Long 			 * move it to a separate function and mark it noinline.
29214c7070dbSScott Long 			 */
29224c7070dbSScott Long 			if (m->m_len == pi->ipi_ehdrlen) {
29234c7070dbSScott Long 				n = m->m_next;
29244c7070dbSScott Long 				MPASS(n);
29254c7070dbSScott Long 				if (n->m_len >= sizeof(*ip))  {
29264c7070dbSScott Long 					ip = (struct ip *)n->m_data;
29274c7070dbSScott Long 					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
29284c7070dbSScott Long 						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
29294c7070dbSScott Long 				} else {
29304c7070dbSScott Long 					txq->ift_pullups++;
29314c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
29324c7070dbSScott Long 						return (ENOMEM);
29334c7070dbSScott Long 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
29344c7070dbSScott Long 				}
29354c7070dbSScott Long 			} else {
29364c7070dbSScott Long 				txq->ift_pullups++;
29374c7070dbSScott Long 				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
29384c7070dbSScott Long 					return (ENOMEM);
29394c7070dbSScott Long 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
29404c7070dbSScott Long 				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
29414c7070dbSScott Long 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
29424c7070dbSScott Long 			}
29434c7070dbSScott Long 		} else {
29444c7070dbSScott Long 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
29454c7070dbSScott Long 			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
29464c7070dbSScott Long 				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
29474c7070dbSScott Long 		}
29484c7070dbSScott Long 		pi->ipi_ip_hlen = ip->ip_hl << 2;
29494c7070dbSScott Long 		pi->ipi_ipproto = ip->ip_p;
29504c7070dbSScott Long 		pi->ipi_flags |= IPI_TX_IPV4;
29514c7070dbSScott Long 
2952c5cf2172SStephen Hurd 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
29534c7070dbSScott Long 			ip->ip_sum = 0;
29544c7070dbSScott Long 
2955a06424ddSEric Joyner 		/* TCP checksum offload may require TCP header length */
2956a06424ddSEric Joyner 		if (IS_TX_OFFLOAD4(pi)) {
2957a06424ddSEric Joyner 			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
29584c7070dbSScott Long 				if (__predict_false(th == NULL)) {
29594c7070dbSScott Long 					txq->ift_pullups++;
29604c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
29614c7070dbSScott Long 						return (ENOMEM);
29624c7070dbSScott Long 					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
29634c7070dbSScott Long 				}
29644c7070dbSScott Long 				pi->ipi_tcp_hflags = th->th_flags;
29654c7070dbSScott Long 				pi->ipi_tcp_hlen = th->th_off << 2;
29664c7070dbSScott Long 				pi->ipi_tcp_seq = th->th_seq;
29674c7070dbSScott Long 			}
2968a06424ddSEric Joyner 			if (IS_TSO4(pi)) {
29694c7070dbSScott Long 				if (__predict_false(ip->ip_p != IPPROTO_TCP))
29704c7070dbSScott Long 					return (ENXIO);
29714c7070dbSScott Long 				th->th_sum = in_pseudo(ip->ip_src.s_addr,
29724c7070dbSScott Long 						       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
29734c7070dbSScott Long 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
29741248952aSSean Bruno 				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
29751248952aSSean Bruno 					ip->ip_sum = 0;
29761248952aSSean Bruno 					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
29771248952aSSean Bruno 				}
29784c7070dbSScott Long 			}
2979a06424ddSEric Joyner 		}
29804c7070dbSScott Long 		break;
29814c7070dbSScott Long 	}
29824c7070dbSScott Long #endif
29834c7070dbSScott Long #ifdef INET6
29844c7070dbSScott Long 	case ETHERTYPE_IPV6:
29854c7070dbSScott Long 	{
29864c7070dbSScott Long 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
29874c7070dbSScott Long 		struct tcphdr *th;
29884c7070dbSScott Long 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
29894c7070dbSScott Long 
29904c7070dbSScott Long 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
29914c7070dbSScott Long 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
29924c7070dbSScott Long 				return (ENOMEM);
29934c7070dbSScott Long 		}
29944c7070dbSScott Long 		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
29954c7070dbSScott Long 
29964c7070dbSScott Long 		/* XXX-BZ this will go badly in case of ext hdrs. */
29974c7070dbSScott Long 		pi->ipi_ipproto = ip6->ip6_nxt;
29984c7070dbSScott Long 		pi->ipi_flags |= IPI_TX_IPV6;
29994c7070dbSScott Long 
3000a06424ddSEric Joyner 		/* TCP checksum offload may require TCP header length */
3001a06424ddSEric Joyner 		if (IS_TX_OFFLOAD6(pi)) {
30024c7070dbSScott Long 			if (pi->ipi_ipproto == IPPROTO_TCP) {
30034c7070dbSScott Long 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3004a06424ddSEric Joyner 					txq->ift_pullups++;
30054c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
30064c7070dbSScott Long 						return (ENOMEM);
30074c7070dbSScott Long 				}
30084c7070dbSScott Long 				pi->ipi_tcp_hflags = th->th_flags;
30094c7070dbSScott Long 				pi->ipi_tcp_hlen = th->th_off << 2;
3010a06424ddSEric Joyner 				pi->ipi_tcp_seq = th->th_seq;
30114c7070dbSScott Long 			}
3012a06424ddSEric Joyner 			if (IS_TSO6(pi)) {
30134c7070dbSScott Long 				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
30144c7070dbSScott Long 					return (ENXIO);
30154c7070dbSScott Long 				/*
30164c7070dbSScott Long 				 * The corresponding flag is set by the stack in the IPv4
30174c7070dbSScott Long 				 * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
30184c7070dbSScott Long 				 * So, set it here because the rest of the flow requires it.
30194c7070dbSScott Long 				 */
3020a06424ddSEric Joyner 				pi->ipi_csum_flags |= CSUM_IP6_TCP;
30214c7070dbSScott Long 				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
30224c7070dbSScott Long 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
30234c7070dbSScott Long 			}
3024a06424ddSEric Joyner 		}
30254c7070dbSScott Long 		break;
30264c7070dbSScott Long 	}
30274c7070dbSScott Long #endif
30284c7070dbSScott Long 	default:
30294c7070dbSScott Long 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
30304c7070dbSScott Long 		pi->ipi_ip_hlen = 0;
30314c7070dbSScott Long 		break;
30324c7070dbSScott Long 	}
30334c7070dbSScott Long 	*mp = m;
30341248952aSSean Bruno 
30354c7070dbSScott Long 	return (0);
30364c7070dbSScott Long }
30374c7070dbSScott Long 
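/*
 * Called when the chain's leading mbuf has zero length: strip the empty
 * mbufs so the packet header ends up on an mbuf that actually carries
 * data, defragmenting when the successor lacks external storage.
 */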
30384c7070dbSScott Long static __noinline struct mbuf *
30394c7070dbSScott Long collapse_pkthdr(struct mbuf *m0)
30404c7070dbSScott Long {
30414c7070dbSScott Long 	struct mbuf *m, *m_next, *tmp;
30424c7070dbSScott Long 
30434c7070dbSScott Long 	m = m0;
30444c7070dbSScott Long 	m_next = m->m_next;
30454c7070dbSScott Long 	while (m_next != NULL && m_next->m_len == 0) {
30464c7070dbSScott Long 		m = m_next;
30474c7070dbSScott Long 		m->m_next = NULL;
30484c7070dbSScott Long 		m_free(m);
30494c7070dbSScott Long 		m_next = m_next->m_next;
30504c7070dbSScott Long 	}
30514c7070dbSScott Long 	m = m0;
30524c7070dbSScott Long 	m->m_next = m_next;
3053*8b8d9093SMarius Strobl 	if (m_next == NULL)
3054*8b8d9093SMarius Strobl 		return (m);
30554c7070dbSScott Long 	if ((m_next->m_flags & M_EXT) == 0) {
30564c7070dbSScott Long 		m = m_defrag(m, M_NOWAIT);
30574c7070dbSScott Long 	} else {
30584c7070dbSScott Long 		tmp = m_next->m_next;
30594c7070dbSScott Long 		memcpy(m_next, m, MPKTHSIZE);
30604c7070dbSScott Long 		m = m_next;
30614c7070dbSScott Long 		m->m_next = tmp;
30624c7070dbSScott Long 	}
30634c7070dbSScott Long 	return (m);
30644c7070dbSScott Long }
30654c7070dbSScott Long 
30664c7070dbSScott Long /*
30674c7070dbSScott Long  * If dodgy hardware rejects the scatter-gather chain we've handed it,
306823ac9029SStephen Hurd  * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
306923ac9029SStephen Hurd  * m_defrag'd mbufs.
30704c7070dbSScott Long  */
30714c7070dbSScott Long static __noinline struct mbuf *
307223ac9029SStephen Hurd iflib_remove_mbuf(iflib_txq_t txq)
30734c7070dbSScott Long {
307423ac9029SStephen Hurd 	int ntxd, i, pidx;
30754c7070dbSScott Long 	struct mbuf *m, *mh, **ifsd_m;
30764c7070dbSScott Long 
30774c7070dbSScott Long 	pidx = txq->ift_pidx;
30784c7070dbSScott Long 	ifsd_m = txq->ift_sds.ifsd_m;
307923ac9029SStephen Hurd 	ntxd = txq->ift_size;
30804c7070dbSScott Long 	mh = m = ifsd_m[pidx];
30814c7070dbSScott Long 	ifsd_m[pidx] = NULL;
30824c7070dbSScott Long #if MEMORY_LOGGING
30834c7070dbSScott Long 	txq->ift_dequeued++;
30844c7070dbSScott Long #endif
30854c7070dbSScott Long 	i = 1;
30864c7070dbSScott Long 
308723ac9029SStephen Hurd 	while (m) {
30884c7070dbSScott Long 		ifsd_m[(pidx + i) & (ntxd -1)] = NULL;
30894c7070dbSScott Long #if MEMORY_LOGGING
30904c7070dbSScott Long 		txq->ift_dequeued++;
30914c7070dbSScott Long #endif
30924c7070dbSScott Long 		m = m->m_next;
30934c7070dbSScott Long 		i++;
30944c7070dbSScott Long 	}
30954c7070dbSScott Long 	return (mh);
30964c7070dbSScott Long }
30974c7070dbSScott Long 
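/*
 * Map an outgoing mbuf chain for DMA and record its mbufs in ifsd_m[].
 * With real DMA maps this wraps bus_dmamap_load_mbuf_sg(); otherwise the
 * segment list is built by hand with pmap_kextract(), splitting buffers at
 * page boundaries and bailing out with EFBIG once max_segs is exceeded.
 */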
30984c7070dbSScott Long static int
30994c7070dbSScott Long iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
31004c7070dbSScott Long 			  struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
31014c7070dbSScott Long 			  int max_segs, int flags)
31024c7070dbSScott Long {
31034c7070dbSScott Long 	if_ctx_t ctx;
31044c7070dbSScott Long 	if_shared_ctx_t		sctx;
310523ac9029SStephen Hurd 	if_softc_ctx_t		scctx;
310625d52811SSean Bruno 	int i, next, pidx, err, ntxd, count;
3107aa8fa07cSSean Bruno 	struct mbuf *m, *tmp, **ifsd_m;
31084c7070dbSScott Long 
31094c7070dbSScott Long 	m = *m0;
31104c7070dbSScott Long 
31114c7070dbSScott Long 	/*
31124c7070dbSScott Long 	 * Please don't ever do this
31134c7070dbSScott Long 	 */
31144c7070dbSScott Long 	if (__predict_false(m->m_len == 0))
3115*8b8d9093SMarius Strobl 		*m0 = collapse_pkthdr(m);
31164c7070dbSScott Long 
31174c7070dbSScott Long 	ctx = txq->ift_ctx;
31184c7070dbSScott Long 	sctx = ctx->ifc_sctx;
311923ac9029SStephen Hurd 	scctx = &ctx->ifc_softc_ctx;
31204c7070dbSScott Long 	ifsd_m = txq->ift_sds.ifsd_m;
312123ac9029SStephen Hurd 	ntxd = txq->ift_size;
31224c7070dbSScott Long 	pidx = txq->ift_pidx;
3123ab2e3f79SStephen Hurd 	if (map != NULL) {
31244c7070dbSScott Long 		uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
3125ab2e3f79SStephen Hurd 
31264c7070dbSScott Long 		err = bus_dmamap_load_mbuf_sg(tag, map,
31274c7070dbSScott Long 					      *m0, segs, nsegs, BUS_DMA_NOWAIT);
31284c7070dbSScott Long 		if (err)
31294c7070dbSScott Long 			return (err);
31304c7070dbSScott Long 		ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
3131aa8fa07cSSean Bruno 		count = 0;
31324c7070dbSScott Long 		m = *m0;
31334c7070dbSScott Long 		do {
3134aa8fa07cSSean Bruno 			if (__predict_false(m->m_len <= 0)) {
3135aa8fa07cSSean Bruno 				tmp = m;
31364c7070dbSScott Long 				m = m->m_next;
3137aa8fa07cSSean Bruno 				tmp->m_next = NULL;
3138aa8fa07cSSean Bruno 				m_free(tmp);
3139aa8fa07cSSean Bruno 				continue;
3140aa8fa07cSSean Bruno 			}
3141dcbc025fSSean Bruno 			m = m->m_next;
3142dcbc025fSSean Bruno 			count++;
3143dcbc025fSSean Bruno 		} while (m != NULL);
31445c5ca36cSSean Bruno 		if (count > *nsegs) {
31455c5ca36cSSean Bruno 			ifsd_m[pidx] = *m0;
31465c5ca36cSSean Bruno 			ifsd_m[pidx]->m_flags |= M_TOOBIG;
3147dcbc025fSSean Bruno 			return (0);
31485c5ca36cSSean Bruno 		}
3149dcbc025fSSean Bruno 		m = *m0;
3150dcbc025fSSean Bruno 		count = 0;
3151dcbc025fSSean Bruno 		do {
3152aa8fa07cSSean Bruno 			next = (pidx + count) & (ntxd-1);
3153aa8fa07cSSean Bruno 			MPASS(ifsd_m[next] == NULL);
3154aa8fa07cSSean Bruno 			ifsd_m[next] = m;
3155aa8fa07cSSean Bruno 			count++;
3156aa8fa07cSSean Bruno 			tmp = m;
3157aa8fa07cSSean Bruno 			m = m->m_next;
31584c7070dbSScott Long 		} while (m != NULL);
31594c7070dbSScott Long 	} else {
316025d52811SSean Bruno 		int buflen, sgsize, maxsegsz, max_sgsize;
31614c7070dbSScott Long 		vm_offset_t vaddr;
31624c7070dbSScott Long 		vm_paddr_t curaddr;
31634c7070dbSScott Long 
31644c7070dbSScott Long 		count = i = 0;
31654c7070dbSScott Long 		m = *m0;
316625d52811SSean Bruno 		if (m->m_pkthdr.csum_flags & CSUM_TSO)
316725d52811SSean Bruno 			maxsegsz = scctx->isc_tx_tso_segsize_max;
316825d52811SSean Bruno 		else
316925d52811SSean Bruno 			maxsegsz = sctx->isc_tx_maxsegsize;
317025d52811SSean Bruno 
31714c7070dbSScott Long 		do {
31724c7070dbSScott Long 			if (__predict_false(m->m_len <= 0)) {
31734c7070dbSScott Long 				tmp = m;
31744c7070dbSScott Long 				m = m->m_next;
31754c7070dbSScott Long 				tmp->m_next = NULL;
31764c7070dbSScott Long 				m_free(tmp);
31774c7070dbSScott Long 				continue;
31784c7070dbSScott Long 			}
31794c7070dbSScott Long 			buflen = m->m_len;
31804c7070dbSScott Long 			vaddr = (vm_offset_t)m->m_data;
31814c7070dbSScott Long 			/*
31824c7070dbSScott Long 			 * see if we can't be smarter about physically
31834c7070dbSScott Long 			 * contiguous mappings
31844c7070dbSScott Long 			 */
31854c7070dbSScott Long 			next = (pidx + count) & (ntxd-1);
31864c7070dbSScott Long 			MPASS(ifsd_m[next] == NULL);
31874c7070dbSScott Long #if MEMORY_LOGGING
31884c7070dbSScott Long 			txq->ift_enqueued++;
31894c7070dbSScott Long #endif
31904c7070dbSScott Long 			ifsd_m[next] = m;
31914c7070dbSScott Long 			while (buflen > 0) {
31929d35858fSSean Bruno 				if (i >= max_segs)
31939d35858fSSean Bruno 					goto err;
31944c7070dbSScott Long 				max_sgsize = MIN(buflen, maxsegsz);
31954c7070dbSScott Long 				curaddr = pmap_kextract(vaddr);
31964c7070dbSScott Long 				sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
31974c7070dbSScott Long 				sgsize = MIN(sgsize, max_sgsize);
31984c7070dbSScott Long 				segs[i].ds_addr = curaddr;
31994c7070dbSScott Long 				segs[i].ds_len = sgsize;
32004c7070dbSScott Long 				vaddr += sgsize;
32014c7070dbSScott Long 				buflen -= sgsize;
32024c7070dbSScott Long 				i++;
32034c7070dbSScott Long 			}
32044c7070dbSScott Long 			count++;
32054c7070dbSScott Long 			tmp = m;
32064c7070dbSScott Long 			m = m->m_next;
32074c7070dbSScott Long 		} while (m != NULL);
32084c7070dbSScott Long 		*nsegs = i;
32094c7070dbSScott Long 	}
32104c7070dbSScott Long 	return (0);
32114c7070dbSScott Long err:
321223ac9029SStephen Hurd 	*m0 = iflib_remove_mbuf(txq);
32134c7070dbSScott Long 	return (EFBIG);
32144c7070dbSScott Long }
32154c7070dbSScott Long 
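/*
 * TX-side counterpart of calc_next_rxd(): the descriptor address on the
 * next cache line after 'cidx' in qset 'qid', wrapped to the ring start,
 * used only as a prefetch hint.
 */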
321695246abbSSean Bruno static inline caddr_t
321795246abbSSean Bruno calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
321895246abbSSean Bruno {
321995246abbSSean Bruno 	qidx_t size;
322095246abbSSean Bruno 	int ntxd;
322195246abbSSean Bruno 	caddr_t start, end, cur, next;
322295246abbSSean Bruno 
322395246abbSSean Bruno 	ntxd = txq->ift_size;
322495246abbSSean Bruno 	size = txq->ift_txd_size[qid];
322595246abbSSean Bruno 	start = txq->ift_ifdi[qid].idi_vaddr;
322695246abbSSean Bruno 
322795246abbSSean Bruno 	if (__predict_false(size == 0))
322895246abbSSean Bruno 		return (start);
322995246abbSSean Bruno 	cur = start + size*cidx;
323095246abbSSean Bruno 	end = start + size*ntxd;
323195246abbSSean Bruno 	next = CACHE_PTR_NEXT(cur);
323295246abbSSean Bruno 	return (next < end ? next : start);
323395246abbSSean Bruno }
323495246abbSSean Bruno 
3235d14c853bSStephen Hurd /*
3236d14c853bSStephen Hurd  * Pad an mbuf to ensure a minimum Ethernet frame size.
3237d14c853bSStephen Hurd  * min_frame_size is the frame size (less CRC) to pad the mbuf to.
3238d14c853bSStephen Hurd  */
3239d14c853bSStephen Hurd static __noinline int
3240a15fbbb8SStephen Hurd iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3241d14c853bSStephen Hurd {
3242d14c853bSStephen Hurd 	/*
3243d14c853bSStephen Hurd 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3244d14c853bSStephen Hurd 	 * an ARP message is the smallest common payload I can think of.
3245d14c853bSStephen Hurd 	 */
3246d14c853bSStephen Hurd 	static char pad[18];	/* just zeros */
3247d14c853bSStephen Hurd 	int n;
3248a15fbbb8SStephen Hurd 	struct mbuf *new_head;
3249d14c853bSStephen Hurd 
3250a15fbbb8SStephen Hurd 	if (!M_WRITABLE(*m_head)) {
3251a15fbbb8SStephen Hurd 		new_head = m_dup(*m_head, M_NOWAIT);
3252a15fbbb8SStephen Hurd 		if (new_head == NULL) {
325304993890SStephen Hurd 			m_freem(*m_head);
3254a15fbbb8SStephen Hurd 			device_printf(dev, "cannot pad short frame, m_dup() failed");
325506c47d48SStephen Hurd 			DBG_COUNTER_INC(encap_pad_mbuf_fail);
3256a15fbbb8SStephen Hurd 			return ENOMEM;
3257a15fbbb8SStephen Hurd 		}
3258a15fbbb8SStephen Hurd 		m_freem(*m_head);
3259a15fbbb8SStephen Hurd 		*m_head = new_head;
3260a15fbbb8SStephen Hurd 	}
3261a15fbbb8SStephen Hurd 
3262a15fbbb8SStephen Hurd 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3263d14c853bSStephen Hurd 	     n > 0; n -= sizeof(pad))
3264a15fbbb8SStephen Hurd 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3265d14c853bSStephen Hurd 			break;
3266d14c853bSStephen Hurd 
3267d14c853bSStephen Hurd 	if (n > 0) {
3268a15fbbb8SStephen Hurd 		m_freem(*m_head);
3269d14c853bSStephen Hurd 		device_printf(dev, "cannot pad short frame\n");
3270d14c853bSStephen Hurd 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
3271d14c853bSStephen Hurd 		return (ENOBUFS);
3272d14c853bSStephen Hurd 	}
3273d14c853bSStephen Hurd 
3274d14c853bSStephen Hurd 	return 0;
3275d14c853bSStephen Hurd }
3276d14c853bSStephen Hurd 
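/*
 * Encapsulate one packet for transmission: pad runts when the hardware
 * requires it (IFLIB_NEED_ETHER_PAD), parse the headers for offload, pick
 * the TSO or regular DMA tag, and load the chain into a scatter-gather
 * list before handing it to the driver's txd_encap routine.
 */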
32774c7070dbSScott Long static int
32784c7070dbSScott Long iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
32794c7070dbSScott Long {
32804c7070dbSScott Long 	if_ctx_t		ctx;
32814c7070dbSScott Long 	if_shared_ctx_t		sctx;
32824c7070dbSScott Long 	if_softc_ctx_t		scctx;
32834c7070dbSScott Long 	bus_dma_segment_t	*segs;
32844c7070dbSScott Long 	struct mbuf		*m_head;
328595246abbSSean Bruno 	void			*next_txd;
32864c7070dbSScott Long 	bus_dmamap_t		map;
32874c7070dbSScott Long 	struct if_pkt_info	pi;
32884c7070dbSScott Long 	int remap = 0;
32894c7070dbSScott Long 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
32904c7070dbSScott Long 	bus_dma_tag_t desc_tag;
32914c7070dbSScott Long 
32924c7070dbSScott Long 	ctx = txq->ift_ctx;
32934c7070dbSScott Long 	sctx = ctx->ifc_sctx;
32944c7070dbSScott Long 	scctx = &ctx->ifc_softc_ctx;
32954c7070dbSScott Long 	segs = txq->ift_segs;
329623ac9029SStephen Hurd 	ntxd = txq->ift_size;
32974c7070dbSScott Long 	m_head = *m_headp;
32984c7070dbSScott Long 	map = NULL;
32994c7070dbSScott Long 
33004c7070dbSScott Long 	/*
33014c7070dbSScott Long 	 * If we're doing TSO the next descriptor to clean may be quite far ahead
33024c7070dbSScott Long 	 */
33034c7070dbSScott Long 	cidx = txq->ift_cidx;
33044c7070dbSScott Long 	pidx = txq->ift_pidx;
330595246abbSSean Bruno 	if (ctx->ifc_flags & IFC_PREFETCH) {
33064c7070dbSScott Long 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
330795246abbSSean Bruno 		if (!(sctx->isc_flags & IFLIB_HAS_TXCQ)) {
330895246abbSSean Bruno 			next_txd = calc_next_txd(txq, cidx, 0);
330995246abbSSean Bruno 			prefetch(next_txd);
331095246abbSSean Bruno 		}
33114c7070dbSScott Long 
33124c7070dbSScott Long 		/* prefetch the next cache line of mbuf pointers and flags */
33134c7070dbSScott Long 		prefetch(&txq->ift_sds.ifsd_m[next]);
33144c7070dbSScott Long 		if (txq->ift_sds.ifsd_map != NULL) {
33154c7070dbSScott Long 			prefetch(&txq->ift_sds.ifsd_map[next]);
33164c7070dbSScott Long 			next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
33174c7070dbSScott Long 			prefetch(&txq->ift_sds.ifsd_flags[next]);
33184c7070dbSScott Long 		}
3319ab2e3f79SStephen Hurd 	} else if (txq->ift_sds.ifsd_map != NULL)
332095246abbSSean Bruno 		map = txq->ift_sds.ifsd_map[pidx];
33214c7070dbSScott Long 
33224c7070dbSScott Long 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
33234c7070dbSScott Long 		desc_tag = txq->ift_tso_desc_tag;
33244c7070dbSScott Long 		max_segs = scctx->isc_tx_tso_segments_max;
33257f87c040SMarius Strobl 		MPASS(desc_tag != NULL);
33267f87c040SMarius Strobl 		MPASS(max_segs > 0);
33274c7070dbSScott Long 	} else {
33284c7070dbSScott Long 		desc_tag = txq->ift_desc_tag;
33294c7070dbSScott Long 		max_segs = scctx->isc_tx_nsegments;
33304c7070dbSScott Long 	}
3331d14c853bSStephen Hurd 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3332d14c853bSStephen Hurd 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3333a15fbbb8SStephen Hurd 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
3334d14c853bSStephen Hurd 		if (err)
3335d14c853bSStephen Hurd 			return (err);
3336d14c853bSStephen Hurd 	}
3337a15fbbb8SStephen Hurd 	m_head = *m_headp;
333895246abbSSean Bruno 
333995246abbSSean Bruno 	pkt_info_zero(&pi);
3340ab2e3f79SStephen Hurd 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
3341ab2e3f79SStephen Hurd 	pi.ipi_pidx = pidx;
3342ab2e3f79SStephen Hurd 	pi.ipi_qsidx = txq->ift_id;
33433429c02fSStephen Hurd 	pi.ipi_len = m_head->m_pkthdr.len;
33443429c02fSStephen Hurd 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
33453429c02fSStephen Hurd 	pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
33464c7070dbSScott Long 
33474c7070dbSScott Long 	/* deliberate bitwise OR to make one condition */
33484c7070dbSScott Long 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
33494c7070dbSScott Long 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0))
33504c7070dbSScott Long 			return (err);
33514c7070dbSScott Long 		m_head = *m_headp;
33524c7070dbSScott Long 	}
33534c7070dbSScott Long 
33544c7070dbSScott Long retry:
33554c7070dbSScott Long 	err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT);
33564c7070dbSScott Long defrag:
33574c7070dbSScott Long 	if (__predict_false(err)) {
33584c7070dbSScott Long 		switch (err) {
33594c7070dbSScott Long 		case EFBIG:
33604c7070dbSScott Long 			/* try collapse once and defrag once */
3361f7594707SAndrew Gallatin 			if (remap == 0) {
33624c7070dbSScott Long 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3363f7594707SAndrew Gallatin 				/* try defrag if collapsing fails */
3364f7594707SAndrew Gallatin 				if (m_head == NULL)
3365f7594707SAndrew Gallatin 					remap++;
3366f7594707SAndrew Gallatin 			}
33674c7070dbSScott Long 			if (remap == 1)
33684c7070dbSScott Long 				m_head = m_defrag(*m_headp, M_NOWAIT);
33694c7070dbSScott Long 			remap++;
33704c7070dbSScott Long 			if (__predict_false(m_head == NULL))
33714c7070dbSScott Long 				goto defrag_failed;
33724c7070dbSScott Long 			txq->ift_mbuf_defrag++;
33734c7070dbSScott Long 			*m_headp = m_head;
33744c7070dbSScott Long 			goto retry;
33754c7070dbSScott Long 			break;
33764c7070dbSScott Long 		case ENOMEM:
33774c7070dbSScott Long 			txq->ift_no_tx_dma_setup++;
33784c7070dbSScott Long 			break;
33794c7070dbSScott Long 		default:
33804c7070dbSScott Long 			txq->ift_no_tx_dma_setup++;
33814c7070dbSScott Long 			m_freem(*m_headp);
33824c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
33834c7070dbSScott Long 			*m_headp = NULL;
33844c7070dbSScott Long 			break;
33854c7070dbSScott Long 		}
33864c7070dbSScott Long 		txq->ift_map_failed++;
33874c7070dbSScott Long 		DBG_COUNTER_INC(encap_load_mbuf_fail);
33884c7070dbSScott Long 		return (err);
33894c7070dbSScott Long 	}
33904c7070dbSScott Long 
33914c7070dbSScott Long 	/*
33924c7070dbSScott Long 	 * XXX assumes a 1 to 1 relationship between segments and
33934c7070dbSScott Long 	 *        descriptors - this does not hold true on all drivers, e.g.
33944c7070dbSScott Long 	 *        cxgb
33954c7070dbSScott Long 	 */
33964c7070dbSScott Long 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
33974c7070dbSScott Long 		txq->ift_no_desc_avail++;
33984c7070dbSScott Long 		if (map != NULL)
33994c7070dbSScott Long 			bus_dmamap_unload(desc_tag, map);
34004c7070dbSScott Long 		DBG_COUNTER_INC(encap_txq_avail_fail);
340123ac9029SStephen Hurd 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
34024c7070dbSScott Long 			GROUPTASK_ENQUEUE(&txq->ift_task);
34034c7070dbSScott Long 		return (ENOBUFS);
34044c7070dbSScott Long 	}
340595246abbSSean Bruno 	/*
340695246abbSSean Bruno 	 * On Intel cards we can greatly reduce the number of TX interrupts
340795246abbSSean Bruno 	 * we see by only setting report status on every Nth descriptor.
340895246abbSSean Bruno 	 * However, this also means that the driver will need to keep track
340995246abbSSean Bruno 	 * of the descriptors that RS was set on to check them for the DD bit.
341095246abbSSean Bruno 	 */
341195246abbSSean Bruno 	txq->ift_rs_pending += nsegs + 1;
341295246abbSSean Bruno 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
34131f7ce05dSAndrew Gallatin 	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
341495246abbSSean Bruno 		pi.ipi_flags |= IPI_TX_INTR;
341595246abbSSean Bruno 		txq->ift_rs_pending = 0;
341695246abbSSean Bruno 	}
341795246abbSSean Bruno 
34184c7070dbSScott Long 	pi.ipi_segs = segs;
34194c7070dbSScott Long 	pi.ipi_nsegs = nsegs;
34204c7070dbSScott Long 
342123ac9029SStephen Hurd 	MPASS(pidx >= 0 && pidx < txq->ift_size);
34224c7070dbSScott Long #ifdef PKT_DEBUG
34234c7070dbSScott Long 	print_pkt(&pi);
34244c7070dbSScott Long #endif
342595246abbSSean Bruno 	if (map != NULL)
342695246abbSSean Bruno 		bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
34274c7070dbSScott Long 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
342895246abbSSean Bruno 		if (map != NULL)
34294c7070dbSScott Long 			bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
34304c7070dbSScott Long 					BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
34314c7070dbSScott Long 		DBG_COUNTER_INC(tx_encap);
343295246abbSSean Bruno 		MPASS(pi.ipi_new_pidx < txq->ift_size);
34334c7070dbSScott Long 
34344c7070dbSScott Long 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
34354c7070dbSScott Long 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
343623ac9029SStephen Hurd 			ndesc += txq->ift_size;
34374c7070dbSScott Long 			txq->ift_gen = 1;
34384c7070dbSScott Long 		}
34391248952aSSean Bruno 		/*
34401248952aSSean Bruno 		 * drivers may need as many as
34411248952aSSean Bruno 		 * two sentinel descriptors
34421248952aSSean Bruno 		 */
34431248952aSSean Bruno 		MPASS(ndesc <= pi.ipi_nsegs + 2);
34444c7070dbSScott Long 		MPASS(pi.ipi_new_pidx != pidx);
34454c7070dbSScott Long 		MPASS(ndesc > 0);
34464c7070dbSScott Long 		txq->ift_in_use += ndesc;
344795246abbSSean Bruno 
34484c7070dbSScott Long 		/*
34494c7070dbSScott Long 		 * We update the last software descriptor again here because there may
34504c7070dbSScott Long 		 * be a sentinel and/or there may be more mbufs than segments
34514c7070dbSScott Long 		 */
34524c7070dbSScott Long 		txq->ift_pidx = pi.ipi_new_pidx;
34534c7070dbSScott Long 		txq->ift_npending += pi.ipi_ndescs;
3454f7594707SAndrew Gallatin 	} else {
345523ac9029SStephen Hurd 		*m_headp = m_head = iflib_remove_mbuf(txq);
3456f7594707SAndrew Gallatin 		if (err == EFBIG) {
34574c7070dbSScott Long 			txq->ift_txd_encap_efbig++;
3458f7594707SAndrew Gallatin 			if (remap < 2) {
3459f7594707SAndrew Gallatin 				remap = 1;
34604c7070dbSScott Long 				goto defrag;
3461f7594707SAndrew Gallatin 			}
3462f7594707SAndrew Gallatin 		}
34634c7070dbSScott Long 		DBG_COUNTER_INC(encap_txd_encap_fail);
3464f7594707SAndrew Gallatin 		goto defrag_failed;
3465f7594707SAndrew Gallatin 	}
34664c7070dbSScott Long 	return (err);
34674c7070dbSScott Long 
34684c7070dbSScott Long defrag_failed:
34694c7070dbSScott Long 	txq->ift_mbuf_defrag_failed++;
34704c7070dbSScott Long 	txq->ift_map_failed++;
34714c7070dbSScott Long 	m_freem(*m_headp);
34724c7070dbSScott Long 	DBG_COUNTER_INC(tx_frees);
34734c7070dbSScott Long 	*m_headp = NULL;
34744c7070dbSScott Long 	return (ENOMEM);
34754c7070dbSScott Long }
34764c7070dbSScott Long 
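/*
 * Release the software state for 'n' consumed TX descriptors starting at
 * ift_cidx: unload DMA maps flagged TX_SW_DESC_MAPPED, free the associated
 * mbufs, and advance cidx, clearing ift_gen when the ring wraps.
 */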
34774c7070dbSScott Long static void
34784c7070dbSScott Long iflib_tx_desc_free(iflib_txq_t txq, int n)
34794c7070dbSScott Long {
34804c7070dbSScott Long 	int hasmap;
34814c7070dbSScott Long 	uint32_t qsize, cidx, mask, gen;
34824c7070dbSScott Long 	struct mbuf *m, **ifsd_m;
34834c7070dbSScott Long 	uint8_t *ifsd_flags;
34844c7070dbSScott Long 	bus_dmamap_t *ifsd_map;
348595246abbSSean Bruno 	bool do_prefetch;
34864c7070dbSScott Long 
34874c7070dbSScott Long 	cidx = txq->ift_cidx;
34884c7070dbSScott Long 	gen = txq->ift_gen;
348923ac9029SStephen Hurd 	qsize = txq->ift_size;
34904c7070dbSScott Long 	mask = qsize-1;
34914c7070dbSScott Long 	hasmap = txq->ift_sds.ifsd_map != NULL;
34924c7070dbSScott Long 	ifsd_flags = txq->ift_sds.ifsd_flags;
34934c7070dbSScott Long 	ifsd_m = txq->ift_sds.ifsd_m;
34944c7070dbSScott Long 	ifsd_map = txq->ift_sds.ifsd_map;
349595246abbSSean Bruno 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
34964c7070dbSScott Long 
349794618825SMark Johnston 	while (n-- > 0) {
349895246abbSSean Bruno 		if (do_prefetch) {
34994c7070dbSScott Long 			prefetch(ifsd_m[(cidx + 3) & mask]);
35004c7070dbSScott Long 			prefetch(ifsd_m[(cidx + 4) & mask]);
350195246abbSSean Bruno 		}
35024c7070dbSScott Long 		if (ifsd_m[cidx] != NULL) {
35034c7070dbSScott Long 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
35044c7070dbSScott Long 			prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]);
35054c7070dbSScott Long 			if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) {
35064c7070dbSScott Long 				/*
35074c7070dbSScott Long 				 * does it matter if it's not the TSO tag? If so we'll
35084c7070dbSScott Long 				 * have to add the type to flags
35094c7070dbSScott Long 				 */
35104c7070dbSScott Long 				bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]);
35114c7070dbSScott Long 				ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED;
35124c7070dbSScott Long 			}
35134c7070dbSScott Long 			if ((m = ifsd_m[cidx]) != NULL) {
35144c7070dbSScott Long 				/* XXX we don't support any drivers that batch packets yet */
35154c7070dbSScott Long 				MPASS(m->m_nextpkt == NULL);
35165c5ca36cSSean Bruno 				/* if the number of clusters exceeds the number of segments
35175c5ca36cSSean Bruno 				 * there won't be space on the ring to save a pointer to each
35185c5ca36cSSean Bruno 				 * cluster so we simply free the list here
35195c5ca36cSSean Bruno 				 */
35205c5ca36cSSean Bruno 				if (m->m_flags & M_TOOBIG) {
35215c5ca36cSSean Bruno 					m_freem(m);
35225c5ca36cSSean Bruno 				} else {
352323ac9029SStephen Hurd 					m_free(m);
35245c5ca36cSSean Bruno 				}
35254c7070dbSScott Long 				ifsd_m[cidx] = NULL;
35264c7070dbSScott Long #if MEMORY_LOGGING
35274c7070dbSScott Long 				txq->ift_dequeued++;
35284c7070dbSScott Long #endif
35294c7070dbSScott Long 				DBG_COUNTER_INC(tx_frees);
35304c7070dbSScott Long 			}
35314c7070dbSScott Long 		}
35324c7070dbSScott Long 		if (__predict_false(++cidx == qsize)) {
35334c7070dbSScott Long 			cidx = 0;
35344c7070dbSScott Long 			gen = 0;
35354c7070dbSScott Long 		}
35364c7070dbSScott Long 	}
35374c7070dbSScott Long 	txq->ift_cidx = cidx;
35384c7070dbSScott Long 	txq->ift_gen = gen;
35394c7070dbSScott Long }
35404c7070dbSScott Long 
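/*
 * Update the TX credits from the driver and, if more than 'thresh'
 * descriptors are reclaimable, free their software state and return the
 * number reclaimed; otherwise return 0.  Typical use is the call in
 * iflib_txq_drain() below:
 *
 *	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
 */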
35414c7070dbSScott Long static __inline int
35424c7070dbSScott Long iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
35434c7070dbSScott Long {
35444c7070dbSScott Long 	int reclaim;
35454c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
35464c7070dbSScott Long 
35474c7070dbSScott Long 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
35484c7070dbSScott Long 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
35494c7070dbSScott Long 
35504c7070dbSScott Long 	/*
35514c7070dbSScott Long 	 * Need a rate-limiting check so that this isn't called every time
35524c7070dbSScott Long 	 */
35534c7070dbSScott Long 	iflib_tx_credits_update(ctx, txq);
35544c7070dbSScott Long 	reclaim = DESC_RECLAIMABLE(txq);
35554c7070dbSScott Long 
35564c7070dbSScott Long 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
35574c7070dbSScott Long #ifdef INVARIANTS
35584c7070dbSScott Long 		if (iflib_verbose_debug) {
35594c7070dbSScott Long 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
35604c7070dbSScott Long 			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
35614c7070dbSScott Long 			       reclaim, thresh);
35624c7070dbSScott Long 
35634c7070dbSScott Long 		}
35644c7070dbSScott Long #endif
35654c7070dbSScott Long 		return (0);
35664c7070dbSScott Long 	}
35674c7070dbSScott Long 	iflib_tx_desc_free(txq, reclaim);
35684c7070dbSScott Long 	txq->ift_cleaned += reclaim;
35694c7070dbSScott Long 	txq->ift_in_use -= reclaim;
35704c7070dbSScott Long 
35714c7070dbSScott Long 	return (reclaim);
35724c7070dbSScott Long }
35734c7070dbSScott Long 
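/*
 * Return a pointer to the mp_ring slot at (cidx + offset), prefetching that
 * entry and, when more items remain, the following few slots so that the
 * drain loop is less likely to stall on cache misses.
 */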
35744c7070dbSScott Long static struct mbuf **
357595246abbSSean Bruno _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
35764c7070dbSScott Long {
357795246abbSSean Bruno 	int next, size;
357895246abbSSean Bruno 	struct mbuf **items;
35794c7070dbSScott Long 
358095246abbSSean Bruno 	size = r->size;
358195246abbSSean Bruno 	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
358295246abbSSean Bruno 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
358395246abbSSean Bruno 
358495246abbSSean Bruno 	prefetch(items[(cidx + offset) & (size-1)]);
358595246abbSSean Bruno 	if (remaining > 1) {
35863429c02fSStephen Hurd 		prefetch2cachelines(&items[next]);
35873429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
35883429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
35893429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
359095246abbSSean Bruno 	}
359195246abbSSean Bruno 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
35924c7070dbSScott Long }
35934c7070dbSScott Long 
35944c7070dbSScott Long static void
35954c7070dbSScott Long iflib_txq_check_drain(iflib_txq_t txq, int budget)
35964c7070dbSScott Long {
35974c7070dbSScott Long 
359895246abbSSean Bruno 	ifmp_ring_check_drainage(txq->ift_br, budget);
35994c7070dbSScott Long }
36004c7070dbSScott Long 
36014c7070dbSScott Long static uint32_t
36024c7070dbSScott Long iflib_txq_can_drain(struct ifmp_ring *r)
36034c7070dbSScott Long {
36044c7070dbSScott Long 	iflib_txq_t txq = r->cookie;
36054c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
36064c7070dbSScott Long 
36071248952aSSean Bruno 	return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
360895246abbSSean Bruno 		ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
36094c7070dbSScott Long }
36104c7070dbSScott Long 
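/*
 * mp_ring drain callback: reclaim completed descriptors, then encapsulate up
 * to TX_BATCH_SIZE packets starting at cidx, ringing the doorbell as needed
 * and updating the interface counters.  Entries whose value is the txq
 * pointer itself are markers enqueued by _task_fn_tx() and are skipped.
 * Returns the number of ring entries consumed.
 */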
36114c7070dbSScott Long static uint32_t
36124c7070dbSScott Long iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
36134c7070dbSScott Long {
36144c7070dbSScott Long 	iflib_txq_t txq = r->cookie;
36154c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
361695246abbSSean Bruno 	struct ifnet *ifp = ctx->ifc_ifp;
36174c7070dbSScott Long 	struct mbuf **mp, *m;
361895246abbSSean Bruno 	int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
361995246abbSSean Bruno 	int reclaimed, err, in_use_prev, desc_used;
362095246abbSSean Bruno 	bool do_prefetch, ring, rang;
36214c7070dbSScott Long 
36224c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
36234c7070dbSScott Long 			    !LINK_ACTIVE(ctx))) {
36244c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_notready);
36254c7070dbSScott Long 		return (0);
36264c7070dbSScott Long 	}
362795246abbSSean Bruno 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
362895246abbSSean Bruno 	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
36294c7070dbSScott Long 	avail = IDXDIFF(pidx, cidx, r->size);
36304c7070dbSScott Long 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
36314c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_flushing);
36324c7070dbSScott Long 		for (i = 0; i < avail; i++) {
363323ac9029SStephen Hurd 			m_free(r->items[(cidx + i) & (r->size-1)]);
36344c7070dbSScott Long 			r->items[(cidx + i) & (r->size-1)] = NULL;
36354c7070dbSScott Long 		}
36364c7070dbSScott Long 		return (avail);
36374c7070dbSScott Long 	}
363895246abbSSean Bruno 
36394c7070dbSScott Long 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
36404c7070dbSScott Long 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
36414c7070dbSScott Long 		CALLOUT_LOCK(txq);
36424c7070dbSScott Long 		callout_stop(&txq->ift_timer);
36434c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
36444c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_oactive);
36454c7070dbSScott Long 		return (0);
36464c7070dbSScott Long 	}
364795246abbSSean Bruno 	if (reclaimed)
364895246abbSSean Bruno 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
36494c7070dbSScott Long 	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
36504c7070dbSScott Long 	count = MIN(avail, TX_BATCH_SIZE);
3651da69b8f9SSean Bruno #ifdef INVARIANTS
3652da69b8f9SSean Bruno 	if (iflib_verbose_debug)
3653da69b8f9SSean Bruno 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3654da69b8f9SSean Bruno 		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3655da69b8f9SSean Bruno #endif
365695246abbSSean Bruno 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
365795246abbSSean Bruno 	avail = TXQ_AVAIL(txq);
36581ae4848cSMatt Macy 	err = 0;
365995246abbSSean Bruno 	for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
36601ae4848cSMatt Macy 		int rem = do_prefetch ? count - i : 0;
36614c7070dbSScott Long 
366295246abbSSean Bruno 		mp = _ring_peek_one(r, cidx, i, rem);
3663da69b8f9SSean Bruno 		MPASS(mp != NULL && *mp != NULL);
366495246abbSSean Bruno 		if (__predict_false(*mp == (struct mbuf *)txq)) {
366595246abbSSean Bruno 			consumed++;
366695246abbSSean Bruno 			reclaimed++;
366795246abbSSean Bruno 			continue;
366895246abbSSean Bruno 		}
36694c7070dbSScott Long 		in_use_prev = txq->ift_in_use;
367095246abbSSean Bruno 		err = iflib_encap(txq, mp);
367195246abbSSean Bruno 		if (__predict_false(err)) {
36724c7070dbSScott Long 			DBG_COUNTER_INC(txq_drain_encapfail);
3673da69b8f9SSean Bruno 			/* no room - bail out */
367495246abbSSean Bruno 			if (err == ENOBUFS)
36754c7070dbSScott Long 				break;
36764c7070dbSScott Long 			consumed++;
3678da69b8f9SSean Bruno 			/* we can't send this packet - skip it */
36794c7070dbSScott Long 			continue;
3680da69b8f9SSean Bruno 		}
368195246abbSSean Bruno 		consumed++;
36824c7070dbSScott Long 		pkt_sent++;
36834c7070dbSScott Long 		m = *mp;
36844c7070dbSScott Long 		DBG_COUNTER_INC(tx_sent);
36854c7070dbSScott Long 		bytes_sent += m->m_pkthdr.len;
368695246abbSSean Bruno 		mcast_sent += !!(m->m_flags & M_MCAST);
368795246abbSSean Bruno 		avail = TXQ_AVAIL(txq);
36884c7070dbSScott Long 
36894c7070dbSScott Long 		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
36904c7070dbSScott Long 		desc_used += (txq->ift_in_use - in_use_prev);
36914c7070dbSScott Long 		ETHER_BPF_MTAP(ifp, m);
369295246abbSSean Bruno 		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
36934c7070dbSScott Long 			break;
369495246abbSSean Bruno 		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
36954c7070dbSScott Long 	}
36964c7070dbSScott Long 
369795246abbSSean Bruno 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
369895246abbSSean Bruno 	ring = rang ? false  : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
369995246abbSSean Bruno 	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
37004c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
37014c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
37024c7070dbSScott Long 	if (mcast_sent)
37034c7070dbSScott Long 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3704da69b8f9SSean Bruno #ifdef INVARIANTS
3705da69b8f9SSean Bruno 	if (iflib_verbose_debug)
3706da69b8f9SSean Bruno 		printf("consumed=%d\n", consumed);
3707da69b8f9SSean Bruno #endif
37084c7070dbSScott Long 	return (consumed);
37094c7070dbSScott Long }
37104c7070dbSScott Long 
3711da69b8f9SSean Bruno static uint32_t
3712da69b8f9SSean Bruno iflib_txq_drain_always(struct ifmp_ring *r)
3713da69b8f9SSean Bruno {
3714da69b8f9SSean Bruno 	return (1);
3715da69b8f9SSean Bruno }
3716da69b8f9SSean Bruno 
3717da69b8f9SSean Bruno static uint32_t
3718da69b8f9SSean Bruno iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3719da69b8f9SSean Bruno {
3720da69b8f9SSean Bruno 	int i, avail;
3721da69b8f9SSean Bruno 	struct mbuf **mp;
3722da69b8f9SSean Bruno 	iflib_txq_t txq;
3723da69b8f9SSean Bruno 
3724da69b8f9SSean Bruno 	txq = r->cookie;
3725da69b8f9SSean Bruno 
3726da69b8f9SSean Bruno 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3727da69b8f9SSean Bruno 	CALLOUT_LOCK(txq);
3728da69b8f9SSean Bruno 	callout_stop(&txq->ift_timer);
3729da69b8f9SSean Bruno 	CALLOUT_UNLOCK(txq);
3730da69b8f9SSean Bruno 
3731da69b8f9SSean Bruno 	avail = IDXDIFF(pidx, cidx, r->size);
3732da69b8f9SSean Bruno 	for (i = 0; i < avail; i++) {
373395246abbSSean Bruno 		mp = _ring_peek_one(r, cidx, i, avail - i);
373495246abbSSean Bruno 		if (__predict_false(*mp == (struct mbuf *)txq))
373595246abbSSean Bruno 			continue;
3736da69b8f9SSean Bruno 		m_freem(*mp);
3737da69b8f9SSean Bruno 	}
3738da69b8f9SSean Bruno 	MPASS(ifmp_ring_is_stalled(r) == 0);
3739da69b8f9SSean Bruno 	return (avail);
3740da69b8f9SSean Bruno }
3741da69b8f9SSean Bruno 
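/*
 * Free every packet currently queued on the txq's mp_ring by temporarily
 * installing drain callbacks that discard instead of transmit.
 */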
3742da69b8f9SSean Bruno static void
3743da69b8f9SSean Bruno iflib_ifmp_purge(iflib_txq_t txq)
3744da69b8f9SSean Bruno {
3745da69b8f9SSean Bruno 	struct ifmp_ring *r;
3746da69b8f9SSean Bruno 
374795246abbSSean Bruno 	r = txq->ift_br;
3748da69b8f9SSean Bruno 	r->drain = iflib_txq_drain_free;
3749da69b8f9SSean Bruno 	r->can_drain = iflib_txq_drain_always;
3750da69b8f9SSean Bruno 
3751da69b8f9SSean Bruno 	ifmp_ring_check_drainage(r, r->size);
3752da69b8f9SSean Bruno 
3753da69b8f9SSean Bruno 	r->drain = iflib_txq_drain;
3754da69b8f9SSean Bruno 	r->can_drain = iflib_txq_can_drain;
3755da69b8f9SSean Bruno }
3756da69b8f9SSean Bruno 
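/*
 * Deferred TX task.  With netmap enabled on the interface this only notifies
 * netmap of TX completions and re-enables the queue interrupt.  Otherwise it
 * enqueues a marker (the txq pointer) on the mp_ring when doorbell work is
 * pending and checks ring drainage, with the exact ordering controlled by
 * the tx_abdicate sysctl; finally the queue (or legacy) interrupt is
 * re-enabled.
 */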
37574c7070dbSScott Long static void
375823ac9029SStephen Hurd _task_fn_tx(void *context)
37594c7070dbSScott Long {
37604c7070dbSScott Long 	iflib_txq_t txq = context;
37614c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
376295246abbSSean Bruno 	struct ifnet *ifp = ctx->ifc_ifp;
3763fe51d4cdSStephen Hurd 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
37644c7070dbSScott Long 
37651248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
37661248952aSSean Bruno 	txq->ift_cpu_exec_count[curcpu]++;
37671248952aSSean Bruno #endif
37684c7070dbSScott Long 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
37694c7070dbSScott Long 		return;
3770d0d0ad0aSStephen Hurd 	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
377195246abbSSean Bruno 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
377295246abbSSean Bruno 			netmap_tx_irq(ifp, txq->ift_id);
377395246abbSSean Bruno 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
377495246abbSSean Bruno 		return;
377595246abbSSean Bruno 	}
377695246abbSSean Bruno 	if (txq->ift_db_pending)
3777fe51d4cdSStephen Hurd 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
3778fe51d4cdSStephen Hurd 	else if (!abdicate)
3779fe51d4cdSStephen Hurd 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3780fe51d4cdSStephen Hurd 	/*
3781fe51d4cdSStephen Hurd 	 * When abdicating, we always need to check drainage, not just when we don't enqueue
3782fe51d4cdSStephen Hurd 	 */
3783fe51d4cdSStephen Hurd 	if (abdicate)
3784fe51d4cdSStephen Hurd 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
378695246abbSSean Bruno 	if (ctx->ifc_flags & IFC_LEGACY)
378795246abbSSean Bruno 		IFDI_INTR_ENABLE(ctx);
378895246abbSSean Bruno 	else {
37891ae4848cSMatt Macy #ifdef INVARIANTS
37901ae4848cSMatt Macy 		int rc =
37911ae4848cSMatt Macy #endif
37921ae4848cSMatt Macy 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
379395246abbSSean Bruno 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
379495246abbSSean Bruno 	}
37954c7070dbSScott Long }
37964c7070dbSScott Long 
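/*
 * Deferred RX task: run iflib_rxeof() with the configured budget (netmap
 * gets first crack at the queue), re-enable the queue or legacy interrupt
 * once no more work is pending, and reschedule the task while work remains.
 */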
37974c7070dbSScott Long static void
379823ac9029SStephen Hurd _task_fn_rx(void *context)
37994c7070dbSScott Long {
38004c7070dbSScott Long 	iflib_rxq_t rxq = context;
38014c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
38024c7070dbSScott Long 	bool more;
3803f4d2154eSStephen Hurd 	uint16_t budget;
38044c7070dbSScott Long 
38051248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
38061248952aSSean Bruno 	rxq->ifr_cpu_exec_count[curcpu]++;
38071248952aSSean Bruno #endif
38084c7070dbSScott Long 	DBG_COUNTER_INC(task_fn_rxs);
38094c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
38104c7070dbSScott Long 		return;
3811d0d0ad0aSStephen Hurd 	more = true;
3812d0d0ad0aSStephen Hurd #ifdef DEV_NETMAP
3813d0d0ad0aSStephen Hurd 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
3814d0d0ad0aSStephen Hurd 		u_int work = 0;
3815d0d0ad0aSStephen Hurd 		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
3816d0d0ad0aSStephen Hurd 			more = false;
3817d0d0ad0aSStephen Hurd 		}
3818d0d0ad0aSStephen Hurd 	}
3819d0d0ad0aSStephen Hurd #endif
3820f4d2154eSStephen Hurd 	budget = ctx->ifc_sysctl_rx_budget;
3821f4d2154eSStephen Hurd 	if (budget == 0)
3822f4d2154eSStephen Hurd 		budget = 16;	/* XXX */
3823f4d2154eSStephen Hurd 	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
38244c7070dbSScott Long 		if (ctx->ifc_flags & IFC_LEGACY)
38254c7070dbSScott Long 			IFDI_INTR_ENABLE(ctx);
38264c7070dbSScott Long 		else {
38271ae4848cSMatt Macy #ifdef INVARIANTS
38281ae4848cSMatt Macy 			int rc =
38291ae4848cSMatt Macy #endif
38301ae4848cSMatt Macy 				IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
383123ac9029SStephen Hurd 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
38321ae4848cSMatt Macy 			DBG_COUNTER_INC(rx_intr_enables);
38334c7070dbSScott Long 		}
38344c7070dbSScott Long 	}
38354c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
38364c7070dbSScott Long 		return;
38374c7070dbSScott Long 	if (more)
38384c7070dbSScott Long 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
38394c7070dbSScott Long }
38404c7070dbSScott Long 
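/*
 * Deferred admin task: latch and clear any pending reset/watchdog requests
 * under the state lock, then, with the ctx lock held, stop the TX timers,
 * run the driver's watchdog and admin-status handlers, restart the timers,
 * and re-init the interface if a reset was requested.
 */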
38414c7070dbSScott Long static void
384223ac9029SStephen Hurd _task_fn_admin(void *context)
38434c7070dbSScott Long {
38444c7070dbSScott Long 	if_ctx_t ctx = context;
38454c7070dbSScott Long 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
38464c7070dbSScott Long 	iflib_txq_t txq;
3847ab2e3f79SStephen Hurd 	int i;
38487b610b60SSean Bruno 	bool oactive, running, do_reset, do_watchdog;
3849dd7fbcf1SStephen Hurd 	uint32_t reset_on = hz / 2;
3850ab2e3f79SStephen Hurd 
38517b610b60SSean Bruno 	STATE_LOCK(ctx);
38527b610b60SSean Bruno 	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
38537b610b60SSean Bruno 	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
38547b610b60SSean Bruno 	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
38557b610b60SSean Bruno 	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
38567b610b60SSean Bruno 	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
38577b610b60SSean Bruno 	STATE_UNLOCK(ctx);
38587b610b60SSean Bruno 
38591d7ef186SEric Joyner 	if ((!running && !oactive) &&
38601d7ef186SEric Joyner 	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
3861ab2e3f79SStephen Hurd 		return;
38624c7070dbSScott Long 
38634c7070dbSScott Long 	CTX_LOCK(ctx);
38644c7070dbSScott Long 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
38654c7070dbSScott Long 		CALLOUT_LOCK(txq);
38664c7070dbSScott Long 		callout_stop(&txq->ift_timer);
38674c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
38684c7070dbSScott Long 	}
38697b610b60SSean Bruno 	if (do_watchdog) {
38707b610b60SSean Bruno 		ctx->ifc_watchdog_events++;
38717b610b60SSean Bruno 		IFDI_WATCHDOG_RESET(ctx);
38727b610b60SSean Bruno 	}
3873d300df01SStephen Hurd 	IFDI_UPDATE_ADMIN_STATUS(ctx);
3874dd7fbcf1SStephen Hurd 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3875dd7fbcf1SStephen Hurd #ifdef DEV_NETMAP
3876dd7fbcf1SStephen Hurd 		reset_on = hz / 2;
3877dd7fbcf1SStephen Hurd 		if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
3878dd7fbcf1SStephen Hurd 			iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
3879dd7fbcf1SStephen Hurd #endif
3880dd7fbcf1SStephen Hurd 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
3881dd7fbcf1SStephen Hurd 	}
3882ab2e3f79SStephen Hurd 	IFDI_LINK_INTR_ENABLE(ctx);
38837b610b60SSean Bruno 	if (do_reset)
3884ab2e3f79SStephen Hurd 		iflib_if_init_locked(ctx);
38854c7070dbSScott Long 	CTX_UNLOCK(ctx);
38864c7070dbSScott Long 
3887ab2e3f79SStephen Hurd 	if (LINK_ACTIVE(ctx) == 0)
38884c7070dbSScott Long 		return;
38894c7070dbSScott Long 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
38904c7070dbSScott Long 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
38914c7070dbSScott Long }
38924c7070dbSScott Long 
38934c7070dbSScott Long 
38944c7070dbSScott Long static void
389523ac9029SStephen Hurd _task_fn_iov(void *context)
38964c7070dbSScott Long {
38974c7070dbSScott Long 	if_ctx_t ctx = context;
38984c7070dbSScott Long 
38994c7070dbSScott Long 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
39004c7070dbSScott Long 		return;
39014c7070dbSScott Long 
39024c7070dbSScott Long 	CTX_LOCK(ctx);
39034c7070dbSScott Long 	IFDI_VFLR_HANDLE(ctx);
39044c7070dbSScott Long 	CTX_UNLOCK(ctx);
39054c7070dbSScott Long }
39064c7070dbSScott Long 
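/*
 * Sysctl handler that forwards interrupt-delay tuning requests to the
 * driver via IFDI_SYSCTL_INT_DELAY() while holding the ctx lock.
 */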
39074c7070dbSScott Long static int
39084c7070dbSScott Long iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
39094c7070dbSScott Long {
39104c7070dbSScott Long 	int err;
39114c7070dbSScott Long 	if_int_delay_info_t info;
39124c7070dbSScott Long 	if_ctx_t ctx;
39134c7070dbSScott Long 
39144c7070dbSScott Long 	info = (if_int_delay_info_t)arg1;
39154c7070dbSScott Long 	ctx = info->iidi_ctx;
39164c7070dbSScott Long 	info->iidi_req = req;
39174c7070dbSScott Long 	info->iidi_oidp = oidp;
39184c7070dbSScott Long 	CTX_LOCK(ctx);
39194c7070dbSScott Long 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
39204c7070dbSScott Long 	CTX_UNLOCK(ctx);
39214c7070dbSScott Long 	return (err);
39224c7070dbSScott Long }
39234c7070dbSScott Long 
39244c7070dbSScott Long /*********************************************************************
39254c7070dbSScott Long  *
39264c7070dbSScott Long  *  IFNET FUNCTIONS
39274c7070dbSScott Long  *
39284c7070dbSScott Long  **********************************************************************/
39294c7070dbSScott Long 
39304c7070dbSScott Long static void
39314c7070dbSScott Long iflib_if_init_locked(if_ctx_t ctx)
39324c7070dbSScott Long {
39334c7070dbSScott Long 	iflib_stop(ctx);
39344c7070dbSScott Long 	iflib_init_locked(ctx);
39354c7070dbSScott Long }
39364c7070dbSScott Long 
39374c7070dbSScott Long 
39384c7070dbSScott Long static void
39394c7070dbSScott Long iflib_if_init(void *arg)
39404c7070dbSScott Long {
39414c7070dbSScott Long 	if_ctx_t ctx = arg;
39424c7070dbSScott Long 
39434c7070dbSScott Long 	CTX_LOCK(ctx);
39444c7070dbSScott Long 	iflib_if_init_locked(ctx);
39454c7070dbSScott Long 	CTX_UNLOCK(ctx);
39464c7070dbSScott Long }
39474c7070dbSScott Long 
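/*
 * if_transmit entry point: select a TX queue from the mbuf's flow hash,
 * enqueue the packet on that queue's mp_ring, and either hand off to the TX
 * gtask or drain in place depending on the tx_abdicate sysctl.  The mbuf is
 * freed here if it cannot be enqueued.
 */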
39484c7070dbSScott Long static int
39494c7070dbSScott Long iflib_if_transmit(if_t ifp, struct mbuf *m)
39504c7070dbSScott Long {
39514c7070dbSScott Long 	if_ctx_t	ctx = if_getsoftc(ifp);
39524c7070dbSScott Long 
39534c7070dbSScott Long 	iflib_txq_t txq;
395423ac9029SStephen Hurd 	int err, qidx;
3955fe51d4cdSStephen Hurd 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
39564c7070dbSScott Long 
39574c7070dbSScott Long 	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
39584c7070dbSScott Long 		DBG_COUNTER_INC(tx_frees);
39594c7070dbSScott Long 		m_freem(m);
3960da69b8f9SSean Bruno 		return (ENOBUFS);
39614c7070dbSScott Long 	}
39624c7070dbSScott Long 
396323ac9029SStephen Hurd 	MPASS(m->m_nextpkt == NULL);
39644c7070dbSScott Long 	qidx = 0;
39654c7070dbSScott Long 	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
39664c7070dbSScott Long 		qidx = QIDX(ctx, m);
39674c7070dbSScott Long 	/*
39684c7070dbSScott Long 	 * XXX calculate buf_ring based on flowid (divvy up bits?)
39694c7070dbSScott Long 	 */
39704c7070dbSScott Long 	txq = &ctx->ifc_txqs[qidx];
39714c7070dbSScott Long 
39724c7070dbSScott Long #ifdef DRIVER_BACKPRESSURE
39734c7070dbSScott Long 	if (txq->ift_closed) {
39744c7070dbSScott Long 		while (m != NULL) {
39754c7070dbSScott Long 			next = m->m_nextpkt;
39764c7070dbSScott Long 			m->m_nextpkt = NULL;
39774c7070dbSScott Long 			m_freem(m);
39784c7070dbSScott Long 			m = next;
39794c7070dbSScott Long 		}
39804c7070dbSScott Long 		return (ENOBUFS);
39814c7070dbSScott Long 	}
39824c7070dbSScott Long #endif
398323ac9029SStephen Hurd #ifdef notyet
39844c7070dbSScott Long 	qidx = count = 0;
39854c7070dbSScott Long 	mp = marr;
39864c7070dbSScott Long 	next = m;
39874c7070dbSScott Long 	do {
39884c7070dbSScott Long 		count++;
39894c7070dbSScott Long 		next = next->m_nextpkt;
39904c7070dbSScott Long 	} while (next != NULL);
39914c7070dbSScott Long 
399216fb86abSConrad Meyer 	if (count > nitems(marr))
39934c7070dbSScott Long 		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
39944c7070dbSScott Long 			/* XXX check nextpkt */
39954c7070dbSScott Long 			m_freem(m);
39964c7070dbSScott Long 			/* XXX simplify for now */
39974c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
39984c7070dbSScott Long 			return (ENOBUFS);
39994c7070dbSScott Long 		}
40004c7070dbSScott Long 	for (next = m, i = 0; next != NULL; i++) {
40014c7070dbSScott Long 		mp[i] = next;
40024c7070dbSScott Long 		next = next->m_nextpkt;
40034c7070dbSScott Long 		mp[i]->m_nextpkt = NULL;
40044c7070dbSScott Long 	}
400523ac9029SStephen Hurd #endif
40064c7070dbSScott Long 	DBG_COUNTER_INC(tx_seen);
4007fe51d4cdSStephen Hurd 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
40084c7070dbSScott Long 
4009fe51d4cdSStephen Hurd 	if (abdicate)
4010ab2e3f79SStephen Hurd 		GROUPTASK_ENQUEUE(&txq->ift_task);
40111225d9daSStephen Hurd  	if (err) {
4012fe51d4cdSStephen Hurd 		if (!abdicate)
4013fe51d4cdSStephen Hurd 			GROUPTASK_ENQUEUE(&txq->ift_task);
40144c7070dbSScott Long 		/* support forthcoming later */
40154c7070dbSScott Long #ifdef DRIVER_BACKPRESSURE
40164c7070dbSScott Long 		txq->ift_closed = TRUE;
40174c7070dbSScott Long #endif
401895246abbSSean Bruno 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
401923ac9029SStephen Hurd 		m_freem(m);
40204c7070dbSScott Long 	}
40214c7070dbSScott Long 
40224c7070dbSScott Long 	return (err);
40234c7070dbSScott Long }
40244c7070dbSScott Long 
40254c7070dbSScott Long static void
40264c7070dbSScott Long iflib_if_qflush(if_t ifp)
40274c7070dbSScott Long {
40284c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
40294c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
40304c7070dbSScott Long 	int i;
40314c7070dbSScott Long 
40327b610b60SSean Bruno 	STATE_LOCK(ctx);
40334c7070dbSScott Long 	ctx->ifc_flags |= IFC_QFLUSH;
40347b610b60SSean Bruno 	STATE_UNLOCK(ctx);
40354c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
403695246abbSSean Bruno 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
40374c7070dbSScott Long 			iflib_txq_check_drain(txq, 0);
40387b610b60SSean Bruno 	STATE_LOCK(ctx);
40394c7070dbSScott Long 	ctx->ifc_flags &= ~IFC_QFLUSH;
40407b610b60SSean Bruno 	STATE_UNLOCK(ctx);
40414c7070dbSScott Long 
40424c7070dbSScott Long 	if_qflush(ifp);
40434c7070dbSScott Long }
40444c7070dbSScott Long 
40454c7070dbSScott Long 
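/* Interface capabilities that the SIOCSIFCAP handler below may toggle. */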
404623ac9029SStephen Hurd #define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
404718a660b3SSean Bruno 		     IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
40484c7070dbSScott Long 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)
40494c7070dbSScott Long 
40504c7070dbSScott Long static int
40514c7070dbSScott Long iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
40524c7070dbSScott Long {
40534c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
40544c7070dbSScott Long 	struct ifreq	*ifr = (struct ifreq *)data;
40554c7070dbSScott Long #if defined(INET) || defined(INET6)
40564c7070dbSScott Long 	struct ifaddr	*ifa = (struct ifaddr *)data;
40574c7070dbSScott Long #endif
40584c7070dbSScott Long 	bool		avoid_reset = FALSE;
40594c7070dbSScott Long 	int		err = 0, reinit = 0, bits;
40604c7070dbSScott Long 
40614c7070dbSScott Long 	switch (command) {
40624c7070dbSScott Long 	case SIOCSIFADDR:
40634c7070dbSScott Long #ifdef INET
40644c7070dbSScott Long 		if (ifa->ifa_addr->sa_family == AF_INET)
40654c7070dbSScott Long 			avoid_reset = TRUE;
40664c7070dbSScott Long #endif
40674c7070dbSScott Long #ifdef INET6
40684c7070dbSScott Long 		if (ifa->ifa_addr->sa_family == AF_INET6)
40694c7070dbSScott Long 			avoid_reset = TRUE;
40704c7070dbSScott Long #endif
40714c7070dbSScott Long 		/*
40724c7070dbSScott Long 		** Calling init results in link renegotiation,
40734c7070dbSScott Long 		** so we avoid doing it when possible.
40744c7070dbSScott Long 		*/
40754c7070dbSScott Long 		if (avoid_reset) {
40764c7070dbSScott Long 			if_setflagbits(ifp, IFF_UP,0);
40774c7070dbSScott Long 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
40784c7070dbSScott Long 				reinit = 1;
40794c7070dbSScott Long #ifdef INET
40804c7070dbSScott Long 			if (!(if_getflags(ifp) & IFF_NOARP))
40814c7070dbSScott Long 				arp_ifinit(ifp, ifa);
40824c7070dbSScott Long #endif
40834c7070dbSScott Long 		} else
40844c7070dbSScott Long 			err = ether_ioctl(ifp, command, data);
40854c7070dbSScott Long 		break;
40864c7070dbSScott Long 	case SIOCSIFMTU:
40874c7070dbSScott Long 		CTX_LOCK(ctx);
40884c7070dbSScott Long 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
40894c7070dbSScott Long 			CTX_UNLOCK(ctx);
40904c7070dbSScott Long 			break;
40914c7070dbSScott Long 		}
40924c7070dbSScott Long 		bits = if_getdrvflags(ifp);
40934c7070dbSScott Long 		/* stop the driver and free any clusters before proceeding */
40944c7070dbSScott Long 		iflib_stop(ctx);
40954c7070dbSScott Long 
40964c7070dbSScott Long 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
40977b610b60SSean Bruno 			STATE_LOCK(ctx);
40984c7070dbSScott Long 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
40994c7070dbSScott Long 				ctx->ifc_flags |= IFC_MULTISEG;
41004c7070dbSScott Long 			else
41014c7070dbSScott Long 				ctx->ifc_flags &= ~IFC_MULTISEG;
41027b610b60SSean Bruno 			STATE_UNLOCK(ctx);
41034c7070dbSScott Long 			err = if_setmtu(ifp, ifr->ifr_mtu);
41044c7070dbSScott Long 		}
41054c7070dbSScott Long 		iflib_init_locked(ctx);
41067b610b60SSean Bruno 		STATE_LOCK(ctx);
41074c7070dbSScott Long 		if_setdrvflags(ifp, bits);
41087b610b60SSean Bruno 		STATE_UNLOCK(ctx);
41094c7070dbSScott Long 		CTX_UNLOCK(ctx);
41104c7070dbSScott Long 		break;
41114c7070dbSScott Long 	case SIOCSIFFLAGS:
4112ab2e3f79SStephen Hurd 		CTX_LOCK(ctx);
4113ab2e3f79SStephen Hurd 		if (if_getflags(ifp) & IFF_UP) {
4114ab2e3f79SStephen Hurd 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4115ab2e3f79SStephen Hurd 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4116ab2e3f79SStephen Hurd 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4117ab2e3f79SStephen Hurd 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4118ab2e3f79SStephen Hurd 				}
4119ab2e3f79SStephen Hurd 			} else
4120ab2e3f79SStephen Hurd 				reinit = 1;
4121ab2e3f79SStephen Hurd 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4122ab2e3f79SStephen Hurd 			iflib_stop(ctx);
4123ab2e3f79SStephen Hurd 		}
4124ab2e3f79SStephen Hurd 		ctx->ifc_if_flags = if_getflags(ifp);
4125ab2e3f79SStephen Hurd 		CTX_UNLOCK(ctx);
41264c7070dbSScott Long 		break;
41274c7070dbSScott Long 	case SIOCADDMULTI:
41284c7070dbSScott Long 	case SIOCDELMULTI:
41294c7070dbSScott Long 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4130ab2e3f79SStephen Hurd 			CTX_LOCK(ctx);
4131ab2e3f79SStephen Hurd 			IFDI_INTR_DISABLE(ctx);
4132ab2e3f79SStephen Hurd 			IFDI_MULTI_SET(ctx);
4133ab2e3f79SStephen Hurd 			IFDI_INTR_ENABLE(ctx);
4134ab2e3f79SStephen Hurd 			CTX_UNLOCK(ctx);
41354c7070dbSScott Long 		}
41364c7070dbSScott Long 		break;
41374c7070dbSScott Long 	case SIOCSIFMEDIA:
41384c7070dbSScott Long 		CTX_LOCK(ctx);
41394c7070dbSScott Long 		IFDI_MEDIA_SET(ctx);
41404c7070dbSScott Long 		CTX_UNLOCK(ctx);
41414c7070dbSScott Long 		/* falls thru */
41424c7070dbSScott Long 	case SIOCGIFMEDIA:
4143a027c8e9SStephen Hurd 	case SIOCGIFXMEDIA:
41444c7070dbSScott Long 		err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
41454c7070dbSScott Long 		break;
41464c7070dbSScott Long 	case SIOCGI2C:
41474c7070dbSScott Long 	{
41484c7070dbSScott Long 		struct ifi2creq i2c;
41494c7070dbSScott Long 
4150541d96aaSBrooks Davis 		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
41514c7070dbSScott Long 		if (err != 0)
41524c7070dbSScott Long 			break;
41534c7070dbSScott Long 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
41544c7070dbSScott Long 			err = EINVAL;
41554c7070dbSScott Long 			break;
41564c7070dbSScott Long 		}
41574c7070dbSScott Long 		if (i2c.len > sizeof(i2c.data)) {
41584c7070dbSScott Long 			err = EINVAL;
41594c7070dbSScott Long 			break;
41604c7070dbSScott Long 		}
41614c7070dbSScott Long 
41624c7070dbSScott Long 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4163541d96aaSBrooks Davis 			err = copyout(&i2c, ifr_data_get_ptr(ifr),
4164541d96aaSBrooks Davis 			    sizeof(i2c));
41654c7070dbSScott Long 		break;
41664c7070dbSScott Long 	}
41674c7070dbSScott Long 	case SIOCSIFCAP:
41684c7070dbSScott Long 	{
41694c7070dbSScott Long 		int mask, setmask;
41704c7070dbSScott Long 
41714c7070dbSScott Long 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
41724c7070dbSScott Long 		setmask = 0;
41734c7070dbSScott Long #ifdef TCP_OFFLOAD
41744c7070dbSScott Long 		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
41754c7070dbSScott Long #endif
41764c7070dbSScott Long 		setmask |= (mask & IFCAP_FLAGS);
41774c7070dbSScott Long 
41788b2a1db9SSean Bruno 		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
41798b2a1db9SSean Bruno 			setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
41804c7070dbSScott Long 		if ((mask & IFCAP_WOL) &&
41814c7070dbSScott Long 		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0)
41824c7070dbSScott Long 			setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC));
41834c7070dbSScott Long 		if_vlancap(ifp);
41844c7070dbSScott Long 		/*
41854c7070dbSScott Long 		 * want to ensure that traffic has stopped before we change any of the flags
41864c7070dbSScott Long 		 */
41874c7070dbSScott Long 		if (setmask) {
41884c7070dbSScott Long 			CTX_LOCK(ctx);
41894c7070dbSScott Long 			bits = if_getdrvflags(ifp);
41908b2a1db9SSean Bruno 			if (bits & IFF_DRV_RUNNING)
41914c7070dbSScott Long 				iflib_stop(ctx);
41927b610b60SSean Bruno 			STATE_LOCK(ctx);
41934c7070dbSScott Long 			if_togglecapenable(ifp, setmask);
41947b610b60SSean Bruno 			STATE_UNLOCK(ctx);
41958b2a1db9SSean Bruno 			if (bits & IFF_DRV_RUNNING)
41964c7070dbSScott Long 				iflib_init_locked(ctx);
41977b610b60SSean Bruno 			STATE_LOCK(ctx);
41984c7070dbSScott Long 			if_setdrvflags(ifp, bits);
41997b610b60SSean Bruno 			STATE_UNLOCK(ctx);
42004c7070dbSScott Long 			CTX_UNLOCK(ctx);
42014c7070dbSScott Long 		}
42024c7070dbSScott Long 		break;
42034c7070dbSScott Long 	}
42044c7070dbSScott Long 	case SIOCGPRIVATE_0:
42054c7070dbSScott Long 	case SIOCSDRVSPEC:
42064c7070dbSScott Long 	case SIOCGDRVSPEC:
42074c7070dbSScott Long 		CTX_LOCK(ctx);
42084c7070dbSScott Long 		err = IFDI_PRIV_IOCTL(ctx, command, data);
42094c7070dbSScott Long 		CTX_UNLOCK(ctx);
42104c7070dbSScott Long 		break;
42114c7070dbSScott Long 	default:
42124c7070dbSScott Long 		err = ether_ioctl(ifp, command, data);
42134c7070dbSScott Long 		break;
42144c7070dbSScott Long 	}
42154c7070dbSScott Long 	if (reinit)
42164c7070dbSScott Long 		iflib_if_init(ctx);
42174c7070dbSScott Long 	return (err);
42184c7070dbSScott Long }
42194c7070dbSScott Long 
42204c7070dbSScott Long static uint64_t
42214c7070dbSScott Long iflib_if_get_counter(if_t ifp, ift_counter cnt)
42224c7070dbSScott Long {
42234c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
42244c7070dbSScott Long 
42254c7070dbSScott Long 	return (IFDI_GET_COUNTER(ctx, cnt));
42264c7070dbSScott Long }
42274c7070dbSScott Long 
42284c7070dbSScott Long /*********************************************************************
42294c7070dbSScott Long  *
42304c7070dbSScott Long  *  OTHER FUNCTIONS EXPORTED TO THE STACK
42314c7070dbSScott Long  *
42324c7070dbSScott Long  **********************************************************************/
42334c7070dbSScott Long 
42344c7070dbSScott Long static void
42354c7070dbSScott Long iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
42364c7070dbSScott Long {
42374c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
42384c7070dbSScott Long 
42394c7070dbSScott Long 	if ((void *)ctx != arg)
42404c7070dbSScott Long 		return;
42414c7070dbSScott Long 
42424c7070dbSScott Long 	if ((vtag == 0) || (vtag > 4095))
42434c7070dbSScott Long 		return;
42444c7070dbSScott Long 
42454c7070dbSScott Long 	CTX_LOCK(ctx);
42464c7070dbSScott Long 	IFDI_VLAN_REGISTER(ctx, vtag);
42474c7070dbSScott Long 	/* Re-init to load the changes */
42484c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
424921e10b16SSean Bruno 		iflib_if_init_locked(ctx);
42504c7070dbSScott Long 	CTX_UNLOCK(ctx);
42514c7070dbSScott Long }
42524c7070dbSScott Long 
42534c7070dbSScott Long static void
42544c7070dbSScott Long iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
42554c7070dbSScott Long {
42564c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
42574c7070dbSScott Long 
42584c7070dbSScott Long 	if ((void *)ctx != arg)
42594c7070dbSScott Long 		return;
42604c7070dbSScott Long 
42614c7070dbSScott Long 	if ((vtag == 0) || (vtag > 4095))
42624c7070dbSScott Long 		return;
42634c7070dbSScott Long 
42644c7070dbSScott Long 	CTX_LOCK(ctx);
42654c7070dbSScott Long 	IFDI_VLAN_UNREGISTER(ctx, vtag);
42664c7070dbSScott Long 	/* Re-init to load the changes */
42674c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
426821e10b16SSean Bruno 		iflib_if_init_locked(ctx);
42694c7070dbSScott Long 	CTX_UNLOCK(ctx);
42704c7070dbSScott Long }
42714c7070dbSScott Long 
42724c7070dbSScott Long static void
42734c7070dbSScott Long iflib_led_func(void *arg, int onoff)
42744c7070dbSScott Long {
42754c7070dbSScott Long 	if_ctx_t ctx = arg;
42764c7070dbSScott Long 
42774c7070dbSScott Long 	CTX_LOCK(ctx);
42784c7070dbSScott Long 	IFDI_LED_FUNC(ctx, onoff);
42794c7070dbSScott Long 	CTX_UNLOCK(ctx);
42804c7070dbSScott Long }
42814c7070dbSScott Long 
42824c7070dbSScott Long /*********************************************************************
42834c7070dbSScott Long  *
42844c7070dbSScott Long  *  BUS FUNCTION DEFINITIONS
42854c7070dbSScott Long  *
42864c7070dbSScott Long  **********************************************************************/
42874c7070dbSScott Long 
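/*
 * Generic PCI probe: walk the driver's pci_vendor_info table (after giving
 * the driver a chance to rewrite the IDs via isc_parse_devinfo) and claim
 * the device on the first matching vendor/device/subvendor/subdevice/
 * revision entry, with zero fields in the table acting as wildcards.
 */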
42884c7070dbSScott Long int
42894c7070dbSScott Long iflib_device_probe(device_t dev)
42904c7070dbSScott Long {
42914c7070dbSScott Long 	pci_vendor_info_t *ent;
42924c7070dbSScott Long 
42934c7070dbSScott Long 	uint16_t	pci_vendor_id, pci_device_id;
42944c7070dbSScott Long 	uint16_t	pci_subvendor_id, pci_subdevice_id;
42954c7070dbSScott Long 	uint16_t	pci_rev_id;
42964c7070dbSScott Long 	if_shared_ctx_t sctx;
42974c7070dbSScott Long 
42984c7070dbSScott Long 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
42994c7070dbSScott Long 		return (ENOTSUP);
43004c7070dbSScott Long 
43014c7070dbSScott Long 	pci_vendor_id = pci_get_vendor(dev);
43024c7070dbSScott Long 	pci_device_id = pci_get_device(dev);
43034c7070dbSScott Long 	pci_subvendor_id = pci_get_subvendor(dev);
43044c7070dbSScott Long 	pci_subdevice_id = pci_get_subdevice(dev);
43054c7070dbSScott Long 	pci_rev_id = pci_get_revid(dev);
43064c7070dbSScott Long 	if (sctx->isc_parse_devinfo != NULL)
43074c7070dbSScott Long 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
43084c7070dbSScott Long 
43094c7070dbSScott Long 	ent = sctx->isc_vendor_info;
43104c7070dbSScott Long 	while (ent->pvi_vendor_id != 0) {
43114c7070dbSScott Long 		if (pci_vendor_id != ent->pvi_vendor_id) {
43124c7070dbSScott Long 			ent++;
43134c7070dbSScott Long 			continue;
43144c7070dbSScott Long 		}
43154c7070dbSScott Long 		if ((pci_device_id == ent->pvi_device_id) &&
43164c7070dbSScott Long 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
43174c7070dbSScott Long 		     (ent->pvi_subvendor_id == 0)) &&
43184c7070dbSScott Long 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
43194c7070dbSScott Long 		     (ent->pvi_subdevice_id == 0)) &&
43204c7070dbSScott Long 		    ((pci_rev_id == ent->pvi_rev_id) ||
43214c7070dbSScott Long 		     (ent->pvi_rev_id == 0))) {
43224c7070dbSScott Long 
43234c7070dbSScott Long 			device_set_desc_copy(dev, ent->pvi_name);
43244c7070dbSScott Long 			/* This needs to be changed to zero if the bus probing code
43254c7070dbSScott Long 			 * ever stops re-probing on best match, because the sctx
43264c7070dbSScott Long 			 * may have its values overwritten by register calls
43274c7070dbSScott Long 			 * in subsequent probes.
43284c7070dbSScott Long 			 */
43294c7070dbSScott Long 			return (BUS_PROBE_DEFAULT);
43304c7070dbSScott Long 		}
43314c7070dbSScott Long 		ent++;
43324c7070dbSScott Long 	}
43334c7070dbSScott Long 	return (ENXIO);
43344c7070dbSScott Long }
43354c7070dbSScott Long 
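/*
 * Seed the softc context queue and descriptor counts from the per-device
 * sysctls, falling back to the shared context defaults, and clamp each
 * descriptor count to the driver's advertised minimum and maximum.
 */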
433609f6ff4fSMatt Macy static void
433709f6ff4fSMatt Macy iflib_reset_qvalues(if_ctx_t ctx)
43384c7070dbSScott Long {
433909f6ff4fSMatt Macy 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
434009f6ff4fSMatt Macy 	if_shared_ctx_t sctx = ctx->ifc_sctx;
434109f6ff4fSMatt Macy 	device_t dev = ctx->ifc_dev;
434246d0f824SMatt Macy 	int i;
43434c7070dbSScott Long 
434409f6ff4fSMatt Macy 	scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES;
434509f6ff4fSMatt Macy 	scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH;
434623ac9029SStephen Hurd 	/*
434723ac9029SStephen Hurd 	 * XXX sanity check that ntxd & nrxd are a power of 2
434823ac9029SStephen Hurd 	 */
434923ac9029SStephen Hurd 	if (ctx->ifc_sysctl_ntxqs != 0)
435023ac9029SStephen Hurd 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
435123ac9029SStephen Hurd 	if (ctx->ifc_sysctl_nrxqs != 0)
435223ac9029SStephen Hurd 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
435323ac9029SStephen Hurd 
435423ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
435523ac9029SStephen Hurd 		if (ctx->ifc_sysctl_ntxds[i] != 0)
435623ac9029SStephen Hurd 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
435723ac9029SStephen Hurd 		else
435823ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
435923ac9029SStephen Hurd 	}
436023ac9029SStephen Hurd 
436123ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
436223ac9029SStephen Hurd 		if (ctx->ifc_sysctl_nrxds[i] != 0)
436323ac9029SStephen Hurd 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
436423ac9029SStephen Hurd 		else
436523ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
436623ac9029SStephen Hurd 	}
436723ac9029SStephen Hurd 
436823ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
436923ac9029SStephen Hurd 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
437023ac9029SStephen Hurd 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
437123ac9029SStephen Hurd 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
437223ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
437323ac9029SStephen Hurd 		}
437423ac9029SStephen Hurd 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
437523ac9029SStephen Hurd 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
437623ac9029SStephen Hurd 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
437723ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
437823ac9029SStephen Hurd 		}
437923ac9029SStephen Hurd 	}
438023ac9029SStephen Hurd 
438123ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
438223ac9029SStephen Hurd 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
438323ac9029SStephen Hurd 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
438423ac9029SStephen Hurd 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
438523ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
438623ac9029SStephen Hurd 		}
438723ac9029SStephen Hurd 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
438823ac9029SStephen Hurd 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
438923ac9029SStephen Hurd 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
439023ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
439123ac9029SStephen Hurd 		}
439223ac9029SStephen Hurd 	}
439309f6ff4fSMatt Macy }
4394ab2e3f79SStephen Hurd 
439509f6ff4fSMatt Macy int
439609f6ff4fSMatt Macy iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
439709f6ff4fSMatt Macy {
439809f6ff4fSMatt Macy 	int err, rid, msix;
439909f6ff4fSMatt Macy 	if_ctx_t ctx;
440009f6ff4fSMatt Macy 	if_t ifp;
440109f6ff4fSMatt Macy 	if_softc_ctx_t scctx;
440209f6ff4fSMatt Macy 	int i;
440309f6ff4fSMatt Macy 	uint16_t main_txq;
440409f6ff4fSMatt Macy 	uint16_t main_rxq;
440509f6ff4fSMatt Macy 
440609f6ff4fSMatt Macy 
440709f6ff4fSMatt Macy 	ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
440809f6ff4fSMatt Macy 
440909f6ff4fSMatt Macy 	if (sc == NULL) {
441009f6ff4fSMatt Macy 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
441109f6ff4fSMatt Macy 		device_set_softc(dev, ctx);
441209f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
441309f6ff4fSMatt Macy 	}
441409f6ff4fSMatt Macy 
441509f6ff4fSMatt Macy 	ctx->ifc_sctx = sctx;
441609f6ff4fSMatt Macy 	ctx->ifc_dev = dev;
441709f6ff4fSMatt Macy 	ctx->ifc_softc = sc;
441809f6ff4fSMatt Macy 
441909f6ff4fSMatt Macy 	if ((err = iflib_register(ctx)) != 0) {
442009f6ff4fSMatt Macy 		if (ctx->ifc_flags & IFC_SC_ALLOCATED)
442109f6ff4fSMatt Macy 			free(sc, M_IFLIB);
442209f6ff4fSMatt Macy 		free(ctx, M_IFLIB);
442309f6ff4fSMatt Macy 		device_printf(dev, "iflib_register failed %d\n", err);
442409f6ff4fSMatt Macy 		return (err);
442509f6ff4fSMatt Macy 	}
442609f6ff4fSMatt Macy 	iflib_add_device_sysctl_pre(ctx);
442709f6ff4fSMatt Macy 
442809f6ff4fSMatt Macy 	scctx = &ctx->ifc_softc_ctx;
442909f6ff4fSMatt Macy 	ifp = ctx->ifc_ifp;
443009f6ff4fSMatt Macy 
443109f6ff4fSMatt Macy 	iflib_reset_qvalues(ctx);
4432aa8a24d3SStephen Hurd 	CTX_LOCK(ctx);
4433ab2e3f79SStephen Hurd 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
4434aa8a24d3SStephen Hurd 		CTX_UNLOCK(ctx);
44354c7070dbSScott Long 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
44364c7070dbSScott Long 		return (err);
44374c7070dbSScott Long 	}
44381248952aSSean Bruno 	_iflib_pre_assert(scctx);
44391248952aSSean Bruno 	ctx->ifc_txrx = *scctx->isc_txrx;
44401248952aSSean Bruno 
44411248952aSSean Bruno #ifdef INVARIANTS
44427f87c040SMarius Strobl 	MPASS(scctx->isc_capabilities);
44437f87c040SMarius Strobl 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
44441248952aSSean Bruno 		MPASS(scctx->isc_tx_csum_flags);
44451248952aSSean Bruno #endif
44461248952aSSean Bruno 
44477f87c040SMarius Strobl 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS);
444818a660b3SSean Bruno 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
44491248952aSSean Bruno 
44501248952aSSean Bruno 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
44511248952aSSean Bruno 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
44521248952aSSean Bruno 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
44531248952aSSean Bruno 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
445423ac9029SStephen Hurd 
44554c7070dbSScott Long #ifdef ACPI_DMAR
44564c7070dbSScott Long 	if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
44574c7070dbSScott Long 		ctx->ifc_flags |= IFC_DMAR;
445895246abbSSean Bruno #elif !(defined(__i386__) || defined(__amd64__))
445995246abbSSean Bruno 	/* set unconditionally for !x86 */
446095246abbSSean Bruno 	ctx->ifc_flags |= IFC_DMAR;
44614c7070dbSScott Long #endif
44624c7070dbSScott Long 
446395246abbSSean Bruno 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
446495246abbSSean Bruno 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
446523ac9029SStephen Hurd 
446623ac9029SStephen Hurd 	/* XXX change for per-queue sizes */
446723ac9029SStephen Hurd 	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
446823ac9029SStephen Hurd 		      scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
446923ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
447023ac9029SStephen Hurd 		if (!powerof2(scctx->isc_nrxd[i])) {
447123ac9029SStephen Hurd 			/* round down instead? */
447223ac9029SStephen Hurd 			device_printf(dev, "# rx descriptors must be a power of 2\n");
447323ac9029SStephen Hurd 			err = EINVAL;
447423ac9029SStephen Hurd 			goto fail;
447523ac9029SStephen Hurd 		}
447623ac9029SStephen Hurd 	}
447723ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
447823ac9029SStephen Hurd 		if (!powerof2(scctx->isc_ntxd[i])) {
447923ac9029SStephen Hurd 			device_printf(dev,
448023ac9029SStephen Hurd 			    "# tx descriptors must be a power of 2\n");
448123ac9029SStephen Hurd 			err = EINVAL;
448223ac9029SStephen Hurd 			goto fail;
448323ac9029SStephen Hurd 		}
448423ac9029SStephen Hurd 	}
448523ac9029SStephen Hurd 
448623ac9029SStephen Hurd 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
448723ac9029SStephen Hurd 	    MAX_SINGLE_PACKET_FRACTION)
448823ac9029SStephen Hurd 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
448923ac9029SStephen Hurd 		    MAX_SINGLE_PACKET_FRACTION);
449023ac9029SStephen Hurd 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
449123ac9029SStephen Hurd 	    MAX_SINGLE_PACKET_FRACTION)
449223ac9029SStephen Hurd 		scctx->isc_tx_tso_segments_max = max(1,
449323ac9029SStephen Hurd 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
44944c7070dbSScott Long 
44954c7070dbSScott Long 	/* TSO parameters: pull these from the data sheet; they simply correspond to the DMA tag setup. */
44967f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
44977f87c040SMarius Strobl 		/*
44987f87c040SMarius Strobl 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
44997f87c040SMarius Strobl 		 * but some MACs do.
45007f87c040SMarius Strobl 		 */
45017f87c040SMarius Strobl 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
45027f87c040SMarius Strobl 		    IP_MAXPACKET));
45037f87c040SMarius Strobl 		/*
45047f87c040SMarius Strobl 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
45057f87c040SMarius Strobl 		 * into account.  In the worst case, each of these calls will
45067f87c040SMarius Strobl 		 * add another mbuf and, thus, the requirement for another DMA
45077f87c040SMarius Strobl 		 * segment.  So for best performance, it doesn't make sense to
45087f87c040SMarius Strobl 		 * advertise a maximum number of TSO segments that typically will
45097f87c040SMarius Strobl 		 * require defragmentation in iflib_encap().
45107f87c040SMarius Strobl 		 */
45117f87c040SMarius Strobl 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
45127f87c040SMarius Strobl 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
45137f87c040SMarius Strobl 	}
45144c7070dbSScott Long 	if (scctx->isc_rss_table_size == 0)
45154c7070dbSScott Long 		scctx->isc_rss_table_size = 64;
451623ac9029SStephen Hurd 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4517da69b8f9SSean Bruno 
4518da69b8f9SSean Bruno 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4519da69b8f9SSean Bruno 	/* XXX format name */
4520ab2e3f79SStephen Hurd 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
4521e516b535SStephen Hurd 
4522772593dbSStephen Hurd 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
4523e516b535SStephen Hurd 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4524e516b535SStephen Hurd 		device_printf(dev, "Unable to fetch CPU list\n");
4525e516b535SStephen Hurd 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4526e516b535SStephen Hurd 	}
4527e516b535SStephen Hurd 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4528e516b535SStephen Hurd 
45294c7070dbSScott Long 	/*
45304c7070dbSScott Long 	** Now set up MSI or MSI-X.  This should
45314c7070dbSScott Long 	** return the number of supported
45324c7070dbSScott Long 	** vectors (1 for MSI).
45334c7070dbSScott Long 	*/
45344c7070dbSScott Long 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
45354c7070dbSScott Long 		msix = scctx->isc_vectors;
45364c7070dbSScott Long 	} else if (scctx->isc_msix_bar != 0)
4537f7ae9a84SSean Bruno 	       /*
4538f7ae9a84SSean Bruno 		* The simple fact that isc_msix_bar is not 0 does not mean
4539f7ae9a84SSean Bruno 		* we have a good value there that is known to work.
4540f7ae9a84SSean Bruno 		*/
45414c7070dbSScott Long 		msix = iflib_msix_init(ctx);
45424c7070dbSScott Long 	else {
45434c7070dbSScott Long 		scctx->isc_vectors = 1;
45444c7070dbSScott Long 		scctx->isc_ntxqsets = 1;
45454c7070dbSScott Long 		scctx->isc_nrxqsets = 1;
45464c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_LEGACY;
45474c7070dbSScott Long 		msix = 0;
45484c7070dbSScott Long 	}
45494c7070dbSScott Long 	/* Get memory for the station queues */
45504c7070dbSScott Long 	if ((err = iflib_queues_alloc(ctx))) {
45514c7070dbSScott Long 		device_printf(dev, "Unable to allocate queue memory\n");
45524c7070dbSScott Long 		goto fail;
45534c7070dbSScott Long 	}
45544c7070dbSScott Long 
4555ac88e6daSStephen Hurd 	if ((err = iflib_qset_structures_setup(ctx)))
45564c7070dbSScott Long 		goto fail_queues;
455769b7fc3eSSean Bruno 
4558bd84f700SSean Bruno 	/*
4559bd84f700SSean Bruno 	 * Group taskqueues aren't properly set up until SMP is started,
4560bd84f700SSean Bruno 	 * so we disable interrupts until we can handle them post
4561bd84f700SSean Bruno 	 * SI_SUB_SMP.
4562bd84f700SSean Bruno 	 *
4563bd84f700SSean Bruno 	 * XXX: disabling interrupts doesn't actually work, at least for
4564bd84f700SSean Bruno 	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
4565bd84f700SSean Bruno 	 * we do null handling and depend on this not causing too large an
4566bd84f700SSean Bruno 	 * interrupt storm.
4567bd84f700SSean Bruno 	 */
45681248952aSSean Bruno 	IFDI_INTR_DISABLE(ctx);
45694c7070dbSScott Long 	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
45704c7070dbSScott Long 		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
45714c7070dbSScott Long 		goto fail_intr_free;
45724c7070dbSScott Long 	}
45734c7070dbSScott Long 	if (msix <= 1) {
45744c7070dbSScott Long 		rid = 0;
45754c7070dbSScott Long 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
45764c7070dbSScott Long 			MPASS(msix == 1);
45774c7070dbSScott Long 			rid = 1;
45784c7070dbSScott Long 		}
457923ac9029SStephen Hurd 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
45804c7070dbSScott Long 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
45814c7070dbSScott Long 			goto fail_intr_free;
45824c7070dbSScott Long 		}
45834c7070dbSScott Long 	}
45847f87c040SMarius Strobl 
45854c7070dbSScott Long 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
45867f87c040SMarius Strobl 
4587ab2e3f79SStephen Hurd 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
45884c7070dbSScott Long 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
45894c7070dbSScott Long 		goto fail_detach;
45904c7070dbSScott Long 	}
45917f87c040SMarius Strobl 
45927f87c040SMarius Strobl 	/*
45937f87c040SMarius Strobl 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
45947f87c040SMarius Strobl 	 * This must appear after the call to ether_ifattach() because
45957f87c040SMarius Strobl 	 * ether_ifattach() sets if_hdrlen to the default value.
45967f87c040SMarius Strobl 	 */
45977f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
45987f87c040SMarius Strobl 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
45997f87c040SMarius Strobl 
46004c7070dbSScott Long 	if ((err = iflib_netmap_attach(ctx))) {
46014c7070dbSScott Long 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
46024c7070dbSScott Long 		goto fail_detach;
46034c7070dbSScott Long 	}
46044c7070dbSScott Long 	*ctxp = ctx;
46054c7070dbSScott Long 
460694618825SMark Johnston 	NETDUMP_SET(ctx->ifc_ifp, iflib);
460794618825SMark Johnston 
460823ac9029SStephen Hurd 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
46094c7070dbSScott Long 	iflib_add_device_sysctl_post(ctx);
46104ecb427aSSean Bruno 	ctx->ifc_flags |= IFC_INIT_DONE;
4611aa8a24d3SStephen Hurd 	CTX_UNLOCK(ctx);
46124c7070dbSScott Long 	return (0);
46134c7070dbSScott Long fail_detach:
46144c7070dbSScott Long 	ether_ifdetach(ctx->ifc_ifp);
46154c7070dbSScott Long fail_intr_free:
46164c7070dbSScott Long 	if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
46174c7070dbSScott Long 		pci_release_msi(ctx->ifc_dev);
46184c7070dbSScott Long fail_queues:
46196108c013SStephen Hurd 	iflib_tx_structures_free(ctx);
46206108c013SStephen Hurd 	iflib_rx_structures_free(ctx);
46214c7070dbSScott Long fail:
46224c7070dbSScott Long 	IFDI_DETACH(ctx);
4623aa8a24d3SStephen Hurd 	CTX_UNLOCK(ctx);
46244c7070dbSScott Long 	return (err);
46254c7070dbSScott Long }
46264c7070dbSScott Long 
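/*
 * Register a pseudo (cloned/virtual) interface.  This mirrors
 * iflib_device_register() but always allocates the softc, optionally
 * generates a MAC address (IFLIB_GEN_MAC), calls IFDI_CLONEATTACH, and
 * skips MSI-X negotiation entirely: pseudo interfaces run with
 * IFLIB_INTR_LEGACY and a fixed autoselect ifmedia entry.
 */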
46274c7070dbSScott Long int
462809f6ff4fSMatt Macy iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
462909f6ff4fSMatt Macy 					  struct iflib_cloneattach_ctx *clctx)
463009f6ff4fSMatt Macy {
463109f6ff4fSMatt Macy 	int err;
463209f6ff4fSMatt Macy 	if_ctx_t ctx;
463309f6ff4fSMatt Macy 	if_t ifp;
463409f6ff4fSMatt Macy 	if_softc_ctx_t scctx;
463509f6ff4fSMatt Macy 	int i;
463609f6ff4fSMatt Macy 	void *sc;
463709f6ff4fSMatt Macy 	uint16_t main_txq;
463809f6ff4fSMatt Macy 	uint16_t main_rxq;
463909f6ff4fSMatt Macy 
464009f6ff4fSMatt Macy 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
464109f6ff4fSMatt Macy 	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
464209f6ff4fSMatt Macy 	ctx->ifc_flags |= IFC_SC_ALLOCATED;
464309f6ff4fSMatt Macy 	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
464409f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_PSEUDO;
464509f6ff4fSMatt Macy 
464609f6ff4fSMatt Macy 	ctx->ifc_sctx = sctx;
464709f6ff4fSMatt Macy 	ctx->ifc_softc = sc;
464809f6ff4fSMatt Macy 	ctx->ifc_dev = dev;
464909f6ff4fSMatt Macy 
465009f6ff4fSMatt Macy 	if ((err = iflib_register(ctx)) != 0) {
465109f6ff4fSMatt Macy 		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
465209f6ff4fSMatt Macy 		free(sc, M_IFLIB);
465309f6ff4fSMatt Macy 		free(ctx, M_IFLIB);
465409f6ff4fSMatt Macy 		return (err);
465509f6ff4fSMatt Macy 	}
465609f6ff4fSMatt Macy 	iflib_add_device_sysctl_pre(ctx);
465709f6ff4fSMatt Macy 
465809f6ff4fSMatt Macy 	scctx = &ctx->ifc_softc_ctx;
465909f6ff4fSMatt Macy 	ifp = ctx->ifc_ifp;
466009f6ff4fSMatt Macy 
466109f6ff4fSMatt Macy 	/*
466209f6ff4fSMatt Macy 	 * XXX sanity check that ntxd & nrxd are a power of 2
466309f6ff4fSMatt Macy 	 */
466409f6ff4fSMatt Macy 	iflib_reset_qvalues(ctx);
466509f6ff4fSMatt Macy 
466609f6ff4fSMatt Macy 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
466709f6ff4fSMatt Macy 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
466809f6ff4fSMatt Macy 		return (err);
466909f6ff4fSMatt Macy 	}
467009f6ff4fSMatt Macy 	if (sctx->isc_flags & IFLIB_GEN_MAC)
467109f6ff4fSMatt Macy 		iflib_gen_mac(ctx);
467209f6ff4fSMatt Macy 	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
467309f6ff4fSMatt Macy 								clctx->cc_params)) != 0) {
467409f6ff4fSMatt Macy 		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
467509f6ff4fSMatt Macy 		return (err);
467609f6ff4fSMatt Macy 	}
467709f6ff4fSMatt Macy 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
467809f6ff4fSMatt Macy 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
467909f6ff4fSMatt Macy 	ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO);
468009f6ff4fSMatt Macy 
468109f6ff4fSMatt Macy #ifdef INVARIANTS
46827f87c040SMarius Strobl 	MPASS(scctx->isc_capabilities);
46837f87c040SMarius Strobl 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
468409f6ff4fSMatt Macy 		MPASS(scctx->isc_tx_csum_flags);
468509f6ff4fSMatt Macy #endif
468609f6ff4fSMatt Macy 
46877f87c040SMarius Strobl 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
468809f6ff4fSMatt Macy 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
468909f6ff4fSMatt Macy 
469009f6ff4fSMatt Macy 	ifp->if_flags |= IFF_NOGROUP;
469109f6ff4fSMatt Macy 	if (sctx->isc_flags & IFLIB_PSEUDO) {
469209f6ff4fSMatt Macy 		ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
469309f6ff4fSMatt Macy 
469409f6ff4fSMatt Macy 		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
469509f6ff4fSMatt Macy 			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
469609f6ff4fSMatt Macy 			goto fail_detach;
469709f6ff4fSMatt Macy 		}
469809f6ff4fSMatt Macy 		*ctxp = ctx;
469909f6ff4fSMatt Macy 
47007f87c040SMarius Strobl 		/*
47017f87c040SMarius Strobl 		 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
47027f87c040SMarius Strobl 		 * This must appear after the call to ether_ifattach() because
47037f87c040SMarius Strobl 		 * ether_ifattach() sets if_hdrlen to the default value.
47047f87c040SMarius Strobl 		 */
47057f87c040SMarius Strobl 		if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
47067f87c040SMarius Strobl 			if_setifheaderlen(ifp,
47077f87c040SMarius Strobl 			    sizeof(struct ether_vlan_header));
47087f87c040SMarius Strobl 
470909f6ff4fSMatt Macy 		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
471009f6ff4fSMatt Macy 		iflib_add_device_sysctl_post(ctx);
471109f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_INIT_DONE;
471209f6ff4fSMatt Macy 		return (0);
471309f6ff4fSMatt Macy 	}
471409f6ff4fSMatt Macy 	_iflib_pre_assert(scctx);
471509f6ff4fSMatt Macy 	ctx->ifc_txrx = *scctx->isc_txrx;
471609f6ff4fSMatt Macy 
471709f6ff4fSMatt Macy 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
471809f6ff4fSMatt Macy 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
471909f6ff4fSMatt Macy 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
472009f6ff4fSMatt Macy 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
472109f6ff4fSMatt Macy 
472209f6ff4fSMatt Macy 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
472309f6ff4fSMatt Macy 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
472409f6ff4fSMatt Macy 
472509f6ff4fSMatt Macy 	/* XXX change for per-queue sizes */
472609f6ff4fSMatt Macy 	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
472709f6ff4fSMatt Macy 		      scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
472809f6ff4fSMatt Macy 	for (i = 0; i < sctx->isc_nrxqs; i++) {
472909f6ff4fSMatt Macy 		if (!powerof2(scctx->isc_nrxd[i])) {
473009f6ff4fSMatt Macy 			/* round down instead? */
473109f6ff4fSMatt Macy 			device_printf(dev, "# rx descriptors must be a power of 2\n");
473209f6ff4fSMatt Macy 			err = EINVAL;
473309f6ff4fSMatt Macy 			goto fail;
473409f6ff4fSMatt Macy 		}
473509f6ff4fSMatt Macy 	}
473609f6ff4fSMatt Macy 	for (i = 0; i < sctx->isc_ntxqs; i++) {
473709f6ff4fSMatt Macy 		if (!powerof2(scctx->isc_ntxd[i])) {
473809f6ff4fSMatt Macy 			device_printf(dev,
473909f6ff4fSMatt Macy 			    "# tx descriptors must be a power of 2\n");
474009f6ff4fSMatt Macy 			err = EINVAL;
474109f6ff4fSMatt Macy 			goto fail;
474209f6ff4fSMatt Macy 		}
474309f6ff4fSMatt Macy 	}
474409f6ff4fSMatt Macy 
474509f6ff4fSMatt Macy 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
474609f6ff4fSMatt Macy 	    MAX_SINGLE_PACKET_FRACTION)
474709f6ff4fSMatt Macy 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
474809f6ff4fSMatt Macy 		    MAX_SINGLE_PACKET_FRACTION);
474909f6ff4fSMatt Macy 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
475009f6ff4fSMatt Macy 	    MAX_SINGLE_PACKET_FRACTION)
475109f6ff4fSMatt Macy 		scctx->isc_tx_tso_segments_max = max(1,
475209f6ff4fSMatt Macy 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
475309f6ff4fSMatt Macy 
475409f6ff4fSMatt Macy 	/* TSO parameters: pull these from the data sheet; they simply correspond to the DMA tag setup. */
47557f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
47567f87c040SMarius Strobl 		/*
47577f87c040SMarius Strobl 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
47587f87c040SMarius Strobl 		 * but some MACs do.
47597f87c040SMarius Strobl 		 */
47607f87c040SMarius Strobl 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
47617f87c040SMarius Strobl 		    IP_MAXPACKET));
47627f87c040SMarius Strobl 		/*
47637f87c040SMarius Strobl 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
47647f87c040SMarius Strobl 		 * into account.  In the worst case, each of these calls will
47657f87c040SMarius Strobl 		 * add another mbuf and, thus, the requirement for another DMA
47667f87c040SMarius Strobl 		 * segment.  So for best performance, it doesn't make sense to
47677f87c040SMarius Strobl 		 * advertise a maximum number of TSO segments that typically will
47687f87c040SMarius Strobl 		 * require defragmentation in iflib_encap().
47697f87c040SMarius Strobl 		 */
47707f87c040SMarius Strobl 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
47717f87c040SMarius Strobl 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
47727f87c040SMarius Strobl 	}
477309f6ff4fSMatt Macy 	if (scctx->isc_rss_table_size == 0)
477409f6ff4fSMatt Macy 		scctx->isc_rss_table_size = 64;
477509f6ff4fSMatt Macy 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
477609f6ff4fSMatt Macy 
477709f6ff4fSMatt Macy 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
477809f6ff4fSMatt Macy 	/* XXX format name */
477909f6ff4fSMatt Macy 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
478009f6ff4fSMatt Macy 
478109f6ff4fSMatt Macy 	/* XXX can support > 1, but keep it simple for now */
478209f6ff4fSMatt Macy 	scctx->isc_intr = IFLIB_INTR_LEGACY;
478309f6ff4fSMatt Macy 
478409f6ff4fSMatt Macy 	/* Get memory for the station queues */
478509f6ff4fSMatt Macy 	if ((err = iflib_queues_alloc(ctx))) {
478609f6ff4fSMatt Macy 		device_printf(dev, "Unable to allocate queue memory\n");
478709f6ff4fSMatt Macy 		goto fail;
478809f6ff4fSMatt Macy 	}
478909f6ff4fSMatt Macy 
479009f6ff4fSMatt Macy 	if ((err = iflib_qset_structures_setup(ctx))) {
479109f6ff4fSMatt Macy 		device_printf(dev, "qset structure setup failed %d\n", err);
479209f6ff4fSMatt Macy 		goto fail_queues;
479309f6ff4fSMatt Macy 	}
47947f87c040SMarius Strobl 
479509f6ff4fSMatt Macy 	/*
479609f6ff4fSMatt Macy 	 * XXX What if anything do we want to do about interrupts?
479709f6ff4fSMatt Macy 	 */
479809f6ff4fSMatt Macy 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
479909f6ff4fSMatt Macy 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
480009f6ff4fSMatt Macy 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
480109f6ff4fSMatt Macy 		goto fail_detach;
480209f6ff4fSMatt Macy 	}
48037f87c040SMarius Strobl 
48047f87c040SMarius Strobl 	/*
48057f87c040SMarius Strobl 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
48067f87c040SMarius Strobl 	 * This must appear after the call to ether_ifattach() because
48077f87c040SMarius Strobl 	 * ether_ifattach() sets if_hdrlen to the default value.
48087f87c040SMarius Strobl 	 */
48097f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
48107f87c040SMarius Strobl 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
48117f87c040SMarius Strobl 
481209f6ff4fSMatt Macy 	/* XXX handle more than one queue */
481309f6ff4fSMatt Macy 	for (i = 0; i < scctx->isc_nrxqsets; i++)
481409f6ff4fSMatt Macy 		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
481509f6ff4fSMatt Macy 
481609f6ff4fSMatt Macy 	*ctxp = ctx;
481709f6ff4fSMatt Macy 
481809f6ff4fSMatt Macy 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
481909f6ff4fSMatt Macy 	iflib_add_device_sysctl_post(ctx);
482009f6ff4fSMatt Macy 	ctx->ifc_flags |= IFC_INIT_DONE;
482109f6ff4fSMatt Macy 	return (0);
482209f6ff4fSMatt Macy fail_detach:
482309f6ff4fSMatt Macy 	ether_ifdetach(ctx->ifc_ifp);
482409f6ff4fSMatt Macy fail_queues:
482509f6ff4fSMatt Macy 	iflib_tx_structures_free(ctx);
482609f6ff4fSMatt Macy 	iflib_rx_structures_free(ctx);
482709f6ff4fSMatt Macy fail:
482809f6ff4fSMatt Macy 	IFDI_DETACH(ctx);
482909f6ff4fSMatt Macy 	return (err);
483009f6ff4fSMatt Macy }
483109f6ff4fSMatt Macy 
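/*
 * Tear down an interface created by iflib_pseudo_register(): unhook the
 * VLAN event handlers, detach the ifnet, detach the per-queue and
 * admin/vflr group tasks, free the RX bitmaps and queue structures, and
 * finally release the softc and the context itself.
 */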
483209f6ff4fSMatt Macy int
483309f6ff4fSMatt Macy iflib_pseudo_deregister(if_ctx_t ctx)
483409f6ff4fSMatt Macy {
483509f6ff4fSMatt Macy 	if_t ifp = ctx->ifc_ifp;
483609f6ff4fSMatt Macy 	iflib_txq_t txq;
483709f6ff4fSMatt Macy 	iflib_rxq_t rxq;
483809f6ff4fSMatt Macy 	int i, j;
483909f6ff4fSMatt Macy 	struct taskqgroup *tqg;
484009f6ff4fSMatt Macy 	iflib_fl_t fl;
484109f6ff4fSMatt Macy 
484209f6ff4fSMatt Macy 	/* Unregister VLAN events */
484309f6ff4fSMatt Macy 	if (ctx->ifc_vlan_attach_event != NULL)
484409f6ff4fSMatt Macy 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
484509f6ff4fSMatt Macy 	if (ctx->ifc_vlan_detach_event != NULL)
484609f6ff4fSMatt Macy 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
484709f6ff4fSMatt Macy 
484809f6ff4fSMatt Macy 	ether_ifdetach(ifp);
484909f6ff4fSMatt Macy 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
485009f6ff4fSMatt Macy 	CTX_LOCK_DESTROY(ctx);
485109f6ff4fSMatt Macy 	/* XXX drain any dependent tasks */
485209f6ff4fSMatt Macy 	tqg = qgroup_if_io_tqg;
485309f6ff4fSMatt Macy 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
485409f6ff4fSMatt Macy 		callout_drain(&txq->ift_timer);
485509f6ff4fSMatt Macy 		if (txq->ift_task.gt_uniq != NULL)
485609f6ff4fSMatt Macy 			taskqgroup_detach(tqg, &txq->ift_task);
485709f6ff4fSMatt Macy 	}
485809f6ff4fSMatt Macy 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
485909f6ff4fSMatt Macy 		if (rxq->ifr_task.gt_uniq != NULL)
486009f6ff4fSMatt Macy 			taskqgroup_detach(tqg, &rxq->ifr_task);
486109f6ff4fSMatt Macy 
486209f6ff4fSMatt Macy 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
486309f6ff4fSMatt Macy 			free(fl->ifl_rx_bitmap, M_IFLIB);
486409f6ff4fSMatt Macy 	}
486509f6ff4fSMatt Macy 	tqg = qgroup_if_config_tqg;
486609f6ff4fSMatt Macy 	if (ctx->ifc_admin_task.gt_uniq != NULL)
486709f6ff4fSMatt Macy 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
486809f6ff4fSMatt Macy 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
486909f6ff4fSMatt Macy 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
487009f6ff4fSMatt Macy 
487109f6ff4fSMatt Macy 	if_free(ifp);
487209f6ff4fSMatt Macy 
487309f6ff4fSMatt Macy 	iflib_tx_structures_free(ctx);
487409f6ff4fSMatt Macy 	iflib_rx_structures_free(ctx);
487509f6ff4fSMatt Macy 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
487609f6ff4fSMatt Macy 		free(ctx->ifc_softc, M_IFLIB);
487709f6ff4fSMatt Macy 	free(ctx, M_IFLIB);
487809f6ff4fSMatt Macy 	return (0);
487909f6ff4fSMatt Macy }
488009f6ff4fSMatt Macy 
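/*
 * Generic device_attach entry point: fetch the driver's shared context via
 * DEVICE_REGISTER, enable bus mastering, and hand off to
 * iflib_device_register().
 *
 * Illustrative sketch (not part of this file; the "foo" names are
 * hypothetical): an iflib-based PCI driver typically points its device
 * methods straight at these wrappers and only supplies its own
 * DEVICE_REGISTER method:
 *
 *	static device_method_t foo_methods[] = {
 *		DEVMETHOD(device_register, foo_register),
 *		DEVMETHOD(device_probe, iflib_device_probe),
 *		DEVMETHOD(device_attach, iflib_device_attach),
 *		DEVMETHOD(device_detach, iflib_device_detach),
 *		DEVMETHOD(device_shutdown, iflib_device_shutdown),
 *		DEVMETHOD(device_suspend, iflib_device_suspend),
 *		DEVMETHOD(device_resume, iflib_device_resume),
 *		DEVMETHOD_END
 *	};
 */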
488109f6ff4fSMatt Macy int
48824c7070dbSScott Long iflib_device_attach(device_t dev)
48834c7070dbSScott Long {
48844c7070dbSScott Long 	if_ctx_t ctx;
48854c7070dbSScott Long 	if_shared_ctx_t sctx;
48864c7070dbSScott Long 
48874c7070dbSScott Long 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
48884c7070dbSScott Long 		return (ENOTSUP);
48894c7070dbSScott Long 
48904c7070dbSScott Long 	pci_enable_busmaster(dev);
48914c7070dbSScott Long 
48924c7070dbSScott Long 	return (iflib_device_register(dev, NULL, sctx, &ctx));
48934c7070dbSScott Long }
48944c7070dbSScott Long 
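/*
 * Detach a registered device: refuse if a VLAN trunk is still attached,
 * stop the interface, unhook the VLAN events, detach netmap and the ifnet,
 * drain the per-queue and config group tasks, call IFDI_DETACH, release
 * MSI/MSI-X resources, and free the queue structures, softc, and context.
 */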
48954c7070dbSScott Long int
48964c7070dbSScott Long iflib_device_deregister(if_ctx_t ctx)
48974c7070dbSScott Long {
48984c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
48994c7070dbSScott Long 	iflib_txq_t txq;
49004c7070dbSScott Long 	iflib_rxq_t rxq;
49014c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
490287890dbaSSean Bruno 	int i, j;
49034c7070dbSScott Long 	struct taskqgroup *tqg;
490487890dbaSSean Bruno 	iflib_fl_t fl;
49054c7070dbSScott Long 
49064c7070dbSScott Long 	/* Make sure VLANS are not using driver */
49074c7070dbSScott Long 	if (if_vlantrunkinuse(ifp)) {
49084c7070dbSScott Long 		device_printf(dev, "VLAN in use, detach first\n");
49094c7070dbSScott Long 		return (EBUSY);
49104c7070dbSScott Long 	}
49114c7070dbSScott Long 
49124c7070dbSScott Long 	CTX_LOCK(ctx);
49134c7070dbSScott Long 	ctx->ifc_in_detach = 1;
49144c7070dbSScott Long 	iflib_stop(ctx);
49154c7070dbSScott Long 	CTX_UNLOCK(ctx);
49164c7070dbSScott Long 
49174c7070dbSScott Long 	/* Unregister VLAN events */
49184c7070dbSScott Long 	if (ctx->ifc_vlan_attach_event != NULL)
49194c7070dbSScott Long 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
49204c7070dbSScott Long 	if (ctx->ifc_vlan_detach_event != NULL)
49214c7070dbSScott Long 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
49224c7070dbSScott Long 
49234c7070dbSScott Long 	iflib_netmap_detach(ifp);
49244c7070dbSScott Long 	ether_ifdetach(ifp);
49254c7070dbSScott Long 	if (ctx->ifc_led_dev != NULL)
49264c7070dbSScott Long 		led_destroy(ctx->ifc_led_dev);
49274c7070dbSScott Long 	/* XXX drain any dependent tasks */
4928ab2e3f79SStephen Hurd 	tqg = qgroup_if_io_tqg;
492923ac9029SStephen Hurd 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
49304c7070dbSScott Long 		callout_drain(&txq->ift_timer);
49314c7070dbSScott Long 		if (txq->ift_task.gt_uniq != NULL)
49324c7070dbSScott Long 			taskqgroup_detach(tqg, &txq->ift_task);
49334c7070dbSScott Long 	}
49344c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
49354c7070dbSScott Long 		if (rxq->ifr_task.gt_uniq != NULL)
49364c7070dbSScott Long 			taskqgroup_detach(tqg, &rxq->ifr_task);
493787890dbaSSean Bruno 
493887890dbaSSean Bruno 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
493987890dbaSSean Bruno 			free(fl->ifl_rx_bitmap, M_IFLIB);
494087890dbaSSean Bruno 
49414c7070dbSScott Long 	}
4942ab2e3f79SStephen Hurd 	tqg = qgroup_if_config_tqg;
49434c7070dbSScott Long 	if (ctx->ifc_admin_task.gt_uniq != NULL)
49444c7070dbSScott Long 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
49454c7070dbSScott Long 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
49464c7070dbSScott Long 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
49476c3c3194SMatt Macy 	CTX_LOCK(ctx);
49484c7070dbSScott Long 	IFDI_DETACH(ctx);
49496c3c3194SMatt Macy 	CTX_UNLOCK(ctx);
49506c3c3194SMatt Macy 
49516c3c3194SMatt Macy 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
49526c3c3194SMatt Macy 	CTX_LOCK_DESTROY(ctx);
495323ac9029SStephen Hurd 	device_set_softc(ctx->ifc_dev, NULL);
49544c7070dbSScott Long 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
49554c7070dbSScott Long 		pci_release_msi(dev);
49564c7070dbSScott Long 	}
49574c7070dbSScott Long 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
49584c7070dbSScott Long 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
49594c7070dbSScott Long 	}
49604c7070dbSScott Long 	if (ctx->ifc_msix_mem != NULL) {
49614c7070dbSScott Long 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
49624c7070dbSScott Long 			ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
49634c7070dbSScott Long 		ctx->ifc_msix_mem = NULL;
49644c7070dbSScott Long 	}
49654c7070dbSScott Long 
49664c7070dbSScott Long 	bus_generic_detach(dev);
49674c7070dbSScott Long 	if_free(ifp);
49684c7070dbSScott Long 
49694c7070dbSScott Long 	iflib_tx_structures_free(ctx);
49704c7070dbSScott Long 	iflib_rx_structures_free(ctx);
497123ac9029SStephen Hurd 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
497223ac9029SStephen Hurd 		free(ctx->ifc_softc, M_IFLIB);
497323ac9029SStephen Hurd 	free(ctx, M_IFLIB);
49744c7070dbSScott Long 	return (0);
49754c7070dbSScott Long }
49764c7070dbSScott Long 
49774c7070dbSScott Long 
49784c7070dbSScott Long int
49794c7070dbSScott Long iflib_device_detach(device_t dev)
49804c7070dbSScott Long {
49814c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
49824c7070dbSScott Long 
49834c7070dbSScott Long 	return (iflib_device_deregister(ctx));
49844c7070dbSScott Long }
49854c7070dbSScott Long 
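/*
 * Suspend, shutdown, and resume are thin wrappers: each serializes the
 * corresponding IFDI call under the context lock and then chains to the
 * bus_generic handler.  Resume additionally re-runs iflib_init_locked()
 * and restarts the TX queues.
 */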
49864c7070dbSScott Long int
49874c7070dbSScott Long iflib_device_suspend(device_t dev)
49884c7070dbSScott Long {
49894c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
49904c7070dbSScott Long 
49914c7070dbSScott Long 	CTX_LOCK(ctx);
49924c7070dbSScott Long 	IFDI_SUSPEND(ctx);
49934c7070dbSScott Long 	CTX_UNLOCK(ctx);
49944c7070dbSScott Long 
49954c7070dbSScott Long 	return (bus_generic_suspend(dev));
49964c7070dbSScott Long }
49974c7070dbSScott Long int
49984c7070dbSScott Long iflib_device_shutdown(device_t dev)
49994c7070dbSScott Long {
50004c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
50014c7070dbSScott Long 
50024c7070dbSScott Long 	CTX_LOCK(ctx);
50034c7070dbSScott Long 	IFDI_SHUTDOWN(ctx);
50044c7070dbSScott Long 	CTX_UNLOCK(ctx);
50054c7070dbSScott Long 
50064c7070dbSScott Long 	return (bus_generic_suspend(dev));
50074c7070dbSScott Long }
50084c7070dbSScott Long 
50094c7070dbSScott Long 
50104c7070dbSScott Long int
50114c7070dbSScott Long iflib_device_resume(device_t dev)
50124c7070dbSScott Long {
50134c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
50144c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
50154c7070dbSScott Long 
50164c7070dbSScott Long 	CTX_LOCK(ctx);
50174c7070dbSScott Long 	IFDI_RESUME(ctx);
50184c7070dbSScott Long 	iflib_init_locked(ctx);
50194c7070dbSScott Long 	CTX_UNLOCK(ctx);
50204c7070dbSScott Long 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
50214c7070dbSScott Long 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
50224c7070dbSScott Long 
50234c7070dbSScott Long 	return (bus_generic_resume(dev));
50244c7070dbSScott Long }
50254c7070dbSScott Long 
50264c7070dbSScott Long int
50274c7070dbSScott Long iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
50284c7070dbSScott Long {
50294c7070dbSScott Long 	int error;
50304c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
50314c7070dbSScott Long 
50324c7070dbSScott Long 	CTX_LOCK(ctx);
50334c7070dbSScott Long 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
50344c7070dbSScott Long 	CTX_UNLOCK(ctx);
50354c7070dbSScott Long 
50364c7070dbSScott Long 	return (error);
50374c7070dbSScott Long }
50384c7070dbSScott Long 
50394c7070dbSScott Long void
50404c7070dbSScott Long iflib_device_iov_uninit(device_t dev)
50414c7070dbSScott Long {
50424c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
50434c7070dbSScott Long 
50444c7070dbSScott Long 	CTX_LOCK(ctx);
50454c7070dbSScott Long 	IFDI_IOV_UNINIT(ctx);
50464c7070dbSScott Long 	CTX_UNLOCK(ctx);
50474c7070dbSScott Long }
50484c7070dbSScott Long 
50494c7070dbSScott Long int
50504c7070dbSScott Long iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
50514c7070dbSScott Long {
50524c7070dbSScott Long 	int error;
50534c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
50544c7070dbSScott Long 
50554c7070dbSScott Long 	CTX_LOCK(ctx);
50564c7070dbSScott Long 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
50574c7070dbSScott Long 	CTX_UNLOCK(ctx);
50584c7070dbSScott Long 
50594c7070dbSScott Long 	return (error);
50604c7070dbSScott Long }
50614c7070dbSScott Long 
50624c7070dbSScott Long /*********************************************************************
50634c7070dbSScott Long  *
50644c7070dbSScott Long  *  MODULE FUNCTION DEFINITIONS
50654c7070dbSScott Long  *
50664c7070dbSScott Long  **********************************************************************/
50674c7070dbSScott Long 
5068ab2e3f79SStephen Hurd /*
5069ab2e3f79SStephen Hurd  * - Start a fast taskqueue thread for each core
5070ab2e3f79SStephen Hurd  * - Start a taskqueue for control operations
5071ab2e3f79SStephen Hurd  */
50724c7070dbSScott Long static int
50734c7070dbSScott Long iflib_module_init(void)
50744c7070dbSScott Long {
50754c7070dbSScott Long 	return (0);
50764c7070dbSScott Long }
50774c7070dbSScott Long 
50784c7070dbSScott Long static int
50794c7070dbSScott Long iflib_module_event_handler(module_t mod, int what, void *arg)
50804c7070dbSScott Long {
50814c7070dbSScott Long 	int err;
50824c7070dbSScott Long 
50834c7070dbSScott Long 	switch (what) {
50844c7070dbSScott Long 	case MOD_LOAD:
50854c7070dbSScott Long 		if ((err = iflib_module_init()) != 0)
50864c7070dbSScott Long 			return (err);
50874c7070dbSScott Long 		break;
50884c7070dbSScott Long 	case MOD_UNLOAD:
50894c7070dbSScott Long 		return (EBUSY);
50904c7070dbSScott Long 	default:
50914c7070dbSScott Long 		return (EOPNOTSUPP);
50924c7070dbSScott Long 	}
50934c7070dbSScott Long 
50944c7070dbSScott Long 	return (0);
50954c7070dbSScott Long }
50964c7070dbSScott Long 
50974c7070dbSScott Long /*********************************************************************
50984c7070dbSScott Long  *
50994c7070dbSScott Long  *  PUBLIC FUNCTION DEFINITIONS
51004c7070dbSScott Long  *     ordered as in iflib.h
51014c7070dbSScott Long  *
51024c7070dbSScott Long  **********************************************************************/
51034c7070dbSScott Long 
51044c7070dbSScott Long 
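/*
 * Assert that the driver filled in the mandatory shared-context fields
 * (DMA limits and descriptor ring bounds) before registration.
 */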
51054c7070dbSScott Long static void
51064c7070dbSScott Long _iflib_assert(if_shared_ctx_t sctx)
51074c7070dbSScott Long {
51084c7070dbSScott Long 	MPASS(sctx->isc_tx_maxsize);
51094c7070dbSScott Long 	MPASS(sctx->isc_tx_maxsegsize);
51104c7070dbSScott Long 
51114c7070dbSScott Long 	MPASS(sctx->isc_rx_maxsize);
51124c7070dbSScott Long 	MPASS(sctx->isc_rx_nsegments);
51134c7070dbSScott Long 	MPASS(sctx->isc_rx_maxsegsize);
51144c7070dbSScott Long 
511523ac9029SStephen Hurd 	MPASS(sctx->isc_nrxd_min[0]);
511623ac9029SStephen Hurd 	MPASS(sctx->isc_nrxd_max[0]);
511723ac9029SStephen Hurd 	MPASS(sctx->isc_nrxd_default[0]);
511823ac9029SStephen Hurd 	MPASS(sctx->isc_ntxd_min[0]);
511923ac9029SStephen Hurd 	MPASS(sctx->isc_ntxd_max[0]);
512023ac9029SStephen Hurd 	MPASS(sctx->isc_ntxd_default[0]);
51214c7070dbSScott Long }
51224c7070dbSScott Long 
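/*
 * Assert that IFDI_ATTACH_PRE populated the per-instance txrx method
 * table.
 */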
51231248952aSSean Bruno static void
51241248952aSSean Bruno _iflib_pre_assert(if_softc_ctx_t scctx)
51251248952aSSean Bruno {
51261248952aSSean Bruno 
51271248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_encap);
51281248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_flush);
51291248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
51301248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_available);
51311248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
51321248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_refill);
51331248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_flush);
51341248952aSSean Bruno }
51352fe66646SSean Bruno 
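/*
 * Allocate and initialize the ifnet for this context: compile the driver's
 * kobj class, install the iflib if_init/if_ioctl/if_transmit/if_qflush
 * methods, register the VLAN config/unconfig event handlers, and set up
 * the ifmedia callbacks.
 */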
51364c7070dbSScott Long static int
51374c7070dbSScott Long iflib_register(if_ctx_t ctx)
51384c7070dbSScott Long {
51394c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
51404c7070dbSScott Long 	driver_t *driver = sctx->isc_driver;
51414c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
51424c7070dbSScott Long 	if_t ifp;
51434c7070dbSScott Long 
51444c7070dbSScott Long 	_iflib_assert(sctx);
51454c7070dbSScott Long 
5146aa8a24d3SStephen Hurd 	CTX_LOCK_INIT(ctx);
51477b610b60SSean Bruno 	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
51484c7070dbSScott Long 	ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
51494c7070dbSScott Long 	if (ifp == NULL) {
51504c7070dbSScott Long 		device_printf(dev, "can not allocate ifnet structure\n");
51514c7070dbSScott Long 		return (ENOMEM);
51524c7070dbSScott Long 	}
51534c7070dbSScott Long 
51544c7070dbSScott Long 	/*
51554c7070dbSScott Long 	 * Initialize our context's device specific methods
51564c7070dbSScott Long 	 */
51574c7070dbSScott Long 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
51584c7070dbSScott Long 	kobj_class_compile((kobj_class_t) driver);
51594c7070dbSScott Long 	driver->refs++;
51604c7070dbSScott Long 
51614c7070dbSScott Long 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
51624c7070dbSScott Long 	if_setsoftc(ifp, ctx);
51634c7070dbSScott Long 	if_setdev(ifp, dev);
51644c7070dbSScott Long 	if_setinitfn(ifp, iflib_if_init);
51654c7070dbSScott Long 	if_setioctlfn(ifp, iflib_if_ioctl);
51664c7070dbSScott Long 	if_settransmitfn(ifp, iflib_if_transmit);
51674c7070dbSScott Long 	if_setqflushfn(ifp, iflib_if_qflush);
51684c7070dbSScott Long 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
51694c7070dbSScott Long 
51704c7070dbSScott Long 	ctx->ifc_vlan_attach_event =
51714c7070dbSScott Long 		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
51724c7070dbSScott Long 							  EVENTHANDLER_PRI_FIRST);
51734c7070dbSScott Long 	ctx->ifc_vlan_detach_event =
51744c7070dbSScott Long 		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
51754c7070dbSScott Long 							  EVENTHANDLER_PRI_FIRST);
51764c7070dbSScott Long 
51774c7070dbSScott Long 	ifmedia_init(&ctx->ifc_media, IFM_IMASK,
51784c7070dbSScott Long 					 iflib_media_change, iflib_media_status);
51794c7070dbSScott Long 
51804c7070dbSScott Long 	return (0);
51814c7070dbSScott Long }
51824c7070dbSScott Long 
51834c7070dbSScott Long 
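/*
 * Allocate the TX and RX queue set arrays along with their descriptor DMA
 * areas, software descriptors, mp_rings, and free lists, then hand the
 * descriptor addresses down to the driver via IFDI_TX_QUEUES_ALLOC and
 * IFDI_RX_QUEUES_ALLOC.
 */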
51844c7070dbSScott Long static int
51854c7070dbSScott Long iflib_queues_alloc(if_ctx_t ctx)
51864c7070dbSScott Long {
51874c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
518823ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
51894c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
519023ac9029SStephen Hurd 	int nrxqsets = scctx->isc_nrxqsets;
519123ac9029SStephen Hurd 	int ntxqsets = scctx->isc_ntxqsets;
51924c7070dbSScott Long 	iflib_txq_t txq;
51934c7070dbSScott Long 	iflib_rxq_t rxq;
51944c7070dbSScott Long 	iflib_fl_t fl = NULL;
519523ac9029SStephen Hurd 	int i, j, cpu, err, txconf, rxconf;
51964c7070dbSScott Long 	iflib_dma_info_t ifdip;
519723ac9029SStephen Hurd 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
519823ac9029SStephen Hurd 	uint32_t *txqsizes = scctx->isc_txqsizes;
51994c7070dbSScott Long 	uint8_t nrxqs = sctx->isc_nrxqs;
52004c7070dbSScott Long 	uint8_t ntxqs = sctx->isc_ntxqs;
52014c7070dbSScott Long 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
52024c7070dbSScott Long 	caddr_t *vaddrs;
52034c7070dbSScott Long 	uint64_t *paddrs;
52044c7070dbSScott Long 
520523ac9029SStephen Hurd 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
520623ac9029SStephen Hurd 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
52074c7070dbSScott Long 
52084c7070dbSScott Long 	/* Allocate the TX ring struct memory */
5209b89827a0SStephen Hurd 	if (!(ctx->ifc_txqs =
5210ac2fffa4SPedro F. Giffuni 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5211ac2fffa4SPedro F. Giffuni 	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
52124c7070dbSScott Long 		device_printf(dev, "Unable to allocate TX ring memory\n");
52134c7070dbSScott Long 		err = ENOMEM;
52144c7070dbSScott Long 		goto fail;
52154c7070dbSScott Long 	}
52164c7070dbSScott Long 
52174c7070dbSScott Long 	/* Now allocate the RX */
5218b89827a0SStephen Hurd 	if (!(ctx->ifc_rxqs =
5219ac2fffa4SPedro F. Giffuni 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5220ac2fffa4SPedro F. Giffuni 	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
52214c7070dbSScott Long 		device_printf(dev, "Unable to allocate RX ring memory\n");
52224c7070dbSScott Long 		err = ENOMEM;
52234c7070dbSScott Long 		goto rx_fail;
52244c7070dbSScott Long 	}
52254c7070dbSScott Long 
5226b89827a0SStephen Hurd 	txq = ctx->ifc_txqs;
5227b89827a0SStephen Hurd 	rxq = ctx->ifc_rxqs;
52284c7070dbSScott Long 
52294c7070dbSScott Long 	/*
52304c7070dbSScott Long 	 * XXX handle allocation failure
52314c7070dbSScott Long 	 */
523296c85efbSNathan Whitehorn 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
52334c7070dbSScott Long 		/* Set up some basics */
52344c7070dbSScott Long 
52354c7070dbSScott Long 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
52364c7070dbSScott Long 			device_printf(dev, "failed to allocate iflib_dma_info\n");
52374c7070dbSScott Long 			err = ENOMEM;
52380d0338afSConrad Meyer 			goto err_tx_desc;
52394c7070dbSScott Long 		}
52404c7070dbSScott Long 		txq->ift_ifdi = ifdip;
52414c7070dbSScott Long 		for (j = 0; j < ntxqs; j++, ifdip++) {
52424c7070dbSScott Long 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
52434c7070dbSScott Long 				device_printf(dev, "Unable to allocate Descriptor memory\n");
52444c7070dbSScott Long 				err = ENOMEM;
52454c7070dbSScott Long 				goto err_tx_desc;
52464c7070dbSScott Long 			}
524795246abbSSean Bruno 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
52484c7070dbSScott Long 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
52494c7070dbSScott Long 		}
52504c7070dbSScott Long 		txq->ift_ctx = ctx;
52514c7070dbSScott Long 		txq->ift_id = i;
525223ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
525323ac9029SStephen Hurd 			txq->ift_br_offset = 1;
525423ac9029SStephen Hurd 		} else {
525523ac9029SStephen Hurd 			txq->ift_br_offset = 0;
525623ac9029SStephen Hurd 		}
52574c7070dbSScott Long 		/* XXX fix this */
525896c85efbSNathan Whitehorn 		txq->ift_timer.c_cpu = cpu;
52594c7070dbSScott Long 
52604c7070dbSScott Long 		if (iflib_txsd_alloc(txq)) {
52614c7070dbSScott Long 			device_printf(dev, "Critical Failure setting up TX buffers\n");
52624c7070dbSScott Long 			err = ENOMEM;
52634c7070dbSScott Long 			goto err_tx_desc;
52644c7070dbSScott Long 		}
52654c7070dbSScott Long 
52664c7070dbSScott Long 		/* Initialize the TX lock */
52674c7070dbSScott Long 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
52684c7070dbSScott Long 		    device_get_nameunit(dev), txq->ift_id);
52694c7070dbSScott Long 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
52704c7070dbSScott Long 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
52714c7070dbSScott Long 
52724c7070dbSScott Long 		snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
52734c7070dbSScott Long 			 device_get_nameunit(dev), txq->ift_id);
52744c7070dbSScott Long 
527595246abbSSean Bruno 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
52764c7070dbSScott Long 				      iflib_txq_can_drain, M_IFLIB, M_WAITOK);
52774c7070dbSScott Long 		if (err) {
52784c7070dbSScott Long 			/* XXX free any allocated rings */
52794c7070dbSScott Long 			device_printf(dev, "Unable to allocate buf_ring\n");
52800d0338afSConrad Meyer 			goto err_tx_desc;
52814c7070dbSScott Long 		}
52824c7070dbSScott Long 	}
52834c7070dbSScott Long 
52844c7070dbSScott Long 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
52854c7070dbSScott Long 		/* Set up some basics */
52864c7070dbSScott Long 
52874c7070dbSScott Long 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
52884c7070dbSScott Long 			device_printf(dev, "failed to allocate iflib_dma_info\n");
52894c7070dbSScott Long 			err = ENOMEM;
52900d0338afSConrad Meyer 			goto err_tx_desc;
52914c7070dbSScott Long 		}
52924c7070dbSScott Long 
52934c7070dbSScott Long 		rxq->ifr_ifdi = ifdip;
529495246abbSSean Bruno 		/* XXX this needs to be changed if #rx queues != #tx queues */
529595246abbSSean Bruno 		rxq->ifr_ntxqirq = 1;
529695246abbSSean Bruno 		rxq->ifr_txqid[0] = i;
52974c7070dbSScott Long 		for (j = 0; j < nrxqs; j++, ifdip++) {
52984c7070dbSScott Long 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
52994c7070dbSScott Long 				device_printf(dev, "Unable to allocate Descriptor memory\n");
53004c7070dbSScott Long 				err = ENOMEM;
53014c7070dbSScott Long 				goto err_tx_desc;
53024c7070dbSScott Long 			}
53034c7070dbSScott Long 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
53044c7070dbSScott Long 		}
53054c7070dbSScott Long 		rxq->ifr_ctx = ctx;
53064c7070dbSScott Long 		rxq->ifr_id = i;
530723ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
530823ac9029SStephen Hurd 			rxq->ifr_fl_offset = 1;
53094c7070dbSScott Long 		} else {
531023ac9029SStephen Hurd 			rxq->ifr_fl_offset = 0;
53114c7070dbSScott Long 		}
53124c7070dbSScott Long 		rxq->ifr_nfl = nfree_lists;
53134c7070dbSScott Long 		if (!(fl =
5314ac2fffa4SPedro F. Giffuni 			  (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
53154c7070dbSScott Long 			device_printf(dev, "Unable to allocate free list memory\n");
53164c7070dbSScott Long 			err = ENOMEM;
53170d0338afSConrad Meyer 			goto err_tx_desc;
53184c7070dbSScott Long 		}
53194c7070dbSScott Long 		rxq->ifr_fl = fl;
53204c7070dbSScott Long 		for (j = 0; j < nfree_lists; j++) {
532195246abbSSean Bruno 			fl[j].ifl_rxq = rxq;
532295246abbSSean Bruno 			fl[j].ifl_id = j;
532395246abbSSean Bruno 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
532495246abbSSean Bruno 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
53254c7070dbSScott Long 		}
53264c7070dbSScott Long 		/* Allocate receive buffers for the ring */
53274c7070dbSScott Long 		if (iflib_rxsd_alloc(rxq)) {
53284c7070dbSScott Long 			device_printf(dev,
53294c7070dbSScott Long 			    "Critical Failure setting up receive buffers\n");
53304c7070dbSScott Long 			err = ENOMEM;
53314c7070dbSScott Long 			goto err_rx_desc;
53324c7070dbSScott Long 		}
533387890dbaSSean Bruno 
533487890dbaSSean Bruno 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
533587890dbaSSean Bruno 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO);
53364c7070dbSScott Long 	}
53374c7070dbSScott Long 
53384c7070dbSScott Long 	/* TXQs */
53394c7070dbSScott Long 	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
53404c7070dbSScott Long 	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
53414c7070dbSScott Long 	for (i = 0; i < ntxqsets; i++) {
53424c7070dbSScott Long 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
53434c7070dbSScott Long 
53444c7070dbSScott Long 		for (j = 0; j < ntxqs; j++, di++) {
53454c7070dbSScott Long 			vaddrs[i*ntxqs + j] = di->idi_vaddr;
53464c7070dbSScott Long 			paddrs[i*ntxqs + j] = di->idi_paddr;
53474c7070dbSScott Long 		}
53484c7070dbSScott Long 	}
53494c7070dbSScott Long 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
53504c7070dbSScott Long 		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
53514c7070dbSScott Long 		iflib_tx_structures_free(ctx);
53524c7070dbSScott Long 		free(vaddrs, M_IFLIB);
53534c7070dbSScott Long 		free(paddrs, M_IFLIB);
53544c7070dbSScott Long 		goto err_rx_desc;
53554c7070dbSScott Long 	}
53564c7070dbSScott Long 	free(vaddrs, M_IFLIB);
53574c7070dbSScott Long 	free(paddrs, M_IFLIB);
53584c7070dbSScott Long 
53594c7070dbSScott Long 	/* RXQs */
53604c7070dbSScott Long 	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
53614c7070dbSScott Long 	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
53624c7070dbSScott Long 	for (i = 0; i < nrxqsets; i++) {
53634c7070dbSScott Long 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
53644c7070dbSScott Long 
53654c7070dbSScott Long 		for (j = 0; j < nrxqs; j++, di++) {
53664c7070dbSScott Long 			vaddrs[i*nrxqs + j] = di->idi_vaddr;
53674c7070dbSScott Long 			paddrs[i*nrxqs + j] = di->idi_paddr;
53684c7070dbSScott Long 		}
53694c7070dbSScott Long 	}
53704c7070dbSScott Long 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
53714c7070dbSScott Long 		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
53724c7070dbSScott Long 		iflib_tx_structures_free(ctx);
53734c7070dbSScott Long 		free(vaddrs, M_IFLIB);
53744c7070dbSScott Long 		free(paddrs, M_IFLIB);
53754c7070dbSScott Long 		goto err_rx_desc;
53764c7070dbSScott Long 	}
53774c7070dbSScott Long 	free(vaddrs, M_IFLIB);
53784c7070dbSScott Long 	free(paddrs, M_IFLIB);
53794c7070dbSScott Long 
53804c7070dbSScott Long 	return (0);
53814c7070dbSScott Long 
53824c7070dbSScott Long /* XXX handle allocation failure changes */
53834c7070dbSScott Long err_rx_desc:
53844c7070dbSScott Long err_tx_desc:
5385b89827a0SStephen Hurd rx_fail:
53864c7070dbSScott Long 	if (ctx->ifc_rxqs != NULL)
53874c7070dbSScott Long 		free(ctx->ifc_rxqs, M_IFLIB);
53884c7070dbSScott Long 	ctx->ifc_rxqs = NULL;
53894c7070dbSScott Long 	if (ctx->ifc_txqs != NULL)
53904c7070dbSScott Long 		free(ctx->ifc_txqs, M_IFLIB);
53914c7070dbSScott Long 	ctx->ifc_txqs = NULL;
53924c7070dbSScott Long fail:
53934c7070dbSScott Long 	return (err);
53944c7070dbSScott Long }
53954c7070dbSScott Long 
53964c7070dbSScott Long static int
53974c7070dbSScott Long iflib_tx_structures_setup(if_ctx_t ctx)
53984c7070dbSScott Long {
53994c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
54004c7070dbSScott Long 	int i;
54014c7070dbSScott Long 
54024c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
54034c7070dbSScott Long 		iflib_txq_setup(txq);
54044c7070dbSScott Long 
54054c7070dbSScott Long 	return (0);
54064c7070dbSScott Long }
54074c7070dbSScott Long 
54084c7070dbSScott Long static void
54094c7070dbSScott Long iflib_tx_structures_free(if_ctx_t ctx)
54104c7070dbSScott Long {
54114c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
54124c7070dbSScott Long 	int i, j;
54134c7070dbSScott Long 
54144c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
54154c7070dbSScott Long 		iflib_txq_destroy(txq);
54164c7070dbSScott Long 		for (j = 0; j < ctx->ifc_nhwtxqs; j++)
54174c7070dbSScott Long 			iflib_dma_free(&txq->ift_ifdi[j]);
54184c7070dbSScott Long 	}
54194c7070dbSScott Long 	free(ctx->ifc_txqs, M_IFLIB);
54204c7070dbSScott Long 	ctx->ifc_txqs = NULL;
54214c7070dbSScott Long 	IFDI_QUEUES_FREE(ctx);
54224c7070dbSScott Long }
54234c7070dbSScott Long 
54244c7070dbSScott Long /*********************************************************************
54254c7070dbSScott Long  *
54264c7070dbSScott Long  *  Initialize all receive rings.
54274c7070dbSScott Long  *
54284c7070dbSScott Long  **********************************************************************/
54294c7070dbSScott Long static int
54304c7070dbSScott Long iflib_rx_structures_setup(if_ctx_t ctx)
54314c7070dbSScott Long {
54324c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5433aaeb188aSBjoern A. Zeeb 	int q;
5434aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
5435aaeb188aSBjoern A. Zeeb 	int i, err;
5436aaeb188aSBjoern A. Zeeb #endif
54374c7070dbSScott Long 
54384c7070dbSScott Long 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5439aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
54404c7070dbSScott Long 		tcp_lro_free(&rxq->ifr_lc);
544123ac9029SStephen Hurd 		if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
544223ac9029SStephen Hurd 		    TCP_LRO_ENTRIES, min(1024,
544323ac9029SStephen Hurd 		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
54444c7070dbSScott Long 			device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
54454c7070dbSScott Long 			goto fail;
54464c7070dbSScott Long 		}
54474c7070dbSScott Long 		rxq->ifr_lro_enabled = TRUE;
5448aaeb188aSBjoern A. Zeeb #endif
54494c7070dbSScott Long 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
54504c7070dbSScott Long 	}
54514c7070dbSScott Long 	return (0);
5452aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
54534c7070dbSScott Long fail:
54544c7070dbSScott Long 	/*
54554c7070dbSScott Long 	 * Free the RX software descriptors allocated so far; we will only
54564c7070dbSScott Long 	 * handle the rings that completed, as the failing case will have
54574c7070dbSScott Long 	 * cleaned up after itself.  'q' failed, so it's the terminus.
54584c7070dbSScott Long 	 */
54594c7070dbSScott Long 	rxq = ctx->ifc_rxqs;
54604c7070dbSScott Long 	for (i = 0; i < q; ++i, rxq++) {
54614c7070dbSScott Long 		iflib_rx_sds_free(rxq);
54624c7070dbSScott Long 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
54634c7070dbSScott Long 	}
54644c7070dbSScott Long 	return (err);
5465aaeb188aSBjoern A. Zeeb #endif
54664c7070dbSScott Long }
54674c7070dbSScott Long 
54684c7070dbSScott Long /*********************************************************************
54694c7070dbSScott Long  *
54704c7070dbSScott Long  *  Free all receive rings.
54714c7070dbSScott Long  *
54724c7070dbSScott Long  **********************************************************************/
54734c7070dbSScott Long static void
54744c7070dbSScott Long iflib_rx_structures_free(if_ctx_t ctx)
54754c7070dbSScott Long {
54764c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
54774c7070dbSScott Long 
547823ac9029SStephen Hurd 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
54794c7070dbSScott Long 		iflib_rx_sds_free(rxq);
54804c7070dbSScott Long 	}
54814c7070dbSScott Long }
54824c7070dbSScott Long 
54834c7070dbSScott Long static int
54844c7070dbSScott Long iflib_qset_structures_setup(if_ctx_t ctx)
54854c7070dbSScott Long {
54864c7070dbSScott Long 	int err;
54874c7070dbSScott Long 
54886108c013SStephen Hurd 	/*
54896108c013SStephen Hurd 	 * It is expected that the caller takes care of freeing queues if this
54906108c013SStephen Hurd 	 * fails.
54916108c013SStephen Hurd 	 */
5492ac88e6daSStephen Hurd 	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
5493ac88e6daSStephen Hurd 		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
54944c7070dbSScott Long 		return (err);
5495ac88e6daSStephen Hurd 	}
54964c7070dbSScott Long 
54976108c013SStephen Hurd 	if ((err = iflib_rx_structures_setup(ctx)) != 0)
54984c7070dbSScott Long 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
54996108c013SStephen Hurd 
55004c7070dbSScott Long 	return (err);
55014c7070dbSScott Long }
55024c7070dbSScott Long 
55034c7070dbSScott Long int
55044c7070dbSScott Long iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
55053e0e6330SStephen Hurd 		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
55064c7070dbSScott Long {
55074c7070dbSScott Long 
55084c7070dbSScott Long 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
55094c7070dbSScott Long }
55104c7070dbSScott Long 
5511b103855eSStephen Hurd #ifdef SMP
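/*
 * Return the CPU id of the qid'th CPU (modulo the number of CPUs) in the
 * interrupt CPU set captured at attach time.
 */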
5512aa3c5dd8SSean Bruno static int
5513b103855eSStephen Hurd find_nth(if_ctx_t ctx, int qid)
55144c7070dbSScott Long {
5515b103855eSStephen Hurd 	cpuset_t cpus;
5516aa3c5dd8SSean Bruno 	int i, cpuid, eqid, count;
55174c7070dbSScott Long 
5518b103855eSStephen Hurd 	CPU_COPY(&ctx->ifc_cpus, &cpus);
5519b103855eSStephen Hurd 	count = CPU_COUNT(&cpus);
5520aa3c5dd8SSean Bruno 	eqid = qid % count;
55214c7070dbSScott Long 	/* clear the CPUs below the eqid'th set bit */
5522aa3c5dd8SSean Bruno 	for (i = 0; i < eqid; i++) {
5523b103855eSStephen Hurd 		cpuid = CPU_FFS(&cpus);
5524aa3c5dd8SSean Bruno 		MPASS(cpuid != 0);
5525b103855eSStephen Hurd 		CPU_CLR(cpuid-1, &cpus);
55264c7070dbSScott Long 	}
5527b103855eSStephen Hurd 	cpuid = CPU_FFS(&cpus);
5528aa3c5dd8SSean Bruno 	MPASS(cpuid != 0);
5529aa3c5dd8SSean Bruno 	return (cpuid-1);
55304c7070dbSScott Long }
55314c7070dbSScott Long 
5532b103855eSStephen Hurd #ifdef SCHED_ULE
5533b103855eSStephen Hurd extern struct cpu_group *cpu_top;              /* CPU topology */
5534b103855eSStephen Hurd 
5535b103855eSStephen Hurd static int
5536b103855eSStephen Hurd find_child_with_core(int cpu, struct cpu_group *grp)
5537b103855eSStephen Hurd {
5538b103855eSStephen Hurd 	int i;
5539b103855eSStephen Hurd 
5540b103855eSStephen Hurd 	if (grp->cg_children == 0)
5541b103855eSStephen Hurd 		return -1;
5542b103855eSStephen Hurd 
5543b103855eSStephen Hurd 	MPASS(grp->cg_child);
5544b103855eSStephen Hurd 	for (i = 0; i < grp->cg_children; i++) {
5545b103855eSStephen Hurd 		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
5546b103855eSStephen Hurd 			return i;
5547b103855eSStephen Hurd 	}
5548b103855eSStephen Hurd 
5549b103855eSStephen Hurd 	return -1;
5550b103855eSStephen Hurd }
5551b103855eSStephen Hurd 
5552b103855eSStephen Hurd /*
55530b75ac77SStephen Hurd  * Find the nth "close" core to the specified core
55540b75ac77SStephen Hurd  * "close" is defined as the deepest level that shares
55550b75ac77SStephen Hurd  * at least an L2 cache.  With threads, this will be
55560b75ac77SStephen Hurd  * threads on the same core.  If the shared cache is L3
55570b75ac77SStephen Hurd  * or higher, simply return the same core.
5558b103855eSStephen Hurd  */
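/*
 * Worked example (hypothetical topology, not from the original source):
 * assume the deepest group containing CPU 5 shares an L2 cache and spans
 * CPUs 4-7 (cg_count == 4).  For find_close_core(5, 1): CPU 5 is the second
 * CPU in the group's mask (i == 1), so core_offset becomes 2, two set bits
 * (CPUs 4 and 5) are cleared, and CPU 6 is returned.
 */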
5559b103855eSStephen Hurd static int
55600b75ac77SStephen Hurd find_close_core(int cpu, int core_offset)
5561b103855eSStephen Hurd {
5562b103855eSStephen Hurd 	struct cpu_group *grp;
5563b103855eSStephen Hurd 	int i;
55640b75ac77SStephen Hurd 	int fcpu;
5565b103855eSStephen Hurd 	cpuset_t cs;
5566b103855eSStephen Hurd 
5567b103855eSStephen Hurd 	grp = cpu_top;
5568b103855eSStephen Hurd 	if (grp == NULL)
5569b103855eSStephen Hurd 		return cpu;
5570b103855eSStephen Hurd 	i = 0;
5571b103855eSStephen Hurd 	while ((i = find_child_with_core(cpu, grp)) != -1) {
5572b103855eSStephen Hurd 		/* If the child only has one cpu, don't descend */
5573b103855eSStephen Hurd 		if (grp->cg_child[i].cg_count <= 1)
5574b103855eSStephen Hurd 			break;
5575b103855eSStephen Hurd 		grp = &grp->cg_child[i];
5576b103855eSStephen Hurd 	}
5577b103855eSStephen Hurd 
5578b103855eSStephen Hurd 	/* If they don't share at least an L2 cache, use the same CPU */
5579b103855eSStephen Hurd 	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
5580b103855eSStephen Hurd 		return cpu;
5581b103855eSStephen Hurd 
5582b103855eSStephen Hurd 	/* Now pick one */
5583b103855eSStephen Hurd 	CPU_COPY(&grp->cg_mask, &cs);
55840b75ac77SStephen Hurd 
55850b75ac77SStephen Hurd 	/* Add the selected CPU offset to core offset. */
55860b75ac77SStephen Hurd 	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
55870b75ac77SStephen Hurd 		if (fcpu - 1 == cpu)
55880b75ac77SStephen Hurd 			break;
55890b75ac77SStephen Hurd 		CPU_CLR(fcpu - 1, &cs);
55900b75ac77SStephen Hurd 	}
55910b75ac77SStephen Hurd 	MPASS(fcpu);
55920b75ac77SStephen Hurd 
55930b75ac77SStephen Hurd 	core_offset += i;
55940b75ac77SStephen Hurd 
55950b75ac77SStephen Hurd 	CPU_COPY(&grp->cg_mask, &cs);
55960b75ac77SStephen Hurd 	for (i = core_offset % grp->cg_count; i > 0; i--) {
5597b103855eSStephen Hurd 		MPASS(CPU_FFS(&cs));
5598b103855eSStephen Hurd 		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5599b103855eSStephen Hurd 	}
5600b103855eSStephen Hurd 	MPASS(CPU_FFS(&cs));
5601b103855eSStephen Hurd 	return CPU_FFS(&cs) - 1;
5602b103855eSStephen Hurd }
5603b103855eSStephen Hurd #else
5604b103855eSStephen Hurd static int
56050b75ac77SStephen Hurd find_close_core(int cpu, int core_offset __unused)
5606b103855eSStephen Hurd {
560797755e83SKonstantin Belousov 	return cpu;
5608b103855eSStephen Hurd }
5609b103855eSStephen Hurd #endif
5610b103855eSStephen Hurd 
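/*
 * Worked example for get_core_offset() below (hypothetical counts): with
 * CPU_COUNT(&ctx->ifc_cpus) == 8, an RX queue with qid == 3 gets offset
 * 3 / 8 == 0 (the CPU picked by find_nth() itself), while a TX queue with
 * qid == 3 gets offset 3 / 8 + 1 == 1, i.e. the next "close" core to that
 * RX CPU.
 */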
5611b103855eSStephen Hurd static int
56120b75ac77SStephen Hurd get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5613b103855eSStephen Hurd {
5614b103855eSStephen Hurd 	switch (type) {
5615b103855eSStephen Hurd 	case IFLIB_INTR_TX:
56160b75ac77SStephen Hurd 		/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
56170b75ac77SStephen Hurd 		/* XXX handle multiple RX threads per core and more than two cores per L2 group */
5618b103855eSStephen Hurd 		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5619b103855eSStephen Hurd 	case IFLIB_INTR_RX:
5620b103855eSStephen Hurd 	case IFLIB_INTR_RXTX:
56210b75ac77SStephen Hurd 		/* RX queues get the specified core */
5622b103855eSStephen Hurd 		return qid / CPU_COUNT(&ctx->ifc_cpus);
5623b103855eSStephen Hurd 	default:
5624b103855eSStephen Hurd 		return -1;
5625b103855eSStephen Hurd 	}
5626b103855eSStephen Hurd }
5627b103855eSStephen Hurd #else
56280b75ac77SStephen Hurd #define get_core_offset(ctx, type, qid)	CPU_FIRST()
56290b75ac77SStephen Hurd #define find_close_core(cpuid, tid)	CPU_FIRST()
5630b103855eSStephen Hurd #define find_nth(ctx, gid)		CPU_FIRST()
5631b103855eSStephen Hurd #endif
5632b103855eSStephen Hurd 
5633b103855eSStephen Hurd /* Just to avoid copy/paste */
5634b103855eSStephen Hurd static inline int
5635b103855eSStephen Hurd iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
56363e0e6330SStephen Hurd     struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name)
5637b103855eSStephen Hurd {
5638b103855eSStephen Hurd 	int cpuid;
5639b103855eSStephen Hurd 	int err, tid;
5640b103855eSStephen Hurd 
5641b103855eSStephen Hurd 	cpuid = find_nth(ctx, qid);
56420b75ac77SStephen Hurd 	tid = get_core_offset(ctx, type, qid);
5643b103855eSStephen Hurd 	MPASS(tid >= 0);
56440b75ac77SStephen Hurd 	cpuid = find_close_core(cpuid, tid);
5645b103855eSStephen Hurd 	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name);
5646b103855eSStephen Hurd 	if (err) {
5647b103855eSStephen Hurd 		device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err);
5648b103855eSStephen Hurd 		return (err);
5649b103855eSStephen Hurd 	}
5650b103855eSStephen Hurd #ifdef notyet
5651b103855eSStephen Hurd 	if (cpuid > ctx->ifc_cpuid_highest)
5652b103855eSStephen Hurd 		ctx->ifc_cpuid_highest = cpuid;
5653b103855eSStephen Hurd #endif
5654b103855eSStephen Hurd 	return 0;
5655b103855eSStephen Hurd }
5656b103855eSStephen Hurd 
56574c7070dbSScott Long int
56584c7070dbSScott Long iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
56594c7070dbSScott Long 			iflib_intr_type_t type, driver_filter_t *filter,
56603e0e6330SStephen Hurd 			void *filter_arg, int qid, const char *name)
56614c7070dbSScott Long {
56624c7070dbSScott Long 	struct grouptask *gtask;
56634c7070dbSScott Long 	struct taskqgroup *tqg;
56644c7070dbSScott Long 	iflib_filter_info_t info;
566523ac9029SStephen Hurd 	gtask_fn_t *fn;
5666b103855eSStephen Hurd 	int tqrid, err;
566795246abbSSean Bruno 	driver_filter_t *intr_fast;
56684c7070dbSScott Long 	void *q;
56694c7070dbSScott Long 
56704c7070dbSScott Long 	info = &ctx->ifc_filter_info;
5671add6f7d0SSean Bruno 	tqrid = rid;
56724c7070dbSScott Long 
56734c7070dbSScott Long 	switch (type) {
56744c7070dbSScott Long 	/* XXX merge tx/rx for netmap? */
56754c7070dbSScott Long 	case IFLIB_INTR_TX:
56764c7070dbSScott Long 		q = &ctx->ifc_txqs[qid];
56774c7070dbSScott Long 		info = &ctx->ifc_txqs[qid].ift_filter_info;
56784c7070dbSScott Long 		gtask = &ctx->ifc_txqs[qid].ift_task;
5679ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
56804c7070dbSScott Long 		fn = _task_fn_tx;
568195246abbSSean Bruno 		intr_fast = iflib_fast_intr;
5682da69b8f9SSean Bruno 		GROUPTASK_INIT(gtask, 0, fn, q);
56835ee36c68SStephen Hurd 		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
56844c7070dbSScott Long 		break;
56854c7070dbSScott Long 	case IFLIB_INTR_RX:
56864c7070dbSScott Long 		q = &ctx->ifc_rxqs[qid];
56874c7070dbSScott Long 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
56884c7070dbSScott Long 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5689ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
56904c7070dbSScott Long 		fn = _task_fn_rx;
5691ab2e3f79SStephen Hurd 		intr_fast = iflib_fast_intr;
569295246abbSSean Bruno 		GROUPTASK_INIT(gtask, 0, fn, q);
569395246abbSSean Bruno 		break;
569495246abbSSean Bruno 	case IFLIB_INTR_RXTX:
569595246abbSSean Bruno 		q = &ctx->ifc_rxqs[qid];
569695246abbSSean Bruno 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
569795246abbSSean Bruno 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5698ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
569995246abbSSean Bruno 		fn = _task_fn_rx;
570095246abbSSean Bruno 		intr_fast = iflib_fast_intr_rxtx;
5701da69b8f9SSean Bruno 		GROUPTASK_INIT(gtask, 0, fn, q);
57024c7070dbSScott Long 		break;
57034c7070dbSScott Long 	case IFLIB_INTR_ADMIN:
57044c7070dbSScott Long 		q = ctx;
5705da69b8f9SSean Bruno 		tqrid = -1;
57064c7070dbSScott Long 		info = &ctx->ifc_filter_info;
57074c7070dbSScott Long 		gtask = &ctx->ifc_admin_task;
5708ab2e3f79SStephen Hurd 		tqg = qgroup_if_config_tqg;
57094c7070dbSScott Long 		fn = _task_fn_admin;
571095246abbSSean Bruno 		intr_fast = iflib_fast_intr_ctx;
57114c7070dbSScott Long 		break;
57124c7070dbSScott Long 	default:
57134c7070dbSScott Long 		panic("unknown net intr type");
57144c7070dbSScott Long 	}
57154c7070dbSScott Long 
57164c7070dbSScott Long 	info->ifi_filter = filter;
57174c7070dbSScott Long 	info->ifi_filter_arg = filter_arg;
57184c7070dbSScott Long 	info->ifi_task = gtask;
571995246abbSSean Bruno 	info->ifi_ctx = q;
57204c7070dbSScott Long 
572195246abbSSean Bruno 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info,  name);
5722da69b8f9SSean Bruno 	if (err != 0) {
5723da69b8f9SSean Bruno 		device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
57244c7070dbSScott Long 		return (err);
5725da69b8f9SSean Bruno 	}
5726da69b8f9SSean Bruno 	if (type == IFLIB_INTR_ADMIN)
5727da69b8f9SSean Bruno 		return (0);
5728da69b8f9SSean Bruno 
57294c7070dbSScott Long 	if (tqrid != -1) {
5730b103855eSStephen Hurd 		err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name);
5731b103855eSStephen Hurd 		if (err)
5732b103855eSStephen Hurd 			return (err);
5733aa3c5dd8SSean Bruno 	} else {
57341c0054d2SStephen Hurd 		taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
5735aa3c5dd8SSean Bruno 	}
57364c7070dbSScott Long 
57374c7070dbSScott Long 	return (0);
57384c7070dbSScott Long }
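/*
 * Illustrative driver-side usage of iflib_irq_alloc_generic() (a sketch;
 * "sc", "que", "my_msix_que_filter" and the rid layout are hypothetical,
 * not taken from any particular driver):
 *
 *	struct my_rx_queue *que = &sc->rx_queues[i];
 *
 *	err = iflib_irq_alloc_generic(ctx, &que->que_irq, i + 1,
 *	    IFLIB_INTR_RXTX, my_msix_que_filter, que, i, "rxq");
 */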
57394c7070dbSScott Long 
57404c7070dbSScott Long void
57413e0e6330SStephen Hurd iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,  void *arg, int qid, const char *name)
57424c7070dbSScott Long {
57434c7070dbSScott Long 	struct grouptask *gtask;
57444c7070dbSScott Long 	struct taskqgroup *tqg;
574523ac9029SStephen Hurd 	gtask_fn_t *fn;
57464c7070dbSScott Long 	void *q;
57471c0054d2SStephen Hurd 	int irq_num = -1;
5748b103855eSStephen Hurd 	int err;
57494c7070dbSScott Long 
57504c7070dbSScott Long 	switch (type) {
57514c7070dbSScott Long 	case IFLIB_INTR_TX:
57524c7070dbSScott Long 		q = &ctx->ifc_txqs[qid];
57534c7070dbSScott Long 		gtask = &ctx->ifc_txqs[qid].ift_task;
5754ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
57554c7070dbSScott Long 		fn = _task_fn_tx;
57561c0054d2SStephen Hurd 		if (irq != NULL)
57571c0054d2SStephen Hurd 			irq_num = rman_get_start(irq->ii_res);
57584c7070dbSScott Long 		break;
57594c7070dbSScott Long 	case IFLIB_INTR_RX:
57604c7070dbSScott Long 		q = &ctx->ifc_rxqs[qid];
57614c7070dbSScott Long 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5762ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
57634c7070dbSScott Long 		fn = _task_fn_rx;
57641c0054d2SStephen Hurd 		if (irq != NULL)
57651c0054d2SStephen Hurd 			irq_num = rman_get_start(irq->ii_res);
57664c7070dbSScott Long 		break;
57674c7070dbSScott Long 	case IFLIB_INTR_IOV:
57684c7070dbSScott Long 		q = ctx;
57694c7070dbSScott Long 		gtask = &ctx->ifc_vflr_task;
5770ab2e3f79SStephen Hurd 		tqg = qgroup_if_config_tqg;
57714c7070dbSScott Long 		fn = _task_fn_iov;
57724c7070dbSScott Long 		break;
57734c7070dbSScott Long 	default:
57744c7070dbSScott Long 		panic("unknown net intr type");
57754c7070dbSScott Long 	}
57764c7070dbSScott Long 	GROUPTASK_INIT(gtask, 0, fn, q);
5777b103855eSStephen Hurd 	if (irq_num != -1) {
5778b103855eSStephen Hurd 		err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name);
5779b103855eSStephen Hurd 		if (err)
57801c0054d2SStephen Hurd 			taskqgroup_attach(tqg, gtask, q, irq_num, name);
57814c7070dbSScott Long 	} else {
5783b103855eSStephen Hurd 		taskqgroup_attach(tqg, gtask, q, irq_num, name);
5784b103855eSStephen Hurd 	}
5785b103855eSStephen Hurd }
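/*
 * Illustrative driver-side usage of iflib_softirq_alloc_generic() (a sketch;
 * names are hypothetical).  Drivers typically attach each TX task to the IRQ
 * of a paired RX queue so that TX completions run close to the interrupt:
 *
 *	iflib_softirq_alloc_generic(ctx, &sc->rx_queues[i].que_irq,
 *	    IFLIB_INTR_TX, &sc->tx_queues[i], i, "tx");
 */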
57864c7070dbSScott Long 
57874c7070dbSScott Long void
57884c7070dbSScott Long iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
57894c7070dbSScott Long {
57904c7070dbSScott Long 	if (irq->ii_tag)
57914c7070dbSScott Long 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
57924c7070dbSScott Long 
57934c7070dbSScott Long 	if (irq->ii_res)
57944c7070dbSScott Long 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res);
57954c7070dbSScott Long }
57964c7070dbSScott Long 
57974c7070dbSScott Long static int
57983e0e6330SStephen Hurd iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
57994c7070dbSScott Long {
58004c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
58014c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
58024c7070dbSScott Long 	if_irq_t irq = &ctx->ifc_legacy_irq;
58034c7070dbSScott Long 	iflib_filter_info_t info;
58044c7070dbSScott Long 	struct grouptask *gtask;
58054c7070dbSScott Long 	struct taskqgroup *tqg;
580623ac9029SStephen Hurd 	gtask_fn_t *fn;
58074c7070dbSScott Long 	int tqrid;
58084c7070dbSScott Long 	void *q;
58094c7070dbSScott Long 	int err;
58104c7070dbSScott Long 
58114c7070dbSScott Long 	q = &ctx->ifc_rxqs[0];
58124c7070dbSScott Long 	info = &rxq[0].ifr_filter_info;
58134c7070dbSScott Long 	gtask = &rxq[0].ifr_task;
5814ab2e3f79SStephen Hurd 	tqg = qgroup_if_io_tqg;
58154c7070dbSScott Long 	tqrid = irq->ii_rid = *rid;
58164c7070dbSScott Long 	fn = _task_fn_rx;
58174c7070dbSScott Long 
58184c7070dbSScott Long 	ctx->ifc_flags |= IFC_LEGACY;
58194c7070dbSScott Long 	info->ifi_filter = filter;
58204c7070dbSScott Long 	info->ifi_filter_arg = filter_arg;
58214c7070dbSScott Long 	info->ifi_task = gtask;
58224ecb427aSSean Bruno 	info->ifi_ctx = ctx;
58234c7070dbSScott Long 
58244c7070dbSScott Long 	/* We allocate a single interrupt resource */
582595246abbSSean Bruno 	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
58264c7070dbSScott Long 		return (err);
58274c7070dbSScott Long 	GROUPTASK_INIT(gtask, 0, fn, q);
58289c58cafaSStephen Hurd 	taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
58294c7070dbSScott Long 
58304c7070dbSScott Long 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
58319c58cafaSStephen Hurd 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx");
58324c7070dbSScott Long 	return (0);
58334c7070dbSScott Long }
58344c7070dbSScott Long 
58354c7070dbSScott Long void
58364c7070dbSScott Long iflib_led_create(if_ctx_t ctx)
58374c7070dbSScott Long {
58384c7070dbSScott Long 
58394c7070dbSScott Long 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
58404c7070dbSScott Long 	    device_get_nameunit(ctx->ifc_dev));
58414c7070dbSScott Long }
58424c7070dbSScott Long 
58434c7070dbSScott Long void
58444c7070dbSScott Long iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
58454c7070dbSScott Long {
58464c7070dbSScott Long 
58474c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
58484c7070dbSScott Long }
58494c7070dbSScott Long 
58504c7070dbSScott Long void
58514c7070dbSScott Long iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
58524c7070dbSScott Long {
58534c7070dbSScott Long 
58544c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
58554c7070dbSScott Long }
58564c7070dbSScott Long 
58574c7070dbSScott Long void
58584c7070dbSScott Long iflib_admin_intr_deferred(if_ctx_t ctx)
58594c7070dbSScott Long {
58601248952aSSean Bruno #ifdef INVARIANTS
58611248952aSSean Bruno 	struct grouptask *gtask;
58621248952aSSean Bruno 
58631248952aSSean Bruno 	gtask = &ctx->ifc_admin_task;
5864d0d0ad0aSStephen Hurd 	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
58651248952aSSean Bruno #endif
58664c7070dbSScott Long 
58674c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
58684c7070dbSScott Long }
58694c7070dbSScott Long 
58704c7070dbSScott Long void
58714c7070dbSScott Long iflib_iov_intr_deferred(if_ctx_t ctx)
58724c7070dbSScott Long {
58734c7070dbSScott Long 
58744c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
58754c7070dbSScott Long }
58764c7070dbSScott Long 
58774c7070dbSScott Long void
58784c7070dbSScott Long iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
58794c7070dbSScott Long {
58804c7070dbSScott Long 
5881ab2e3f79SStephen Hurd 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
58824c7070dbSScott Long }
58834c7070dbSScott Long 
58844c7070dbSScott Long void
5885aa8a24d3SStephen Hurd iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
5886aa8a24d3SStephen Hurd 	const char *name)
58874c7070dbSScott Long {
58884c7070dbSScott Long 
58894c7070dbSScott Long 	GROUPTASK_INIT(gtask, 0, fn, ctx);
5890ab2e3f79SStephen Hurd 	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
58914c7070dbSScott Long }
58924c7070dbSScott Long 
58934c7070dbSScott Long void
589423ac9029SStephen Hurd iflib_config_gtask_deinit(struct grouptask *gtask)
589523ac9029SStephen Hurd {
589623ac9029SStephen Hurd 
5897ab2e3f79SStephen Hurd 	taskqgroup_detach(qgroup_if_config_tqg, gtask);
589823ac9029SStephen Hurd }
589923ac9029SStephen Hurd 
590023ac9029SStephen Hurd void
590123ac9029SStephen Hurd iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
59024c7070dbSScott Long {
59034c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
59044c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
59054c7070dbSScott Long 
59064c7070dbSScott Long 	if_setbaudrate(ifp, baudrate);
59077b610b60SSean Bruno 	if (baudrate >= IF_Gbps(10)) {
59087b610b60SSean Bruno 		STATE_LOCK(ctx);
590995246abbSSean Bruno 		ctx->ifc_flags |= IFC_PREFETCH;
59107b610b60SSean Bruno 		STATE_UNLOCK(ctx);
59117b610b60SSean Bruno 	}
59124c7070dbSScott Long 	/* If link down, disable watchdog */
59134c7070dbSScott Long 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
59144c7070dbSScott Long 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
59154c7070dbSScott Long 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
59164c7070dbSScott Long 	}
59174c7070dbSScott Long 	ctx->ifc_link_state = link_state;
59184c7070dbSScott Long 	if_link_state_change(ifp, link_state);
59194c7070dbSScott Long }
59204c7070dbSScott Long 
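/*
 * Worked example for the credit accounting below (hypothetical ring size):
 * with ift_size == 1024, ift_cidx_processed == 1000 and the driver reporting
 * 100 newly freed descriptors, ift_cidx_processed advances to 1100 and is
 * then wrapped back to 76 by the ift_size subtraction.
 */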
59214c7070dbSScott Long static int
59224c7070dbSScott Long iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
59234c7070dbSScott Long {
59244c7070dbSScott Long 	int credits;
59251248952aSSean Bruno #ifdef INVARIANTS
59261248952aSSean Bruno 	int credits_pre = txq->ift_cidx_processed;
59271248952aSSean Bruno #endif
59284c7070dbSScott Long 
59294c7070dbSScott Long 	if (ctx->isc_txd_credits_update == NULL)
59304c7070dbSScott Long 		return (0);
59314c7070dbSScott Long 
593295246abbSSean Bruno 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
59334c7070dbSScott Long 		return (0);
59344c7070dbSScott Long 
59354c7070dbSScott Long 	txq->ift_processed += credits;
59364c7070dbSScott Long 	txq->ift_cidx_processed += credits;
59374c7070dbSScott Long 
59381248952aSSean Bruno 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
59394c7070dbSScott Long 	if (txq->ift_cidx_processed >= txq->ift_size)
59404c7070dbSScott Long 		txq->ift_cidx_processed -= txq->ift_size;
59414c7070dbSScott Long 	return (credits);
59424c7070dbSScott Long }
59434c7070dbSScott Long 
59444c7070dbSScott Long static int
594595246abbSSean Bruno iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
59464c7070dbSScott Long {
59474c7070dbSScott Long 
594823ac9029SStephen Hurd 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
594923ac9029SStephen Hurd 	    budget));
59504c7070dbSScott Long }
59514c7070dbSScott Long 
59524c7070dbSScott Long void
59534c7070dbSScott Long iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
59544c7070dbSScott Long 	const char *description, if_int_delay_info_t info,
59554c7070dbSScott Long 	int offset, int value)
59564c7070dbSScott Long {
59574c7070dbSScott Long 	info->iidi_ctx = ctx;
59584c7070dbSScott Long 	info->iidi_offset = offset;
59594c7070dbSScott Long 	info->iidi_value = value;
59604c7070dbSScott Long 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
59614c7070dbSScott Long 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
59624c7070dbSScott Long 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
59634c7070dbSScott Long 	    info, 0, iflib_sysctl_int_delay, "I", description);
59644c7070dbSScott Long }
59654c7070dbSScott Long 
5966aa8a24d3SStephen Hurd struct sx *
59674c7070dbSScott Long iflib_ctx_lock_get(if_ctx_t ctx)
59684c7070dbSScott Long {
59694c7070dbSScott Long 
5970aa8a24d3SStephen Hurd 	return (&ctx->ifc_ctx_sx);
59714c7070dbSScott Long }
59724c7070dbSScott Long 
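/*
 * Worked example of the MSI-X vector math in iflib_msix_init() below (all
 * counts hypothetical, non-debug build): with pci_msix_count() == 16,
 * isc_admin_intrcnt == 1, no RSS and 8 CPUs in ifc_cpus, queuemsgs == 15 and
 * queues is clamped to 8.  Absent tunables, rx_queues == 8 (subject to the
 * isc_nrxqsets cap) and tx_queues == min(rx, tx) == 8 once the queue-equality
 * override is applied, so pci_alloc_msix() is asked for 8 + 1 == 9 vectors.
 */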
59734c7070dbSScott Long static int
59744c7070dbSScott Long iflib_msix_init(if_ctx_t ctx)
59754c7070dbSScott Long {
59764c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
59774c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
59784c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
59794c7070dbSScott Long 	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
59804c7070dbSScott Long 	int iflib_num_tx_queues, iflib_num_rx_queues;
59814c7070dbSScott Long 	int err, admincnt, bar;
59824c7070dbSScott Long 
5983d2735264SStephen Hurd 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
5984d2735264SStephen Hurd 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
598523ac9029SStephen Hurd 
5986d2735264SStephen Hurd 	device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
59871248952aSSean Bruno 
59884c7070dbSScott Long 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
59894c7070dbSScott Long 	admincnt = sctx->isc_admin_intrcnt;
59904c7070dbSScott Long 	/* Override by tuneable */
5991ea351d3fSSean Bruno 	if (scctx->isc_disable_msix)
59924c7070dbSScott Long 		goto msi;
59934c7070dbSScott Long 
59944c7070dbSScott Long 	/*
59954c7070dbSScott Long 	 * bar == -1 => "trust me I know what I'm doing"
59964c7070dbSScott Long 	 * Some drivers are for hardware that is so shoddily
59974c7070dbSScott Long 	 * documented that no one knows which bars are which,
59984c7070dbSScott Long 	 * so the developer has to map all bars. This hack
59994c7070dbSScott Long 	 * allows shoddy garbage to use msix in this framework.
60004c7070dbSScott Long 	 */
60014c7070dbSScott Long 	if (bar != -1) {
60024c7070dbSScott Long 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
60034c7070dbSScott Long 	            SYS_RES_MEMORY, &bar, RF_ACTIVE);
60044c7070dbSScott Long 		if (ctx->ifc_msix_mem == NULL) {
60054c7070dbSScott Long 			/* May not be enabled */
60064c7070dbSScott Long 			device_printf(dev, "Unable to map MSIX table \n");
60074c7070dbSScott Long 			goto msi;
60084c7070dbSScott Long 		}
60094c7070dbSScott Long 	}
60104c7070dbSScott Long 	/* First try MSI/X */
60114c7070dbSScott Long 	if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */
60124c7070dbSScott Long 		device_printf(dev, "System has MSIX disabled \n");
60134c7070dbSScott Long 		bus_release_resource(dev, SYS_RES_MEMORY,
60144c7070dbSScott Long 		    bar, ctx->ifc_msix_mem);
60154c7070dbSScott Long 		ctx->ifc_msix_mem = NULL;
60164c7070dbSScott Long 		goto msi;
60174c7070dbSScott Long 	}
60184c7070dbSScott Long #if IFLIB_DEBUG
60194c7070dbSScott Long 	/* use only 1 qset in debug mode */
60204c7070dbSScott Long 	queuemsgs = min(msgs - admincnt, 1);
60214c7070dbSScott Long #else
60224c7070dbSScott Long 	queuemsgs = msgs - admincnt;
60234c7070dbSScott Long #endif
60244c7070dbSScott Long #ifdef RSS
60254c7070dbSScott Long 	queues = imin(queuemsgs, rss_getnumbuckets());
60264c7070dbSScott Long #else
60274c7070dbSScott Long 	queues = queuemsgs;
60284c7070dbSScott Long #endif
60294c7070dbSScott Long 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
60304c7070dbSScott Long 	device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n",
60314c7070dbSScott Long 				  CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
60324c7070dbSScott Long #ifdef  RSS
60334c7070dbSScott Long 	/* If we're doing RSS, clamp at the number of RSS buckets */
60344c7070dbSScott Long 	if (queues > rss_getnumbuckets())
60354c7070dbSScott Long 		queues = rss_getnumbuckets();
60364c7070dbSScott Long #endif
603723ac9029SStephen Hurd 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
603823ac9029SStephen Hurd 		rx_queues = iflib_num_rx_queues;
60394c7070dbSScott Long 	else
60404c7070dbSScott Long 		rx_queues = queues;
6041d2735264SStephen Hurd 
6042d2735264SStephen Hurd 	if (rx_queues > scctx->isc_nrxqsets)
6043d2735264SStephen Hurd 		rx_queues = scctx->isc_nrxqsets;
6044d2735264SStephen Hurd 
604523ac9029SStephen Hurd 	/*
604623ac9029SStephen Hurd 	 * We want this to be all logical CPUs by default
604723ac9029SStephen Hurd 	 */
60484c7070dbSScott Long 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
60494c7070dbSScott Long 		tx_queues = iflib_num_tx_queues;
60504c7070dbSScott Long 	else
605123ac9029SStephen Hurd 		tx_queues = mp_ncpus;
605223ac9029SStephen Hurd 
6053d2735264SStephen Hurd 	if (tx_queues > scctx->isc_ntxqsets)
6054d2735264SStephen Hurd 		tx_queues = scctx->isc_ntxqsets;
6055d2735264SStephen Hurd 
605623ac9029SStephen Hurd 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
605723ac9029SStephen Hurd #ifdef INVARIANTS
605823ac9029SStephen Hurd 		if (tx_queues != rx_queues)
605923ac9029SStephen Hurd 			device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
606023ac9029SStephen Hurd 				      min(rx_queues, tx_queues), min(rx_queues, tx_queues));
606123ac9029SStephen Hurd #endif
606223ac9029SStephen Hurd 		tx_queues = min(rx_queues, tx_queues);
606323ac9029SStephen Hurd 		rx_queues = min(rx_queues, tx_queues);
606423ac9029SStephen Hurd 	}
60654c7070dbSScott Long 
6066ab2e3f79SStephen Hurd 	device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues);
60674c7070dbSScott Long 
6068ab2e3f79SStephen Hurd 	vectors = rx_queues + admincnt;
60694c7070dbSScott Long 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
60704c7070dbSScott Long 		device_printf(dev,
60714c7070dbSScott Long 					  "Using MSIX interrupts with %d vectors\n", vectors);
60724c7070dbSScott Long 		scctx->isc_vectors = vectors;
60734c7070dbSScott Long 		scctx->isc_nrxqsets = rx_queues;
60744c7070dbSScott Long 		scctx->isc_ntxqsets = tx_queues;
60754c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_MSIX;
607623ac9029SStephen Hurd 
60774c7070dbSScott Long 		return (vectors);
60784c7070dbSScott Long 	} else {
60794c7070dbSScott Long 		device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err);
6080e4defe55SMarius Strobl 		bus_release_resource(dev, SYS_RES_MEMORY, bar,
6081e4defe55SMarius Strobl 		    ctx->ifc_msix_mem);
6082e4defe55SMarius Strobl 		ctx->ifc_msix_mem = NULL;
60834c7070dbSScott Long 	}
60844c7070dbSScott Long msi:
60854c7070dbSScott Long 	vectors = pci_msi_count(dev);
60864c7070dbSScott Long 	scctx->isc_nrxqsets = 1;
60874c7070dbSScott Long 	scctx->isc_ntxqsets = 1;
60884c7070dbSScott Long 	scctx->isc_vectors = vectors;
60894c7070dbSScott Long 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
60904c7070dbSScott Long 		device_printf(dev,"Using an MSI interrupt\n");
60914c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_MSI;
60924c7070dbSScott Long 	} else {
6093e4defe55SMarius Strobl 		scctx->isc_vectors = 1;
60944c7070dbSScott Long 		device_printf(dev,"Using a Legacy interrupt\n");
60954c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_LEGACY;
60964c7070dbSScott Long 	}
60974c7070dbSScott Long 
60984c7070dbSScott Long 	return (vectors);
60994c7070dbSScott Long }
61004c7070dbSScott Long 
6101e4defe55SMarius Strobl static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
61024c7070dbSScott Long 
61034c7070dbSScott Long static int
61044c7070dbSScott Long mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
61054c7070dbSScott Long {
61064c7070dbSScott Long 	int rc;
61074c7070dbSScott Long 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
61084c7070dbSScott Long 	struct sbuf *sb;
6109e4defe55SMarius Strobl 	const char *ring_state = "UNKNOWN";
61104c7070dbSScott Long 
61114c7070dbSScott Long 	/* XXX needed ? */
61124c7070dbSScott Long 	rc = sysctl_wire_old_buffer(req, 0);
61134c7070dbSScott Long 	MPASS(rc == 0);
61144c7070dbSScott Long 	if (rc != 0)
61154c7070dbSScott Long 		return (rc);
61164c7070dbSScott Long 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
61174c7070dbSScott Long 	MPASS(sb != NULL);
61184c7070dbSScott Long 	if (sb == NULL)
61194c7070dbSScott Long 		return (ENOMEM);
61204c7070dbSScott Long 	if (state[3] <= 3)
61214c7070dbSScott Long 		ring_state = ring_states[state[3]];
61224c7070dbSScott Long 
61234c7070dbSScott Long 	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
61244c7070dbSScott Long 		    state[0], state[1], state[2], ring_state);
61254c7070dbSScott Long 	rc = sbuf_finish(sb);
61264c7070dbSScott Long 	sbuf_delete(sb);
61274c7070dbSScott Long 	return (rc);
61284c7070dbSScott Long }
61294c7070dbSScott Long 
613023ac9029SStephen Hurd enum iflib_ndesc_handler {
613123ac9029SStephen Hurd 	IFLIB_NTXD_HANDLER,
613223ac9029SStephen Hurd 	IFLIB_NRXD_HANDLER,
613323ac9029SStephen Hurd };
61344c7070dbSScott Long 
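/*
 * The handler below accepts a comma- or space-separated list of per-queue-set
 * descriptor counts, e.g. (hypothetical device name):
 *
 *	sysctl dev.foo.0.iflib.override_ntxds="512,512"
 *
 * Entries not listed are left unchanged; a value of 0 selects the driver
 * default.
 */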
613523ac9029SStephen Hurd static int
613623ac9029SStephen Hurd mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
613723ac9029SStephen Hurd {
613823ac9029SStephen Hurd 	if_ctx_t ctx = (void *)arg1;
613923ac9029SStephen Hurd 	enum iflib_ndesc_handler type = arg2;
614023ac9029SStephen Hurd 	char buf[256] = {0};
614195246abbSSean Bruno 	qidx_t *ndesc;
614223ac9029SStephen Hurd 	char *p, *next;
614323ac9029SStephen Hurd 	int nqs, rc, i;
614423ac9029SStephen Hurd 
614523ac9029SStephen Hurd 	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
614623ac9029SStephen Hurd 
614723ac9029SStephen Hurd 	nqs = 8;
614823ac9029SStephen Hurd 	switch(type) {
614923ac9029SStephen Hurd 	case IFLIB_NTXD_HANDLER:
615023ac9029SStephen Hurd 		ndesc = ctx->ifc_sysctl_ntxds;
615123ac9029SStephen Hurd 		if (ctx->ifc_sctx)
615223ac9029SStephen Hurd 			nqs = ctx->ifc_sctx->isc_ntxqs;
615323ac9029SStephen Hurd 		break;
615423ac9029SStephen Hurd 	case IFLIB_NRXD_HANDLER:
615523ac9029SStephen Hurd 		ndesc = ctx->ifc_sysctl_nrxds;
615623ac9029SStephen Hurd 		if (ctx->ifc_sctx)
615723ac9029SStephen Hurd 			nqs = ctx->ifc_sctx->isc_nrxqs;
615823ac9029SStephen Hurd 		break;
61591ae4848cSMatt Macy 	default:
61601ae4848cSMatt Macy 		panic("unhandled type");
616123ac9029SStephen Hurd 	}
616223ac9029SStephen Hurd 	if (nqs == 0)
616323ac9029SStephen Hurd 		nqs = 8;
616423ac9029SStephen Hurd 
616523ac9029SStephen Hurd 	for (i = 0; i < 8; i++) {
616623ac9029SStephen Hurd 		if (i >= nqs)
616723ac9029SStephen Hurd 			break;
616823ac9029SStephen Hurd 		if (i)
616923ac9029SStephen Hurd 			strcat(buf, ",");
617023ac9029SStephen Hurd 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
617123ac9029SStephen Hurd 	}
617223ac9029SStephen Hurd 
617323ac9029SStephen Hurd 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
617423ac9029SStephen Hurd 	if (rc || req->newptr == NULL)
617523ac9029SStephen Hurd 		return rc;
617623ac9029SStephen Hurd 
617723ac9029SStephen Hurd 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
617823ac9029SStephen Hurd 	    i++, p = strsep(&next, " ,")) {
617923ac9029SStephen Hurd 		ndesc[i] = strtoul(p, NULL, 10);
618023ac9029SStephen Hurd 	}
618123ac9029SStephen Hurd 
618223ac9029SStephen Hurd 	return (rc);
618323ac9029SStephen Hurd }
61844c7070dbSScott Long 
61854c7070dbSScott Long #define NAME_BUFLEN 32
61864c7070dbSScott Long static void
61874c7070dbSScott Long iflib_add_device_sysctl_pre(if_ctx_t ctx)
61884c7070dbSScott Long {
61894c7070dbSScott Long         device_t dev = iflib_get_dev(ctx);
61904c7070dbSScott Long 	struct sysctl_oid_list *child, *oid_list;
61914c7070dbSScott Long 	struct sysctl_ctx_list *ctx_list;
61924c7070dbSScott Long 	struct sysctl_oid *node;
61934c7070dbSScott Long 
61944c7070dbSScott Long 	ctx_list = device_get_sysctl_ctx(dev);
61954c7070dbSScott Long 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
61964c7070dbSScott Long 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
61974c7070dbSScott Long 						      CTLFLAG_RD, NULL, "IFLIB fields");
61984c7070dbSScott Long 	oid_list = SYSCTL_CHILDREN(node);
61994c7070dbSScott Long 
620023ac9029SStephen Hurd 	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
620123ac9029SStephen Hurd 		       CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
620223ac9029SStephen Hurd 		       "driver version");
620323ac9029SStephen Hurd 
62044c7070dbSScott Long 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
62054c7070dbSScott Long 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
62064c7070dbSScott Long 			"# of txqs to use, 0 => use default #");
62074c7070dbSScott Long 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
620823ac9029SStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
620923ac9029SStephen Hurd 			"# of rxqs to use, 0 => use default #");
621023ac9029SStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
621123ac9029SStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
621223ac9029SStephen Hurd                        "permit #txq != #rxq");
6213ea351d3fSSean Bruno 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6214ea351d3fSSean Bruno                       CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6215ea351d3fSSean Bruno                       "disable MSIX (default 0)");
6216f4d2154eSStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6217f4d2154eSStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
6218f4d2154eSStephen Hurd                        "set the rx budget");
6219fe51d4cdSStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6220fe51d4cdSStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
6221fe51d4cdSStephen Hurd 		       "cause tx to abdicate instead of running to completion");
62224c7070dbSScott Long 
622323ac9029SStephen Hurd 	/* XXX change for per-queue sizes */
622423ac9029SStephen Hurd 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
622523ac9029SStephen Hurd 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
622623ac9029SStephen Hurd                        mp_ndesc_handler, "A",
622723ac9029SStephen Hurd                        "list of # of tx descriptors to use, 0 = use default #");
622823ac9029SStephen Hurd 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
622923ac9029SStephen Hurd 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
623023ac9029SStephen Hurd                        mp_ndesc_handler, "A",
623123ac9029SStephen Hurd                        "list of # of rx descriptors to use, 0 = use default #");
62324c7070dbSScott Long }
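/*
 * Because the knobs above are CTLFLAG_RWTUN, they can also be set as loader
 * tunables before the driver attaches, e.g. in /boot/loader.conf
 * (hypothetical device name):
 *
 *	dev.foo.0.iflib.override_nrxqs="4"
 *	dev.foo.0.iflib.override_ntxqs="4"
 */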
62334c7070dbSScott Long 
62344c7070dbSScott Long static void
62354c7070dbSScott Long iflib_add_device_sysctl_post(if_ctx_t ctx)
62364c7070dbSScott Long {
62374c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
62384c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
62394c7070dbSScott Long         device_t dev = iflib_get_dev(ctx);
62404c7070dbSScott Long 	struct sysctl_oid_list *child;
62414c7070dbSScott Long 	struct sysctl_ctx_list *ctx_list;
62424c7070dbSScott Long 	iflib_fl_t fl;
62434c7070dbSScott Long 	iflib_txq_t txq;
62444c7070dbSScott Long 	iflib_rxq_t rxq;
62454c7070dbSScott Long 	int i, j;
62464c7070dbSScott Long 	char namebuf[NAME_BUFLEN];
62474c7070dbSScott Long 	char *qfmt;
62484c7070dbSScott Long 	struct sysctl_oid *queue_node, *fl_node, *node;
62494c7070dbSScott Long 	struct sysctl_oid_list *queue_list, *fl_list;
62504c7070dbSScott Long 	ctx_list = device_get_sysctl_ctx(dev);
62514c7070dbSScott Long 
62524c7070dbSScott Long 	node = ctx->ifc_sysctl_node;
62534c7070dbSScott Long 	child = SYSCTL_CHILDREN(node);
62544c7070dbSScott Long 
62554c7070dbSScott Long 	if (scctx->isc_ntxqsets > 100)
62564c7070dbSScott Long 		qfmt = "txq%03d";
62574c7070dbSScott Long 	else if (scctx->isc_ntxqsets > 10)
62584c7070dbSScott Long 		qfmt = "txq%02d";
62594c7070dbSScott Long 	else
62604c7070dbSScott Long 		qfmt = "txq%d";
62614c7070dbSScott Long 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
62624c7070dbSScott Long 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
62634c7070dbSScott Long 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
62644c7070dbSScott Long 					     CTLFLAG_RD, NULL, "Queue Name");
62654c7070dbSScott Long 		queue_list = SYSCTL_CHILDREN(queue_node);
62664c7070dbSScott Long #if MEMORY_LOGGING
62674c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
62684c7070dbSScott Long 				CTLFLAG_RD,
62694c7070dbSScott Long 				&txq->ift_dequeued, "total mbufs freed");
62704c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
62714c7070dbSScott Long 				CTLFLAG_RD,
62724c7070dbSScott Long 				&txq->ift_enqueued, "total mbufs enqueued");
62734c7070dbSScott Long #endif
62744c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
62754c7070dbSScott Long 				   CTLFLAG_RD,
62764c7070dbSScott Long 				   &txq->ift_mbuf_defrag, "# of times m_defrag was called");
62774c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
62784c7070dbSScott Long 				   CTLFLAG_RD,
62794c7070dbSScott Long 				   &txq->ift_pullups, "# of times m_pullup was called");
62804c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
62814c7070dbSScott Long 				   CTLFLAG_RD,
62824c7070dbSScott Long 				   &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
62834c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
62844c7070dbSScott Long 				   CTLFLAG_RD,
628523ac9029SStephen Hurd 				   &txq->ift_no_desc_avail, "# of times no descriptors were available");
62864c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
62874c7070dbSScott Long 				   CTLFLAG_RD,
62884c7070dbSScott Long 				   &txq->ift_map_failed, "# of times dma map failed");
62894c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
62904c7070dbSScott Long 				   CTLFLAG_RD,
62914c7070dbSScott Long 				   &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
62924c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
62934c7070dbSScott Long 				   CTLFLAG_RD,
62944c7070dbSScott Long 				   &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
62954c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
62964c7070dbSScott Long 				   CTLFLAG_RD,
62974c7070dbSScott Long 				   &txq->ift_pidx, 1, "Producer Index");
62984c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
62994c7070dbSScott Long 				   CTLFLAG_RD,
63004c7070dbSScott Long 				   &txq->ift_cidx, 1, "Consumer Index");
63014c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
63024c7070dbSScott Long 				   CTLFLAG_RD,
63034c7070dbSScott Long 				   &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
63044c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
63054c7070dbSScott Long 				   CTLFLAG_RD,
63064c7070dbSScott Long 				   &txq->ift_in_use, 1, "descriptors in use");
63074c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
63084c7070dbSScott Long 				   CTLFLAG_RD,
63094c7070dbSScott Long 				   &txq->ift_processed, "descriptors processed for clean");
63104c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
63114c7070dbSScott Long 				   CTLFLAG_RD,
63124c7070dbSScott Long 				   &txq->ift_cleaned, "total cleaned");
63134c7070dbSScott Long 		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
631495246abbSSean Bruno 				CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
63154c7070dbSScott Long 				0, mp_ring_state_handler, "A", "soft ring state");
63164c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
631795246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->enqueues,
63184c7070dbSScott Long 				       "# of enqueues to the mp_ring for this queue");
63194c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
632095246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->drops,
63214c7070dbSScott Long 				       "# of drops in the mp_ring for this queue");
63224c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
632395246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->starts,
63244c7070dbSScott Long 				       "# of normal consumer starts in the mp_ring for this queue");
63254c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
632695246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->stalls,
63274c7070dbSScott Long 					       "# of consumer stalls in the mp_ring for this queue");
63284c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
632995246abbSSean Bruno 			       CTLFLAG_RD, &txq->ift_br->restarts,
63304c7070dbSScott Long 				       "# of consumer restarts in the mp_ring for this queue");
63314c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
633295246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->abdications,
63334c7070dbSScott Long 				       "# of consumer abdications in the mp_ring for this queue");
63344c7070dbSScott Long 	}
63354c7070dbSScott Long 
63364c7070dbSScott Long 	if (scctx->isc_nrxqsets > 100)
63374c7070dbSScott Long 		qfmt = "rxq%03d";
63384c7070dbSScott Long 	else if (scctx->isc_nrxqsets > 10)
63394c7070dbSScott Long 		qfmt = "rxq%02d";
63404c7070dbSScott Long 	else
63414c7070dbSScott Long 		qfmt = "rxq%d";
63424c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
63434c7070dbSScott Long 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
63444c7070dbSScott Long 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
63454c7070dbSScott Long 					     CTLFLAG_RD, NULL, "Queue Name");
63464c7070dbSScott Long 		queue_list = SYSCTL_CHILDREN(queue_node);
634723ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
63484c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
63494c7070dbSScott Long 				       CTLFLAG_RD,
63504c7070dbSScott Long 				       &rxq->ifr_cq_pidx, 1, "Producer Index");
63514c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
63524c7070dbSScott Long 				       CTLFLAG_RD,
63534c7070dbSScott Long 				       &rxq->ifr_cq_cidx, 1, "Consumer Index");
63544c7070dbSScott Long 		}
6355da69b8f9SSean Bruno 
63564c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
63574c7070dbSScott Long 			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
63584c7070dbSScott Long 			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
63594c7070dbSScott Long 						     CTLFLAG_RD, NULL, "freelist Name");
63604c7070dbSScott Long 			fl_list = SYSCTL_CHILDREN(fl_node);
63614c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
63624c7070dbSScott Long 				       CTLFLAG_RD,
63634c7070dbSScott Long 				       &fl->ifl_pidx, 1, "Producer Index");
63644c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
63654c7070dbSScott Long 				       CTLFLAG_RD,
63664c7070dbSScott Long 				       &fl->ifl_cidx, 1, "Consumer Index");
63674c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
63684c7070dbSScott Long 				       CTLFLAG_RD,
63694c7070dbSScott Long 				       &fl->ifl_credits, 1, "credits available");
63704c7070dbSScott Long #if MEMORY_LOGGING
63714c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
63724c7070dbSScott Long 					CTLFLAG_RD,
63734c7070dbSScott Long 					&fl->ifl_m_enqueued, "mbufs allocated");
63744c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
63754c7070dbSScott Long 					CTLFLAG_RD,
63764c7070dbSScott Long 					&fl->ifl_m_dequeued, "mbufs freed");
63774c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
63784c7070dbSScott Long 					CTLFLAG_RD,
63794c7070dbSScott Long 					&fl->ifl_cl_enqueued, "clusters allocated");
63804c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
63814c7070dbSScott Long 					CTLFLAG_RD,
63824c7070dbSScott Long 					&fl->ifl_cl_dequeued, "clusters freed");
63834c7070dbSScott Long #endif
63844c7070dbSScott Long 
63854c7070dbSScott Long 		}
63864c7070dbSScott Long 	}
63874c7070dbSScott Long 
63884c7070dbSScott Long }
638995246abbSSean Bruno 
639095246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
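/*
 * On strict-alignment platforms the 14-byte Ethernet header leaves the IP
 * header on a 2-byte boundary when the frame starts at a 4-byte-aligned
 * buffer address (an assumption about the receive buffer layout).  The
 * small-frame path below shifts the whole frame forward by ETHER_HDR_LEN so
 * the payload lands back on a 4-byte boundary; larger frames instead have
 * the Ethernet header peeled off into a freshly allocated header mbuf.
 */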
639195246abbSSean Bruno static struct mbuf *
639295246abbSSean Bruno iflib_fixup_rx(struct mbuf *m)
639395246abbSSean Bruno {
639495246abbSSean Bruno 	struct mbuf *n;
639595246abbSSean Bruno 
639695246abbSSean Bruno 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
639795246abbSSean Bruno 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
639895246abbSSean Bruno 		m->m_data += ETHER_HDR_LEN;
639995246abbSSean Bruno 		n = m;
640095246abbSSean Bruno 	} else {
640195246abbSSean Bruno 		MGETHDR(n, M_NOWAIT, MT_DATA);
640295246abbSSean Bruno 		if (n == NULL) {
640395246abbSSean Bruno 			m_freem(m);
640495246abbSSean Bruno 			return (NULL);
640595246abbSSean Bruno 		}
640695246abbSSean Bruno 		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
640795246abbSSean Bruno 		m->m_data += ETHER_HDR_LEN;
640895246abbSSean Bruno 		m->m_len -= ETHER_HDR_LEN;
640995246abbSSean Bruno 		n->m_len = ETHER_HDR_LEN;
641095246abbSSean Bruno 		M_MOVE_PKTHDR(n, m);
641195246abbSSean Bruno 		n->m_next = m;
641295246abbSSean Bruno 	}
641395246abbSSean Bruno 	return (n);
641495246abbSSean Bruno }
641595246abbSSean Bruno #endif
641694618825SMark Johnston 
641794618825SMark Johnston #ifdef NETDUMP
641894618825SMark Johnston static void
641994618825SMark Johnston iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
642094618825SMark Johnston {
642194618825SMark Johnston 	if_ctx_t ctx;
642294618825SMark Johnston 
642394618825SMark Johnston 	ctx = if_getsoftc(ifp);
642494618825SMark Johnston 	CTX_LOCK(ctx);
642594618825SMark Johnston 	*nrxr = NRXQSETS(ctx);
642694618825SMark Johnston 	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
642794618825SMark Johnston 	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
642894618825SMark Johnston 	CTX_UNLOCK(ctx);
642994618825SMark Johnston }
643094618825SMark Johnston 
643194618825SMark Johnston static void
643294618825SMark Johnston iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
643394618825SMark Johnston {
643494618825SMark Johnston 	if_ctx_t ctx;
643594618825SMark Johnston 	if_softc_ctx_t scctx;
643694618825SMark Johnston 	iflib_fl_t fl;
643794618825SMark Johnston 	iflib_rxq_t rxq;
643894618825SMark Johnston 	int i, j;
643994618825SMark Johnston 
644094618825SMark Johnston 	ctx = if_getsoftc(ifp);
644194618825SMark Johnston 	scctx = &ctx->ifc_softc_ctx;
644294618825SMark Johnston 
644394618825SMark Johnston 	switch (event) {
644494618825SMark Johnston 	case NETDUMP_START:
644594618825SMark Johnston 		for (i = 0; i < scctx->isc_nrxqsets; i++) {
644694618825SMark Johnston 			rxq = &ctx->ifc_rxqs[i];
644794618825SMark Johnston 			for (j = 0; j < rxq->ifr_nfl; j++) {
644894618825SMark Johnston 				fl = rxq->ifr_fl;
644994618825SMark Johnston 				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
645094618825SMark Johnston 			}
645194618825SMark Johnston 		}
645294618825SMark Johnston 		iflib_no_tx_batch = 1;
645394618825SMark Johnston 		break;
645494618825SMark Johnston 	default:
645594618825SMark Johnston 		break;
645694618825SMark Johnston 	}
645794618825SMark Johnston }
645894618825SMark Johnston 
645994618825SMark Johnston static int
646094618825SMark Johnston iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
646194618825SMark Johnston {
646294618825SMark Johnston 	if_ctx_t ctx;
646394618825SMark Johnston 	iflib_txq_t txq;
646494618825SMark Johnston 	int error;
646594618825SMark Johnston 
646694618825SMark Johnston 	ctx = if_getsoftc(ifp);
646794618825SMark Johnston 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
646894618825SMark Johnston 	    IFF_DRV_RUNNING)
646994618825SMark Johnston 		return (EBUSY);
647094618825SMark Johnston 
647194618825SMark Johnston 	txq = &ctx->ifc_txqs[0];
647294618825SMark Johnston 	error = iflib_encap(txq, &m);
647394618825SMark Johnston 	if (error == 0)
647494618825SMark Johnston 		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
647594618825SMark Johnston 	return (error);
647694618825SMark Johnston }
647794618825SMark Johnston 
647894618825SMark Johnston static int
647994618825SMark Johnston iflib_netdump_poll(struct ifnet *ifp, int count)
648094618825SMark Johnston {
648194618825SMark Johnston 	if_ctx_t ctx;
648294618825SMark Johnston 	if_softc_ctx_t scctx;
648394618825SMark Johnston 	iflib_txq_t txq;
648494618825SMark Johnston 	int i;
648594618825SMark Johnston 
648694618825SMark Johnston 	ctx = if_getsoftc(ifp);
648794618825SMark Johnston 	scctx = &ctx->ifc_softc_ctx;
648894618825SMark Johnston 
648994618825SMark Johnston 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
649094618825SMark Johnston 	    IFF_DRV_RUNNING)
649194618825SMark Johnston 		return (EBUSY);
649294618825SMark Johnston 
649394618825SMark Johnston 	txq = &ctx->ifc_txqs[0];
649494618825SMark Johnston 	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
649594618825SMark Johnston 
649694618825SMark Johnston 	for (i = 0; i < scctx->isc_nrxqsets; i++)
649794618825SMark Johnston 		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
649894618825SMark Johnston 	return (0);
649994618825SMark Johnston }
650094618825SMark Johnston #endif /* NETDUMP */
6501