xref: /freebsd/sys/net/iflib.c (revision f154ece02e14b711e6f3518ded2f6ec5954e5425)
14c7070dbSScott Long /*-
27b610b60SSean Bruno  * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
34c7070dbSScott Long  * All rights reserved.
44c7070dbSScott Long  *
54c7070dbSScott Long  * Redistribution and use in source and binary forms, with or without
64c7070dbSScott Long  * modification, are permitted provided that the following conditions are met:
74c7070dbSScott Long  *
84c7070dbSScott Long  *  1. Redistributions of source code must retain the above copyright notice,
94c7070dbSScott Long  *     this list of conditions and the following disclaimer.
104c7070dbSScott Long  *
114c7070dbSScott Long  *  2. Neither the name of Matthew Macy nor the names of its
124c7070dbSScott Long  *     contributors may be used to endorse or promote products derived from
134c7070dbSScott Long  *     this software without specific prior written permission.
144c7070dbSScott Long  *
154c7070dbSScott Long  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
164c7070dbSScott Long  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
174c7070dbSScott Long  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
184c7070dbSScott Long  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
194c7070dbSScott Long  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
204c7070dbSScott Long  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
214c7070dbSScott Long  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
224c7070dbSScott Long  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
234c7070dbSScott Long  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
244c7070dbSScott Long  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
254c7070dbSScott Long  * POSSIBILITY OF SUCH DAMAGE.
264c7070dbSScott Long  */
274c7070dbSScott Long 
284c7070dbSScott Long #include <sys/cdefs.h>
294c7070dbSScott Long __FBSDID("$FreeBSD$");
304c7070dbSScott Long 
31aaeb188aSBjoern A. Zeeb #include "opt_inet.h"
32aaeb188aSBjoern A. Zeeb #include "opt_inet6.h"
33aaeb188aSBjoern A. Zeeb #include "opt_acpi.h"
34b103855eSStephen Hurd #include "opt_sched.h"
35aaeb188aSBjoern A. Zeeb 
364c7070dbSScott Long #include <sys/param.h>
374c7070dbSScott Long #include <sys/types.h>
384c7070dbSScott Long #include <sys/bus.h>
394c7070dbSScott Long #include <sys/eventhandler.h>
404c7070dbSScott Long #include <sys/kernel.h>
414c7070dbSScott Long #include <sys/lock.h>
424c7070dbSScott Long #include <sys/mutex.h>
434c7070dbSScott Long #include <sys/module.h>
444c7070dbSScott Long #include <sys/kobj.h>
454c7070dbSScott Long #include <sys/rman.h>
464c7070dbSScott Long #include <sys/sbuf.h>
474c7070dbSScott Long #include <sys/smp.h>
484c7070dbSScott Long #include <sys/socket.h>
4909f6ff4fSMatt Macy #include <sys/sockio.h>
504c7070dbSScott Long #include <sys/sysctl.h>
514c7070dbSScott Long #include <sys/syslog.h>
524c7070dbSScott Long #include <sys/taskqueue.h>
5323ac9029SStephen Hurd #include <sys/limits.h>
544c7070dbSScott Long 
554c7070dbSScott Long #include <net/if.h>
564c7070dbSScott Long #include <net/if_var.h>
574c7070dbSScott Long #include <net/if_types.h>
584c7070dbSScott Long #include <net/if_media.h>
594c7070dbSScott Long #include <net/bpf.h>
604c7070dbSScott Long #include <net/ethernet.h>
614c7070dbSScott Long #include <net/mp_ring.h>
626d49b41eSAndrew Gallatin #include <net/pfil.h>
6335e4e998SStephen Hurd #include <net/vnet.h>
644c7070dbSScott Long 
654c7070dbSScott Long #include <netinet/in.h>
664c7070dbSScott Long #include <netinet/in_pcb.h>
674c7070dbSScott Long #include <netinet/tcp_lro.h>
684c7070dbSScott Long #include <netinet/in_systm.h>
694c7070dbSScott Long #include <netinet/if_ether.h>
704c7070dbSScott Long #include <netinet/ip.h>
714c7070dbSScott Long #include <netinet/ip6.h>
724c7070dbSScott Long #include <netinet/tcp.h>
7335e4e998SStephen Hurd #include <netinet/ip_var.h>
7494618825SMark Johnston #include <netinet/netdump/netdump.h>
7535e4e998SStephen Hurd #include <netinet6/ip6_var.h>
764c7070dbSScott Long 
774c7070dbSScott Long #include <machine/bus.h>
784c7070dbSScott Long #include <machine/in_cksum.h>
794c7070dbSScott Long 
804c7070dbSScott Long #include <vm/vm.h>
814c7070dbSScott Long #include <vm/pmap.h>
824c7070dbSScott Long 
834c7070dbSScott Long #include <dev/led/led.h>
844c7070dbSScott Long #include <dev/pci/pcireg.h>
854c7070dbSScott Long #include <dev/pci/pcivar.h>
864c7070dbSScott Long #include <dev/pci/pci_private.h>
874c7070dbSScott Long 
884c7070dbSScott Long #include <net/iflib.h>
8909f6ff4fSMatt Macy #include <net/iflib_private.h>
904c7070dbSScott Long 
914c7070dbSScott Long #include "ifdi_if.h"
924c7070dbSScott Long 
9377c1fcecSEric Joyner #ifdef PCI_IOV
9477c1fcecSEric Joyner #include <dev/pci/pci_iov.h>
9577c1fcecSEric Joyner #endif
9677c1fcecSEric Joyner 
9787890dbaSSean Bruno #include <sys/bitstring.h>
984c7070dbSScott Long /*
9995246abbSSean Bruno  * Enable accounting of every mbuf as it comes into and goes out of
10095246abbSSean Bruno  * iflib's software descriptor references.
1014c7070dbSScott Long  */
1024c7070dbSScott Long #define MEMORY_LOGGING 0
1034c7070dbSScott Long /*
1044c7070dbSScott Long  * Enable mbuf vectors for compressing long mbuf chains
1054c7070dbSScott Long  */
1064c7070dbSScott Long 
1074c7070dbSScott Long /*
1084c7070dbSScott Long  * NB:
1094c7070dbSScott Long  * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
1104c7070dbSScott Long  *   we prefetch needs to be determined by the time spent in m_free relative to
1114c7070dbSScott Long  *   the cost of a prefetch. This will of course vary based on the workload:
1124c7070dbSScott Long  *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation, which
1134c7070dbSScott Long  *        is quite expensive and thus suggests very little prefetch.
1144c7070dbSScott Long  *      - Small-packet forwarding, which just returns a single mbuf to
1154c7070dbSScott Long  *        UMA, will typically be very fast relative to the cost of a memory
1164c7070dbSScott Long  *        access.
1174c7070dbSScott Long  */
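
/*
 * A minimal sketch (hypothetical, not part of the driver) of what such a
 * tunable could look like: prefetch "iflib_tx_prefetch_dist" descriptors
 * ahead of the entry currently being cleaned.  All names below are
 * illustrative only and assume ntxd is a power of two.
 *
 *	static int iflib_tx_prefetch_dist = 2;	// hypothetical sysctl-backed tunable
 *
 *	for (i = cidx; i != pidx; i = (i + 1) & (ntxd - 1)) {
 *		// warm the cache for the mbuf we will free a few iterations from now
 *		__builtin_prefetch(ifsd_m[(i + iflib_tx_prefetch_dist) & (ntxd - 1)]);
 *		m_free(ifsd_m[i]);
 *		ifsd_m[i] = NULL;
 *	}
 *
 * Expensive m_free() paths (e.g. M_EXT teardown) argue for a small distance,
 * cheap ones (returning a single mbuf to UMA) for a larger one.
 */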
1184c7070dbSScott Long 
1194c7070dbSScott Long 
1204c7070dbSScott Long /*
1214c7070dbSScott Long  * File organization:
1224c7070dbSScott Long  *  - private structures
1234c7070dbSScott Long  *  - iflib private utility functions
1244c7070dbSScott Long  *  - ifnet functions
1254c7070dbSScott Long  *  - vlan registry and other exported functions
1264c7070dbSScott Long  *  - iflib public core functions
1274c7070dbSScott Long  *
1284c7070dbSScott Long  *
1294c7070dbSScott Long  */
13009f6ff4fSMatt Macy MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
1314c7070dbSScott Long 
1324c7070dbSScott Long struct iflib_txq;
1334c7070dbSScott Long typedef struct iflib_txq *iflib_txq_t;
1344c7070dbSScott Long struct iflib_rxq;
1354c7070dbSScott Long typedef struct iflib_rxq *iflib_rxq_t;
1364c7070dbSScott Long struct iflib_fl;
1374c7070dbSScott Long typedef struct iflib_fl *iflib_fl_t;
1384c7070dbSScott Long 
1394ecb427aSSean Bruno struct iflib_ctx;
1404ecb427aSSean Bruno 
1412d873474SStephen Hurd static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
142dd7fbcf1SStephen Hurd static void iflib_timer(void *arg);
1432d873474SStephen Hurd 
1444c7070dbSScott Long typedef struct iflib_filter_info {
1454c7070dbSScott Long 	driver_filter_t *ifi_filter;
1464c7070dbSScott Long 	void *ifi_filter_arg;
1474c7070dbSScott Long 	struct grouptask *ifi_task;
14895246abbSSean Bruno 	void *ifi_ctx;
1494c7070dbSScott Long } *iflib_filter_info_t;
1504c7070dbSScott Long 
1514c7070dbSScott Long struct iflib_ctx {
1524c7070dbSScott Long 	KOBJ_FIELDS;
1534c7070dbSScott Long 	/*
1544c7070dbSScott Long 	 * Pointer to hardware driver's softc
1554c7070dbSScott Long 	 */
1564c7070dbSScott Long 	void *ifc_softc;
1574c7070dbSScott Long 	device_t ifc_dev;
1584c7070dbSScott Long 	if_t ifc_ifp;
1594c7070dbSScott Long 
1604c7070dbSScott Long 	cpuset_t ifc_cpus;
1614c7070dbSScott Long 	if_shared_ctx_t ifc_sctx;
1624c7070dbSScott Long 	struct if_softc_ctx ifc_softc_ctx;
1634c7070dbSScott Long 
164aa8a24d3SStephen Hurd 	struct sx ifc_ctx_sx;
1657b610b60SSean Bruno 	struct mtx ifc_state_mtx;
1664c7070dbSScott Long 
1674c7070dbSScott Long 	iflib_txq_t ifc_txqs;
1684c7070dbSScott Long 	iflib_rxq_t ifc_rxqs;
1694c7070dbSScott Long 	uint32_t ifc_if_flags;
1704c7070dbSScott Long 	uint32_t ifc_flags;
1714c7070dbSScott Long 	uint32_t ifc_max_fl_buf_size;
1721b9d9394SEric Joyner 	uint32_t ifc_rx_mbuf_sz;
1734c7070dbSScott Long 
1744c7070dbSScott Long 	int ifc_link_state;
1754c7070dbSScott Long 	int ifc_link_irq;
1764c7070dbSScott Long 	int ifc_watchdog_events;
1774c7070dbSScott Long 	struct cdev *ifc_led_dev;
1784c7070dbSScott Long 	struct resource *ifc_msix_mem;
1794c7070dbSScott Long 
1804c7070dbSScott Long 	struct if_irq ifc_legacy_irq;
1814c7070dbSScott Long 	struct grouptask ifc_admin_task;
1824c7070dbSScott Long 	struct grouptask ifc_vflr_task;
1834c7070dbSScott Long 	struct iflib_filter_info ifc_filter_info;
1844c7070dbSScott Long 	struct ifmedia	ifc_media;
1854c7070dbSScott Long 
1864c7070dbSScott Long 	struct sysctl_oid *ifc_sysctl_node;
1874c7070dbSScott Long 	uint16_t ifc_sysctl_ntxqs;
1884c7070dbSScott Long 	uint16_t ifc_sysctl_nrxqs;
18923ac9029SStephen Hurd 	uint16_t ifc_sysctl_qs_eq_override;
190f4d2154eSStephen Hurd 	uint16_t ifc_sysctl_rx_budget;
191fe51d4cdSStephen Hurd 	uint16_t ifc_sysctl_tx_abdicate;
192*f154ece0SStephen Hurd 	uint16_t ifc_sysctl_core_offset;
193*f154ece0SStephen Hurd #define	CORE_OFFSET_UNSPECIFIED	0xffff
194*f154ece0SStephen Hurd 	uint8_t  ifc_sysctl_separate_txrx;
19523ac9029SStephen Hurd 
19695246abbSSean Bruno 	qidx_t ifc_sysctl_ntxds[8];
19795246abbSSean Bruno 	qidx_t ifc_sysctl_nrxds[8];
1984c7070dbSScott Long 	struct if_txrx ifc_txrx;
1994c7070dbSScott Long #define isc_txd_encap  ifc_txrx.ift_txd_encap
2004c7070dbSScott Long #define isc_txd_flush  ifc_txrx.ift_txd_flush
2014c7070dbSScott Long #define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
2024c7070dbSScott Long #define isc_rxd_available ifc_txrx.ift_rxd_available
2034c7070dbSScott Long #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
2044c7070dbSScott Long #define isc_rxd_refill ifc_txrx.ift_rxd_refill
2054c7070dbSScott Long #define isc_rxd_flush ifc_txrx.ift_rxd_flush
2084c7070dbSScott Long #define isc_legacy_intr ifc_txrx.ift_legacy_intr
2094c7070dbSScott Long 	eventhandler_tag ifc_vlan_attach_event;
2104c7070dbSScott Long 	eventhandler_tag ifc_vlan_detach_event;
2111fd8c72cSKyle Evans 	struct ether_addr ifc_mac;
2124c7070dbSScott Long 	char ifc_mtx_name[16];
2134c7070dbSScott Long };
2144c7070dbSScott Long 
2154c7070dbSScott Long 
2164c7070dbSScott Long void *
2174c7070dbSScott Long iflib_get_softc(if_ctx_t ctx)
2184c7070dbSScott Long {
2194c7070dbSScott Long 
2204c7070dbSScott Long 	return (ctx->ifc_softc);
2214c7070dbSScott Long }
2224c7070dbSScott Long 
2234c7070dbSScott Long device_t
2244c7070dbSScott Long iflib_get_dev(if_ctx_t ctx)
2254c7070dbSScott Long {
2264c7070dbSScott Long 
2274c7070dbSScott Long 	return (ctx->ifc_dev);
2284c7070dbSScott Long }
2294c7070dbSScott Long 
2304c7070dbSScott Long if_t
2314c7070dbSScott Long iflib_get_ifp(if_ctx_t ctx)
2324c7070dbSScott Long {
2334c7070dbSScott Long 
2344c7070dbSScott Long 	return (ctx->ifc_ifp);
2354c7070dbSScott Long }
2364c7070dbSScott Long 
2374c7070dbSScott Long struct ifmedia *
2384c7070dbSScott Long iflib_get_media(if_ctx_t ctx)
2394c7070dbSScott Long {
2404c7070dbSScott Long 
2414c7070dbSScott Long 	return (&ctx->ifc_media);
2424c7070dbSScott Long }
2434c7070dbSScott Long 
24409f6ff4fSMatt Macy uint32_t
24509f6ff4fSMatt Macy iflib_get_flags(if_ctx_t ctx)
24609f6ff4fSMatt Macy {
24709f6ff4fSMatt Macy 	return (ctx->ifc_flags);
24809f6ff4fSMatt Macy }
24909f6ff4fSMatt Macy 
25009f6ff4fSMatt Macy void
2514c7070dbSScott Long iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
2524c7070dbSScott Long {
2534c7070dbSScott Long 
2541fd8c72cSKyle Evans 	bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
2554c7070dbSScott Long }
2564c7070dbSScott Long 
2574c7070dbSScott Long if_softc_ctx_t
2584c7070dbSScott Long iflib_get_softc_ctx(if_ctx_t ctx)
2594c7070dbSScott Long {
2604c7070dbSScott Long 
2614c7070dbSScott Long 	return (&ctx->ifc_softc_ctx);
2624c7070dbSScott Long }
2634c7070dbSScott Long 
2644c7070dbSScott Long if_shared_ctx_t
2654c7070dbSScott Long iflib_get_sctx(if_ctx_t ctx)
2664c7070dbSScott Long {
2674c7070dbSScott Long 
2684c7070dbSScott Long 	return (ctx->ifc_sctx);
2694c7070dbSScott Long }
2704c7070dbSScott Long 
27195246abbSSean Bruno #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
2724c7070dbSScott Long #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
2735e888388SSean Bruno #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
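
/*
 * Illustrative examples only: with CACHE_LINE_SIZE == 64, CACHE_PTR_NEXT()
 * rounds 0x1008 up to 0x1040 (the start of the next cache line) and
 * CACHE_PTR_INCREMENT is 8 pointers per line on LP64.  IP_ALIGNED() is true
 * when m_data sits 2 bytes past a 4-byte boundary, so the IP header that
 * follows a 14-byte Ethernet header ends up 32-bit aligned.
 */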
2744c7070dbSScott Long 
2754c7070dbSScott Long #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
2764c7070dbSScott Long #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
2774c7070dbSScott Long 
278e035717eSSean Bruno typedef struct iflib_sw_rx_desc_array {
279e035717eSSean Bruno 	bus_dmamap_t	*ifsd_map;         /* bus_dma maps for packet */
280e035717eSSean Bruno 	struct mbuf	**ifsd_m;           /* pkthdr mbufs */
281e035717eSSean Bruno 	caddr_t		*ifsd_cl;          /* direct cluster pointer for rx */
282fbec776dSAndrew Gallatin 	bus_addr_t	*ifsd_ba;          /* bus addr of cluster for rx */
283e035717eSSean Bruno } iflib_rxsd_array_t;
2844c7070dbSScott Long 
2854c7070dbSScott Long typedef struct iflib_sw_tx_desc_array {
2864c7070dbSScott Long 	bus_dmamap_t    *ifsd_map;         /* bus_dma maps for packet */
2878a04b53dSKonstantin Belousov 	bus_dmamap_t	*ifsd_tso_map;     /* bus_dma maps for TSO packet */
2884c7070dbSScott Long 	struct mbuf    **ifsd_m;           /* pkthdr mbufs */
28995246abbSSean Bruno } if_txsd_vec_t;
2904c7070dbSScott Long 
2914c7070dbSScott Long 
2924c7070dbSScott Long /* magic number that should be high enough for any hardware */
2934c7070dbSScott Long #define IFLIB_MAX_TX_SEGS		128
29495246abbSSean Bruno #define IFLIB_RX_COPY_THRESH		128
2954c7070dbSScott Long #define IFLIB_MAX_RX_REFRESH		32
29695246abbSSean Bruno /* The minimum descriptors per second before we start coalescing */
29795246abbSSean Bruno #define IFLIB_MIN_DESC_SEC		16384
29895246abbSSean Bruno #define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
2994c7070dbSScott Long #define IFLIB_QUEUE_IDLE		0
3004c7070dbSScott Long #define IFLIB_QUEUE_HUNG		1
3014c7070dbSScott Long #define IFLIB_QUEUE_WORKING		2
30295246abbSSean Bruno /* maximum number of txqs that can share an rx interrupt */
30395246abbSSean Bruno #define IFLIB_MAX_TX_SHARED_INTR	4
3044c7070dbSScott Long 
30595246abbSSean Bruno /* this should really scale with ring size - this is a fairly arbitrary value */
30695246abbSSean Bruno #define TX_BATCH_SIZE			32
3074c7070dbSScott Long 
3084c7070dbSScott Long #define IFLIB_RESTART_BUDGET		8
3094c7070dbSScott Long 
3104c7070dbSScott Long 
3114c7070dbSScott Long #define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
3124c7070dbSScott Long 				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
3134c7070dbSScott Long 				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
3144c7070dbSScott Long struct iflib_txq {
31595246abbSSean Bruno 	qidx_t		ift_in_use;
31695246abbSSean Bruno 	qidx_t		ift_cidx;
31795246abbSSean Bruno 	qidx_t		ift_cidx_processed;
31895246abbSSean Bruno 	qidx_t		ift_pidx;
3194c7070dbSScott Long 	uint8_t		ift_gen;
32023ac9029SStephen Hurd 	uint8_t		ift_br_offset;
32195246abbSSean Bruno 	uint16_t	ift_npending;
32295246abbSSean Bruno 	uint16_t	ift_db_pending;
32395246abbSSean Bruno 	uint16_t	ift_rs_pending;
3244c7070dbSScott Long 	/* implicit pad */
32595246abbSSean Bruno 	uint8_t		ift_txd_size[8];
3264c7070dbSScott Long 	uint64_t	ift_processed;
3274c7070dbSScott Long 	uint64_t	ift_cleaned;
32895246abbSSean Bruno 	uint64_t	ift_cleaned_prev;
3294c7070dbSScott Long #if MEMORY_LOGGING
3304c7070dbSScott Long 	uint64_t	ift_enqueued;
3314c7070dbSScott Long 	uint64_t	ift_dequeued;
3324c7070dbSScott Long #endif
3334c7070dbSScott Long 	uint64_t	ift_no_tx_dma_setup;
3344c7070dbSScott Long 	uint64_t	ift_no_desc_avail;
3354c7070dbSScott Long 	uint64_t	ift_mbuf_defrag_failed;
3364c7070dbSScott Long 	uint64_t	ift_mbuf_defrag;
3374c7070dbSScott Long 	uint64_t	ift_map_failed;
3384c7070dbSScott Long 	uint64_t	ift_txd_encap_efbig;
3394c7070dbSScott Long 	uint64_t	ift_pullups;
340dd7fbcf1SStephen Hurd 	uint64_t	ift_last_timer_tick;
3414c7070dbSScott Long 
3424c7070dbSScott Long 	struct mtx	ift_mtx;
3434c7070dbSScott Long 	struct mtx	ift_db_mtx;
3444c7070dbSScott Long 
3454c7070dbSScott Long 	/* constant values */
3464c7070dbSScott Long 	if_ctx_t	ift_ctx;
34795246abbSSean Bruno 	struct ifmp_ring        *ift_br;
3484c7070dbSScott Long 	struct grouptask	ift_task;
34995246abbSSean Bruno 	qidx_t		ift_size;
3504c7070dbSScott Long 	uint16_t	ift_id;
3514c7070dbSScott Long 	struct callout	ift_timer;
3524c7070dbSScott Long 
35395246abbSSean Bruno 	if_txsd_vec_t	ift_sds;
3544c7070dbSScott Long 	uint8_t		ift_qstatus;
3554c7070dbSScott Long 	uint8_t		ift_closed;
35695246abbSSean Bruno 	uint8_t		ift_update_freq;
3574c7070dbSScott Long 	struct iflib_filter_info ift_filter_info;
358bfce461eSMarius Strobl 	bus_dma_tag_t	ift_buf_tag;
359bfce461eSMarius Strobl 	bus_dma_tag_t	ift_tso_buf_tag;
3604c7070dbSScott Long 	iflib_dma_info_t	ift_ifdi;
3614c7070dbSScott Long #define MTX_NAME_LEN 16
3624c7070dbSScott Long 	char                    ift_mtx_name[MTX_NAME_LEN];
3634c7070dbSScott Long 	char                    ift_db_mtx_name[MTX_NAME_LEN];
3644c7070dbSScott Long 	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
3651248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
3661248952aSSean Bruno 	uint64_t ift_cpu_exec_count[256];
3671248952aSSean Bruno #endif
3684c7070dbSScott Long } __aligned(CACHE_LINE_SIZE);
3694c7070dbSScott Long 
3704c7070dbSScott Long struct iflib_fl {
37195246abbSSean Bruno 	qidx_t		ifl_cidx;
37295246abbSSean Bruno 	qidx_t		ifl_pidx;
37395246abbSSean Bruno 	qidx_t		ifl_credits;
3744c7070dbSScott Long 	uint8_t		ifl_gen;
37595246abbSSean Bruno 	uint8_t		ifl_rxd_size;
3764c7070dbSScott Long #if MEMORY_LOGGING
3774c7070dbSScott Long 	uint64_t	ifl_m_enqueued;
3784c7070dbSScott Long 	uint64_t	ifl_m_dequeued;
3794c7070dbSScott Long 	uint64_t	ifl_cl_enqueued;
3804c7070dbSScott Long 	uint64_t	ifl_cl_dequeued;
3814c7070dbSScott Long #endif
3824c7070dbSScott Long 	/* implicit pad */
3834c7070dbSScott Long 
38487890dbaSSean Bruno 	bitstr_t 	*ifl_rx_bitmap;
38587890dbaSSean Bruno 	qidx_t		ifl_fragidx;
3864c7070dbSScott Long 	/* constant */
38795246abbSSean Bruno 	qidx_t		ifl_size;
3884c7070dbSScott Long 	uint16_t	ifl_buf_size;
3894c7070dbSScott Long 	uint16_t	ifl_cltype;
3904c7070dbSScott Long 	uma_zone_t	ifl_zone;
391e035717eSSean Bruno 	iflib_rxsd_array_t	ifl_sds;
3924c7070dbSScott Long 	iflib_rxq_t	ifl_rxq;
3934c7070dbSScott Long 	uint8_t		ifl_id;
394bfce461eSMarius Strobl 	bus_dma_tag_t	ifl_buf_tag;
3954c7070dbSScott Long 	iflib_dma_info_t	ifl_ifdi;
3964c7070dbSScott Long 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
3974c7070dbSScott Long 	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
39895246abbSSean Bruno 	qidx_t	ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
3994c7070dbSScott Long }  __aligned(CACHE_LINE_SIZE);
4004c7070dbSScott Long 
40195246abbSSean Bruno static inline qidx_t
40295246abbSSean Bruno get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
4034c7070dbSScott Long {
40495246abbSSean Bruno 	qidx_t used;
4054c7070dbSScott Long 
4064c7070dbSScott Long 	if (pidx > cidx)
4074c7070dbSScott Long 		used = pidx - cidx;
4084c7070dbSScott Long 	else if (pidx < cidx)
4094c7070dbSScott Long 		used = size - cidx + pidx;
4104c7070dbSScott Long 	else if (gen == 0 && pidx == cidx)
4114c7070dbSScott Long 		used = 0;
4124c7070dbSScott Long 	else if (gen == 1 && pidx == cidx)
4134c7070dbSScott Long 		used = size;
4144c7070dbSScott Long 	else
4154c7070dbSScott Long 		panic("bad state");
4164c7070dbSScott Long 
4174c7070dbSScott Long 	return (used);
4184c7070dbSScott Long }
4194c7070dbSScott Long 
4204c7070dbSScott Long #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
4214c7070dbSScott Long 
4224c7070dbSScott Long #define IDXDIFF(head, tail, wrap) \
4234c7070dbSScott Long 	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
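
/*
 * Worked example (illustrative only): on a 1024-descriptor ring with
 * cidx == 1020 and pidx == 5, get_inuse() returns 1024 - 1020 + 5 = 9.
 * When pidx == cidx, the generation bit disambiguates the two boundary
 * cases: gen == 0 means empty, gen == 1 means full.  IDXDIFF() computes
 * the same wrapped distance for an arbitrary head/tail pair.
 */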
4244c7070dbSScott Long 
4254c7070dbSScott Long struct iflib_rxq {
4264c7070dbSScott Long 	/* If there is a separate completion queue -
4274c7070dbSScott Long 	 * these are the cq cidx and pidx. Otherwise
4284c7070dbSScott Long 	 * these are unused.
4294c7070dbSScott Long 	 */
43095246abbSSean Bruno 	qidx_t		ifr_size;
43195246abbSSean Bruno 	qidx_t		ifr_cq_cidx;
43295246abbSSean Bruno 	qidx_t		ifr_cq_pidx;
4334c7070dbSScott Long 	uint8_t		ifr_cq_gen;
43423ac9029SStephen Hurd 	uint8_t		ifr_fl_offset;
4354c7070dbSScott Long 
4364c7070dbSScott Long 	if_ctx_t	ifr_ctx;
4374c7070dbSScott Long 	iflib_fl_t	ifr_fl;
4384c7070dbSScott Long 	uint64_t	ifr_rx_irq;
4396d49b41eSAndrew Gallatin 	struct pfil_head	*pfil;
4404c7070dbSScott Long 	uint16_t	ifr_id;
4414c7070dbSScott Long 	uint8_t		ifr_lro_enabled;
4424c7070dbSScott Long 	uint8_t		ifr_nfl;
44395246abbSSean Bruno 	uint8_t		ifr_ntxqirq;
44495246abbSSean Bruno 	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
4454c7070dbSScott Long 	struct lro_ctrl			ifr_lc;
4464c7070dbSScott Long 	struct grouptask        ifr_task;
4474c7070dbSScott Long 	struct iflib_filter_info ifr_filter_info;
4484c7070dbSScott Long 	iflib_dma_info_t		ifr_ifdi;
449ab2e3f79SStephen Hurd 
4504c7070dbSScott Long 	/* dynamically allocate if any drivers need a value substantially larger than this */
4514c7070dbSScott Long 	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
4521248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
4531248952aSSean Bruno 	uint64_t ifr_cpu_exec_count[256];
4541248952aSSean Bruno #endif
4554c7070dbSScott Long }  __aligned(CACHE_LINE_SIZE);
4564c7070dbSScott Long 
45795246abbSSean Bruno typedef struct if_rxsd {
45895246abbSSean Bruno 	caddr_t *ifsd_cl;
45995246abbSSean Bruno 	iflib_fl_t ifsd_fl;
46095246abbSSean Bruno 	qidx_t ifsd_cidx;
46195246abbSSean Bruno } *if_rxsd_t;
46295246abbSSean Bruno 
46395246abbSSean Bruno /* multiple of word size */
46495246abbSSean Bruno #ifdef __LP64__
465ab2e3f79SStephen Hurd #define PKT_INFO_SIZE	6
46695246abbSSean Bruno #define RXD_INFO_SIZE	5
46795246abbSSean Bruno #define PKT_TYPE uint64_t
46895246abbSSean Bruno #else
469ab2e3f79SStephen Hurd #define PKT_INFO_SIZE	11
47095246abbSSean Bruno #define RXD_INFO_SIZE	8
47195246abbSSean Bruno #define PKT_TYPE uint32_t
47295246abbSSean Bruno #endif
47395246abbSSean Bruno #define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
47495246abbSSean Bruno #define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
47595246abbSSean Bruno 
47695246abbSSean Bruno typedef struct if_pkt_info_pad {
47795246abbSSean Bruno 	PKT_TYPE pkt_val[PKT_INFO_SIZE];
47895246abbSSean Bruno } *if_pkt_info_pad_t;
47995246abbSSean Bruno typedef struct if_rxd_info_pad {
48095246abbSSean Bruno 	PKT_TYPE rxd_val[RXD_INFO_SIZE];
48195246abbSSean Bruno } *if_rxd_info_pad_t;
48295246abbSSean Bruno 
48395246abbSSean Bruno CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
48495246abbSSean Bruno CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
48595246abbSSean Bruno 
48695246abbSSean Bruno 
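/*
 * Zero an if_pkt_info by storing word-sized zeros through the pad overlay
 * rather than calling bzero().  The CTASSERTs above guarantee that the pad
 * structs exactly cover the real structs, so the stores below clear every
 * byte of the structure.  rxd_info_zero() below uses the same technique.
 */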
48795246abbSSean Bruno static inline void
48895246abbSSean Bruno pkt_info_zero(if_pkt_info_t pi)
48995246abbSSean Bruno {
49095246abbSSean Bruno 	if_pkt_info_pad_t pi_pad;
49195246abbSSean Bruno 
49295246abbSSean Bruno 	pi_pad = (if_pkt_info_pad_t)pi;
49395246abbSSean Bruno 	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
49495246abbSSean Bruno 	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
49595246abbSSean Bruno #ifndef __LP64__
496ab2e3f79SStephen Hurd 	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
497ab2e3f79SStephen Hurd 	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
49895246abbSSean Bruno #endif
49995246abbSSean Bruno }
50095246abbSSean Bruno 
50109f6ff4fSMatt Macy static device_method_t iflib_pseudo_methods[] = {
50209f6ff4fSMatt Macy 	DEVMETHOD(device_attach, noop_attach),
50309f6ff4fSMatt Macy 	DEVMETHOD(device_detach, iflib_pseudo_detach),
50409f6ff4fSMatt Macy 	DEVMETHOD_END
50509f6ff4fSMatt Macy };
50609f6ff4fSMatt Macy 
50709f6ff4fSMatt Macy driver_t iflib_pseudodriver = {
50809f6ff4fSMatt Macy 	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
50909f6ff4fSMatt Macy };
51009f6ff4fSMatt Macy 
51195246abbSSean Bruno static inline void
51295246abbSSean Bruno rxd_info_zero(if_rxd_info_t ri)
51395246abbSSean Bruno {
51495246abbSSean Bruno 	if_rxd_info_pad_t ri_pad;
51595246abbSSean Bruno 	int i;
51695246abbSSean Bruno 
51795246abbSSean Bruno 	ri_pad = (if_rxd_info_pad_t)ri;
51895246abbSSean Bruno 	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
51995246abbSSean Bruno 		ri_pad->rxd_val[i] = 0;
52095246abbSSean Bruno 		ri_pad->rxd_val[i+1] = 0;
52195246abbSSean Bruno 		ri_pad->rxd_val[i+2] = 0;
52295246abbSSean Bruno 		ri_pad->rxd_val[i+3] = 0;
52395246abbSSean Bruno 	}
52495246abbSSean Bruno #ifdef __LP64__
52595246abbSSean Bruno 	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
52695246abbSSean Bruno #endif
52795246abbSSean Bruno }
52895246abbSSean Bruno 
5294c7070dbSScott Long /*
5304c7070dbSScott Long  * Only allow a single packet to take up at most 1/nth of the tx ring
5314c7070dbSScott Long  */
5324c7070dbSScott Long #define MAX_SINGLE_PACKET_FRACTION 12
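
/*
 * For example (illustrative only), with a 1024-descriptor tx ring a single
 * packet is limited to 1024 / 12 = 85 descriptors worth of segments.
 */
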
5334c7070dbSScott Long #define IF_BAD_DMA (bus_addr_t)-1
5344c7070dbSScott Long 
5354c7070dbSScott Long #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
5364c7070dbSScott Long 
537aa8a24d3SStephen Hurd #define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
538aa8a24d3SStephen Hurd #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
539aa8a24d3SStephen Hurd #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
540aa8a24d3SStephen Hurd #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
5414c7070dbSScott Long 
5427b610b60SSean Bruno 
5437b610b60SSean Bruno #define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
5447b610b60SSean Bruno #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
5457b610b60SSean Bruno #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
5467b610b60SSean Bruno #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
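
/*
 * Descriptive note: the ctx lock is a sleepable sx lock used for slow-path
 * configuration (attach, detach, reinit), while the state mutex protects
 * ifc_flags and link-state updates that may be made from non-sleepable
 * contexts such as callouts and taskqueues.
 */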
5477b610b60SSean Bruno 
548ab2e3f79SStephen Hurd 
5494c7070dbSScott Long 
5504c7070dbSScott Long #define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
5514c7070dbSScott Long #define CALLOUT_UNLOCK(txq) 	mtx_unlock(&txq->ift_mtx)
5524c7070dbSScott Long 
55377c1fcecSEric Joyner void
55477c1fcecSEric Joyner iflib_set_detach(if_ctx_t ctx)
55577c1fcecSEric Joyner {
55677c1fcecSEric Joyner 	STATE_LOCK(ctx);
55777c1fcecSEric Joyner 	ctx->ifc_flags |= IFC_IN_DETACH;
55877c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
55977c1fcecSEric Joyner }
5604c7070dbSScott Long 
5614c7070dbSScott Long /* Our boot-time initialization hook */
5624c7070dbSScott Long static int	iflib_module_event_handler(module_t, int, void *);
5634c7070dbSScott Long 
5644c7070dbSScott Long static moduledata_t iflib_moduledata = {
5654c7070dbSScott Long 	"iflib",
5664c7070dbSScott Long 	iflib_module_event_handler,
5674c7070dbSScott Long 	NULL
5684c7070dbSScott Long };
5694c7070dbSScott Long 
5704c7070dbSScott Long DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
5714c7070dbSScott Long MODULE_VERSION(iflib, 1);
5724c7070dbSScott Long 
5734c7070dbSScott Long MODULE_DEPEND(iflib, pci, 1, 1, 1);
5744c7070dbSScott Long MODULE_DEPEND(iflib, ether, 1, 1, 1);
5754c7070dbSScott Long 
576ab2e3f79SStephen Hurd TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
577ab2e3f79SStephen Hurd TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
578ab2e3f79SStephen Hurd 
5794c7070dbSScott Long #ifndef IFLIB_DEBUG_COUNTERS
5804c7070dbSScott Long #ifdef INVARIANTS
5814c7070dbSScott Long #define IFLIB_DEBUG_COUNTERS 1
5824c7070dbSScott Long #else
5834c7070dbSScott Long #define IFLIB_DEBUG_COUNTERS 0
5844c7070dbSScott Long #endif /* !INVARIANTS */
5854c7070dbSScott Long #endif
5864c7070dbSScott Long 
587ab2e3f79SStephen Hurd static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
588ab2e3f79SStephen Hurd                    "iflib driver parameters");
589ab2e3f79SStephen Hurd 
5904c7070dbSScott Long /*
5914c7070dbSScott Long  * XXX need to ensure that this can't accidentally cause the head to be moved backwards
5924c7070dbSScott Long  */
5934c7070dbSScott Long static int iflib_min_tx_latency = 0;
5944c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
595da69b8f9SSean Bruno 		   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
59695246abbSSean Bruno static int iflib_no_tx_batch = 0;
59795246abbSSean Bruno SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
59895246abbSSean Bruno 		   &iflib_no_tx_batch, 0, "disable transmit batching at the possible expense of throughput");
5994c7070dbSScott Long 
6004c7070dbSScott Long 
6014c7070dbSScott Long #if IFLIB_DEBUG_COUNTERS
6024c7070dbSScott Long 
6034c7070dbSScott Long static int iflib_tx_seen;
6044c7070dbSScott Long static int iflib_tx_sent;
6054c7070dbSScott Long static int iflib_tx_encap;
6064c7070dbSScott Long static int iflib_rx_allocs;
6074c7070dbSScott Long static int iflib_fl_refills;
6084c7070dbSScott Long static int iflib_fl_refills_large;
6094c7070dbSScott Long static int iflib_tx_frees;
6104c7070dbSScott Long 
6114c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
6124c7070dbSScott Long 		   &iflib_tx_seen, 0, "# tx mbufs seen");
6134c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
6144c7070dbSScott Long 		   &iflib_tx_sent, 0, "# tx mbufs sent");
6154c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
6164c7070dbSScott Long 		   &iflib_tx_encap, 0, "# tx mbufs encapped");
6174c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
6184c7070dbSScott Long 		   &iflib_tx_frees, 0, "# tx frees");
6194c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
6204c7070dbSScott Long 		   &iflib_rx_allocs, 0, "# rx allocations");
6214c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
6224c7070dbSScott Long 		   &iflib_fl_refills, 0, "# refills");
6234c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
6244c7070dbSScott Long 		   &iflib_fl_refills_large, 0, "# large refills");
6254c7070dbSScott Long 
6264c7070dbSScott Long 
6274c7070dbSScott Long static int iflib_txq_drain_flushing;
6284c7070dbSScott Long static int iflib_txq_drain_oactive;
6294c7070dbSScott Long static int iflib_txq_drain_notready;
6304c7070dbSScott Long 
6314c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
6324c7070dbSScott Long 		   &iflib_txq_drain_flushing, 0, "# drain flushes");
6334c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
6344c7070dbSScott Long 		   &iflib_txq_drain_oactive, 0, "# drain oactives");
6354c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
6364c7070dbSScott Long 		   &iflib_txq_drain_notready, 0, "# drain notready");
6374c7070dbSScott Long 
6384c7070dbSScott Long 
6394c7070dbSScott Long static int iflib_encap_load_mbuf_fail;
640d14c853bSStephen Hurd static int iflib_encap_pad_mbuf_fail;
6414c7070dbSScott Long static int iflib_encap_txq_avail_fail;
6424c7070dbSScott Long static int iflib_encap_txd_encap_fail;
6434c7070dbSScott Long 
6444c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
6454c7070dbSScott Long 		   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
646d14c853bSStephen Hurd SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
647d14c853bSStephen Hurd 		   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
6484c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
6494c7070dbSScott Long 		   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
6504c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
6514c7070dbSScott Long 		   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
6524c7070dbSScott Long 
6534c7070dbSScott Long static int iflib_task_fn_rxs;
6544c7070dbSScott Long static int iflib_rx_intr_enables;
6554c7070dbSScott Long static int iflib_fast_intrs;
6564c7070dbSScott Long static int iflib_rx_unavail;
6574c7070dbSScott Long static int iflib_rx_ctx_inactive;
6584c7070dbSScott Long static int iflib_rx_if_input;
6594c7070dbSScott Long static int iflib_rxd_flush;
6604c7070dbSScott Long 
6614c7070dbSScott Long static int iflib_verbose_debug;
6624c7070dbSScott Long 
6634c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
6644c7070dbSScott Long 		   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
6654c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
6664c7070dbSScott Long 		   &iflib_rx_intr_enables, 0, "# rx intr enables");
6674c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
6684c7070dbSScott Long 		   &iflib_fast_intrs, 0, "# fast_intr calls");
6694c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
6704c7070dbSScott Long 		   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
6714c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
6724c7070dbSScott Long 		   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
6734c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
6744c7070dbSScott Long 		   &iflib_rx_if_input, 0, "# times rxeof called if_input");
6754c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
6764c7070dbSScott Long 	         &iflib_rxd_flush, 0, "# times rxd_flush called");
6774c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
6784c7070dbSScott Long 		   &iflib_verbose_debug, 0, "enable verbose debugging");
6794c7070dbSScott Long 
6804c7070dbSScott Long #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
681da69b8f9SSean Bruno static void
682da69b8f9SSean Bruno iflib_debug_reset(void)
683da69b8f9SSean Bruno {
684da69b8f9SSean Bruno 	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
685da69b8f9SSean Bruno 		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
686da69b8f9SSean Bruno 		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
68764e6fc13SStephen Hurd 		iflib_txq_drain_notready =
688d14c853bSStephen Hurd 		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
689d14c853bSStephen Hurd 		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
690d14c853bSStephen Hurd 		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
69164e6fc13SStephen Hurd 		iflib_rx_unavail =
69264e6fc13SStephen Hurd 		iflib_rx_ctx_inactive = iflib_rx_if_input =
6936d49b41eSAndrew Gallatin 		iflib_rxd_flush = 0;
694da69b8f9SSean Bruno }
6954c7070dbSScott Long 
6964c7070dbSScott Long #else
6974c7070dbSScott Long #define DBG_COUNTER_INC(name)
698da69b8f9SSean Bruno static void iflib_debug_reset(void) {}
6994c7070dbSScott Long #endif
7004c7070dbSScott Long 
7014c7070dbSScott Long #define IFLIB_DEBUG 0
7024c7070dbSScott Long 
7034c7070dbSScott Long static void iflib_tx_structures_free(if_ctx_t ctx);
7044c7070dbSScott Long static void iflib_rx_structures_free(if_ctx_t ctx);
7054c7070dbSScott Long static int iflib_queues_alloc(if_ctx_t ctx);
7064c7070dbSScott Long static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
70795246abbSSean Bruno static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
7084c7070dbSScott Long static int iflib_qset_structures_setup(if_ctx_t ctx);
7094c7070dbSScott Long static int iflib_msix_init(if_ctx_t ctx);
7103e0e6330SStephen Hurd static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
7114c7070dbSScott Long static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
7124c7070dbSScott Long static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
713b8ca4756SPatrick Kelsey #ifdef ALTQ
714b8ca4756SPatrick Kelsey static void iflib_altq_if_start(if_t ifp);
715b8ca4756SPatrick Kelsey static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
716b8ca4756SPatrick Kelsey #endif
7174c7070dbSScott Long static int iflib_register(if_ctx_t);
7184c7070dbSScott Long static void iflib_init_locked(if_ctx_t ctx);
7194c7070dbSScott Long static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
7204c7070dbSScott Long static void iflib_add_device_sysctl_post(if_ctx_t ctx);
721da69b8f9SSean Bruno static void iflib_ifmp_purge(iflib_txq_t txq);
7221248952aSSean Bruno static void _iflib_pre_assert(if_softc_ctx_t scctx);
72395246abbSSean Bruno static void iflib_if_init_locked(if_ctx_t ctx);
72477c1fcecSEric Joyner static void iflib_free_intr_mem(if_ctx_t ctx);
72595246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
72695246abbSSean Bruno static struct mbuf * iflib_fixup_rx(struct mbuf *m);
72795246abbSSean Bruno #endif
7284c7070dbSScott Long 
729*f154ece0SStephen Hurd static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
730*f154ece0SStephen Hurd     SLIST_HEAD_INITIALIZER(cpu_offsets);
731*f154ece0SStephen Hurd struct cpu_offset {
732*f154ece0SStephen Hurd 	SLIST_ENTRY(cpu_offset) entries;
733*f154ece0SStephen Hurd 	cpuset_t	set;
734*f154ece0SStephen Hurd 	unsigned int	refcount;
735*f154ece0SStephen Hurd 	uint16_t	offset;
736*f154ece0SStephen Hurd };
737*f154ece0SStephen Hurd static struct mtx cpu_offset_mtx;
738*f154ece0SStephen Hurd MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
739*f154ece0SStephen Hurd     MTX_DEF);
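
/*
 * Descriptive note: each cpu_offset entry remembers, for a given cpuset,
 * how far into that set the next interface should start taking queue CPUs,
 * so that multiple iflib interfaces sharing a cpuset spread their queues
 * across different cores instead of all starting from the same CPU.
 */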
740*f154ece0SStephen Hurd 
74194618825SMark Johnston NETDUMP_DEFINE(iflib);
74294618825SMark Johnston 
7434c7070dbSScott Long #ifdef DEV_NETMAP
7444c7070dbSScott Long #include <sys/selinfo.h>
7454c7070dbSScott Long #include <net/netmap.h>
7464c7070dbSScott Long #include <dev/netmap/netmap_kern.h>
7474c7070dbSScott Long 
7484c7070dbSScott Long MODULE_DEPEND(iflib, netmap, 1, 1, 1);
7494c7070dbSScott Long 
7502d873474SStephen Hurd static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);
7512d873474SStephen Hurd 
7524c7070dbSScott Long /*
7534c7070dbSScott Long  * device-specific sysctl variables:
7544c7070dbSScott Long  *
75591d546a0SConrad Meyer  * iflib_crcstrip: 1: strip CRC on rx frames (default), 0: keep it.
7564c7070dbSScott Long  *	During regular operations the CRC is stripped, but on some
7574c7070dbSScott Long  *	hardware reception of frames not multiple of 64 is slower,
7584c7070dbSScott Long  *	so using crcstrip=0 helps in benchmarks.
7594c7070dbSScott Long  *
76091d546a0SConrad Meyer  * iflib_rx_miss, iflib_rx_miss_bufs:
7614c7070dbSScott Long  *	count packets that might be missed due to lost interrupts.
7624c7070dbSScott Long  */
7634c7070dbSScott Long SYSCTL_DECL(_dev_netmap);
7644c7070dbSScott Long /*
7654c7070dbSScott Long  * The xl driver by default strips CRCs and we do not override it.
7664c7070dbSScott Long  */
7674c7070dbSScott Long 
7684c7070dbSScott Long int iflib_crcstrip = 1;
7694c7070dbSScott Long SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
7704c7070dbSScott Long     CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");
7714c7070dbSScott Long 
7724c7070dbSScott Long int iflib_rx_miss, iflib_rx_miss_bufs;
7734c7070dbSScott Long SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
7744c7070dbSScott Long     CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
77591d546a0SConrad Meyer SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
7764c7070dbSScott Long     CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");
7774c7070dbSScott Long 
7784c7070dbSScott Long /*
7794c7070dbSScott Long  * Register/unregister. We are already under netmap lock.
7804c7070dbSScott Long  * Only called on the first register or the last unregister.
7814c7070dbSScott Long  */
7824c7070dbSScott Long static int
7834c7070dbSScott Long iflib_netmap_register(struct netmap_adapter *na, int onoff)
7844c7070dbSScott Long {
7854c7070dbSScott Long 	struct ifnet *ifp = na->ifp;
7864c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
78795246abbSSean Bruno 	int status;
7884c7070dbSScott Long 
7894c7070dbSScott Long 	CTX_LOCK(ctx);
7904c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
7914c7070dbSScott Long 
7924c7070dbSScott Long 	/* Tell the stack that the interface is no longer active */
7934c7070dbSScott Long 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
7944c7070dbSScott Long 
7954c7070dbSScott Long 	if (!CTX_IS_VF(ctx))
7961248952aSSean Bruno 		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
7974c7070dbSScott Long 
7984c7070dbSScott Long 	/* enable or disable flags and callbacks in na and ifp */
7994c7070dbSScott Long 	if (onoff) {
8004c7070dbSScott Long 		nm_set_native_flags(na);
8014c7070dbSScott Long 	} else {
8024c7070dbSScott Long 		nm_clear_native_flags(na);
8034c7070dbSScott Long 	}
80495246abbSSean Bruno 	iflib_stop(ctx);
80595246abbSSean Bruno 	iflib_init_locked(ctx);
8061248952aSSean Bruno 	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* XXX why twice? */
80795246abbSSean Bruno 	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
80895246abbSSean Bruno 	if (status)
80995246abbSSean Bruno 		nm_clear_native_flags(na);
8104c7070dbSScott Long 	CTX_UNLOCK(ctx);
81195246abbSSean Bruno 	return (status);
8124c7070dbSScott Long }
8134c7070dbSScott Long 
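/*
 * Descriptive note: netmap_fl_refill() walks the netmap kring from nm_i up
 * to (but not including) the ring head, handing buffer addresses back to
 * the driver in IFLIB_MAX_RX_REFRESH-sized batches via isc_rxd_refill(),
 * and finally publishes the new producer index with isc_rxd_flush().
 */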
8142d873474SStephen Hurd static int
8152d873474SStephen Hurd netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
8162d873474SStephen Hurd {
8172d873474SStephen Hurd 	struct netmap_adapter *na = kring->na;
8182d873474SStephen Hurd 	u_int const lim = kring->nkr_num_slots - 1;
8192d873474SStephen Hurd 	u_int head = kring->rhead;
8202d873474SStephen Hurd 	struct netmap_ring *ring = kring->ring;
8212d873474SStephen Hurd 	bus_dmamap_t *map;
8222d873474SStephen Hurd 	struct if_rxd_update iru;
8232d873474SStephen Hurd 	if_ctx_t ctx = rxq->ifr_ctx;
8242d873474SStephen Hurd 	iflib_fl_t fl = &rxq->ifr_fl[0];
8252d873474SStephen Hurd 	uint32_t refill_pidx, nic_i;
82664e6fc13SStephen Hurd #if IFLIB_DEBUG_COUNTERS
82764e6fc13SStephen Hurd 	int rf_count = 0;
82864e6fc13SStephen Hurd #endif
8292d873474SStephen Hurd 
8302d873474SStephen Hurd 	if (nm_i == head && __predict_true(!init))
8312d873474SStephen Hurd 		return (0);
8322d873474SStephen Hurd 	iru_init(&iru, rxq, 0 /* flid */);
8332d873474SStephen Hurd 	map = fl->ifl_sds.ifsd_map;
8342d873474SStephen Hurd 	refill_pidx = netmap_idx_k2n(kring, nm_i);
8352d873474SStephen Hurd 	/*
8362d873474SStephen Hurd 	 * IMPORTANT: we must leave one free slot in the ring,
8372d873474SStephen Hurd 	 * so move head back by one unit
8382d873474SStephen Hurd 	 */
8392d873474SStephen Hurd 	head = nm_prev(head, lim);
8401ae4848cSMatt Macy 	nic_i = UINT_MAX;
84164e6fc13SStephen Hurd 	DBG_COUNTER_INC(fl_refills);
8422d873474SStephen Hurd 	while (nm_i != head) {
84364e6fc13SStephen Hurd #if IFLIB_DEBUG_COUNTERS
84464e6fc13SStephen Hurd 		if (++rf_count == 9)
84564e6fc13SStephen Hurd 			DBG_COUNTER_INC(fl_refills_large);
84664e6fc13SStephen Hurd #endif
8472d873474SStephen Hurd 		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
8482d873474SStephen Hurd 			struct netmap_slot *slot = &ring->slot[nm_i];
8492d873474SStephen Hurd 			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
8502d873474SStephen Hurd 			uint32_t nic_i_dma = refill_pidx;
8512d873474SStephen Hurd 			nic_i = netmap_idx_k2n(kring, nm_i);
8522d873474SStephen Hurd 
8532d873474SStephen Hurd 			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
8542d873474SStephen Hurd 
8552d873474SStephen Hurd 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
8562d873474SStephen Hurd 			        return netmap_ring_reinit(kring);
8572d873474SStephen Hurd 
8582d873474SStephen Hurd 			fl->ifl_vm_addrs[tmp_pidx] = addr;
85995dcf343SMarius Strobl 			if (__predict_false(init)) {
86095dcf343SMarius Strobl 				netmap_load_map(na, fl->ifl_buf_tag,
86195dcf343SMarius Strobl 				    map[nic_i], addr);
86295dcf343SMarius Strobl 			} else if (slot->flags & NS_BUF_CHANGED) {
8632d873474SStephen Hurd 				/* buffer has changed, reload map */
86495dcf343SMarius Strobl 				netmap_reload_map(na, fl->ifl_buf_tag,
86595dcf343SMarius Strobl 				    map[nic_i], addr);
8662d873474SStephen Hurd 			}
8672d873474SStephen Hurd 			slot->flags &= ~NS_BUF_CHANGED;
8682d873474SStephen Hurd 
8692d873474SStephen Hurd 			nm_i = nm_next(nm_i, lim);
8702d873474SStephen Hurd 			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
8712d873474SStephen Hurd 			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
8722d873474SStephen Hurd 				continue;
8732d873474SStephen Hurd 
8742d873474SStephen Hurd 			iru.iru_pidx = refill_pidx;
8752d873474SStephen Hurd 			iru.iru_count = tmp_pidx+1;
8762d873474SStephen Hurd 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
8772d873474SStephen Hurd 			refill_pidx = nic_i;
8782d873474SStephen Hurd 			for (int n = 0; n < iru.iru_count; n++) {
87995dcf343SMarius Strobl 				bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i_dma],
8802d873474SStephen Hurd 						BUS_DMASYNC_PREREAD);
8812d873474SStephen Hurd 				/* XXX - change this to not use the netmap func */
8822d873474SStephen Hurd 				nic_i_dma = nm_next(nic_i_dma, lim);
8832d873474SStephen Hurd 			}
8842d873474SStephen Hurd 		}
8852d873474SStephen Hurd 	}
8862d873474SStephen Hurd 	kring->nr_hwcur = head;
8872d873474SStephen Hurd 
8882d873474SStephen Hurd 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
8892d873474SStephen Hurd 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
89064e6fc13SStephen Hurd 	if (__predict_true(nic_i != UINT_MAX)) {
8912d873474SStephen Hurd 		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
89264e6fc13SStephen Hurd 		DBG_COUNTER_INC(rxd_flush);
89364e6fc13SStephen Hurd 	}
8942d873474SStephen Hurd 	return (0);
8952d873474SStephen Hurd }
8962d873474SStephen Hurd 
8974c7070dbSScott Long /*
8984c7070dbSScott Long  * Reconcile kernel and user view of the transmit ring.
8994c7070dbSScott Long  *
9004c7070dbSScott Long  * All information is in the kring.
9014c7070dbSScott Long  * Userspace wants to send packets up to the one before kring->rhead,
9024c7070dbSScott Long  * kernel knows kring->nr_hwcur is the first unsent packet.
9034c7070dbSScott Long  *
9044c7070dbSScott Long  * Here we push packets out (as many as possible), and possibly
9054c7070dbSScott Long  * reclaim buffers from previously completed transmission.
9064c7070dbSScott Long  *
9074c7070dbSScott Long  * The caller (netmap) guarantees that there is only one instance
9084c7070dbSScott Long  * running at any time. Any interference with other driver
9094c7070dbSScott Long  * methods should be handled by the individual drivers.
9104c7070dbSScott Long  */
9114c7070dbSScott Long static int
9124c7070dbSScott Long iflib_netmap_txsync(struct netmap_kring *kring, int flags)
9134c7070dbSScott Long {
9144c7070dbSScott Long 	struct netmap_adapter *na = kring->na;
9154c7070dbSScott Long 	struct ifnet *ifp = na->ifp;
9164c7070dbSScott Long 	struct netmap_ring *ring = kring->ring;
917dd7fbcf1SStephen Hurd 	u_int nm_i;	/* index into the netmap kring */
9184c7070dbSScott Long 	u_int nic_i;	/* index into the NIC ring */
9194c7070dbSScott Long 	u_int n;
9204c7070dbSScott Long 	u_int const lim = kring->nkr_num_slots - 1;
9214c7070dbSScott Long 	u_int const head = kring->rhead;
9224c7070dbSScott Long 	struct if_pkt_info pi;
9234c7070dbSScott Long 
9244c7070dbSScott Long 	/*
9254c7070dbSScott Long 	 * interrupts on every tx packet are expensive so request
9264c7070dbSScott Long 	 * them every half ring, or where NS_REPORT is set
9274c7070dbSScott Long 	 */
9284c7070dbSScott Long 	u_int report_frequency = kring->nkr_num_slots >> 1;
9294c7070dbSScott Long 	/* device-specific */
9304c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
9314c7070dbSScott Long 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
9324c7070dbSScott Long 
93395dcf343SMarius Strobl 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
9344c7070dbSScott Long 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9354c7070dbSScott Long 
9364c7070dbSScott Long 	/*
9374c7070dbSScott Long 	 * First part: process new packets to send.
938dd7fbcf1SStephen Hurd 	 * nm_i is the current index in the netmap kring,
9394c7070dbSScott Long 	 * nic_i is the corresponding index in the NIC ring.
9404c7070dbSScott Long 	 *
9414c7070dbSScott Long 	 * If we have packets to send (nm_i != head)
9424c7070dbSScott Long 	 * iterate over the netmap ring, fetch length and update
9434c7070dbSScott Long 	 * the corresponding slot in the NIC ring. Some drivers also
9444c7070dbSScott Long 	 * need to update the buffer's physical address in the NIC slot
9454c7070dbSScott Long  * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
9464c7070dbSScott Long 	 *
9474c7070dbSScott Long  * The netmap_reload_map() call is especially expensive,
9484c7070dbSScott Long  * even when (as in this case) the tag is 0, so only do it
9494c7070dbSScott Long  * when the buffer has actually changed.
9504c7070dbSScott Long 	 *
9514c7070dbSScott Long 	 * If possible do not set the report/intr bit on all slots,
9524c7070dbSScott Long 	 * but only a few times per ring or when NS_REPORT is set.
9534c7070dbSScott Long 	 *
9544c7070dbSScott Long 	 * Finally, on 10G and faster drivers, it might be useful
9554c7070dbSScott Long 	 * to prefetch the next slot and txr entry.
9564c7070dbSScott Long 	 */
9574c7070dbSScott Long 
958dd7fbcf1SStephen Hurd 	nm_i = kring->nr_hwcur;
9595ee36c68SStephen Hurd 	if (nm_i != head) {	/* we have new packets to send */
96095246abbSSean Bruno 		pkt_info_zero(&pi);
96195246abbSSean Bruno 		pi.ipi_segs = txq->ift_segs;
96295246abbSSean Bruno 		pi.ipi_qsidx = kring->ring_id;
9634c7070dbSScott Long 		nic_i = netmap_idx_k2n(kring, nm_i);
9644c7070dbSScott Long 
9654c7070dbSScott Long 		__builtin_prefetch(&ring->slot[nm_i]);
9664c7070dbSScott Long 		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
9674c7070dbSScott Long 		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
9684c7070dbSScott Long 
9694c7070dbSScott Long 		for (n = 0; nm_i != head; n++) {
9704c7070dbSScott Long 			struct netmap_slot *slot = &ring->slot[nm_i];
9714c7070dbSScott Long 			u_int len = slot->len;
9720a1b74a3SSean Bruno 			uint64_t paddr;
9734c7070dbSScott Long 			void *addr = PNMB(na, slot, &paddr);
9744c7070dbSScott Long 			int flags = (slot->flags & NS_REPORT ||
9754c7070dbSScott Long 				nic_i == 0 || nic_i == report_frequency) ?
9764c7070dbSScott Long 				IPI_TX_INTR : 0;
9774c7070dbSScott Long 
9784c7070dbSScott Long 			/* device-specific */
97995246abbSSean Bruno 			pi.ipi_len = len;
98095246abbSSean Bruno 			pi.ipi_segs[0].ds_addr = paddr;
98195246abbSSean Bruno 			pi.ipi_segs[0].ds_len = len;
98295246abbSSean Bruno 			pi.ipi_nsegs = 1;
98395246abbSSean Bruno 			pi.ipi_ndescs = 0;
9844c7070dbSScott Long 			pi.ipi_pidx = nic_i;
9854c7070dbSScott Long 			pi.ipi_flags = flags;
9864c7070dbSScott Long 
9874c7070dbSScott Long 			/* Fill the slot in the NIC ring. */
9884c7070dbSScott Long 			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
98964e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_encap);
9904c7070dbSScott Long 
9914c7070dbSScott Long 			/* prefetch for next round */
9924c7070dbSScott Long 			__builtin_prefetch(&ring->slot[nm_i + 1]);
9934c7070dbSScott Long 			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
9944c7070dbSScott Long 			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
9954c7070dbSScott Long 
9964c7070dbSScott Long 			NM_CHECK_ADDR_LEN(na, addr, len);
9974c7070dbSScott Long 
9984c7070dbSScott Long 			if (slot->flags & NS_BUF_CHANGED) {
9994c7070dbSScott Long 				/* buffer has changed, reload map */
1000bfce461eSMarius Strobl 				netmap_reload_map(na, txq->ift_buf_tag,
1001bfce461eSMarius Strobl 				    txq->ift_sds.ifsd_map[nic_i], addr);
10024c7070dbSScott Long 			}
10034c7070dbSScott Long 			/* make sure changes to the buffer are synced */
100495dcf343SMarius Strobl 			bus_dmamap_sync(txq->ift_buf_tag,
100595dcf343SMarius Strobl 			    txq->ift_sds.ifsd_map[nic_i],
10064c7070dbSScott Long 			    BUS_DMASYNC_PREWRITE);
100795dcf343SMarius Strobl 
100895246abbSSean Bruno 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
10094c7070dbSScott Long 			nm_i = nm_next(nm_i, lim);
10104c7070dbSScott Long 			nic_i = nm_next(nic_i, lim);
10114c7070dbSScott Long 		}
1012dd7fbcf1SStephen Hurd 		kring->nr_hwcur = nm_i;
10134c7070dbSScott Long 
10144c7070dbSScott Long 		/* synchronize the NIC ring */
101595dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
10164c7070dbSScott Long 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10174c7070dbSScott Long 
10184c7070dbSScott Long 		/* (re)start the tx unit up to slot nic_i (excluded) */
10194c7070dbSScott Long 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
10204c7070dbSScott Long 	}
10214c7070dbSScott Long 
10224c7070dbSScott Long 	/*
10234c7070dbSScott Long 	 * Second part: reclaim buffers for completed transmissions.
10245ee36c68SStephen Hurd 	 *
10255ee36c68SStephen Hurd 	 * If there are unclaimed buffers, attempt to reclaim them.
10265ee36c68SStephen Hurd 	 * If none are reclaimed, and TX IRQs are not in use, do an initial
10275ee36c68SStephen Hurd 	 * minimal delay, then trigger the tx handler which will spin in the
10285ee36c68SStephen Hurd 	 * group task queue.
10294c7070dbSScott Long 	 */
1030dd7fbcf1SStephen Hurd 	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
10314c7070dbSScott Long 		if (iflib_tx_credits_update(ctx, txq)) {
10324c7070dbSScott Long 			/* some tx completed, increment avail */
10334c7070dbSScott Long 			nic_i = txq->ift_cidx_processed;
10344c7070dbSScott Long 			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
10354c7070dbSScott Long 		}
10365ee36c68SStephen Hurd 	}
1037dd7fbcf1SStephen Hurd 	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
1038dd7fbcf1SStephen Hurd 		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1039dd7fbcf1SStephen Hurd 			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
1040dd7fbcf1SStephen Hurd 			    iflib_timer, txq, txq->ift_timer.c_cpu);
10415ee36c68SStephen Hurd 		}
10424c7070dbSScott Long 	return (0);
10434c7070dbSScott Long }
10444c7070dbSScott Long 
10454c7070dbSScott Long /*
10464c7070dbSScott Long  * Reconcile kernel and user view of the receive ring.
10474c7070dbSScott Long  * Same as for the txsync, this routine must be efficient.
10484c7070dbSScott Long  * The caller guarantees a single invocation, but races against
10494c7070dbSScott Long  * the rest of the driver should be handled here.
10504c7070dbSScott Long  *
10514c7070dbSScott Long  * On call, kring->rhead is the first packet that userspace wants
10524c7070dbSScott Long  * to keep, and kring->rcur is the wakeup point.
10534c7070dbSScott Long  * The kernel has previously reported packets up to kring->rtail.
10544c7070dbSScott Long  *
10554c7070dbSScott Long  * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
10564c7070dbSScott Long  * of whether or not we received an interrupt.
10574c7070dbSScott Long  */
10584c7070dbSScott Long static int
10594c7070dbSScott Long iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
10604c7070dbSScott Long {
10614c7070dbSScott Long 	struct netmap_adapter *na = kring->na;
10624c7070dbSScott Long 	struct netmap_ring *ring = kring->ring;
106395dcf343SMarius Strobl 	iflib_fl_t fl;
106495246abbSSean Bruno 	uint32_t nm_i;	/* index into the netmap ring */
10652d873474SStephen Hurd 	uint32_t nic_i;	/* index into the NIC ring */
10664c7070dbSScott Long 	u_int i, n;
10674c7070dbSScott Long 	u_int const lim = kring->nkr_num_slots - 1;
1068dd7fbcf1SStephen Hurd 	u_int const head = kring->rhead;
10694c7070dbSScott Long 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1070ab2e3f79SStephen Hurd 	struct if_rxd_info ri;
107195246abbSSean Bruno 
107295246abbSSean Bruno 	struct ifnet *ifp = na->ifp;
10734c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
10744c7070dbSScott Long 	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
10754c7070dbSScott Long 	if (head > lim)
10764c7070dbSScott Long 		return netmap_ring_reinit(kring);
10774c7070dbSScott Long 
107895dcf343SMarius Strobl 	/*
107995dcf343SMarius Strobl 	 * XXX netmap_fl_refill() only ever (re)fills free list 0 so far.
108095dcf343SMarius Strobl 	 */
108195dcf343SMarius Strobl 
108295246abbSSean Bruno 	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
108395dcf343SMarius Strobl 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
10844c7070dbSScott Long 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
108595246abbSSean Bruno 	}
108695dcf343SMarius Strobl 
10874c7070dbSScott Long 	/*
10884c7070dbSScott Long 	 * First part: import newly received packets.
10894c7070dbSScott Long 	 *
10904c7070dbSScott Long 	 * nm_i is the index of the next free slot in the netmap ring,
10914c7070dbSScott Long 	 * nic_i is the index of the next received packet in the NIC ring,
10924c7070dbSScott Long 	 * and they may differ in case if_init() has been called while
10934c7070dbSScott Long 	 * in netmap mode. For the receive ring we have
10944c7070dbSScott Long 	 *
10954c7070dbSScott Long 	 *	nic_i = rxr->next_check;
10964c7070dbSScott Long 	 *	nm_i = kring->nr_hwtail (previous)
10974c7070dbSScott Long 	 * and
10984c7070dbSScott Long 	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
10994c7070dbSScott Long 	 *
11004c7070dbSScott Long 	 * rxr->next_check is set to 0 on a ring reinit
11014c7070dbSScott Long 	 */
11024c7070dbSScott Long 	if (netmap_no_pendintr || force_update) {
11034c7070dbSScott Long 		int crclen = iflib_crcstrip ? 0 : 4;
11044c7070dbSScott Long 		int error, avail;
11054c7070dbSScott Long 
11062d873474SStephen Hurd 		for (i = 0; i < rxq->ifr_nfl; i++) {
11072d873474SStephen Hurd 			fl = &rxq->ifr_fl[i];
11084c7070dbSScott Long 			nic_i = fl->ifl_cidx;
11094c7070dbSScott Long 			nm_i = netmap_idx_n2k(kring, nic_i);
111095dcf343SMarius Strobl 			avail = ctx->isc_rxd_available(ctx->ifc_softc,
111195dcf343SMarius Strobl 			    rxq->ifr_id, nic_i, USHRT_MAX);
11124c7070dbSScott Long 			for (n = 0; avail > 0; n++, avail--) {
1113ab2e3f79SStephen Hurd 				rxd_info_zero(&ri);
1114ab2e3f79SStephen Hurd 				ri.iri_frags = rxq->ifr_frags;
1115ab2e3f79SStephen Hurd 				ri.iri_qsidx = kring->ring_id;
1116ab2e3f79SStephen Hurd 				ri.iri_ifp = ctx->ifc_ifp;
1117ab2e3f79SStephen Hurd 				ri.iri_cidx = nic_i;
111895246abbSSean Bruno 
1119ab2e3f79SStephen Hurd 				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1120ab2e3f79SStephen Hurd 				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
11217cb7c6e3SNavdeep Parhar 				ring->slot[nm_i].flags = 0;
112295dcf343SMarius Strobl 				bus_dmamap_sync(fl->ifl_buf_tag,
1123e035717eSSean Bruno 				    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
11244c7070dbSScott Long 				nm_i = nm_next(nm_i, lim);
11254c7070dbSScott Long 				nic_i = nm_next(nic_i, lim);
11264c7070dbSScott Long 			}
11274c7070dbSScott Long 			if (n) { /* update the state variables */
11284c7070dbSScott Long 				if (netmap_no_pendintr && !force_update) {
11294c7070dbSScott Long 					/* diagnostics */
11304c7070dbSScott Long 					iflib_rx_miss ++;
11314c7070dbSScott Long 					iflib_rx_miss_bufs += n;
11324c7070dbSScott Long 				}
11334c7070dbSScott Long 				fl->ifl_cidx = nic_i;
1134dd7fbcf1SStephen Hurd 				kring->nr_hwtail = nm_i;
11354c7070dbSScott Long 			}
11364c7070dbSScott Long 			kring->nr_kflags &= ~NKR_PENDINTR;
11374c7070dbSScott Long 		}
11384c7070dbSScott Long 	}
11394c7070dbSScott Long 	/*
11404c7070dbSScott Long 	 * Second part: skip past packets that userspace has released.
11414c7070dbSScott Long 	 * (kring->nr_hwcur to head excluded),
11424c7070dbSScott Long 	 * and make the buffers available for reception.
11434c7070dbSScott Long 	 * As usual nm_i is the index in the netmap ring,
11444c7070dbSScott Long 	 * nic_i is the index in the NIC ring, and
11454c7070dbSScott Long 	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
11464c7070dbSScott Long 	 */
11474c7070dbSScott Long 	/* XXX not sure how this will work with multiple free lists */
1148dd7fbcf1SStephen Hurd 	nm_i = kring->nr_hwcur;
114995246abbSSean Bruno 
11502d873474SStephen Hurd 	return (netmap_fl_refill(rxq, kring, nm_i, false));
11514c7070dbSScott Long }
11524c7070dbSScott Long 
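/*
 * netmap nm_intr callback: enable or disable the device's interrupts
 * under the context lock when netmap switches between interrupt-driven
 * and polled operation.
 */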
115395246abbSSean Bruno static void
115495246abbSSean Bruno iflib_netmap_intr(struct netmap_adapter *na, int onoff)
115595246abbSSean Bruno {
115695246abbSSean Bruno 	struct ifnet *ifp = na->ifp;
115795246abbSSean Bruno 	if_ctx_t ctx = ifp->if_softc;
115895246abbSSean Bruno 
1159ab2e3f79SStephen Hurd 	CTX_LOCK(ctx);
116095246abbSSean Bruno 	if (onoff) {
116195246abbSSean Bruno 		IFDI_INTR_ENABLE(ctx);
116295246abbSSean Bruno 	} else {
116395246abbSSean Bruno 		IFDI_INTR_DISABLE(ctx);
116495246abbSSean Bruno 	}
1165ab2e3f79SStephen Hurd 	CTX_UNLOCK(ctx);
116695246abbSSean Bruno }
116795246abbSSean Bruno 
116895246abbSSean Bruno 
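/*
 * Populate a netmap_adapter with the interface's descriptor counts,
 * queue counts and txsync/rxsync/register/intr callbacks, then register
 * it with the netmap subsystem.
 */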
11694c7070dbSScott Long static int
11704c7070dbSScott Long iflib_netmap_attach(if_ctx_t ctx)
11714c7070dbSScott Long {
11724c7070dbSScott Long 	struct netmap_adapter na;
117323ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
11744c7070dbSScott Long 
11754c7070dbSScott Long 	bzero(&na, sizeof(na));
11764c7070dbSScott Long 
11774c7070dbSScott Long 	na.ifp = ctx->ifc_ifp;
11784c7070dbSScott Long 	na.na_flags = NAF_BDG_MAYSLEEP;
11794c7070dbSScott Long 	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
11804c7070dbSScott Long 	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
11814c7070dbSScott Long 
118223ac9029SStephen Hurd 	na.num_tx_desc = scctx->isc_ntxd[0];
118323ac9029SStephen Hurd 	na.num_rx_desc = scctx->isc_nrxd[0];
11844c7070dbSScott Long 	na.nm_txsync = iflib_netmap_txsync;
11854c7070dbSScott Long 	na.nm_rxsync = iflib_netmap_rxsync;
11864c7070dbSScott Long 	na.nm_register = iflib_netmap_register;
118795246abbSSean Bruno 	na.nm_intr = iflib_netmap_intr;
11884c7070dbSScott Long 	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
11894c7070dbSScott Long 	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
11904c7070dbSScott Long 	return (netmap_attach(&na));
11914c7070dbSScott Long }
11924c7070dbSScott Long 
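/*
 * Called after a netmap register/reset: load the netmap-provided TX
 * buffers into the queue's DMA maps so the slots are usable in netmap
 * mode.
 */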
11934c7070dbSScott Long static void
11944c7070dbSScott Long iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
11954c7070dbSScott Long {
11964c7070dbSScott Long 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
11974c7070dbSScott Long 	struct netmap_slot *slot;
11984c7070dbSScott Long 
11994c7070dbSScott Long 	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1200e099b90bSPedro F. Giffuni 	if (slot == NULL)
12014c7070dbSScott Long 		return;
120223ac9029SStephen Hurd 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
12034c7070dbSScott Long 
12044c7070dbSScott Long 		/*
12054c7070dbSScott Long 		 * In netmap mode, set the map for the packet buffer.
12064c7070dbSScott Long 		 * NOTE: Some drivers (not this one) also need to set
12074c7070dbSScott Long 		 * the physical buffer address in the NIC ring.
12084c7070dbSScott Long 		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
12094c7070dbSScott Long 		 * netmap slot index, si
12104c7070dbSScott Long 		 */
12112ff91c17SVincenzo Maffione 		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
1212bfce461eSMarius Strobl 		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
1213bfce461eSMarius Strobl 		    NMB(na, slot + si));
12144c7070dbSScott Long 	}
12154c7070dbSScott Long }
12162d873474SStephen Hurd 
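/*
 * Called after a netmap register/reset: prime the RX free list with
 * netmap buffers, starting from netmap slot 0.
 */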
12174c7070dbSScott Long static void
12184c7070dbSScott Long iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
12194c7070dbSScott Long {
12204c7070dbSScott Long 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
12212ff91c17SVincenzo Maffione 	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
12224c7070dbSScott Long 	struct netmap_slot *slot;
12232d873474SStephen Hurd 	uint32_t nm_i;
12244c7070dbSScott Long 
12254c7070dbSScott Long 	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1226e099b90bSPedro F. Giffuni 	if (slot == NULL)
12274c7070dbSScott Long 		return;
12282d873474SStephen Hurd 	nm_i = netmap_idx_n2k(kring, 0);
12292d873474SStephen Hurd 	netmap_fl_refill(rxq, kring, nm_i, true);
12304c7070dbSScott Long }
12314c7070dbSScott Long 
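/*
 * Invoked from the TX queue timer path while in netmap mode.  If the
 * kring still has outstanding slots, sync the descriptor ring, report
 * newly completed slots to netmap and, when TX interrupts are not in
 * use, ask the caller to re-arm the timer after roughly a millisecond.
 */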
1232dd7fbcf1SStephen Hurd static void
123395dcf343SMarius Strobl iflib_netmap_timer_adjust(if_ctx_t ctx, iflib_txq_t txq, uint32_t *reset_on)
1234dd7fbcf1SStephen Hurd {
1235dd7fbcf1SStephen Hurd 	struct netmap_kring *kring;
123695dcf343SMarius Strobl 	uint16_t txqid;
1237dd7fbcf1SStephen Hurd 
123895dcf343SMarius Strobl 	txqid = txq->ift_id;
1239dd7fbcf1SStephen Hurd 	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];
1240dd7fbcf1SStephen Hurd 
1241dd7fbcf1SStephen Hurd 	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
124295dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
124395dcf343SMarius Strobl 		    BUS_DMASYNC_POSTREAD);
1244dd7fbcf1SStephen Hurd 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
1245dd7fbcf1SStephen Hurd 			netmap_tx_irq(ctx->ifc_ifp, txqid);
1246dd7fbcf1SStephen Hurd 		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
1247dd7fbcf1SStephen Hurd 			if (hz < 2000)
1248dd7fbcf1SStephen Hurd 				*reset_on = 1;
1249dd7fbcf1SStephen Hurd 			else
1250dd7fbcf1SStephen Hurd 				*reset_on = hz / 1000;
1251dd7fbcf1SStephen Hurd 		}
1252dd7fbcf1SStephen Hurd 	}
1253dd7fbcf1SStephen Hurd }
1254dd7fbcf1SStephen Hurd 
12554c7070dbSScott Long #define iflib_netmap_detach(ifp) netmap_detach(ifp)
12564c7070dbSScott Long 
12574c7070dbSScott Long #else
12584c7070dbSScott Long #define iflib_netmap_txq_init(ctx, txq)
12594c7070dbSScott Long #define iflib_netmap_rxq_init(ctx, rxq)
12604c7070dbSScott Long #define iflib_netmap_detach(ifp)
12614c7070dbSScott Long 
12624c7070dbSScott Long #define iflib_netmap_attach(ctx) (0)
12634c7070dbSScott Long #define netmap_rx_irq(ifp, qid, budget) (0)
126495246abbSSean Bruno #define netmap_tx_irq(ifp, qid) do {} while (0)
126595dcf343SMarius Strobl #define iflib_netmap_timer_adjust(ctx, txq, reset_on)
12664c7070dbSScott Long 
12674c7070dbSScott Long #endif
12684c7070dbSScott Long 
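/*
 * Cache prefetch helpers: prefetch() touches a single cache line while
 * prefetch2cachelines() also touches the following line when cache
 * lines are smaller than 128 bytes.  They compile to nothing on
 * architectures without an implementation below.
 */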
12694c7070dbSScott Long #if defined(__i386__) || defined(__amd64__)
12704c7070dbSScott Long static __inline void
12714c7070dbSScott Long prefetch(void *x)
12724c7070dbSScott Long {
12734c7070dbSScott Long 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
12744c7070dbSScott Long }
12753429c02fSStephen Hurd static __inline void
12763429c02fSStephen Hurd prefetch2cachelines(void *x)
12773429c02fSStephen Hurd {
12783429c02fSStephen Hurd 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
12793429c02fSStephen Hurd #if (CACHE_LINE_SIZE < 128)
12803429c02fSStephen Hurd 	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
12813429c02fSStephen Hurd #endif
12823429c02fSStephen Hurd }
12834c7070dbSScott Long #else
12844c7070dbSScott Long #define prefetch(x)
12853429c02fSStephen Hurd #define prefetch2cachelines(x)
12864c7070dbSScott Long #endif
12874c7070dbSScott Long 
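/*
 * Point an if_rxd_update at the scratch arrays (bus addresses, virtual
 * addresses and descriptor indices) of the given free list so that it
 * can be handed to the driver's isc_rxd_refill routine.
 */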
12884c7070dbSScott Long static void
128910e0d938SStephen Hurd iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
129010e0d938SStephen Hurd {
129110e0d938SStephen Hurd 	iflib_fl_t fl;
129210e0d938SStephen Hurd 
129310e0d938SStephen Hurd 	fl = &rxq->ifr_fl[flid];
129410e0d938SStephen Hurd 	iru->iru_paddrs = fl->ifl_bus_addrs;
129510e0d938SStephen Hurd 	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
129610e0d938SStephen Hurd 	iru->iru_idxs = fl->ifl_rxd_idxs;
129710e0d938SStephen Hurd 	iru->iru_qsidx = rxq->ifr_id;
129810e0d938SStephen Hurd 	iru->iru_buf_size = fl->ifl_buf_size;
129910e0d938SStephen Hurd 	iru->iru_flidx = fl->ifl_id;
130010e0d938SStephen Hurd }
130110e0d938SStephen Hurd 
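/* bus_dmamap_load() callback: record the bus address of the single segment. */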
130210e0d938SStephen Hurd static void
13034c7070dbSScott Long _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
13044c7070dbSScott Long {
13054c7070dbSScott Long 	if (err)
13064c7070dbSScott Long 		return;
13074c7070dbSScott Long 	*(bus_addr_t *) arg = segs[0].ds_addr;
13084c7070dbSScott Long }
13094c7070dbSScott Long 
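/*
 * Allocate a DMA-coherent, zeroed region of the requested size and
 * alignment as a single segment, load it, and return the tag, kernel
 * virtual address and bus address through *dma.  All resources are
 * released again if any step fails.
 */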
13104c7070dbSScott Long int
13118f82136aSPatrick Kelsey iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
13124c7070dbSScott Long {
13134c7070dbSScott Long 	int err;
13144c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
13154c7070dbSScott Long 
13164c7070dbSScott Long 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
13178f82136aSPatrick Kelsey 				align, 0,		/* alignment, bounds */
13184c7070dbSScott Long 				BUS_SPACE_MAXADDR,	/* lowaddr */
13194c7070dbSScott Long 				BUS_SPACE_MAXADDR,	/* highaddr */
13204c7070dbSScott Long 				NULL, NULL,		/* filter, filterarg */
13214c7070dbSScott Long 				size,			/* maxsize */
13224c7070dbSScott Long 				1,			/* nsegments */
13234c7070dbSScott Long 				size,			/* maxsegsize */
13244c7070dbSScott Long 				BUS_DMA_ALLOCNOW,	/* flags */
13254c7070dbSScott Long 				NULL,			/* lockfunc */
13264c7070dbSScott Long 				NULL,			/* lockarg */
13274c7070dbSScott Long 				&dma->idi_tag);
13284c7070dbSScott Long 	if (err) {
13294c7070dbSScott Long 		device_printf(dev,
13304c7070dbSScott Long 		    "%s: bus_dma_tag_create failed: %d\n",
13314c7070dbSScott Long 		    __func__, err);
13324c7070dbSScott Long 		goto fail_0;
13334c7070dbSScott Long 	}
13344c7070dbSScott Long 
13354c7070dbSScott Long 	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
13364c7070dbSScott Long 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
13374c7070dbSScott Long 	if (err) {
13384c7070dbSScott Long 		device_printf(dev,
13394c7070dbSScott Long 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
13404c7070dbSScott Long 		    __func__, (uintmax_t)size, err);
13414c7070dbSScott Long 		goto fail_1;
13424c7070dbSScott Long 	}
13434c7070dbSScott Long 
13444c7070dbSScott Long 	dma->idi_paddr = IF_BAD_DMA;
13454c7070dbSScott Long 	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
13464c7070dbSScott Long 	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
13474c7070dbSScott Long 	if (err || dma->idi_paddr == IF_BAD_DMA) {
13484c7070dbSScott Long 		device_printf(dev,
13494c7070dbSScott Long 		    "%s: bus_dmamap_load failed: %d\n",
13504c7070dbSScott Long 		    __func__, err);
13514c7070dbSScott Long 		goto fail_2;
13524c7070dbSScott Long 	}
13534c7070dbSScott Long 
13544c7070dbSScott Long 	dma->idi_size = size;
13554c7070dbSScott Long 	return (0);
13564c7070dbSScott Long 
13574c7070dbSScott Long fail_2:
13584c7070dbSScott Long 	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
13594c7070dbSScott Long fail_1:
13604c7070dbSScott Long 	bus_dma_tag_destroy(dma->idi_tag);
13614c7070dbSScott Long fail_0:
13624c7070dbSScott Long 	dma->idi_tag = NULL;
13634c7070dbSScott Long 
13644c7070dbSScott Long 	return (err);
13654c7070dbSScott Long }
13664c7070dbSScott Long 
13674c7070dbSScott Long int
13688f82136aSPatrick Kelsey iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
13698f82136aSPatrick Kelsey {
13708f82136aSPatrick Kelsey 	if_shared_ctx_t sctx = ctx->ifc_sctx;
13718f82136aSPatrick Kelsey 
13728f82136aSPatrick Kelsey 	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
13738f82136aSPatrick Kelsey 
13748f82136aSPatrick Kelsey 	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
13758f82136aSPatrick Kelsey }
13768f82136aSPatrick Kelsey 
13778f82136aSPatrick Kelsey int
13784c7070dbSScott Long iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
13794c7070dbSScott Long {
13804c7070dbSScott Long 	int i, err;
13814c7070dbSScott Long 	iflib_dma_info_t *dmaiter;
13824c7070dbSScott Long 
13834c7070dbSScott Long 	dmaiter = dmalist;
13844c7070dbSScott Long 	for (i = 0; i < count; i++, dmaiter++) {
13854c7070dbSScott Long 		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
13864c7070dbSScott Long 			break;
13874c7070dbSScott Long 	}
13884c7070dbSScott Long 	if (err)
13894c7070dbSScott Long 		iflib_dma_free_multi(dmalist, i);
13904c7070dbSScott Long 	return (err);
13914c7070dbSScott Long }
13924c7070dbSScott Long 
13934c7070dbSScott Long void
13944c7070dbSScott Long iflib_dma_free(iflib_dma_info_t dma)
13954c7070dbSScott Long {
13964c7070dbSScott Long 	if (dma->idi_tag == NULL)
13974c7070dbSScott Long 		return;
13984c7070dbSScott Long 	if (dma->idi_paddr != IF_BAD_DMA) {
13994c7070dbSScott Long 		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
14004c7070dbSScott Long 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
14014c7070dbSScott Long 		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
14024c7070dbSScott Long 		dma->idi_paddr = IF_BAD_DMA;
14034c7070dbSScott Long 	}
14044c7070dbSScott Long 	if (dma->idi_vaddr != NULL) {
14054c7070dbSScott Long 		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
14064c7070dbSScott Long 		dma->idi_vaddr = NULL;
14074c7070dbSScott Long 	}
14084c7070dbSScott Long 	bus_dma_tag_destroy(dma->idi_tag);
14094c7070dbSScott Long 	dma->idi_tag = NULL;
14104c7070dbSScott Long }
14114c7070dbSScott Long 
14124c7070dbSScott Long void
14134c7070dbSScott Long iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
14144c7070dbSScott Long {
14154c7070dbSScott Long 	int i;
14164c7070dbSScott Long 	iflib_dma_info_t *dmaiter = dmalist;
14174c7070dbSScott Long 
14184c7070dbSScott Long 	for (i = 0; i < count; i++, dmaiter++)
14194c7070dbSScott Long 		iflib_dma_free(*dmaiter);
14204c7070dbSScott Long }
14214c7070dbSScott Long 
1422bd84f700SSean Bruno #ifdef EARLY_AP_STARTUP
1423bd84f700SSean Bruno static const int iflib_started = 1;
1424bd84f700SSean Bruno #else
1425bd84f700SSean Bruno /*
1426bd84f700SSean Bruno  * We used to abuse the smp_started flag to decide if the queues have been
1427bd84f700SSean Bruno  * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
1428bd84f700SSean Bruno  * That gave bad races, since the SYSINIT() runs strictly after smp_started
1429bd84f700SSean Bruno  * is set.  Run a SYSINIT() strictly after that to just set a usable
1430bd84f700SSean Bruno  * completion flag.
1431bd84f700SSean Bruno  */
1432bd84f700SSean Bruno 
1433bd84f700SSean Bruno static int iflib_started;
1434bd84f700SSean Bruno 
1435bd84f700SSean Bruno static void
1436bd84f700SSean Bruno iflib_record_started(void *arg)
1437bd84f700SSean Bruno {
1438bd84f700SSean Bruno 	iflib_started = 1;
1439bd84f700SSean Bruno }
1440bd84f700SSean Bruno 
1441bd84f700SSean Bruno SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
1442bd84f700SSean Bruno 	iflib_record_started, NULL);
1443bd84f700SSean Bruno #endif
1444bd84f700SSean Bruno 
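/*
 * Fast interrupt filter: run the driver-supplied filter, if any, and
 * defer the remaining work to the associated group task.
 */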
14454c7070dbSScott Long static int
14464c7070dbSScott Long iflib_fast_intr(void *arg)
14474c7070dbSScott Long {
14484c7070dbSScott Long 	iflib_filter_info_t info = arg;
14494c7070dbSScott Long 	struct grouptask *gtask = info->ifi_task;
1450ca62461bSStephen Hurd 	int result;
1451ca62461bSStephen Hurd 
145295246abbSSean Bruno 	if (!iflib_started)
1453ca62461bSStephen Hurd 		return (FILTER_STRAY);
145495246abbSSean Bruno 
145595246abbSSean Bruno 	DBG_COUNTER_INC(fast_intrs);
1456ca62461bSStephen Hurd 	if (info->ifi_filter != NULL) {
1457ca62461bSStephen Hurd 		result = info->ifi_filter(info->ifi_filter_arg);
1458ca62461bSStephen Hurd 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1459ca62461bSStephen Hurd 			return (result);
1460ca62461bSStephen Hurd 	}
146195246abbSSean Bruno 
146295246abbSSean Bruno 	GROUPTASK_ENQUEUE(gtask);
146395246abbSSean Bruno 	return (FILTER_HANDLED);
146495246abbSSean Bruno }
146595246abbSSean Bruno 
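/*
 * Fast interrupt filter for RX vectors that also service one or more TX
 * queues.  TX credits are updated for every associated TX queue; queues
 * with completed work get their tasks enqueued while idle ones have
 * their interrupts re-enabled.  The RX side is then either handed to
 * the RX group task or re-enabled if no descriptors are available.
 */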
146695246abbSSean Bruno static int
146795246abbSSean Bruno iflib_fast_intr_rxtx(void *arg)
146895246abbSSean Bruno {
146995246abbSSean Bruno 	iflib_filter_info_t info = arg;
147095246abbSSean Bruno 	struct grouptask *gtask = info->ifi_task;
147195dcf343SMarius Strobl 	if_ctx_t ctx;
147295246abbSSean Bruno 	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
147395dcf343SMarius Strobl 	iflib_txq_t txq;
147495dcf343SMarius Strobl 	void *sc;
1475ca62461bSStephen Hurd 	int i, cidx, result;
147695dcf343SMarius Strobl 	qidx_t txqid;
147795246abbSSean Bruno 
147895246abbSSean Bruno 	if (!iflib_started)
1479ca62461bSStephen Hurd 		return (FILTER_STRAY);
148095246abbSSean Bruno 
148195246abbSSean Bruno 	DBG_COUNTER_INC(fast_intrs);
1482ca62461bSStephen Hurd 	if (info->ifi_filter != NULL) {
1483ca62461bSStephen Hurd 		result = info->ifi_filter(info->ifi_filter_arg);
1484ca62461bSStephen Hurd 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1485ca62461bSStephen Hurd 			return (result);
1486ca62461bSStephen Hurd 	}
148795246abbSSean Bruno 
148895dcf343SMarius Strobl 	ctx = rxq->ifr_ctx;
148995dcf343SMarius Strobl 	sc = ctx->ifc_softc;
14901ae4848cSMatt Macy 	MPASS(rxq->ifr_ntxqirq);
149195246abbSSean Bruno 	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
149295dcf343SMarius Strobl 		txqid = rxq->ifr_txqid[i];
149395dcf343SMarius Strobl 		txq = &ctx->ifc_txqs[txqid];
149495dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
14958a04b53dSKonstantin Belousov 		    BUS_DMASYNC_POSTREAD);
149695dcf343SMarius Strobl 		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
149795246abbSSean Bruno 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
149895246abbSSean Bruno 			continue;
149995246abbSSean Bruno 		}
150095dcf343SMarius Strobl 		GROUPTASK_ENQUEUE(&txq->ift_task);
150195246abbSSean Bruno 	}
150295246abbSSean Bruno 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
150395246abbSSean Bruno 		cidx = rxq->ifr_cq_cidx;
150495246abbSSean Bruno 	else
150595246abbSSean Bruno 		cidx = rxq->ifr_fl[0].ifl_cidx;
150695246abbSSean Bruno 	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
150795246abbSSean Bruno 		GROUPTASK_ENQUEUE(gtask);
150864e6fc13SStephen Hurd 	else {
150995246abbSSean Bruno 		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
151064e6fc13SStephen Hurd 		DBG_COUNTER_INC(rx_intr_enables);
151164e6fc13SStephen Hurd 	}
151295246abbSSean Bruno 	return (FILTER_HANDLED);
151395246abbSSean Bruno }
151495246abbSSean Bruno 
151595246abbSSean Bruno 
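/*
 * Same structure as iflib_fast_intr(): run the driver-supplied filter,
 * if any, and enqueue the caller-provided group task.
 */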
151695246abbSSean Bruno static int
151795246abbSSean Bruno iflib_fast_intr_ctx(void *arg)
151895246abbSSean Bruno {
151995246abbSSean Bruno 	iflib_filter_info_t info = arg;
152095246abbSSean Bruno 	struct grouptask *gtask = info->ifi_task;
1521ca62461bSStephen Hurd 	int result;
15224c7070dbSScott Long 
1523bd84f700SSean Bruno 	if (!iflib_started)
1524ca62461bSStephen Hurd 		return (FILTER_STRAY);
15251248952aSSean Bruno 
15264c7070dbSScott Long 	DBG_COUNTER_INC(fast_intrs);
1527ca62461bSStephen Hurd 	if (info->ifi_filter != NULL) {
1528ca62461bSStephen Hurd 		result = info->ifi_filter(info->ifi_filter_arg);
1529ca62461bSStephen Hurd 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1530ca62461bSStephen Hurd 			return (result);
1531ca62461bSStephen Hurd 	}
15324c7070dbSScott Long 
15334c7070dbSScott Long 	GROUPTASK_ENQUEUE(gtask);
15344c7070dbSScott Long 	return (FILTER_HANDLED);
15354c7070dbSScott Long }
15364c7070dbSScott Long 
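/*
 * Allocate and activate an interrupt resource for the given rid (shared
 * for legacy interrupts) and attach either a filter or a handler to it.
 * On success the resource and cookie are stored in *irq and the
 * interrupt is labelled with the supplied name.
 */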
15374c7070dbSScott Long static int
15384c7070dbSScott Long _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
15394c7070dbSScott Long 		 driver_filter_t filter, driver_intr_t handler, void *arg,
15403e0e6330SStephen Hurd 		 const char *name)
15414c7070dbSScott Long {
15422b2fc973SSean Bruno 	int rc, flags;
15434c7070dbSScott Long 	struct resource *res;
15442b2fc973SSean Bruno 	void *tag = NULL;
15454c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
15464c7070dbSScott Long 
15472b2fc973SSean Bruno 	flags = RF_ACTIVE;
15482b2fc973SSean Bruno 	if (ctx->ifc_flags & IFC_LEGACY)
15492b2fc973SSean Bruno 		flags |= RF_SHAREABLE;
15504c7070dbSScott Long 	MPASS(rid < 512);
15514c7070dbSScott Long 	irq->ii_rid = rid;
15522b2fc973SSean Bruno 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
15534c7070dbSScott Long 	if (res == NULL) {
15544c7070dbSScott Long 		device_printf(dev,
15554c7070dbSScott Long 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
15564c7070dbSScott Long 		return (ENOMEM);
15574c7070dbSScott Long 	}
15584c7070dbSScott Long 	irq->ii_res = res;
15594c7070dbSScott Long 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
15604c7070dbSScott Long 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
15614c7070dbSScott Long 						filter, handler, arg, &tag);
15624c7070dbSScott Long 	if (rc != 0) {
15634c7070dbSScott Long 		device_printf(dev,
15644c7070dbSScott Long 		    "failed to setup interrupt for rid %d, name %s: %d\n",
15654c7070dbSScott Long 					  rid, name ? name : "unknown", rc);
15664c7070dbSScott Long 		return (rc);
15674c7070dbSScott Long 	} else if (name)
1568f454e7ebSJohn Baldwin 		bus_describe_intr(dev, res, tag, "%s", name);
15694c7070dbSScott Long 
15704c7070dbSScott Long 	irq->ii_tag = tag;
15714c7070dbSScott Long 	return (0);
15724c7070dbSScott Long }
15734c7070dbSScott Long 
15744c7070dbSScott Long 
15754c7070dbSScott Long /*********************************************************************
15764c7070dbSScott Long  *
1577bfce461eSMarius Strobl  *  Allocate DMA resources for TX buffers as well as memory for the TX
1578bfce461eSMarius Strobl  *  mbuf map.  TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a
1579bfce461eSMarius Strobl  *  iflib_sw_tx_desc_array structure, storing all the information that
1580bfce461eSMarius Strobl  *  is needed to transmit a packet on the wire.  This is called only
1581bfce461eSMarius Strobl  *  once at attach; setup is done on every reset.
15824c7070dbSScott Long  *
15834c7070dbSScott Long  **********************************************************************/
15844c7070dbSScott Long static int
15854c7070dbSScott Long iflib_txsd_alloc(iflib_txq_t txq)
15864c7070dbSScott Long {
15874c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
15884c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
15894c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
15904c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
15917f87c040SMarius Strobl 	bus_size_t tsomaxsize;
15924c7070dbSScott Long 	int err, nsegments, ntsosegments;
15938a04b53dSKonstantin Belousov 	bool tso;
15944c7070dbSScott Long 
15954c7070dbSScott Long 	nsegments = scctx->isc_tx_nsegments;
15964c7070dbSScott Long 	ntsosegments = scctx->isc_tx_tso_segments_max;
15977f87c040SMarius Strobl 	tsomaxsize = scctx->isc_tx_tso_size_max;
15987f87c040SMarius Strobl 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
15997f87c040SMarius Strobl 		tsomaxsize += sizeof(struct ether_vlan_header);
160023ac9029SStephen Hurd 	MPASS(scctx->isc_ntxd[0] > 0);
160123ac9029SStephen Hurd 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
16024c7070dbSScott Long 	MPASS(nsegments > 0);
16037f87c040SMarius Strobl 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
16044c7070dbSScott Long 		MPASS(ntsosegments > 0);
16057f87c040SMarius Strobl 		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
16067f87c040SMarius Strobl 	}
16077f87c040SMarius Strobl 
16084c7070dbSScott Long 	/*
1609bfce461eSMarius Strobl 	 * Set up DMA tags for TX buffers.
16104c7070dbSScott Long 	 */
16114c7070dbSScott Long 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
16124c7070dbSScott Long 			       1, 0,			/* alignment, bounds */
16134c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* lowaddr */
16144c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* highaddr */
16154c7070dbSScott Long 			       NULL, NULL,		/* filter, filterarg */
16164c7070dbSScott Long 			       sctx->isc_tx_maxsize,		/* maxsize */
16174c7070dbSScott Long 			       nsegments,	/* nsegments */
16184c7070dbSScott Long 			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
16194c7070dbSScott Long 			       0,			/* flags */
16204c7070dbSScott Long 			       NULL,			/* lockfunc */
16214c7070dbSScott Long 			       NULL,			/* lockfuncarg */
1622bfce461eSMarius Strobl 			       &txq->ift_buf_tag))) {
16234c7070dbSScott Long 		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
16249d0a88deSDimitry Andric 		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
16259d0a88deSDimitry Andric 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
16264c7070dbSScott Long 		goto fail;
16274c7070dbSScott Long 	}
16288a04b53dSKonstantin Belousov 	tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
16298a04b53dSKonstantin Belousov 	if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
16304c7070dbSScott Long 			       1, 0,			/* alignment, bounds */
16314c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* lowaddr */
16324c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* highaddr */
16334c7070dbSScott Long 			       NULL, NULL,		/* filter, filterarg */
16347f87c040SMarius Strobl 			       tsomaxsize,		/* maxsize */
16354c7070dbSScott Long 			       ntsosegments,	/* nsegments */
16367f87c040SMarius Strobl 			       sctx->isc_tso_maxsegsize,/* maxsegsize */
16374c7070dbSScott Long 			       0,			/* flags */
16384c7070dbSScott Long 			       NULL,			/* lockfunc */
16394c7070dbSScott Long 			       NULL,			/* lockfuncarg */
1640bfce461eSMarius Strobl 			       &txq->ift_tso_buf_tag))) {
1641bfce461eSMarius Strobl 		device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
1642bfce461eSMarius Strobl 		    err);
16434c7070dbSScott Long 		goto fail;
16444c7070dbSScott Long 	}
1645bfce461eSMarius Strobl 
1646bfce461eSMarius Strobl 	/* Allocate memory for the TX mbuf map. */
16474c7070dbSScott Long 	if (!(txq->ift_sds.ifsd_m =
1648ac2fffa4SPedro F. Giffuni 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1649ac2fffa4SPedro F. Giffuni 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1650bfce461eSMarius Strobl 		device_printf(dev, "Unable to allocate TX mbuf map memory\n");
16514c7070dbSScott Long 		err = ENOMEM;
16524c7070dbSScott Long 		goto fail;
16534c7070dbSScott Long 	}
16544c7070dbSScott Long 
1655bfce461eSMarius Strobl 	/*
1656bfce461eSMarius Strobl 	 * Create the DMA maps for TX buffers.
1657bfce461eSMarius Strobl 	 */
16588a04b53dSKonstantin Belousov 	if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
16598a04b53dSKonstantin Belousov 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
16608a04b53dSKonstantin Belousov 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1661bfce461eSMarius Strobl 		device_printf(dev,
1662bfce461eSMarius Strobl 		    "Unable to allocate TX buffer DMA map memory\n");
16634c7070dbSScott Long 		err = ENOMEM;
16644c7070dbSScott Long 		goto fail;
16654c7070dbSScott Long 	}
16668a04b53dSKonstantin Belousov 	if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
16678a04b53dSKonstantin Belousov 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
16688a04b53dSKonstantin Belousov 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1669bfce461eSMarius Strobl 		device_printf(dev,
1670bfce461eSMarius Strobl 		    "Unable to allocate TSO TX buffer map memory\n");
16718a04b53dSKonstantin Belousov 		err = ENOMEM;
16728a04b53dSKonstantin Belousov 		goto fail;
16738a04b53dSKonstantin Belousov 	}
167423ac9029SStephen Hurd 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1675bfce461eSMarius Strobl 		err = bus_dmamap_create(txq->ift_buf_tag, 0,
16768a04b53dSKonstantin Belousov 		    &txq->ift_sds.ifsd_map[i]);
16774c7070dbSScott Long 		if (err != 0) {
16784c7070dbSScott Long 			device_printf(dev, "Unable to create TX DMA map\n");
16794c7070dbSScott Long 			goto fail;
16804c7070dbSScott Long 		}
16818a04b53dSKonstantin Belousov 		if (!tso)
16828a04b53dSKonstantin Belousov 			continue;
1683bfce461eSMarius Strobl 		err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
16848a04b53dSKonstantin Belousov 		    &txq->ift_sds.ifsd_tso_map[i]);
16858a04b53dSKonstantin Belousov 		if (err != 0) {
16868a04b53dSKonstantin Belousov 			device_printf(dev, "Unable to create TSO TX DMA map\n");
16878a04b53dSKonstantin Belousov 			goto fail;
16888a04b53dSKonstantin Belousov 		}
16894c7070dbSScott Long 	}
16904c7070dbSScott Long 	return (0);
16914c7070dbSScott Long fail:
16924c7070dbSScott Long 	/* Free everything; this handles the case where we failed partway through allocation. */
16934c7070dbSScott Long 	iflib_tx_structures_free(ctx);
16944c7070dbSScott Long 	return (err);
16954c7070dbSScott Long }
16964c7070dbSScott Long 
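/*
 * Sync, unload and destroy the regular and TSO DMA maps, if present,
 * of TX descriptor slot i.
 */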
16974c7070dbSScott Long static void
16984c7070dbSScott Long iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
16994c7070dbSScott Long {
17004c7070dbSScott Long 	bus_dmamap_t map;
17014c7070dbSScott Long 
17024c7070dbSScott Long 	map = NULL;
17034c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL)
17044c7070dbSScott Long 		map = txq->ift_sds.ifsd_map[i];
17054c7070dbSScott Long 	if (map != NULL) {
1706bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
1707bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_buf_tag, map);
1708bfce461eSMarius Strobl 		bus_dmamap_destroy(txq->ift_buf_tag, map);
17094c7070dbSScott Long 		txq->ift_sds.ifsd_map[i] = NULL;
17104c7070dbSScott Long 	}
17118a04b53dSKonstantin Belousov 
17128a04b53dSKonstantin Belousov 	map = NULL;
17138a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL)
17148a04b53dSKonstantin Belousov 		map = txq->ift_sds.ifsd_tso_map[i];
17158a04b53dSKonstantin Belousov 	if (map != NULL) {
1716bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_tso_buf_tag, map,
17178a04b53dSKonstantin Belousov 		    BUS_DMASYNC_POSTWRITE);
1718bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_tso_buf_tag, map);
1719bfce461eSMarius Strobl 		bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
17208a04b53dSKonstantin Belousov 		txq->ift_sds.ifsd_tso_map[i] = NULL;
17218a04b53dSKonstantin Belousov 	}
17224c7070dbSScott Long }
17234c7070dbSScott Long 
17244c7070dbSScott Long static void
17254c7070dbSScott Long iflib_txq_destroy(iflib_txq_t txq)
17264c7070dbSScott Long {
17274c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
17284c7070dbSScott Long 
172923ac9029SStephen Hurd 	for (int i = 0; i < txq->ift_size; i++)
17304c7070dbSScott Long 		iflib_txsd_destroy(ctx, txq, i);
17314c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL) {
17324c7070dbSScott Long 		free(txq->ift_sds.ifsd_map, M_IFLIB);
17334c7070dbSScott Long 		txq->ift_sds.ifsd_map = NULL;
17344c7070dbSScott Long 	}
17358a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL) {
17368a04b53dSKonstantin Belousov 		free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
17378a04b53dSKonstantin Belousov 		txq->ift_sds.ifsd_tso_map = NULL;
17388a04b53dSKonstantin Belousov 	}
17394c7070dbSScott Long 	if (txq->ift_sds.ifsd_m != NULL) {
17404c7070dbSScott Long 		free(txq->ift_sds.ifsd_m, M_IFLIB);
17414c7070dbSScott Long 		txq->ift_sds.ifsd_m = NULL;
17424c7070dbSScott Long 	}
1743bfce461eSMarius Strobl 	if (txq->ift_buf_tag != NULL) {
1744bfce461eSMarius Strobl 		bus_dma_tag_destroy(txq->ift_buf_tag);
1745bfce461eSMarius Strobl 		txq->ift_buf_tag = NULL;
17464c7070dbSScott Long 	}
1747bfce461eSMarius Strobl 	if (txq->ift_tso_buf_tag != NULL) {
1748bfce461eSMarius Strobl 		bus_dma_tag_destroy(txq->ift_tso_buf_tag);
1749bfce461eSMarius Strobl 		txq->ift_tso_buf_tag = NULL;
17504c7070dbSScott Long 	}
17514c7070dbSScott Long }
17524c7070dbSScott Long 
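/*
 * Unload the DMA maps of TX descriptor slot i and free the mbuf, if
 * any, still associated with it.
 */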
17534c7070dbSScott Long static void
17544c7070dbSScott Long iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
17554c7070dbSScott Long {
17564c7070dbSScott Long 	struct mbuf **mp;
17574c7070dbSScott Long 
17584c7070dbSScott Long 	mp = &txq->ift_sds.ifsd_m[i];
17594c7070dbSScott Long 	if (*mp == NULL)
17604c7070dbSScott Long 		return;
17614c7070dbSScott Long 
17624c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL) {
1763bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_buf_tag,
17648a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
1765bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
17668a04b53dSKonstantin Belousov 	}
17678a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1768bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_tso_buf_tag,
17698a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
1770bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_tso_buf_tag,
17718a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_tso_map[i]);
17724c7070dbSScott Long 	}
177323ac9029SStephen Hurd 	m_free(*mp);
17744c7070dbSScott Long 	DBG_COUNTER_INC(tx_frees);
17754c7070dbSScott Long 	*mp = NULL;
17764c7070dbSScott Long }
17774c7070dbSScott Long 
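/*
 * Per-(re)init TX queue setup: reset the software indices, zero the
 * descriptor memory, let the driver program the queue through
 * IFDI_TXQ_SETUP() and push the cleared descriptors out to the device.
 */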
17784c7070dbSScott Long static int
17794c7070dbSScott Long iflib_txq_setup(iflib_txq_t txq)
17804c7070dbSScott Long {
17814c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
178223ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
17834d261ce2SStephen Hurd 	if_shared_ctx_t sctx = ctx->ifc_sctx;
17844c7070dbSScott Long 	iflib_dma_info_t di;
17854c7070dbSScott Long 	int i;
17864c7070dbSScott Long 
17874c7070dbSScott Long 	/* Set number of descriptors available */
17884c7070dbSScott Long 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
178995246abbSSean Bruno 	/* XXX make configurable */
179095246abbSSean Bruno 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
17914c7070dbSScott Long 
17924c7070dbSScott Long 	/* Reset indices */
179395246abbSSean Bruno 	txq->ift_cidx_processed = 0;
179495246abbSSean Bruno 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
179523ac9029SStephen Hurd 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
17964c7070dbSScott Long 
17974d261ce2SStephen Hurd 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
17984c7070dbSScott Long 		bzero((void *)di->idi_vaddr, di->idi_size);
17994c7070dbSScott Long 
18004c7070dbSScott Long 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
18014d261ce2SStephen Hurd 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
18024c7070dbSScott Long 		bus_dmamap_sync(di->idi_tag, di->idi_map,
18034c7070dbSScott Long 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
18044c7070dbSScott Long 	return (0);
18054c7070dbSScott Long }
18064c7070dbSScott Long 
18074c7070dbSScott Long /*********************************************************************
18084c7070dbSScott Long  *
1809bfce461eSMarius Strobl  *  Allocate DMA resources for RX buffers as well as memory for the RX
1810bfce461eSMarius Strobl  *  mbuf map, direct RX cluster pointer map and RX cluster bus address
1811bfce461eSMarius Strobl  *  map.  RX DMA map, RX mbuf map, direct RX cluster pointer map and
1812bfce461eSMarius Strobl  *  RX cluster map are kept in a iflib_sw_rx_desc_array structure.
1813bfce461eSMarius Strobl  *  Since we use one entry in iflib_sw_rx_desc_array per received
1814bfce461eSMarius Strobl  *  packet, the maximum number of entries we'll need is equal to the
1815bfce461eSMarius Strobl  *  number of hardware receive descriptors that we've allocated.
18164c7070dbSScott Long  *
18174c7070dbSScott Long  **********************************************************************/
18184c7070dbSScott Long static int
18194c7070dbSScott Long iflib_rxsd_alloc(iflib_rxq_t rxq)
18204c7070dbSScott Long {
18214c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
18224c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
182323ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
18244c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
18254c7070dbSScott Long 	iflib_fl_t fl;
18264c7070dbSScott Long 	int			err;
18274c7070dbSScott Long 
182823ac9029SStephen Hurd 	MPASS(scctx->isc_nrxd[0] > 0);
182923ac9029SStephen Hurd 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
18304c7070dbSScott Long 
18314c7070dbSScott Long 	fl = rxq->ifr_fl;
18324c7070dbSScott Long 	for (int i = 0; i <  rxq->ifr_nfl; i++, fl++) {
183323ac9029SStephen Hurd 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
1834bfce461eSMarius Strobl 		/* Set up DMA tag for RX buffers. */
18354c7070dbSScott Long 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
18364c7070dbSScott Long 					 1, 0,			/* alignment, bounds */
18374c7070dbSScott Long 					 BUS_SPACE_MAXADDR,	/* lowaddr */
18384c7070dbSScott Long 					 BUS_SPACE_MAXADDR,	/* highaddr */
18394c7070dbSScott Long 					 NULL, NULL,		/* filter, filterarg */
18404c7070dbSScott Long 					 sctx->isc_rx_maxsize,	/* maxsize */
18414c7070dbSScott Long 					 sctx->isc_rx_nsegments,	/* nsegments */
18424c7070dbSScott Long 					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
18434c7070dbSScott Long 					 0,			/* flags */
18444c7070dbSScott Long 					 NULL,			/* lockfunc */
18454c7070dbSScott Long 					 NULL,			/* lockarg */
1846bfce461eSMarius Strobl 					 &fl->ifl_buf_tag);
18474c7070dbSScott Long 		if (err) {
1848bfce461eSMarius Strobl 			device_printf(dev,
1849bfce461eSMarius Strobl 			    "Unable to allocate RX DMA tag: %d\n", err);
18504c7070dbSScott Long 			goto fail;
18514c7070dbSScott Long 		}
1852bfce461eSMarius Strobl 
1853bfce461eSMarius Strobl 		/* Allocate memory for the RX mbuf map. */
1854e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_m =
1855ac2fffa4SPedro F. Giffuni 		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
1856ac2fffa4SPedro F. Giffuni 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1857bfce461eSMarius Strobl 			device_printf(dev,
1858bfce461eSMarius Strobl 			    "Unable to allocate RX mbuf map memory\n");
1859e035717eSSean Bruno 			err = ENOMEM;
1860e035717eSSean Bruno 			goto fail;
1861e035717eSSean Bruno 		}
1862bfce461eSMarius Strobl 
1863bfce461eSMarius Strobl 		/* Allocate memory for the direct RX cluster pointer map. */
1864e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_cl =
1865ac2fffa4SPedro F. Giffuni 		      (caddr_t *) malloc(sizeof(caddr_t) *
1866ac2fffa4SPedro F. Giffuni 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1867bfce461eSMarius Strobl 			device_printf(dev,
1868bfce461eSMarius Strobl 			    "Unable to allocate RX cluster map memory\n");
1869e035717eSSean Bruno 			err = ENOMEM;
1870e035717eSSean Bruno 			goto fail;
1871e035717eSSean Bruno 		}
18724c7070dbSScott Long 
1873bfce461eSMarius Strobl 		/* Allocate memory for the RX cluster bus address map. */
1874fbec776dSAndrew Gallatin 		if (!(fl->ifl_sds.ifsd_ba =
1875fbec776dSAndrew Gallatin 		      (bus_addr_t *) malloc(sizeof(bus_addr_t) *
1876fbec776dSAndrew Gallatin 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1877bfce461eSMarius Strobl 			device_printf(dev,
1878bfce461eSMarius Strobl 			    "Unable to allocate RX bus address map memory\n");
1879fbec776dSAndrew Gallatin 			err = ENOMEM;
1880fbec776dSAndrew Gallatin 			goto fail;
1881fbec776dSAndrew Gallatin 		}
1882e035717eSSean Bruno 
1883bfce461eSMarius Strobl 		/*
1884bfce461eSMarius Strobl 		 * Create the DMA maps for RX buffers.
1885bfce461eSMarius Strobl 		 */
1886e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_map =
1887ac2fffa4SPedro F. Giffuni 		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1888bfce461eSMarius Strobl 			device_printf(dev,
1889bfce461eSMarius Strobl 			    "Unable to allocate RX buffer DMA map memory\n");
1890e035717eSSean Bruno 			err = ENOMEM;
1891e035717eSSean Bruno 			goto fail;
1892e035717eSSean Bruno 		}
1893e035717eSSean Bruno 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
1894bfce461eSMarius Strobl 			err = bus_dmamap_create(fl->ifl_buf_tag, 0,
1895bfce461eSMarius Strobl 			    &fl->ifl_sds.ifsd_map[i]);
1896e035717eSSean Bruno 			if (err != 0) {
189795246abbSSean Bruno 				device_printf(dev, "Unable to create RX buffer DMA map\n");
18984c7070dbSScott Long 				goto fail;
18994c7070dbSScott Long 			}
19004c7070dbSScott Long 		}
1901835809f9SSean Bruno 	}
19024c7070dbSScott Long 	return (0);
19034c7070dbSScott Long 
19044c7070dbSScott Long fail:
19054c7070dbSScott Long 	iflib_rx_structures_free(ctx);
19064c7070dbSScott Long 	return (err);
19074c7070dbSScott Long }
19084c7070dbSScott Long 
19094c7070dbSScott Long 
19104c7070dbSScott Long /*
19114c7070dbSScott Long  * Internal service routines
19124c7070dbSScott Long  */
19134c7070dbSScott Long 
19144c7070dbSScott Long struct rxq_refill_cb_arg {
19154c7070dbSScott Long 	int               error;
19164c7070dbSScott Long 	bus_dma_segment_t seg;
19174c7070dbSScott Long 	int               nseg;
19184c7070dbSScott Long };
19194c7070dbSScott Long 
19204c7070dbSScott Long static void
19214c7070dbSScott Long _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
19224c7070dbSScott Long {
19234c7070dbSScott Long 	struct rxq_refill_cb_arg *cb_arg = arg;
19244c7070dbSScott Long 
19254c7070dbSScott Long 	cb_arg->error = error;
19264c7070dbSScott Long 	cb_arg->seg = segs[0];
19274c7070dbSScott Long 	cb_arg->nseg = nseg;
19284c7070dbSScott Long }
19294c7070dbSScott Long 
19304c7070dbSScott Long /**
19314c7070dbSScott Long  *	_iflib_fl_refill - refill an rxq free-buffer list
19324c7070dbSScott Long  *	@ctx: the iflib context
19334c7070dbSScott Long  *	@fl: the free list to refill
19344c7070dbSScott Long  *	@count: the number of new buffers to allocate
19354c7070dbSScott Long  *
19364c7070dbSScott Long  *	(Re)populate an rxq free-buffer list with up to @count new packet buffers.
19374c7070dbSScott Long  *	The caller must assure that @count does not exceed the free list's capacity.
19384c7070dbSScott Long  */
19394c7070dbSScott Long static void
19404c7070dbSScott Long _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
19414c7070dbSScott Long {
194295246abbSSean Bruno 	struct if_rxd_update iru;
1943fbec776dSAndrew Gallatin 	struct rxq_refill_cb_arg cb_arg;
19443db348b5SMarius Strobl 	struct mbuf *m;
19453db348b5SMarius Strobl 	caddr_t cl, *sd_cl;
19463db348b5SMarius Strobl 	struct mbuf **sd_m;
1947e035717eSSean Bruno 	bus_dmamap_t *sd_map;
1948fbec776dSAndrew Gallatin 	bus_addr_t bus_addr, *sd_ba;
19493db348b5SMarius Strobl 	int err, frag_idx, i, idx, n, pidx;
1950a1b799caSStephen Hurd 	qidx_t credits;
19514c7070dbSScott Long 
1952e035717eSSean Bruno 	sd_m = fl->ifl_sds.ifsd_m;
1953e035717eSSean Bruno 	sd_map = fl->ifl_sds.ifsd_map;
1954e035717eSSean Bruno 	sd_cl = fl->ifl_sds.ifsd_cl;
1955fbec776dSAndrew Gallatin 	sd_ba = fl->ifl_sds.ifsd_ba;
19563db348b5SMarius Strobl 	pidx = fl->ifl_pidx;
1957e035717eSSean Bruno 	idx = pidx;
19583db348b5SMarius Strobl 	frag_idx = fl->ifl_fragidx;
1959a1b799caSStephen Hurd 	credits = fl->ifl_credits;
1960e035717eSSean Bruno 
19613db348b5SMarius Strobl 	i = 0;
19624c7070dbSScott Long 	n = count;
19634c7070dbSScott Long 	MPASS(n > 0);
1964a1b799caSStephen Hurd 	MPASS(credits + n <= fl->ifl_size);
19654c7070dbSScott Long 
19664c7070dbSScott Long 	if (pidx < fl->ifl_cidx)
19674c7070dbSScott Long 		MPASS(pidx + n <= fl->ifl_cidx);
1968a1b799caSStephen Hurd 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
19694c7070dbSScott Long 		MPASS(fl->ifl_gen == 0);
19704c7070dbSScott Long 	if (pidx > fl->ifl_cidx)
19714c7070dbSScott Long 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
19724c7070dbSScott Long 
19734c7070dbSScott Long 	DBG_COUNTER_INC(fl_refills);
19744c7070dbSScott Long 	if (n > 8)
19754c7070dbSScott Long 		DBG_COUNTER_INC(fl_refills_large);
19762d873474SStephen Hurd 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
19774c7070dbSScott Long 	while (n--) {
19784c7070dbSScott Long 		/*
19794c7070dbSScott Long 		 * We allocate an uninitialized mbuf + cluster, mbuf is
19804c7070dbSScott Long 		 * initialized after rx.
19814c7070dbSScott Long 		 *
19824c7070dbSScott Long 		 * If the cluster is still set then we know a minimum sized packet was received
19834c7070dbSScott Long 		 */
19843db348b5SMarius Strobl 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
19853db348b5SMarius Strobl 		    &frag_idx);
19863db348b5SMarius Strobl 		if (frag_idx < 0)
198787890dbaSSean Bruno 			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
19883db348b5SMarius Strobl 		MPASS(frag_idx >= 0);
198987890dbaSSean Bruno 		if ((cl = sd_cl[frag_idx]) == NULL) {
1990fbec776dSAndrew Gallatin 			if ((cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
19914c7070dbSScott Long 				break;
19924c7070dbSScott Long 
19934c7070dbSScott Long 			cb_arg.error = 0;
199495246abbSSean Bruno 			MPASS(sd_map != NULL);
1995bfce461eSMarius Strobl 			err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
19968a04b53dSKonstantin Belousov 			    cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
19978a04b53dSKonstantin Belousov 			    BUS_DMA_NOWAIT);
19984c7070dbSScott Long 			if (err != 0 || cb_arg.error) {
19994c7070dbSScott Long 				/*
20004c7070dbSScott Long 				 * !zone_pack ?
20014c7070dbSScott Long 				 */
20024c7070dbSScott Long 				if (fl->ifl_zone == zone_pack)
20034c7070dbSScott Long 					uma_zfree(fl->ifl_zone, cl);
2004fbec776dSAndrew Gallatin 				break;
20054c7070dbSScott Long 			}
20064c7070dbSScott Long 
2007fbec776dSAndrew Gallatin 			sd_ba[frag_idx] =  bus_addr = cb_arg.seg.ds_addr;
200887890dbaSSean Bruno 			sd_cl[frag_idx] = cl;
2009fbec776dSAndrew Gallatin #if MEMORY_LOGGING
2010fbec776dSAndrew Gallatin 			fl->ifl_cl_enqueued++;
2011fbec776dSAndrew Gallatin #endif
2012fbec776dSAndrew Gallatin 		} else {
2013fbec776dSAndrew Gallatin 			bus_addr = sd_ba[frag_idx];
2014fbec776dSAndrew Gallatin 		}
201595dcf343SMarius Strobl 		bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
201695dcf343SMarius Strobl 		    BUS_DMASYNC_PREREAD);
2017fbec776dSAndrew Gallatin 
20186d49b41eSAndrew Gallatin 		if (sd_m[frag_idx] == NULL) {
2019fbec776dSAndrew Gallatin 			if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
2020fbec776dSAndrew Gallatin 				break;
2021fbec776dSAndrew Gallatin 			}
202287890dbaSSean Bruno 			sd_m[frag_idx] = m;
20236d49b41eSAndrew Gallatin 		}
20243db348b5SMarius Strobl 		bit_set(fl->ifl_rx_bitmap, frag_idx);
2025fbec776dSAndrew Gallatin #if MEMORY_LOGGING
2026fbec776dSAndrew Gallatin 		fl->ifl_m_enqueued++;
2027fbec776dSAndrew Gallatin #endif
2028fbec776dSAndrew Gallatin 
2029fbec776dSAndrew Gallatin 		DBG_COUNTER_INC(rx_allocs);
203087890dbaSSean Bruno 		fl->ifl_rxd_idxs[i] = frag_idx;
20314c7070dbSScott Long 		fl->ifl_bus_addrs[i] = bus_addr;
20324c7070dbSScott Long 		fl->ifl_vm_addrs[i] = cl;
2033a1b799caSStephen Hurd 		credits++;
20344c7070dbSScott Long 		i++;
2035a1b799caSStephen Hurd 		MPASS(credits <= fl->ifl_size);
2036e035717eSSean Bruno 		if (++idx == fl->ifl_size) {
20374c7070dbSScott Long 			fl->ifl_gen = 1;
2038e035717eSSean Bruno 			idx = 0;
20394c7070dbSScott Long 		}
20404c7070dbSScott Long 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
204195246abbSSean Bruno 			iru.iru_pidx = pidx;
204295246abbSSean Bruno 			iru.iru_count = i;
204395246abbSSean Bruno 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
20444c7070dbSScott Long 			i = 0;
2045e035717eSSean Bruno 			pidx = idx;
2046fa5416a8SSean Bruno 			fl->ifl_pidx = idx;
2047a1b799caSStephen Hurd 			fl->ifl_credits = credits;
204887890dbaSSean Bruno 		}
20494c7070dbSScott Long 	}
2050fbec776dSAndrew Gallatin 
2051a1b799caSStephen Hurd 	if (i) {
2052a1b799caSStephen Hurd 		iru.iru_pidx = pidx;
2053a1b799caSStephen Hurd 		iru.iru_count = i;
2054a1b799caSStephen Hurd 		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2055a1b799caSStephen Hurd 		fl->ifl_pidx = idx;
2056a1b799caSStephen Hurd 		fl->ifl_credits = credits;
2057a1b799caSStephen Hurd 	}
20584c7070dbSScott Long 	DBG_COUNTER_INC(rxd_flush);
20594c7070dbSScott Long 	if (fl->ifl_pidx == 0)
20604c7070dbSScott Long 		pidx = fl->ifl_size - 1;
20614c7070dbSScott Long 	else
20624c7070dbSScott Long 		pidx = fl->ifl_pidx - 1;
206395246abbSSean Bruno 
206495246abbSSean Bruno 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
206595246abbSSean Bruno 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
20664c7070dbSScott Long 	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
206787890dbaSSean Bruno 	fl->ifl_fragidx = frag_idx;
20684c7070dbSScott Long }
20694c7070dbSScott Long 
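/*
 * Refill the free list with at most max buffers, always leaving one
 * slot unused so the producer index never catches up with the consumer
 * index.
 */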
20704c7070dbSScott Long static __inline void
20714c7070dbSScott Long __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
20724c7070dbSScott Long {
20734c7070dbSScott Long 	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
20744c7070dbSScott Long 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
20754c7070dbSScott Long #ifdef INVARIANTS
20764c7070dbSScott Long 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
20774c7070dbSScott Long #endif
20784c7070dbSScott Long 
20794c7070dbSScott Long 	MPASS(fl->ifl_credits <= fl->ifl_size);
20804c7070dbSScott Long 	MPASS(reclaimable == delta);
20814c7070dbSScott Long 
20824c7070dbSScott Long 	if (reclaimable > 0)
20834c7070dbSScott Long 		_iflib_fl_refill(ctx, fl, min(max, reclaimable));
20844c7070dbSScott Long }
20854c7070dbSScott Long 
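/*
 * Return non-zero if the context is currently being detached.
 */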
208677c1fcecSEric Joyner uint8_t
208777c1fcecSEric Joyner iflib_in_detach(if_ctx_t ctx)
208877c1fcecSEric Joyner {
208977c1fcecSEric Joyner 	bool in_detach;
209077c1fcecSEric Joyner 	STATE_LOCK(ctx);
209177c1fcecSEric Joyner 	in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
209277c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
209377c1fcecSEric Joyner 	return (in_detach);
209477c1fcecSEric Joyner }
209577c1fcecSEric Joyner 
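/*
 * Release every cluster and mbuf currently held by a free list, unload
 * the corresponding DMA maps and reset the free list's indices and
 * descriptor memory.
 */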
20964c7070dbSScott Long static void
20974c7070dbSScott Long iflib_fl_bufs_free(iflib_fl_t fl)
20984c7070dbSScott Long {
20994c7070dbSScott Long 	iflib_dma_info_t idi = fl->ifl_ifdi;
21008a04b53dSKonstantin Belousov 	bus_dmamap_t sd_map;
21014c7070dbSScott Long 	uint32_t i;
21024c7070dbSScott Long 
21034c7070dbSScott Long 	for (i = 0; i < fl->ifl_size; i++) {
2104e035717eSSean Bruno 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2105e035717eSSean Bruno 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
21064c7070dbSScott Long 
2107fbec776dSAndrew Gallatin 		if (*sd_cl != NULL) {
21088a04b53dSKonstantin Belousov 			sd_map = fl->ifl_sds.ifsd_map[i];
2109bfce461eSMarius Strobl 			bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
21108a04b53dSKonstantin Belousov 			    BUS_DMASYNC_POSTREAD);
2111bfce461eSMarius Strobl 			bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
2112fbec776dSAndrew Gallatin 			if (*sd_cl != NULL)
2113fbec776dSAndrew Gallatin 				uma_zfree(fl->ifl_zone, *sd_cl);
211477c1fcecSEric Joyner 			/* XXX: Should this get moved out? */
211577c1fcecSEric Joyner 			if (iflib_in_detach(fl->ifl_rxq->ifr_ctx))
2116bfce461eSMarius Strobl 				bus_dmamap_destroy(fl->ifl_buf_tag, sd_map);
2117e035717eSSean Bruno 			if (*sd_m != NULL) {
2118e035717eSSean Bruno 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2119e035717eSSean Bruno 				uma_zfree(zone_mbuf, *sd_m);
2120e035717eSSean Bruno 			}
21214c7070dbSScott Long 		} else {
2122e035717eSSean Bruno 			MPASS(*sd_cl == NULL);
2123e035717eSSean Bruno 			MPASS(*sd_m == NULL);
21244c7070dbSScott Long 		}
21254c7070dbSScott Long #if MEMORY_LOGGING
21264c7070dbSScott Long 		fl->ifl_m_dequeued++;
21274c7070dbSScott Long 		fl->ifl_cl_dequeued++;
21284c7070dbSScott Long #endif
2129e035717eSSean Bruno 		*sd_cl = NULL;
2130e035717eSSean Bruno 		*sd_m = NULL;
21314c7070dbSScott Long 	}
213295246abbSSean Bruno #ifdef INVARIANTS
213395246abbSSean Bruno 	for (i = 0; i < fl->ifl_size; i++) {
213495246abbSSean Bruno 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
213595246abbSSean Bruno 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
213695246abbSSean Bruno 	}
213795246abbSSean Bruno #endif
21384c7070dbSScott Long 	/*
21394c7070dbSScott Long 	 * Reset free list values
21404c7070dbSScott Long 	 */
214187890dbaSSean Bruno 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
21424c7070dbSScott Long 	bzero(idi->idi_vaddr, idi->idi_size);
21434c7070dbSScott Long }
21444c7070dbSScott Long 
21454c7070dbSScott Long /*********************************************************************
21464c7070dbSScott Long  *
21474c7070dbSScott Long  *  Initialize a receive ring and its buffers.
21484c7070dbSScott Long  *
21494c7070dbSScott Long  **********************************************************************/
21504c7070dbSScott Long static int
21514c7070dbSScott Long iflib_fl_setup(iflib_fl_t fl)
21524c7070dbSScott Long {
21534c7070dbSScott Long 	iflib_rxq_t rxq = fl->ifl_rxq;
21544c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
21554c7070dbSScott Long 
21567274b2f6SStephen Hurd 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
21574c7070dbSScott Long 	/*
21584c7070dbSScott Long 	** Free current RX buffer structs and their mbufs
21594c7070dbSScott Long 	*/
21604c7070dbSScott Long 	iflib_fl_bufs_free(fl);
21614c7070dbSScott Long 	/* Now replenish the mbufs */
21624c7070dbSScott Long 	MPASS(fl->ifl_credits == 0);
21631b9d9394SEric Joyner 	fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz;
21644c7070dbSScott Long 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
21654c7070dbSScott Long 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
21664c7070dbSScott Long 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
21674c7070dbSScott Long 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
21684c7070dbSScott Long 
21694c7070dbSScott Long 
21704c7070dbSScott Long 	/* avoid pre-allocating zillions of clusters to an idle card
21714c7070dbSScott Long 	 * potentially speeding up attach
21724c7070dbSScott Long 	 */
21734c7070dbSScott Long 	_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
21744c7070dbSScott Long 	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
21754c7070dbSScott Long 	if (min(128, fl->ifl_size) != fl->ifl_credits)
21764c7070dbSScott Long 		return (ENOBUFS);
21774c7070dbSScott Long 	/*
21784c7070dbSScott Long 	 * handle failure
21794c7070dbSScott Long 	 */
21804c7070dbSScott Long 	MPASS(rxq != NULL);
21814c7070dbSScott Long 	MPASS(fl->ifl_ifdi != NULL);
21824c7070dbSScott Long 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
21834c7070dbSScott Long 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
21844c7070dbSScott Long 	return (0);
21854c7070dbSScott Long }
21864c7070dbSScott Long 
21874c7070dbSScott Long /*********************************************************************
21884c7070dbSScott Long  *
21894c7070dbSScott Long  *  Free receive ring data structures
21904c7070dbSScott Long  *
21914c7070dbSScott Long  **********************************************************************/
21924c7070dbSScott Long static void
21934c7070dbSScott Long iflib_rx_sds_free(iflib_rxq_t rxq)
21944c7070dbSScott Long {
21954c7070dbSScott Long 	iflib_fl_t fl;
21968a04b53dSKonstantin Belousov 	int i, j;
21974c7070dbSScott Long 
21984c7070dbSScott Long 	if (rxq->ifr_fl != NULL) {
21994c7070dbSScott Long 		for (i = 0; i < rxq->ifr_nfl; i++) {
22004c7070dbSScott Long 			fl = &rxq->ifr_fl[i];
2201bfce461eSMarius Strobl 			if (fl->ifl_buf_tag != NULL) {
22028a04b53dSKonstantin Belousov 				if (fl->ifl_sds.ifsd_map != NULL) {
220377102fd6SAndrew Gallatin 					for (j = 0; j < fl->ifl_size; j++) {
220477102fd6SAndrew Gallatin 						if (fl->ifl_sds.ifsd_map[j] ==
22058a04b53dSKonstantin Belousov 						    NULL)
22068a04b53dSKonstantin Belousov 							continue;
22078a04b53dSKonstantin Belousov 						bus_dmamap_sync(
2208bfce461eSMarius Strobl 						    fl->ifl_buf_tag,
220977102fd6SAndrew Gallatin 						    fl->ifl_sds.ifsd_map[j],
22108a04b53dSKonstantin Belousov 						    BUS_DMASYNC_POSTREAD);
22118a04b53dSKonstantin Belousov 						bus_dmamap_unload(
2212bfce461eSMarius Strobl 						    fl->ifl_buf_tag,
221377102fd6SAndrew Gallatin 						    fl->ifl_sds.ifsd_map[j]);
22148a04b53dSKonstantin Belousov 					}
22158a04b53dSKonstantin Belousov 				}
2216bfce461eSMarius Strobl 				bus_dma_tag_destroy(fl->ifl_buf_tag);
2217bfce461eSMarius Strobl 				fl->ifl_buf_tag = NULL;
22184c7070dbSScott Long 			}
2219e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2220e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2221fbec776dSAndrew Gallatin 			free(fl->ifl_sds.ifsd_ba, M_IFLIB);
2222e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2223e035717eSSean Bruno 			fl->ifl_sds.ifsd_m = NULL;
2224e035717eSSean Bruno 			fl->ifl_sds.ifsd_cl = NULL;
2225fbec776dSAndrew Gallatin 			fl->ifl_sds.ifsd_ba = NULL;
2226e035717eSSean Bruno 			fl->ifl_sds.ifsd_map = NULL;
22274c7070dbSScott Long 		}
22284c7070dbSScott Long 		free(rxq->ifr_fl, M_IFLIB);
22294c7070dbSScott Long 		rxq->ifr_fl = NULL;
22304c7070dbSScott Long 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
22314c7070dbSScott Long 	}
22324c7070dbSScott Long }
22334c7070dbSScott Long 
22344c7070dbSScott Long /*
22354c7070dbSScott Long  * Machine-independent (MI) logic
22364c7070dbSScott Long  *
22374c7070dbSScott Long  */
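/*
 * Per-TX-queue watchdog callout.  Roughly every hz/2 ticks it runs
 * IFDI_TIMER and checks the queue for forward progress; a queue marked
 * HUNG escapes the watchdog only if it has both cleaned descriptors
 * since the last pass and seen pause frames, otherwise an interface
 * reset is requested via the admin task.  On the non-hung path any
 * deferred doorbell work is kicked and the callout is rescheduled.
 */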
22384c7070dbSScott Long static void
22394c7070dbSScott Long iflib_timer(void *arg)
22404c7070dbSScott Long {
2241ab2e3f79SStephen Hurd 	iflib_txq_t txq = arg;
22424c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
2243ab2e3f79SStephen Hurd 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2244dd7fbcf1SStephen Hurd 	uint64_t this_tick = ticks;
2245dd7fbcf1SStephen Hurd 	uint32_t reset_on = hz / 2;
22464c7070dbSScott Long 
22474c7070dbSScott Long 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
22484c7070dbSScott Long 		return;
22494c7070dbSScott Long 	/*
22504c7070dbSScott Long 	** Check on the state of the TX queue(s); this
22514c7070dbSScott Long 	** can be done without the lock because it's RO
22524c7070dbSScott Long 	** and the HUNG state will be static if set.
22534c7070dbSScott Long 	*/
2254dd7fbcf1SStephen Hurd 	if (this_tick - txq->ift_last_timer_tick >= hz / 2) {
2255dd7fbcf1SStephen Hurd 		txq->ift_last_timer_tick = this_tick;
2256ab2e3f79SStephen Hurd 		IFDI_TIMER(ctx, txq->ift_id);
2257ab2e3f79SStephen Hurd 		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2258ab2e3f79SStephen Hurd 		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2259ab2e3f79SStephen Hurd 		     (sctx->isc_pause_frames == 0)))
2260ab2e3f79SStephen Hurd 			goto hung;
2261a9693502SSean Bruno 
2262ab2e3f79SStephen Hurd 		if (ifmp_ring_is_stalled(txq->ift_br))
2263ab2e3f79SStephen Hurd 			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2264ab2e3f79SStephen Hurd 		txq->ift_cleaned_prev = txq->ift_cleaned;
2265dd7fbcf1SStephen Hurd 	}
2266dd7fbcf1SStephen Hurd #ifdef DEV_NETMAP
2267dd7fbcf1SStephen Hurd 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
226895dcf343SMarius Strobl 		iflib_netmap_timer_adjust(ctx, txq, &reset_on);
2269dd7fbcf1SStephen Hurd #endif
2270ab2e3f79SStephen Hurd 	/* handle any laggards */
2271ab2e3f79SStephen Hurd 	if (txq->ift_db_pending)
2272ab2e3f79SStephen Hurd 		GROUPTASK_ENQUEUE(&txq->ift_task);
2273a9693502SSean Bruno 
2274ab2e3f79SStephen Hurd 	sctx->isc_pause_frames = 0;
2275d300df01SStephen Hurd 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2276dd7fbcf1SStephen Hurd 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
2277ab2e3f79SStephen Hurd 	return;
2278ab2e3f79SStephen Hurd  hung:
2279ab2e3f79SStephen Hurd 	device_printf(ctx->ifc_dev,  "TX(%d) desc avail = %d, pidx = %d\n",
2280ab2e3f79SStephen Hurd 				  txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
22817b610b60SSean Bruno 	STATE_LOCK(ctx);
22827b610b60SSean Bruno 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
22837b610b60SSean Bruno 	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
2284940f62d6SEric Joyner 	iflib_admin_intr_deferred(ctx);
228546fa0c25SEric Joyner 	STATE_UNLOCK(ctx);
22864c7070dbSScott Long }
22874c7070dbSScott Long 
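/*
 * Pick the receive cluster size for this context: a regular 2KB
 * (MCLBYTES) cluster when the maximum frame fits, otherwise a
 * page-sized (MJUMPAGESIZE) cluster.
 */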
22884c7070dbSScott Long static void
22891b9d9394SEric Joyner iflib_calc_rx_mbuf_sz(if_ctx_t ctx)
22901b9d9394SEric Joyner {
22911b9d9394SEric Joyner 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
22921b9d9394SEric Joyner 
22931b9d9394SEric Joyner 	/*
22941b9d9394SEric Joyner 	 * XXX don't set max_frame_size larger
22951b9d9394SEric Joyner 	 * than the hardware can handle
22961b9d9394SEric Joyner 	 */
22971b9d9394SEric Joyner 	if (sctx->isc_max_frame_size <= MCLBYTES)
22981b9d9394SEric Joyner 		ctx->ifc_rx_mbuf_sz = MCLBYTES;
22991b9d9394SEric Joyner 	else
23001b9d9394SEric Joyner 		ctx->ifc_rx_mbuf_sz = MJUMPAGESIZE;
23011b9d9394SEric Joyner }
23021b9d9394SEric Joyner 
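/*
 * Accessor for drivers: returns the Rx cluster size chosen by
 * iflib_calc_rx_mbuf_sz() so it can be used when programming the
 * hardware receive buffers.
 */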
23031b9d9394SEric Joyner uint32_t
23041b9d9394SEric Joyner iflib_get_rx_mbuf_sz(if_ctx_t ctx)
23051b9d9394SEric Joyner {
23061b9d9394SEric Joyner 	return (ctx->ifc_rx_mbuf_sz);
23071b9d9394SEric Joyner }
23081b9d9394SEric Joyner 
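/*
 * Bring the interface up with the context lock held: program the
 * hardware checksum/TSO assist bits from the enabled capabilities,
 * stop the per-queue timers, compute the Rx mbuf size, call the
 * driver's IFDI_INIT, set up each receive free list (or its netmap
 * ring), mark the interface running, re-enable interrupts, and
 * restart the queue timers.
 */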
23091b9d9394SEric Joyner static void
23104c7070dbSScott Long iflib_init_locked(if_ctx_t ctx)
23114c7070dbSScott Long {
23124c7070dbSScott Long 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
23131248952aSSean Bruno 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
23144c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
23154c7070dbSScott Long 	iflib_fl_t fl;
23164c7070dbSScott Long 	iflib_txq_t txq;
23174c7070dbSScott Long 	iflib_rxq_t rxq;
2318ab2e3f79SStephen Hurd 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
23194c7070dbSScott Long 
23204c7070dbSScott Long 
23214c7070dbSScott Long 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
23224c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
23234c7070dbSScott Long 
23241248952aSSean Bruno 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
23251248952aSSean Bruno 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
23264c7070dbSScott Long 	/* Set hardware offload abilities */
23274c7070dbSScott Long 	if_clearhwassist(ifp);
23284c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
23291248952aSSean Bruno 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
23304c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
23311248952aSSean Bruno 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
23324c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TSO4)
23334c7070dbSScott Long 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
23344c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TSO6)
23354c7070dbSScott Long 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
23364c7070dbSScott Long 
23374c7070dbSScott Long 	for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
23384c7070dbSScott Long 		CALLOUT_LOCK(txq);
23394c7070dbSScott Long 		callout_stop(&txq->ift_timer);
23404c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
23414c7070dbSScott Long 		iflib_netmap_txq_init(ctx, txq);
23424c7070dbSScott Long 	}
23431b9d9394SEric Joyner 
23441b9d9394SEric Joyner 	/*
23451b9d9394SEric Joyner 	 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so
23461b9d9394SEric Joyner 	 * that drivers can use the value when setting up the hardware receive
23471b9d9394SEric Joyner 	 * buffers.
23481b9d9394SEric Joyner 	 */
23491b9d9394SEric Joyner 	iflib_calc_rx_mbuf_sz(ctx);
23501b9d9394SEric Joyner 
235123ac9029SStephen Hurd #ifdef INVARIANTS
235223ac9029SStephen Hurd 	i = if_getdrvflags(ifp);
235323ac9029SStephen Hurd #endif
23544c7070dbSScott Long 	IFDI_INIT(ctx);
235523ac9029SStephen Hurd 	MPASS(if_getdrvflags(ifp) == i);
23564c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
235795246abbSSean Bruno 		/* XXX this should really be done on a per-queue basis */
2358d0d0ad0aSStephen Hurd 		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
2359d0d0ad0aSStephen Hurd 			MPASS(rxq->ifr_id == i);
2360d0d0ad0aSStephen Hurd 			iflib_netmap_rxq_init(ctx, rxq);
236195246abbSSean Bruno 			continue;
2362d0d0ad0aSStephen Hurd 		}
23634c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
23644c7070dbSScott Long 			if (iflib_fl_setup(fl)) {
23654c7070dbSScott Long 				device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n");
23664c7070dbSScott Long 				goto done;
23674c7070dbSScott Long 			}
23684c7070dbSScott Long 		}
23694c7070dbSScott Long 	}
23704c7070dbSScott Long done:
23714c7070dbSScott Long 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
23724c7070dbSScott Long 	IFDI_INTR_ENABLE(ctx);
23734c7070dbSScott Long 	txq = ctx->ifc_txqs;
23744c7070dbSScott Long 	for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
2375ab2e3f79SStephen Hurd 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
2376ab2e3f79SStephen Hurd 			txq->ift_timer.c_cpu);
23774c7070dbSScott Long }
23784c7070dbSScott Long 
23794c7070dbSScott Long static int
23804c7070dbSScott Long iflib_media_change(if_t ifp)
23814c7070dbSScott Long {
23824c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
23834c7070dbSScott Long 	int err;
23844c7070dbSScott Long 
23854c7070dbSScott Long 	CTX_LOCK(ctx);
23864c7070dbSScott Long 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
23874c7070dbSScott Long 		iflib_init_locked(ctx);
23884c7070dbSScott Long 	CTX_UNLOCK(ctx);
23894c7070dbSScott Long 	return (err);
23904c7070dbSScott Long }
23914c7070dbSScott Long 
23924c7070dbSScott Long static void
23934c7070dbSScott Long iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
23944c7070dbSScott Long {
23954c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
23964c7070dbSScott Long 
23974c7070dbSScott Long 	CTX_LOCK(ctx);
2398ab2e3f79SStephen Hurd 	IFDI_UPDATE_ADMIN_STATUS(ctx);
23994c7070dbSScott Long 	IFDI_MEDIA_STATUS(ctx, ifmr);
24004c7070dbSScott Long 	CTX_UNLOCK(ctx);
24014c7070dbSScott Long }
24024c7070dbSScott Long 
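/*
 * Stop the interface: mark it inactive, disable interrupts, call the
 * driver's IFDI_STOP, then drain and reset every TX queue (freeing any
 * enqueued mbufs and zeroing the descriptor rings) and release all RX
 * free list buffers.
 */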
240309f6ff4fSMatt Macy void
24044c7070dbSScott Long iflib_stop(if_ctx_t ctx)
24054c7070dbSScott Long {
24064c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
24074c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
24084c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
24094d261ce2SStephen Hurd 	if_shared_ctx_t sctx = ctx->ifc_sctx;
24104c7070dbSScott Long 	iflib_dma_info_t di;
24114c7070dbSScott Long 	iflib_fl_t fl;
24124c7070dbSScott Long 	int i, j;
24134c7070dbSScott Long 
24144c7070dbSScott Long 	/* Tell the stack that the interface is no longer active */
24154c7070dbSScott Long 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
24164c7070dbSScott Long 
24174c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
2418ab2e3f79SStephen Hurd 	DELAY(1000);
2419da69b8f9SSean Bruno 	IFDI_STOP(ctx);
2420ab2e3f79SStephen Hurd 	DELAY(1000);
24214c7070dbSScott Long 
2422da69b8f9SSean Bruno 	iflib_debug_reset();
24234c7070dbSScott Long 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
24244c7070dbSScott Long 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
24254c7070dbSScott Long 		/* make sure all transmitters have completed before proceeding XXX */
24264c7070dbSScott Long 
2427226fb85dSStephen Hurd 		CALLOUT_LOCK(txq);
2428226fb85dSStephen Hurd 		callout_stop(&txq->ift_timer);
2429226fb85dSStephen Hurd 		CALLOUT_UNLOCK(txq);
2430226fb85dSStephen Hurd 
24314c7070dbSScott Long 		/* clean any enqueued buffers */
2432da69b8f9SSean Bruno 		iflib_ifmp_purge(txq);
24334c7070dbSScott Long 		/* Free any existing tx buffers. */
243423ac9029SStephen Hurd 		for (j = 0; j < txq->ift_size; j++) {
24354c7070dbSScott Long 			iflib_txsd_free(ctx, txq, j);
24364c7070dbSScott Long 		}
2437ab2e3f79SStephen Hurd 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2438ab2e3f79SStephen Hurd 		txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
24394c7070dbSScott Long 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
24404c7070dbSScott Long 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2441ab2e3f79SStephen Hurd 		txq->ift_pullups = 0;
244295246abbSSean Bruno 		ifmp_ring_reset_stats(txq->ift_br);
24434d261ce2SStephen Hurd 		for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
24444c7070dbSScott Long 			bzero((void *)di->idi_vaddr, di->idi_size);
24454c7070dbSScott Long 	}
24464c7070dbSScott Long 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
24474c7070dbSScott Long 		/* make sure all receive processing has completed before proceeding XXX */
24484c7070dbSScott Long 
24490efb1a46SStephen Hurd 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
24504d261ce2SStephen Hurd 		for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
24514c7070dbSScott Long 			bzero((void *)di->idi_vaddr, di->idi_size);
24524c7070dbSScott Long 		/* also resets the free lists pidx/cidx */
24534c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
24544c7070dbSScott Long 			iflib_fl_bufs_free(fl);
24554c7070dbSScott Long 	}
24564c7070dbSScott Long }
24574c7070dbSScott Long 
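/*
 * Return a pointer to the next cache line past the RX descriptor at
 * cidx, wrapping back to the start of the ring, for use as a prefetch
 * target.
 */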
245895246abbSSean Bruno static inline caddr_t
245995246abbSSean Bruno calc_next_rxd(iflib_fl_t fl, int cidx)
246095246abbSSean Bruno {
246195246abbSSean Bruno 	qidx_t size;
246295246abbSSean Bruno 	int nrxd;
246395246abbSSean Bruno 	caddr_t start, end, cur, next;
246495246abbSSean Bruno 
246595246abbSSean Bruno 	nrxd = fl->ifl_size;
246695246abbSSean Bruno 	size = fl->ifl_rxd_size;
246795246abbSSean Bruno 	start = fl->ifl_ifdi->idi_vaddr;
246895246abbSSean Bruno 
246995246abbSSean Bruno 	if (__predict_false(size == 0))
247095246abbSSean Bruno 		return (start);
247195246abbSSean Bruno 	cur = start + size*cidx;
247295246abbSSean Bruno 	end = start + size*nrxd;
247395246abbSSean Bruno 	next = CACHE_PTR_NEXT(cur);
247495246abbSSean Bruno 	return (next < end ? next : start);
247595246abbSSean Bruno }
247695246abbSSean Bruno 
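/*
 * Warm the cache ahead of RX processing: prefetch the mbuf and cluster
 * pointers for the next several free list slots as well as the next
 * hardware descriptor cache line.
 */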
2477e035717eSSean Bruno static inline void
2478e035717eSSean Bruno prefetch_pkts(iflib_fl_t fl, int cidx)
2479e035717eSSean Bruno {
2480e035717eSSean Bruno 	int nextptr;
2481e035717eSSean Bruno 	int nrxd = fl->ifl_size;
248295246abbSSean Bruno 	caddr_t next_rxd;
248395246abbSSean Bruno 
2484e035717eSSean Bruno 
2485e035717eSSean Bruno 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2486e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2487e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
248895246abbSSean Bruno 	next_rxd = calc_next_rxd(fl, cidx);
248995246abbSSean Bruno 	prefetch(next_rxd);
2490e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2491e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2492e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2493e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2494e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2495e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2496e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2497e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
2498e035717eSSean Bruno }
2499e035717eSSean Bruno 
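/*
 * Translate one received fragment into its software state: look up the
 * mbuf and cluster for the descriptor, sync its DMA map, run any
 * pfil(9) hooks on the payload (which may consume, drop, or copy the
 * packet), optionally unload the map, and advance the free list
 * consumer index.  Returns the mbuf to use, or NULL if the filter
 * consumed or dropped the packet.
 */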
25006d49b41eSAndrew Gallatin static struct mbuf *
25016d49b41eSAndrew Gallatin rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd,
25026d49b41eSAndrew Gallatin     int *pf_rv, if_rxd_info_t ri)
25034c7070dbSScott Long {
2504e035717eSSean Bruno 	bus_dmamap_t map;
25054c7070dbSScott Long 	iflib_fl_t fl;
25066d49b41eSAndrew Gallatin 	caddr_t payload;
25076d49b41eSAndrew Gallatin 	struct mbuf *m;
25086d49b41eSAndrew Gallatin 	int flid, cidx, len, next;
25094c7070dbSScott Long 
251095246abbSSean Bruno 	map = NULL;
25114c7070dbSScott Long 	flid = irf->irf_flid;
25124c7070dbSScott Long 	cidx = irf->irf_idx;
25134c7070dbSScott Long 	fl = &rxq->ifr_fl[flid];
251495246abbSSean Bruno 	sd->ifsd_fl = fl;
251595246abbSSean Bruno 	sd->ifsd_cidx = cidx;
25166d49b41eSAndrew Gallatin 	m = fl->ifl_sds.ifsd_m[cidx];
251795246abbSSean Bruno 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
25184c7070dbSScott Long 	fl->ifl_credits--;
25194c7070dbSScott Long #if MEMORY_LOGGING
25204c7070dbSScott Long 	fl->ifl_m_dequeued++;
25214c7070dbSScott Long #endif
252295246abbSSean Bruno 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2523e035717eSSean Bruno 		prefetch_pkts(fl, cidx);
2524e035717eSSean Bruno 	next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2525e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_map[next]);
2526e035717eSSean Bruno 	map = fl->ifl_sds.ifsd_map[cidx];
2527e035717eSSean Bruno 	next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
25284c7070dbSScott Long 
25294c7070dbSScott Long 	/* not valid assert if bxe really does SGE from non-contiguous elements */
25304c7070dbSScott Long 	MPASS(fl->ifl_cidx == cidx);
2531bfce461eSMarius Strobl 	bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
25326d49b41eSAndrew Gallatin 
25336d49b41eSAndrew Gallatin 	if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL) {
25346d49b41eSAndrew Gallatin 		payload  = *sd->ifsd_cl;
25356d49b41eSAndrew Gallatin 		payload +=  ri->iri_pad;
25366d49b41eSAndrew Gallatin 		len = ri->iri_len - ri->iri_pad;
25376d49b41eSAndrew Gallatin 		*pf_rv = pfil_run_hooks(rxq->pfil, payload, ri->iri_ifp,
25386d49b41eSAndrew Gallatin 		    len | PFIL_MEMPTR | PFIL_IN, NULL);
25396d49b41eSAndrew Gallatin 		switch (*pf_rv) {
25406d49b41eSAndrew Gallatin 		case PFIL_DROPPED:
25416d49b41eSAndrew Gallatin 		case PFIL_CONSUMED:
25426d49b41eSAndrew Gallatin 			/*
25436d49b41eSAndrew Gallatin 			 * The filter ate it.  Everything is recycled.
25446d49b41eSAndrew Gallatin 			 */
25456d49b41eSAndrew Gallatin 			m = NULL;
25466d49b41eSAndrew Gallatin 			unload = 0;
25476d49b41eSAndrew Gallatin 			break;
25486d49b41eSAndrew Gallatin 		case PFIL_REALLOCED:
25496d49b41eSAndrew Gallatin 			/*
25506d49b41eSAndrew Gallatin 			 * The filter copied it.  Everything is recycled.
25516d49b41eSAndrew Gallatin 			 */
25526d49b41eSAndrew Gallatin 			m = pfil_mem2mbuf(payload);
25536d49b41eSAndrew Gallatin 			unload = 0;
25546d49b41eSAndrew Gallatin 			break;
25556d49b41eSAndrew Gallatin 		case PFIL_PASS:
25566d49b41eSAndrew Gallatin 			/*
25576d49b41eSAndrew Gallatin 			 * Filter said it was OK, so receive like
25586d49b41eSAndrew Gallatin 			 * normal
25596d49b41eSAndrew Gallatin 			 */
25606d49b41eSAndrew Gallatin 			fl->ifl_sds.ifsd_m[cidx] = NULL;
25616d49b41eSAndrew Gallatin 			break;
25626d49b41eSAndrew Gallatin 		default:
25636d49b41eSAndrew Gallatin 			MPASS(0);
25646d49b41eSAndrew Gallatin 		}
25656d49b41eSAndrew Gallatin 	} else {
25666d49b41eSAndrew Gallatin 		fl->ifl_sds.ifsd_m[cidx] = NULL;
25676d49b41eSAndrew Gallatin 		*pf_rv = PFIL_PASS;
25686d49b41eSAndrew Gallatin 	}
25696d49b41eSAndrew Gallatin 
25704c7070dbSScott Long 	if (unload)
2571bfce461eSMarius Strobl 		bus_dmamap_unload(fl->ifl_buf_tag, map);
257295246abbSSean Bruno 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
257395246abbSSean Bruno 	if (__predict_false(fl->ifl_cidx == 0))
25744c7070dbSScott Long 		fl->ifl_gen = 0;
257587890dbaSSean Bruno 	bit_clear(fl->ifl_rx_bitmap, cidx);
25766d49b41eSAndrew Gallatin 	return (m);
25774c7070dbSScott Long }
25784c7070dbSScott Long 
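/*
 * Build an mbuf chain from the fragment list of a multi-descriptor
 * packet, attaching each cluster to a freshly initialized mbuf and
 * skipping zero-length fragments or fragments belonging to a packet
 * the pfil filter already consumed or dropped.
 */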
25794c7070dbSScott Long static struct mbuf *
25806d49b41eSAndrew Gallatin assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv)
25814c7070dbSScott Long {
258295246abbSSean Bruno 	struct mbuf *m, *mh, *mt;
258395246abbSSean Bruno 	caddr_t cl;
25846d49b41eSAndrew Gallatin 	int  *pf_rv_ptr, flags, i, padlen;
25856d49b41eSAndrew Gallatin 	bool consumed;
25864c7070dbSScott Long 
25874c7070dbSScott Long 	i = 0;
258823ac9029SStephen Hurd 	mh = NULL;
25896d49b41eSAndrew Gallatin 	consumed = false;
25906d49b41eSAndrew Gallatin 	*pf_rv = PFIL_PASS;
25916d49b41eSAndrew Gallatin 	pf_rv_ptr = pf_rv;
25924c7070dbSScott Long 	do {
25936d49b41eSAndrew Gallatin 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd,
25946d49b41eSAndrew Gallatin 		    pf_rv_ptr, ri);
25954c7070dbSScott Long 
259695246abbSSean Bruno 		MPASS(*sd->ifsd_cl != NULL);
259723ac9029SStephen Hurd 
25986d49b41eSAndrew Gallatin 		/*
25996d49b41eSAndrew Gallatin 		 * Exclude zero-length frags & frags from
26006d49b41eSAndrew Gallatin 		 * packets the filter has consumed or dropped
26016d49b41eSAndrew Gallatin 		 */
26026d49b41eSAndrew Gallatin 		if (ri->iri_frags[i].irf_len == 0 || consumed ||
26036d49b41eSAndrew Gallatin 		    *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) {
26046d49b41eSAndrew Gallatin 			if (mh == NULL) {
26056d49b41eSAndrew Gallatin 				/* everything saved here */
26066d49b41eSAndrew Gallatin 				consumed = true;
26076d49b41eSAndrew Gallatin 				pf_rv_ptr = NULL;
260823ac9029SStephen Hurd 				continue;
260923ac9029SStephen Hurd 			}
26106d49b41eSAndrew Gallatin 			/* XXX we can save the cluster here, but not the mbuf */
26116d49b41eSAndrew Gallatin 			m_init(m, M_NOWAIT, MT_DATA, 0);
26126d49b41eSAndrew Gallatin 			m_free(m);
26136d49b41eSAndrew Gallatin 			continue;
26146d49b41eSAndrew Gallatin 		}
261523ac9029SStephen Hurd 		if (mh == NULL) {
26164c7070dbSScott Long 			flags = M_PKTHDR|M_EXT;
26174c7070dbSScott Long 			mh = mt = m;
26184c7070dbSScott Long 			padlen = ri->iri_pad;
26194c7070dbSScott Long 		} else {
26204c7070dbSScott Long 			flags = M_EXT;
26214c7070dbSScott Long 			mt->m_next = m;
26224c7070dbSScott Long 			mt = m;
26234c7070dbSScott Long 			/* assuming padding is only on the first fragment */
26244c7070dbSScott Long 			padlen = 0;
26254c7070dbSScott Long 		}
262695246abbSSean Bruno 		cl = *sd->ifsd_cl;
262795246abbSSean Bruno 		*sd->ifsd_cl = NULL;
26284c7070dbSScott Long 
26294c7070dbSScott Long 		/* Can these two be made one ? */
26304c7070dbSScott Long 		m_init(m, M_NOWAIT, MT_DATA, flags);
263195246abbSSean Bruno 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
26324c7070dbSScott Long 		/*
26334c7070dbSScott Long 		 * These must follow m_init and m_cljset
26344c7070dbSScott Long 		 */
26354c7070dbSScott Long 		m->m_data += padlen;
26364c7070dbSScott Long 		ri->iri_len -= padlen;
263723ac9029SStephen Hurd 		m->m_len = ri->iri_frags[i].irf_len;
26384c7070dbSScott Long 	} while (++i < ri->iri_nfrags);
26394c7070dbSScott Long 
26404c7070dbSScott Long 	return (mh);
26414c7070dbSScott Long }
26424c7070dbSScott Long 
26434c7070dbSScott Long /*
26444c7070dbSScott Long  * Process one software descriptor
26454c7070dbSScott Long  */
26464c7070dbSScott Long static struct mbuf *
26474c7070dbSScott Long iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
26484c7070dbSScott Long {
264995246abbSSean Bruno 	struct if_rxsd sd;
26504c7070dbSScott Long 	struct mbuf *m;
26516d49b41eSAndrew Gallatin 	int pf_rv;
26524c7070dbSScott Long 
26534c7070dbSScott Long 	/* should I merge this back in now that the two paths are basically duplicated? */
265423ac9029SStephen Hurd 	if (ri->iri_nfrags == 1 &&
265518628b74SMark Johnston 	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
26566d49b41eSAndrew Gallatin 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd,
26576d49b41eSAndrew Gallatin 		    &pf_rv, ri);
26586d49b41eSAndrew Gallatin 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
26596d49b41eSAndrew Gallatin 			return (m);
26606d49b41eSAndrew Gallatin 		if (pf_rv == PFIL_PASS) {
26614c7070dbSScott Long 			m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
266295246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
266395246abbSSean Bruno 			if (!IP_ALIGNED(m))
266495246abbSSean Bruno 				m->m_data += 2;
266595246abbSSean Bruno #endif
266695246abbSSean Bruno 			memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
266723ac9029SStephen Hurd 			m->m_len = ri->iri_frags[0].irf_len;
26686d49b41eSAndrew Gallatin 		}
26694c7070dbSScott Long 	} else {
26706d49b41eSAndrew Gallatin 		m = assemble_segments(rxq, ri, &sd, &pf_rv);
26716d49b41eSAndrew Gallatin 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
26726d49b41eSAndrew Gallatin 			return (m);
26734c7070dbSScott Long 	}
26744c7070dbSScott Long 	m->m_pkthdr.len = ri->iri_len;
26754c7070dbSScott Long 	m->m_pkthdr.rcvif = ri->iri_ifp;
26764c7070dbSScott Long 	m->m_flags |= ri->iri_flags;
26774c7070dbSScott Long 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
26784c7070dbSScott Long 	m->m_pkthdr.flowid = ri->iri_flowid;
26794c7070dbSScott Long 	M_HASHTYPE_SET(m, ri->iri_rsstype);
26804c7070dbSScott Long 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
26814c7070dbSScott Long 	m->m_pkthdr.csum_data = ri->iri_csum_data;
26824c7070dbSScott Long 	return (m);
26834c7070dbSScott Long }
26844c7070dbSScott Long 
268535e4e998SStephen Hurd #if defined(INET6) || defined(INET)
2686fe1bcadaSStephen Hurd static void
2687fe1bcadaSStephen Hurd iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
2688fe1bcadaSStephen Hurd {
2689fe1bcadaSStephen Hurd 	CURVNET_SET(lc->ifp->if_vnet);
2690fe1bcadaSStephen Hurd #if defined(INET6)
2691fe1bcadaSStephen Hurd 	*v6 = VNET(ip6_forwarding);
2692fe1bcadaSStephen Hurd #endif
2693fe1bcadaSStephen Hurd #if defined(INET)
2694fe1bcadaSStephen Hurd 	*v4 = VNET(ipforwarding);
2695fe1bcadaSStephen Hurd #endif
2696fe1bcadaSStephen Hurd 	CURVNET_RESTORE();
2697fe1bcadaSStephen Hurd }
2698fe1bcadaSStephen Hurd 
269935e4e998SStephen Hurd /*
270035e4e998SStephen Hurd  * Returns true if it's possible this packet could be LROed.
270135e4e998SStephen Hurd  * If it returns false, it is guaranteed that tcp_lro_rx()
270235e4e998SStephen Hurd  * would not return zero.
270335e4e998SStephen Hurd  */
270435e4e998SStephen Hurd static bool
2705fe1bcadaSStephen Hurd iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
270635e4e998SStephen Hurd {
270735e4e998SStephen Hurd 	struct ether_header *eh;
270835e4e998SStephen Hurd 	uint16_t eh_type;
270935e4e998SStephen Hurd 
271035e4e998SStephen Hurd 	eh = mtod(m, struct ether_header *);
271135e4e998SStephen Hurd 	eh_type = ntohs(eh->ether_type);
271235e4e998SStephen Hurd 	switch (eh_type) {
2713abec4724SSean Bruno #if defined(INET6)
271435e4e998SStephen Hurd 		case ETHERTYPE_IPV6:
2715fe1bcadaSStephen Hurd 			return !v6_forwarding;
2716abec4724SSean Bruno #endif
2717abec4724SSean Bruno #if defined (INET)
271835e4e998SStephen Hurd 		case ETHERTYPE_IP:
2719fe1bcadaSStephen Hurd 			return !v4_forwarding;
2720abec4724SSean Bruno #endif
272135e4e998SStephen Hurd 	}
272235e4e998SStephen Hurd 
272335e4e998SStephen Hurd 	return false;
272435e4e998SStephen Hurd }
2725fe1bcadaSStephen Hurd #else
2726fe1bcadaSStephen Hurd static void
2727fe1bcadaSStephen Hurd iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
2728fe1bcadaSStephen Hurd {
2729fe1bcadaSStephen Hurd }
273035e4e998SStephen Hurd #endif
273135e4e998SStephen Hurd 
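/*
 * Main RX processing loop for a queue: pull up to 'budget' descriptors
 * from the hardware via isc_rxd_pkt_get, turn them into mbufs, refill
 * the free lists, and hand completed packets to LRO or if_input.
 * Returns true if more descriptors remain to be processed; on a driver
 * error it requests an interface reset and returns false.
 */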
27324c7070dbSScott Long static bool
273395246abbSSean Bruno iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
27344c7070dbSScott Long {
27354c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
27364c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
273723ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
27384c7070dbSScott Long 	int avail, i;
273995246abbSSean Bruno 	qidx_t *cidxp;
27404c7070dbSScott Long 	struct if_rxd_info ri;
27414c7070dbSScott Long 	int err, budget_left, rx_bytes, rx_pkts;
27424c7070dbSScott Long 	iflib_fl_t fl;
27434c7070dbSScott Long 	struct ifnet *ifp;
27444c7070dbSScott Long 	int lro_enabled;
2745f6cb0deaSMatt Macy 	bool v4_forwarding, v6_forwarding, lro_possible;
274695246abbSSean Bruno 
27474c7070dbSScott Long 	/*
27484c7070dbSScott Long 	 * XXX early demux data packets so that if_input processing only handles
27494c7070dbSScott Long 	 * acks in interrupt context
27504c7070dbSScott Long 	 */
275120f63282SStephen Hurd 	struct mbuf *m, *mh, *mt, *mf;
27524c7070dbSScott Long 
2753f6cb0deaSMatt Macy 	lro_possible = v4_forwarding = v6_forwarding = false;
275495246abbSSean Bruno 	ifp = ctx->ifc_ifp;
27554c7070dbSScott Long 	mh = mt = NULL;
27564c7070dbSScott Long 	MPASS(budget > 0);
27574c7070dbSScott Long 	rx_pkts	= rx_bytes = 0;
275823ac9029SStephen Hurd 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
27594c7070dbSScott Long 		cidxp = &rxq->ifr_cq_cidx;
27604c7070dbSScott Long 	else
27614c7070dbSScott Long 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
276223ac9029SStephen Hurd 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
27634c7070dbSScott Long 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
27644c7070dbSScott Long 			__iflib_fl_refill_lt(ctx, fl, budget + 8);
27654c7070dbSScott Long 		DBG_COUNTER_INC(rx_unavail);
27664c7070dbSScott Long 		return (false);
27674c7070dbSScott Long 	}
27684c7070dbSScott Long 
27696d49b41eSAndrew Gallatin 	/* pfil needs the vnet to be set */
27706d49b41eSAndrew Gallatin 	CURVNET_SET_QUIET(ifp->if_vnet);
27718b8d9093SMarius Strobl 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
27724c7070dbSScott Long 		if (__predict_false(!CTX_ACTIVE(ctx))) {
27734c7070dbSScott Long 			DBG_COUNTER_INC(rx_ctx_inactive);
27744c7070dbSScott Long 			break;
27754c7070dbSScott Long 		}
27764c7070dbSScott Long 		/*
27774c7070dbSScott Long 		 * Reset client set fields to their default values
27784c7070dbSScott Long 		 */
277995246abbSSean Bruno 		rxd_info_zero(&ri);
27804c7070dbSScott Long 		ri.iri_qsidx = rxq->ifr_id;
27814c7070dbSScott Long 		ri.iri_cidx = *cidxp;
278295246abbSSean Bruno 		ri.iri_ifp = ifp;
27834c7070dbSScott Long 		ri.iri_frags = rxq->ifr_frags;
27844c7070dbSScott Long 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
27854c7070dbSScott Long 
278695246abbSSean Bruno 		if (err)
278795246abbSSean Bruno 			goto err;
27886d49b41eSAndrew Gallatin 		rx_pkts += 1;
27896d49b41eSAndrew Gallatin 		rx_bytes += ri.iri_len;
279023ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
279123ac9029SStephen Hurd 			*cidxp = ri.iri_cidx;
279223ac9029SStephen Hurd 			/* Update our consumer index */
279395246abbSSean Bruno 			/* XXX NB: shurd - check if this is still safe */
279423ac9029SStephen Hurd 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
279523ac9029SStephen Hurd 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
27964c7070dbSScott Long 				rxq->ifr_cq_gen = 0;
27974c7070dbSScott Long 			}
27984c7070dbSScott Long 			/* was this only a completion queue message? */
27994c7070dbSScott Long 			if (__predict_false(ri.iri_nfrags == 0))
28004c7070dbSScott Long 				continue;
28014c7070dbSScott Long 		}
28024c7070dbSScott Long 		MPASS(ri.iri_nfrags != 0);
28034c7070dbSScott Long 		MPASS(ri.iri_len != 0);
28044c7070dbSScott Long 
28054c7070dbSScott Long 		/* will advance the cidx on the corresponding free lists */
28064c7070dbSScott Long 		m = iflib_rxd_pkt_get(rxq, &ri);
28078b8d9093SMarius Strobl 		avail--;
28088b8d9093SMarius Strobl 		budget_left--;
28094c7070dbSScott Long 		if (avail == 0 && budget_left)
281023ac9029SStephen Hurd 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
28114c7070dbSScott Long 
28126d49b41eSAndrew Gallatin 		if (__predict_false(m == NULL))
28134c7070dbSScott Long 			continue;
28146d49b41eSAndrew Gallatin 
28154c7070dbSScott Long 		/* imm_pkt: -- cxgb */
28164c7070dbSScott Long 		if (mh == NULL)
28174c7070dbSScott Long 			mh = mt = m;
28184c7070dbSScott Long 		else {
28194c7070dbSScott Long 			mt->m_nextpkt = m;
28204c7070dbSScott Long 			mt = m;
28214c7070dbSScott Long 		}
28224c7070dbSScott Long 	}
28236d49b41eSAndrew Gallatin 	CURVNET_RESTORE();
28244c7070dbSScott Long 	/* make sure that we can refill faster than drain */
28254c7070dbSScott Long 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2826ab2e3f79SStephen Hurd 		__iflib_fl_refill_lt(ctx, fl, budget + 8);
28274c7070dbSScott Long 
28284c7070dbSScott Long 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2829fe1bcadaSStephen Hurd 	if (lro_enabled)
2830fe1bcadaSStephen Hurd 		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
283120f63282SStephen Hurd 	mt = mf = NULL;
28324c7070dbSScott Long 	while (mh != NULL) {
28334c7070dbSScott Long 		m = mh;
28344c7070dbSScott Long 		mh = mh->m_nextpkt;
28354c7070dbSScott Long 		m->m_nextpkt = NULL;
283695246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
283795246abbSSean Bruno 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
283895246abbSSean Bruno 			continue;
283995246abbSSean Bruno #endif
28404c7070dbSScott Long 		rx_bytes += m->m_pkthdr.len;
28414c7070dbSScott Long 		rx_pkts++;
2842aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
284335e4e998SStephen Hurd 		if (lro_enabled) {
284435e4e998SStephen Hurd 			if (!lro_possible) {
2845fe1bcadaSStephen Hurd 				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
284635e4e998SStephen Hurd 				if (lro_possible && mf != NULL) {
284735e4e998SStephen Hurd 					ifp->if_input(ifp, mf);
284835e4e998SStephen Hurd 					DBG_COUNTER_INC(rx_if_input);
284935e4e998SStephen Hurd 					mt = mf = NULL;
285035e4e998SStephen Hurd 				}
285135e4e998SStephen Hurd 			}
285225ac1dd5SStephen Hurd 			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
285325ac1dd5SStephen Hurd 			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
285435e4e998SStephen Hurd 				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
28554c7070dbSScott Long 					continue;
285620f63282SStephen Hurd 			}
285725ac1dd5SStephen Hurd 		}
2858aaeb188aSBjoern A. Zeeb #endif
285935e4e998SStephen Hurd 		if (lro_possible) {
286035e4e998SStephen Hurd 			ifp->if_input(ifp, m);
286135e4e998SStephen Hurd 			DBG_COUNTER_INC(rx_if_input);
286235e4e998SStephen Hurd 			continue;
286335e4e998SStephen Hurd 		}
286435e4e998SStephen Hurd 
286535e4e998SStephen Hurd 		if (mf == NULL)
286635e4e998SStephen Hurd 			mf = m;
286720f63282SStephen Hurd 		if (mt != NULL)
286820f63282SStephen Hurd 			mt->m_nextpkt = m;
286920f63282SStephen Hurd 		mt = m;
287020f63282SStephen Hurd 	}
287120f63282SStephen Hurd 	if (mf != NULL) {
287220f63282SStephen Hurd 		ifp->if_input(ifp, mf);
28734c7070dbSScott Long 		DBG_COUNTER_INC(rx_if_input);
28744c7070dbSScott Long 	}
287523ac9029SStephen Hurd 
28764c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
28774c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
28784c7070dbSScott Long 
28794c7070dbSScott Long 	/*
28804c7070dbSScott Long 	 * Flush any outstanding LRO work
28814c7070dbSScott Long 	 */
2882aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
288323ac9029SStephen Hurd 	tcp_lro_flush_all(&rxq->ifr_lc);
2884aaeb188aSBjoern A. Zeeb #endif
2885ab2e3f79SStephen Hurd 	if (avail)
2886ab2e3f79SStephen Hurd 		return true;
2887ab2e3f79SStephen Hurd 	return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
288895246abbSSean Bruno err:
28897b610b60SSean Bruno 	STATE_LOCK(ctx);
2890ab2e3f79SStephen Hurd 	ctx->ifc_flags |= IFC_DO_RESET;
2891940f62d6SEric Joyner 	iflib_admin_intr_deferred(ctx);
289246fa0c25SEric Joyner 	STATE_UNLOCK(ctx);
289395246abbSSean Bruno 	return (false);
289495246abbSSean Bruno }
289595246abbSSean Bruno 
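/*
 * TX doorbell/completion-update deferral heuristics: the busier the
 * ring, the more descriptor updates we allow to accumulate before
 * notifying the hardware.  As a purely illustrative example, with a
 * 1024-descriptor ring and an update frequency of 16, TXD_NOTIFY_COUNT
 * is 63 and minthresh is 128, so txq_max_db_deferred() permits 63
 * deferred updates above 512 descriptors in use, 31 above 256, 7 above
 * 128, and 0 otherwise.
 */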
289695246abbSSean Bruno #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
289795246abbSSean Bruno static inline qidx_t
289895246abbSSean Bruno txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
289995246abbSSean Bruno {
290095246abbSSean Bruno 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
290195246abbSSean Bruno 	qidx_t minthresh = txq->ift_size / 8;
290295246abbSSean Bruno 	if (in_use > 4*minthresh)
290395246abbSSean Bruno 		return (notify_count);
290495246abbSSean Bruno 	if (in_use > 2*minthresh)
290595246abbSSean Bruno 		return (notify_count >> 1);
290695246abbSSean Bruno 	if (in_use > minthresh)
290795246abbSSean Bruno 		return (notify_count >> 3);
290895246abbSSean Bruno 	return (0);
290995246abbSSean Bruno }
291095246abbSSean Bruno 
291195246abbSSean Bruno static inline qidx_t
291295246abbSSean Bruno txq_max_rs_deferred(iflib_txq_t txq)
291395246abbSSean Bruno {
291495246abbSSean Bruno 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
291595246abbSSean Bruno 	qidx_t minthresh = txq->ift_size / 8;
291695246abbSSean Bruno 	if (txq->ift_in_use > 4*minthresh)
291795246abbSSean Bruno 		return (notify_count);
291895246abbSSean Bruno 	if (txq->ift_in_use > 2*minthresh)
291995246abbSSean Bruno 		return (notify_count >> 1);
292095246abbSSean Bruno 	if (txq->ift_in_use > minthresh)
292195246abbSSean Bruno 		return (notify_count >> 2);
29222b2fc973SSean Bruno 	return (2);
29234c7070dbSScott Long }
29244c7070dbSScott Long 
29254c7070dbSScott Long #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
29264c7070dbSScott Long #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
292795246abbSSean Bruno 
292895246abbSSean Bruno #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
292995246abbSSean Bruno #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
293023ac9029SStephen Hurd #define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
29314c7070dbSScott Long 
293295246abbSSean Bruno /* forward compatibility for cxgb */
293395246abbSSean Bruno #define FIRST_QSET(ctx) 0
293495246abbSSean Bruno #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
293595246abbSSean Bruno #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
293695246abbSSean Bruno #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
293795246abbSSean Bruno #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
293895246abbSSean Bruno 
293995246abbSSean Bruno /* XXX we should be setting this to something other than zero */
294095246abbSSean Bruno #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
29417474544bSMarius Strobl #define	MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
29427474544bSMarius Strobl     (ctx)->ifc_softc_ctx.isc_tx_nsegments)
294395246abbSSean Bruno 
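/*
 * Ring the TX doorbell (isc_txd_flush) if 'ring' is requested or enough
 * descriptor updates have been deferred; the descriptor ring is synced
 * first.  Returns true if the doorbell was actually written.
 */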
294495246abbSSean Bruno static inline bool
294595246abbSSean Bruno iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
29464c7070dbSScott Long {
294795246abbSSean Bruno 	qidx_t dbval, max;
294895246abbSSean Bruno 	bool rang;
29494c7070dbSScott Long 
295095246abbSSean Bruno 	rang = false;
295195246abbSSean Bruno 	max = TXQ_MAX_DB_DEFERRED(txq, in_use);
295295246abbSSean Bruno 	if (ring || txq->ift_db_pending >= max) {
29534c7070dbSScott Long 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
295495dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
295595dcf343SMarius Strobl 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
29564c7070dbSScott Long 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
29574c7070dbSScott Long 		txq->ift_db_pending = txq->ift_npending = 0;
295895246abbSSean Bruno 		rang = true;
29594c7070dbSScott Long 	}
296095246abbSSean Bruno 	return (rang);
29614c7070dbSScott Long }
29624c7070dbSScott Long 
29634c7070dbSScott Long #ifdef PKT_DEBUG
29644c7070dbSScott Long static void
29654c7070dbSScott Long print_pkt(if_pkt_info_t pi)
29664c7070dbSScott Long {
29674c7070dbSScott Long 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
29684c7070dbSScott Long 	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
29694c7070dbSScott Long 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
29704c7070dbSScott Long 	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
29714c7070dbSScott Long 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
29724c7070dbSScott Long 	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
29734c7070dbSScott Long }
29744c7070dbSScott Long #endif
29754c7070dbSScott Long 
29764c7070dbSScott Long #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
2977a06424ddSEric Joyner #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
29784c7070dbSScott Long #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
2979a06424ddSEric Joyner #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
29804c7070dbSScott Long 
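/*
 * Parse the Ethernet, IP/IPv6, and (when offloads need it) TCP headers
 * of an outbound packet into the if_pkt_info consumed by the driver's
 * descriptor encapsulation routine, pulling up mbuf data and
 * duplicating non-writable chains as the context flags require.
 */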
29814c7070dbSScott Long static int
29824c7070dbSScott Long iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
29834c7070dbSScott Long {
2984ab2e3f79SStephen Hurd 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
29854c7070dbSScott Long 	struct ether_vlan_header *eh;
2986c9a49a4fSMarius Strobl 	struct mbuf *m;
29874c7070dbSScott Long 
29888b8d9093SMarius Strobl 	m = *mp;
2989ab2e3f79SStephen Hurd 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
2990ab2e3f79SStephen Hurd 	    M_WRITABLE(m) == 0) {
2991ab2e3f79SStephen Hurd 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
2992ab2e3f79SStephen Hurd 			return (ENOMEM);
2993ab2e3f79SStephen Hurd 		} else {
2994ab2e3f79SStephen Hurd 			m_freem(*mp);
299564e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_frees);
29968b8d9093SMarius Strobl 			*mp = m;
2997ab2e3f79SStephen Hurd 		}
2998ab2e3f79SStephen Hurd 	}
29991248952aSSean Bruno 
30004c7070dbSScott Long 	/*
30014c7070dbSScott Long 	 * Determine where frame payload starts.
30024c7070dbSScott Long 	 * Jump over vlan headers if already present,
30034c7070dbSScott Long 	 * helpful for QinQ too.
30044c7070dbSScott Long 	 */
30054c7070dbSScott Long 	if (__predict_false(m->m_len < sizeof(*eh))) {
30064c7070dbSScott Long 		txq->ift_pullups++;
30074c7070dbSScott Long 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
30084c7070dbSScott Long 			return (ENOMEM);
30094c7070dbSScott Long 	}
30104c7070dbSScott Long 	eh = mtod(m, struct ether_vlan_header *);
30114c7070dbSScott Long 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
30124c7070dbSScott Long 		pi->ipi_etype = ntohs(eh->evl_proto);
30134c7070dbSScott Long 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
30144c7070dbSScott Long 	} else {
30154c7070dbSScott Long 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
30164c7070dbSScott Long 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
30174c7070dbSScott Long 	}
30184c7070dbSScott Long 
30194c7070dbSScott Long 	switch (pi->ipi_etype) {
30204c7070dbSScott Long #ifdef INET
30214c7070dbSScott Long 	case ETHERTYPE_IP:
30224c7070dbSScott Long 	{
3023c9a49a4fSMarius Strobl 		struct mbuf *n;
30244c7070dbSScott Long 		struct ip *ip = NULL;
30254c7070dbSScott Long 		struct tcphdr *th = NULL;
30264c7070dbSScott Long 		int minthlen;
30274c7070dbSScott Long 
30284c7070dbSScott Long 		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
30294c7070dbSScott Long 		if (__predict_false(m->m_len < minthlen)) {
30304c7070dbSScott Long 			/*
30314c7070dbSScott Long 			 * If this code bloat is causing too much of a hit,
30324c7070dbSScott Long 			 * move it to a separate function and mark it noinline.
30334c7070dbSScott Long 			 */
30344c7070dbSScott Long 			if (m->m_len == pi->ipi_ehdrlen) {
30354c7070dbSScott Long 				n = m->m_next;
30364c7070dbSScott Long 				MPASS(n);
30374c7070dbSScott Long 				if (n->m_len >= sizeof(*ip))  {
30384c7070dbSScott Long 					ip = (struct ip *)n->m_data;
30394c7070dbSScott Long 					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
30404c7070dbSScott Long 						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
30414c7070dbSScott Long 				} else {
30424c7070dbSScott Long 					txq->ift_pullups++;
30434c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
30444c7070dbSScott Long 						return (ENOMEM);
30454c7070dbSScott Long 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
30464c7070dbSScott Long 				}
30474c7070dbSScott Long 			} else {
30484c7070dbSScott Long 				txq->ift_pullups++;
30494c7070dbSScott Long 				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
30504c7070dbSScott Long 					return (ENOMEM);
30514c7070dbSScott Long 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
30524c7070dbSScott Long 				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
30534c7070dbSScott Long 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
30544c7070dbSScott Long 			}
30554c7070dbSScott Long 		} else {
30564c7070dbSScott Long 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
30574c7070dbSScott Long 			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
30584c7070dbSScott Long 				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
30594c7070dbSScott Long 		}
30604c7070dbSScott Long 		pi->ipi_ip_hlen = ip->ip_hl << 2;
30614c7070dbSScott Long 		pi->ipi_ipproto = ip->ip_p;
30624c7070dbSScott Long 		pi->ipi_flags |= IPI_TX_IPV4;
30634c7070dbSScott Long 
3064a06424ddSEric Joyner 		/* TCP checksum offload may require TCP header length */
3065a06424ddSEric Joyner 		if (IS_TX_OFFLOAD4(pi)) {
3066a06424ddSEric Joyner 			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
30674c7070dbSScott Long 				if (__predict_false(th == NULL)) {
30684c7070dbSScott Long 					txq->ift_pullups++;
30694c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
30704c7070dbSScott Long 						return (ENOMEM);
30714c7070dbSScott Long 					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
30724c7070dbSScott Long 				}
30734c7070dbSScott Long 				pi->ipi_tcp_hflags = th->th_flags;
30744c7070dbSScott Long 				pi->ipi_tcp_hlen = th->th_off << 2;
30754c7070dbSScott Long 				pi->ipi_tcp_seq = th->th_seq;
30764c7070dbSScott Long 			}
3077a06424ddSEric Joyner 			if (IS_TSO4(pi)) {
30784c7070dbSScott Long 				if (__predict_false(ip->ip_p != IPPROTO_TCP))
30794c7070dbSScott Long 					return (ENXIO);
30808d4ceb9cSStephen Hurd 				/*
30818d4ceb9cSStephen Hurd 				 * TSO always requires hardware checksum offload.
30828d4ceb9cSStephen Hurd 				 */
30838d4ceb9cSStephen Hurd 				pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
30844c7070dbSScott Long 				th->th_sum = in_pseudo(ip->ip_src.s_addr,
30854c7070dbSScott Long 						       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
30864c7070dbSScott Long 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
30871248952aSSean Bruno 				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
30881248952aSSean Bruno 					ip->ip_sum = 0;
30891248952aSSean Bruno 					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
30901248952aSSean Bruno 				}
30914c7070dbSScott Long 			}
3092a06424ddSEric Joyner 		}
30938d4ceb9cSStephen Hurd 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
30948d4ceb9cSStephen Hurd 			ip->ip_sum = 0;
30958d4ceb9cSStephen Hurd 
30964c7070dbSScott Long 		break;
30974c7070dbSScott Long 	}
30984c7070dbSScott Long #endif
30994c7070dbSScott Long #ifdef INET6
31004c7070dbSScott Long 	case ETHERTYPE_IPV6:
31014c7070dbSScott Long 	{
31024c7070dbSScott Long 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
31034c7070dbSScott Long 		struct tcphdr *th;
31044c7070dbSScott Long 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
31054c7070dbSScott Long 
31064c7070dbSScott Long 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
310764e6fc13SStephen Hurd 			txq->ift_pullups++;
31084c7070dbSScott Long 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
31094c7070dbSScott Long 				return (ENOMEM);
31104c7070dbSScott Long 		}
31114c7070dbSScott Long 		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
31124c7070dbSScott Long 
31134c7070dbSScott Long 		/* XXX-BZ this will go badly in case of ext hdrs. */
31144c7070dbSScott Long 		pi->ipi_ipproto = ip6->ip6_nxt;
31154c7070dbSScott Long 		pi->ipi_flags |= IPI_TX_IPV6;
31164c7070dbSScott Long 
3117a06424ddSEric Joyner 		/* TCP checksum offload may require TCP header length */
3118a06424ddSEric Joyner 		if (IS_TX_OFFLOAD6(pi)) {
31194c7070dbSScott Long 			if (pi->ipi_ipproto == IPPROTO_TCP) {
31204c7070dbSScott Long 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3121a06424ddSEric Joyner 					txq->ift_pullups++;
31224c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
31234c7070dbSScott Long 						return (ENOMEM);
31244c7070dbSScott Long 				}
31254c7070dbSScott Long 				pi->ipi_tcp_hflags = th->th_flags;
31264c7070dbSScott Long 				pi->ipi_tcp_hlen = th->th_off << 2;
3127a06424ddSEric Joyner 				pi->ipi_tcp_seq = th->th_seq;
31284c7070dbSScott Long 			}
3129a06424ddSEric Joyner 			if (IS_TSO6(pi)) {
31304c7070dbSScott Long 				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
31314c7070dbSScott Long 					return (ENXIO);
31324c7070dbSScott Long 				/*
31338d4ceb9cSStephen Hurd 				 * TSO always requires hardware checksum offload.
31344c7070dbSScott Long 				 */
3135a06424ddSEric Joyner 				pi->ipi_csum_flags |= CSUM_IP6_TCP;
31364c7070dbSScott Long 				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
31374c7070dbSScott Long 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
31384c7070dbSScott Long 			}
3139a06424ddSEric Joyner 		}
31404c7070dbSScott Long 		break;
31414c7070dbSScott Long 	}
31424c7070dbSScott Long #endif
31434c7070dbSScott Long 	default:
31444c7070dbSScott Long 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
31454c7070dbSScott Long 		pi->ipi_ip_hlen = 0;
31464c7070dbSScott Long 		break;
31474c7070dbSScott Long 	}
31484c7070dbSScott Long 	*mp = m;
31491248952aSSean Bruno 
31504c7070dbSScott Long 	return (0);
31514c7070dbSScott Long }
31524c7070dbSScott Long 
31534c7070dbSScott Long /*
31544c7070dbSScott Long  * If dodgy hardware rejects the scatter-gather chain we've handed it,
315523ac9029SStephen Hurd  * we'll need to remove the mbuf chain from ifsg_m[] before we can add
315623ac9029SStephen Hurd  * the m_defrag'd mbufs.
31574c7070dbSScott Long  */
31584c7070dbSScott Long static __noinline struct mbuf *
315923ac9029SStephen Hurd iflib_remove_mbuf(iflib_txq_t txq)
31604c7070dbSScott Long {
3161fbec776dSAndrew Gallatin 	int ntxd, pidx;
3162fbec776dSAndrew Gallatin 	struct mbuf *m, **ifsd_m;
31634c7070dbSScott Long 
31644c7070dbSScott Long 	ifsd_m = txq->ift_sds.ifsd_m;
316523ac9029SStephen Hurd 	ntxd = txq->ift_size;
3166fbec776dSAndrew Gallatin 	pidx = txq->ift_pidx & (ntxd - 1);
3168fbec776dSAndrew Gallatin 	m = ifsd_m[pidx];
31694c7070dbSScott Long 	ifsd_m[pidx] = NULL;
3170bfce461eSMarius Strobl 	bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
31718a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL)
3172bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_tso_buf_tag,
31738a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_tso_map[pidx]);
31744c7070dbSScott Long #if MEMORY_LOGGING
31754c7070dbSScott Long 	txq->ift_dequeued++;
31764c7070dbSScott Long #endif
3177fbec776dSAndrew Gallatin 	return (m);
31784c7070dbSScott Long }
31794c7070dbSScott Long 
318095246abbSSean Bruno static inline caddr_t
318195246abbSSean Bruno calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
318295246abbSSean Bruno {
318395246abbSSean Bruno 	qidx_t size;
318495246abbSSean Bruno 	int ntxd;
318595246abbSSean Bruno 	caddr_t start, end, cur, next;
318695246abbSSean Bruno 
318795246abbSSean Bruno 	ntxd = txq->ift_size;
318895246abbSSean Bruno 	size = txq->ift_txd_size[qid];
318995246abbSSean Bruno 	start = txq->ift_ifdi[qid].idi_vaddr;
319095246abbSSean Bruno 
319195246abbSSean Bruno 	if (__predict_false(size == 0))
319295246abbSSean Bruno 		return (start);
319395246abbSSean Bruno 	cur = start + size*cidx;
319495246abbSSean Bruno 	end = start + size*ntxd;
319595246abbSSean Bruno 	next = CACHE_PTR_NEXT(cur);
319695246abbSSean Bruno 	return (next < end ? next : start);
319795246abbSSean Bruno }
319895246abbSSean Bruno 
3199d14c853bSStephen Hurd /*
3200d14c853bSStephen Hurd  * Pad an mbuf to ensure a minimum ethernet frame size.
3201d14c853bSStephen Hurd  * min_frame_size is the frame size (less CRC) to pad the mbuf to
3202d14c853bSStephen Hurd  */
3203d14c853bSStephen Hurd static __noinline int
3204a15fbbb8SStephen Hurd iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3205d14c853bSStephen Hurd {
3206d14c853bSStephen Hurd 	/*
3207d14c853bSStephen Hurd 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3208d14c853bSStephen Hurd 	 * an ARP message is the smallest common payload I can think of
3209d14c853bSStephen Hurd 	 */
3210d14c853bSStephen Hurd 	static char pad[18];	/* just zeros */
3211d14c853bSStephen Hurd 	int n;
3212a15fbbb8SStephen Hurd 	struct mbuf *new_head;
3213d14c853bSStephen Hurd 
3214a15fbbb8SStephen Hurd 	if (!M_WRITABLE(*m_head)) {
3215a15fbbb8SStephen Hurd 		new_head = m_dup(*m_head, M_NOWAIT);
3216a15fbbb8SStephen Hurd 		if (new_head == NULL) {
321704993890SStephen Hurd 			m_freem(*m_head);
3218a15fbbb8SStephen Hurd 			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
321906c47d48SStephen Hurd 			DBG_COUNTER_INC(encap_pad_mbuf_fail);
322064e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_frees);
3221a15fbbb8SStephen Hurd 			return ENOMEM;
3222a15fbbb8SStephen Hurd 		}
3223a15fbbb8SStephen Hurd 		m_freem(*m_head);
3224a15fbbb8SStephen Hurd 		*m_head = new_head;
3225a15fbbb8SStephen Hurd 	}
3226a15fbbb8SStephen Hurd 
3227a15fbbb8SStephen Hurd 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3228d14c853bSStephen Hurd 	     n > 0; n -= sizeof(pad))
3229a15fbbb8SStephen Hurd 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3230d14c853bSStephen Hurd 			break;
3231d14c853bSStephen Hurd 
3232d14c853bSStephen Hurd 	if (n > 0) {
3233a15fbbb8SStephen Hurd 		m_freem(*m_head);
3234d14c853bSStephen Hurd 		device_printf(dev, "cannot pad short frame\n");
3235d14c853bSStephen Hurd 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
323664e6fc13SStephen Hurd 		DBG_COUNTER_INC(tx_frees);
3237d14c853bSStephen Hurd 		return (ENOBUFS);
3238d14c853bSStephen Hurd 	}
3239d14c853bSStephen Hurd 
3240d14c853bSStephen Hurd 	return 0;
3241d14c853bSStephen Hurd }
3242d14c853bSStephen Hurd 
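/*
 * Encapsulate one packet for transmission: pad runt frames when the
 * driver requires it, select the regular or TSO DMA tag and segment
 * limit based on CSUM_TSO, parse the headers into if_pkt_info, and
 * ultimately hand the DMA-loaded chain to the driver's isc_txd_encap
 * routine.
 */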
32434c7070dbSScott Long static int
32444c7070dbSScott Long iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
32454c7070dbSScott Long {
32464c7070dbSScott Long 	if_ctx_t		ctx;
32474c7070dbSScott Long 	if_shared_ctx_t		sctx;
32484c7070dbSScott Long 	if_softc_ctx_t		scctx;
3249bfce461eSMarius Strobl 	bus_dma_tag_t		buf_tag;
32504c7070dbSScott Long 	bus_dma_segment_t	*segs;
3251fbec776dSAndrew Gallatin 	struct mbuf		*m_head, **ifsd_m;
325295246abbSSean Bruno 	void			*next_txd;
32534c7070dbSScott Long 	bus_dmamap_t		map;
32544c7070dbSScott Long 	struct if_pkt_info	pi;
32554c7070dbSScott Long 	int remap = 0;
32564c7070dbSScott Long 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
32574c7070dbSScott Long 
32584c7070dbSScott Long 	ctx = txq->ift_ctx;
32594c7070dbSScott Long 	sctx = ctx->ifc_sctx;
32604c7070dbSScott Long 	scctx = &ctx->ifc_softc_ctx;
32614c7070dbSScott Long 	segs = txq->ift_segs;
326223ac9029SStephen Hurd 	ntxd = txq->ift_size;
32634c7070dbSScott Long 	m_head = *m_headp;
32644c7070dbSScott Long 	map = NULL;
32654c7070dbSScott Long 
32664c7070dbSScott Long 	/*
32674c7070dbSScott Long 	 * If we're doing TSO the next descriptor to clean may be quite far ahead
32684c7070dbSScott Long 	 */
32694c7070dbSScott Long 	cidx = txq->ift_cidx;
32704c7070dbSScott Long 	pidx = txq->ift_pidx;
327195246abbSSean Bruno 	if (ctx->ifc_flags & IFC_PREFETCH) {
32724c7070dbSScott Long 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
327395246abbSSean Bruno 		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
327495246abbSSean Bruno 			next_txd = calc_next_txd(txq, cidx, 0);
327595246abbSSean Bruno 			prefetch(next_txd);
327695246abbSSean Bruno 		}
32774c7070dbSScott Long 
32784c7070dbSScott Long 		/* prefetch the next cache line of mbuf pointers and flags */
32794c7070dbSScott Long 		prefetch(&txq->ift_sds.ifsd_m[next]);
32804c7070dbSScott Long 		prefetch(&txq->ift_sds.ifsd_map[next]);
32814c7070dbSScott Long 		next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
32824c7070dbSScott Long 	}
328395246abbSSean Bruno 	map = txq->ift_sds.ifsd_map[pidx];
3284fbec776dSAndrew Gallatin 	ifsd_m = txq->ift_sds.ifsd_m;
32854c7070dbSScott Long 
32864c7070dbSScott Long 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3287bfce461eSMarius Strobl 		buf_tag = txq->ift_tso_buf_tag;
32884c7070dbSScott Long 		max_segs = scctx->isc_tx_tso_segments_max;
32898a04b53dSKonstantin Belousov 		map = txq->ift_sds.ifsd_tso_map[pidx];
3290bfce461eSMarius Strobl 		MPASS(buf_tag != NULL);
32917f87c040SMarius Strobl 		MPASS(max_segs > 0);
32924c7070dbSScott Long 	} else {
3293bfce461eSMarius Strobl 		buf_tag = txq->ift_buf_tag;
32944c7070dbSScott Long 		max_segs = scctx->isc_tx_nsegments;
32958a04b53dSKonstantin Belousov 		map = txq->ift_sds.ifsd_map[pidx];
32964c7070dbSScott Long 	}
3297d14c853bSStephen Hurd 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3298d14c853bSStephen Hurd 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3299a15fbbb8SStephen Hurd 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
330064e6fc13SStephen Hurd 		if (err) {
330164e6fc13SStephen Hurd 			DBG_COUNTER_INC(encap_txd_encap_fail);
3302d14c853bSStephen Hurd 			return err;
3303d14c853bSStephen Hurd 		}
330464e6fc13SStephen Hurd 	}
3305a15fbbb8SStephen Hurd 	m_head = *m_headp;
330695246abbSSean Bruno 
330795246abbSSean Bruno 	pkt_info_zero(&pi);
3308ab2e3f79SStephen Hurd 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
3309ab2e3f79SStephen Hurd 	pi.ipi_pidx = pidx;
3310ab2e3f79SStephen Hurd 	pi.ipi_qsidx = txq->ift_id;
33113429c02fSStephen Hurd 	pi.ipi_len = m_head->m_pkthdr.len;
33123429c02fSStephen Hurd 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
33133429c02fSStephen Hurd 	pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
33144c7070dbSScott Long 
33154c7070dbSScott Long 	/* deliberate bitwise OR to make one condition */
33164c7070dbSScott Long 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
331764e6fc13SStephen Hurd 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) {
331864e6fc13SStephen Hurd 			DBG_COUNTER_INC(encap_txd_encap_fail);
33194c7070dbSScott Long 			return (err);
332064e6fc13SStephen Hurd 		}
33214c7070dbSScott Long 		m_head = *m_headp;
33224c7070dbSScott Long 	}
33234c7070dbSScott Long 
33244c7070dbSScott Long retry:
3325bfce461eSMarius Strobl 	err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
3326fbec776dSAndrew Gallatin 	    BUS_DMA_NOWAIT);
33274c7070dbSScott Long defrag:
33284c7070dbSScott Long 	if (__predict_false(err)) {
33294c7070dbSScott Long 		switch (err) {
33304c7070dbSScott Long 		case EFBIG:
33314c7070dbSScott Long 			/* try collapse once and defrag once */
3332f7594707SAndrew Gallatin 			if (remap == 0) {
33334c7070dbSScott Long 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3334f7594707SAndrew Gallatin 				/* try defrag if collapsing fails */
3335f7594707SAndrew Gallatin 				if (m_head == NULL)
3336f7594707SAndrew Gallatin 					remap++;
3337f7594707SAndrew Gallatin 			}
333864e6fc13SStephen Hurd 			if (remap == 1) {
333964e6fc13SStephen Hurd 				txq->ift_mbuf_defrag++;
33404c7070dbSScott Long 				m_head = m_defrag(*m_headp, M_NOWAIT);
334164e6fc13SStephen Hurd 			}
33423e8d1baeSEric Joyner 			/*
33433e8d1baeSEric Joyner 			 * remap should never be >1 unless bus_dmamap_load_mbuf_sg
33443e8d1baeSEric Joyner 			 * failed to map an mbuf that was run through m_defrag
33453e8d1baeSEric Joyner 			 */
33463e8d1baeSEric Joyner 			MPASS(remap <= 1);
33473e8d1baeSEric Joyner 			if (__predict_false(m_head == NULL || remap > 1))
33484c7070dbSScott Long 				goto defrag_failed;
33493e8d1baeSEric Joyner 			remap++;
33504c7070dbSScott Long 			*m_headp = m_head;
33514c7070dbSScott Long 			goto retry;
33524c7070dbSScott Long 			break;
33534c7070dbSScott Long 		case ENOMEM:
33544c7070dbSScott Long 			txq->ift_no_tx_dma_setup++;
33554c7070dbSScott Long 			break;
33564c7070dbSScott Long 		default:
33574c7070dbSScott Long 			txq->ift_no_tx_dma_setup++;
33584c7070dbSScott Long 			m_freem(*m_headp);
33594c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
33604c7070dbSScott Long 			*m_headp = NULL;
33614c7070dbSScott Long 			break;
33624c7070dbSScott Long 		}
33634c7070dbSScott Long 		txq->ift_map_failed++;
33644c7070dbSScott Long 		DBG_COUNTER_INC(encap_load_mbuf_fail);
336564e6fc13SStephen Hurd 		DBG_COUNTER_INC(encap_txd_encap_fail);
33664c7070dbSScott Long 		return (err);
33674c7070dbSScott Long 	}
3368fbec776dSAndrew Gallatin 	ifsd_m[pidx] = m_head;
33694c7070dbSScott Long 	/*
33704c7070dbSScott Long 	 * XXX assumes a 1 to 1 relationship between segments and
33714c7070dbSScott Long 	 *        descriptors - this does not hold true on all drivers, e.g.
33724c7070dbSScott Long 	 *        cxgb
33734c7070dbSScott Long 	 */
33744c7070dbSScott Long 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
33754c7070dbSScott Long 		txq->ift_no_desc_avail++;
3376bfce461eSMarius Strobl 		bus_dmamap_unload(buf_tag, map);
33774c7070dbSScott Long 		DBG_COUNTER_INC(encap_txq_avail_fail);
337864e6fc13SStephen Hurd 		DBG_COUNTER_INC(encap_txd_encap_fail);
337923ac9029SStephen Hurd 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
33804c7070dbSScott Long 			GROUPTASK_ENQUEUE(&txq->ift_task);
33814c7070dbSScott Long 		return (ENOBUFS);
33824c7070dbSScott Long 	}
338395246abbSSean Bruno 	/*
338495246abbSSean Bruno 	 * On Intel cards we can greatly reduce the number of TX interrupts
338595246abbSSean Bruno 	 * we see by only setting report status on every Nth descriptor.
338695246abbSSean Bruno 	 * However, this also means that the driver will need to keep track
338795246abbSSean Bruno 	 * of the descriptors that RS was set on to check them for the DD bit.
338895246abbSSean Bruno 	 */
338995246abbSSean Bruno 	txq->ift_rs_pending += nsegs + 1;
339095246abbSSean Bruno 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
33911f7ce05dSAndrew Gallatin 	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
339295246abbSSean Bruno 		pi.ipi_flags |= IPI_TX_INTR;
339395246abbSSean Bruno 		txq->ift_rs_pending = 0;
339495246abbSSean Bruno 	}
339595246abbSSean Bruno 
33964c7070dbSScott Long 	pi.ipi_segs = segs;
33974c7070dbSScott Long 	pi.ipi_nsegs = nsegs;
33984c7070dbSScott Long 
339923ac9029SStephen Hurd 	MPASS(pidx >= 0 && pidx < txq->ift_size);
34004c7070dbSScott Long #ifdef PKT_DEBUG
34014c7070dbSScott Long 	print_pkt(&pi);
34024c7070dbSScott Long #endif
34034c7070dbSScott Long 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
340495dcf343SMarius Strobl 		bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
34054c7070dbSScott Long 		DBG_COUNTER_INC(tx_encap);
340695246abbSSean Bruno 		MPASS(pi.ipi_new_pidx < txq->ift_size);
34074c7070dbSScott Long 
34084c7070dbSScott Long 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
34094c7070dbSScott Long 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
341023ac9029SStephen Hurd 			ndesc += txq->ift_size;
34114c7070dbSScott Long 			txq->ift_gen = 1;
34124c7070dbSScott Long 		}
34131248952aSSean Bruno 		/*
34141248952aSSean Bruno 		 * Drivers may need as many as
34151248952aSSean Bruno 		 * two sentinel descriptors.
34161248952aSSean Bruno 		 */
34171248952aSSean Bruno 		MPASS(ndesc <= pi.ipi_nsegs + 2);
34184c7070dbSScott Long 		MPASS(pi.ipi_new_pidx != pidx);
34194c7070dbSScott Long 		MPASS(ndesc > 0);
34204c7070dbSScott Long 		txq->ift_in_use += ndesc;
342195246abbSSean Bruno 
34224c7070dbSScott Long 		/*
34234c7070dbSScott Long 		 * We update the last software descriptor again here because there may
34244c7070dbSScott Long 		 * be a sentinel and/or there may be more mbufs than segments
34254c7070dbSScott Long 		 */
34264c7070dbSScott Long 		txq->ift_pidx = pi.ipi_new_pidx;
34274c7070dbSScott Long 		txq->ift_npending += pi.ipi_ndescs;
3428f7594707SAndrew Gallatin 	} else {
342923ac9029SStephen Hurd 		*m_headp = m_head = iflib_remove_mbuf(txq);
3430f7594707SAndrew Gallatin 		if (err == EFBIG) {
34314c7070dbSScott Long 			txq->ift_txd_encap_efbig++;
3432f7594707SAndrew Gallatin 			if (remap < 2) {
3433f7594707SAndrew Gallatin 				remap = 1;
34344c7070dbSScott Long 				goto defrag;
3435f7594707SAndrew Gallatin 			}
3436f7594707SAndrew Gallatin 		}
3437f7594707SAndrew Gallatin 		goto defrag_failed;
3438f7594707SAndrew Gallatin 	}
343964e6fc13SStephen Hurd 	/*
344064e6fc13SStephen Hurd 	 * err can't possibly be non-zero here, so we don't need to test it
344164e6fc13SStephen Hurd 	 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
344264e6fc13SStephen Hurd 	 */
34434c7070dbSScott Long 	return (err);
34444c7070dbSScott Long 
34454c7070dbSScott Long defrag_failed:
34464c7070dbSScott Long 	txq->ift_mbuf_defrag_failed++;
34474c7070dbSScott Long 	txq->ift_map_failed++;
34484c7070dbSScott Long 	m_freem(*m_headp);
34494c7070dbSScott Long 	DBG_COUNTER_INC(tx_frees);
34504c7070dbSScott Long 	*m_headp = NULL;
345164e6fc13SStephen Hurd 	DBG_COUNTER_INC(encap_txd_encap_fail);
34524c7070dbSScott Long 	return (ENOMEM);
34534c7070dbSScott Long }
34544c7070dbSScott Long 
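/*
 * Free the mbufs and unload the DMA maps associated with the next 'n'
 * completed descriptors, advancing the consumer index and clearing the
 * generation flag when the ring wraps.
 */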
34554c7070dbSScott Long static void
34564c7070dbSScott Long iflib_tx_desc_free(iflib_txq_t txq, int n)
34574c7070dbSScott Long {
34584c7070dbSScott Long 	uint32_t qsize, cidx, mask, gen;
34594c7070dbSScott Long 	struct mbuf *m, **ifsd_m;
346095246abbSSean Bruno 	bool do_prefetch;
34614c7070dbSScott Long 
34624c7070dbSScott Long 	cidx = txq->ift_cidx;
34634c7070dbSScott Long 	gen = txq->ift_gen;
346423ac9029SStephen Hurd 	qsize = txq->ift_size;
34654c7070dbSScott Long 	mask = qsize-1;
34664c7070dbSScott Long 	ifsd_m = txq->ift_sds.ifsd_m;
346795246abbSSean Bruno 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
34684c7070dbSScott Long 
346994618825SMark Johnston 	while (n-- > 0) {
347095246abbSSean Bruno 		if (do_prefetch) {
34714c7070dbSScott Long 			prefetch(ifsd_m[(cidx + 3) & mask]);
34724c7070dbSScott Long 			prefetch(ifsd_m[(cidx + 4) & mask]);
347395246abbSSean Bruno 		}
34744c7070dbSScott Long 		if ((m = ifsd_m[cidx]) != NULL) {
3475fbec776dSAndrew Gallatin 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
34768a04b53dSKonstantin Belousov 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
3477bfce461eSMarius Strobl 				bus_dmamap_sync(txq->ift_tso_buf_tag,
34788a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_tso_map[cidx],
34798a04b53dSKonstantin Belousov 				    BUS_DMASYNC_POSTWRITE);
3480bfce461eSMarius Strobl 				bus_dmamap_unload(txq->ift_tso_buf_tag,
34818a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_tso_map[cidx]);
34828a04b53dSKonstantin Belousov 			} else {
3483bfce461eSMarius Strobl 				bus_dmamap_sync(txq->ift_buf_tag,
34848a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_map[cidx],
34858a04b53dSKonstantin Belousov 				    BUS_DMASYNC_POSTWRITE);
3486bfce461eSMarius Strobl 				bus_dmamap_unload(txq->ift_buf_tag,
34878a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_map[cidx]);
34888a04b53dSKonstantin Belousov 			}
34894c7070dbSScott Long 			/* XXX we don't support any drivers that batch packets yet */
34904c7070dbSScott Long 			MPASS(m->m_nextpkt == NULL);
34915c5ca36cSSean Bruno 			m_freem(m);
34924c7070dbSScott Long 			ifsd_m[cidx] = NULL;
34934c7070dbSScott Long #if MEMORY_LOGGING
34944c7070dbSScott Long 			txq->ift_dequeued++;
34954c7070dbSScott Long #endif
34964c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
34974c7070dbSScott Long 		}
34984c7070dbSScott Long 		if (__predict_false(++cidx == qsize)) {
34994c7070dbSScott Long 			cidx = 0;
35004c7070dbSScott Long 			gen = 0;
35014c7070dbSScott Long 		}
35024c7070dbSScott Long 	}
35034c7070dbSScott Long 	txq->ift_cidx = cidx;
35044c7070dbSScott Long 	txq->ift_gen = gen;
35054c7070dbSScott Long }
35064c7070dbSScott Long 
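/*
 * Update the completion count from the driver and, when more than 'thresh'
 * descriptors are reclaimable, free their resources and credit them back to
 * the queue.  Returns the number of descriptors reclaimed, or 0 when below
 * the threshold.
 */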
35074c7070dbSScott Long static __inline int
35084c7070dbSScott Long iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
35094c7070dbSScott Long {
35104c7070dbSScott Long 	int reclaim;
35114c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
35124c7070dbSScott Long 
35134c7070dbSScott Long 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
35144c7070dbSScott Long 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
35154c7070dbSScott Long 
35164c7070dbSScott Long 	/*
35174c7070dbSScott Long 	 * Need a rate-limiting check so that this isn't called every time
35184c7070dbSScott Long 	 */
35194c7070dbSScott Long 	iflib_tx_credits_update(ctx, txq);
35204c7070dbSScott Long 	reclaim = DESC_RECLAIMABLE(txq);
35214c7070dbSScott Long 
35224c7070dbSScott Long 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
35234c7070dbSScott Long #ifdef INVARIANTS
35244c7070dbSScott Long 		if (iflib_verbose_debug) {
35254c7070dbSScott Long 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
35264c7070dbSScott Long 			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
35274c7070dbSScott Long 			       reclaim, thresh);
35284c7070dbSScott Long 
35294c7070dbSScott Long 		}
35304c7070dbSScott Long #endif
35314c7070dbSScott Long 		return (0);
35324c7070dbSScott Long 	}
35334c7070dbSScott Long 	iflib_tx_desc_free(txq, reclaim);
35344c7070dbSScott Long 	txq->ift_cleaned += reclaim;
35354c7070dbSScott Long 	txq->ift_in_use -= reclaim;
35364c7070dbSScott Long 
35374c7070dbSScott Long 	return (reclaim);
35384c7070dbSScott Long }
35394c7070dbSScott Long 
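/*
 * Return a pointer to the mp_ring slot at 'cidx' + 'offset', prefetching the
 * mbuf it holds and, when more entries remain, the next few mbufs and the
 * following cache lines of slot pointers.
 */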
35404c7070dbSScott Long static struct mbuf **
354195246abbSSean Bruno _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
35424c7070dbSScott Long {
354395246abbSSean Bruno 	int next, size;
354495246abbSSean Bruno 	struct mbuf **items;
35454c7070dbSScott Long 
354695246abbSSean Bruno 	size = r->size;
354795246abbSSean Bruno 	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
354895246abbSSean Bruno 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
354995246abbSSean Bruno 
355095246abbSSean Bruno 	prefetch(items[(cidx + offset) & (size-1)]);
355195246abbSSean Bruno 	if (remaining > 1) {
35523429c02fSStephen Hurd 		prefetch2cachelines(&items[next]);
35533429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
35543429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
35553429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
355695246abbSSean Bruno 	}
355795246abbSSean Bruno 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
35584c7070dbSScott Long }
35594c7070dbSScott Long 
35604c7070dbSScott Long static void
35614c7070dbSScott Long iflib_txq_check_drain(iflib_txq_t txq, int budget)
35624c7070dbSScott Long {
35634c7070dbSScott Long 
356495246abbSSean Bruno 	ifmp_ring_check_drainage(txq->ift_br, budget);
35654c7070dbSScott Long }
35664c7070dbSScott Long 
35674c7070dbSScott Long static uint32_t
35684c7070dbSScott Long iflib_txq_can_drain(struct ifmp_ring *r)
35694c7070dbSScott Long {
35704c7070dbSScott Long 	iflib_txq_t txq = r->cookie;
35714c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
35724c7070dbSScott Long 
357395dcf343SMarius Strobl 	if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2)
357495dcf343SMarius Strobl 		return (1);
35758a04b53dSKonstantin Belousov 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
35768a04b53dSKonstantin Belousov 	    BUS_DMASYNC_POSTREAD);
357795dcf343SMarius Strobl 	return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
357895dcf343SMarius Strobl 	    false));
35794c7070dbSScott Long }
35804c7070dbSScott Long 
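/*
 * mp_ring drain callback.  Reclaim completed descriptors, then encapsulate
 * and send as many of the queued mbufs as descriptor space allows, ringing
 * the doorbell and updating the interface counters.  Returns the number of
 * ring entries consumed.
 */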
35814c7070dbSScott Long static uint32_t
35824c7070dbSScott Long iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
35834c7070dbSScott Long {
35844c7070dbSScott Long 	iflib_txq_t txq = r->cookie;
35854c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
358695246abbSSean Bruno 	struct ifnet *ifp = ctx->ifc_ifp;
35874c7070dbSScott Long 	struct mbuf **mp, *m;
358895246abbSSean Bruno 	int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
358995246abbSSean Bruno 	int reclaimed, err, in_use_prev, desc_used;
359095246abbSSean Bruno 	bool do_prefetch, ring, rang;
35914c7070dbSScott Long 
35924c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
35934c7070dbSScott Long 			    !LINK_ACTIVE(ctx))) {
35944c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_notready);
35954c7070dbSScott Long 		return (0);
35964c7070dbSScott Long 	}
359795246abbSSean Bruno 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
359895246abbSSean Bruno 	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
35994c7070dbSScott Long 	avail = IDXDIFF(pidx, cidx, r->size);
36004c7070dbSScott Long 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
36014c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_flushing);
36024c7070dbSScott Long 		for (i = 0; i < avail; i++) {
3603bc0e855bSStephen Hurd 			if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq))
360423ac9029SStephen Hurd 				m_free(r->items[(cidx + i) & (r->size-1)]);
36054c7070dbSScott Long 			r->items[(cidx + i) & (r->size-1)] = NULL;
36064c7070dbSScott Long 		}
36074c7070dbSScott Long 		return (avail);
36084c7070dbSScott Long 	}
360995246abbSSean Bruno 
36104c7070dbSScott Long 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
36114c7070dbSScott Long 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
36124c7070dbSScott Long 		CALLOUT_LOCK(txq);
36134c7070dbSScott Long 		callout_stop(&txq->ift_timer);
36144c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
36154c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_oactive);
36164c7070dbSScott Long 		return (0);
36174c7070dbSScott Long 	}
361895246abbSSean Bruno 	if (reclaimed)
361995246abbSSean Bruno 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
36204c7070dbSScott Long 	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
36214c7070dbSScott Long 	count = MIN(avail, TX_BATCH_SIZE);
3622da69b8f9SSean Bruno #ifdef INVARIANTS
3623da69b8f9SSean Bruno 	if (iflib_verbose_debug)
3624da69b8f9SSean Bruno 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3625da69b8f9SSean Bruno 		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3626da69b8f9SSean Bruno #endif
362795246abbSSean Bruno 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
362895246abbSSean Bruno 	avail = TXQ_AVAIL(txq);
36291ae4848cSMatt Macy 	err = 0;
363095246abbSSean Bruno 	for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
36311ae4848cSMatt Macy 		int rem = do_prefetch ? count - i : 0;
36324c7070dbSScott Long 
363395246abbSSean Bruno 		mp = _ring_peek_one(r, cidx, i, rem);
3634da69b8f9SSean Bruno 		MPASS(mp != NULL && *mp != NULL);
363595246abbSSean Bruno 		if (__predict_false(*mp == (struct mbuf *)txq)) {
363695246abbSSean Bruno 			consumed++;
363795246abbSSean Bruno 			reclaimed++;
363895246abbSSean Bruno 			continue;
363995246abbSSean Bruno 		}
36404c7070dbSScott Long 		in_use_prev = txq->ift_in_use;
364195246abbSSean Bruno 		err = iflib_encap(txq, mp);
364295246abbSSean Bruno 		if (__predict_false(err)) {
3643da69b8f9SSean Bruno 			/* no room - bail out */
364495246abbSSean Bruno 			if (err == ENOBUFS)
36454c7070dbSScott Long 				break;
36464c7070dbSScott Long 			consumed++;
3647da69b8f9SSean Bruno 			/* we can't send this packet - skip it */
36484c7070dbSScott Long 			continue;
3649da69b8f9SSean Bruno 		}
365095246abbSSean Bruno 		consumed++;
36514c7070dbSScott Long 		pkt_sent++;
36524c7070dbSScott Long 		m = *mp;
36534c7070dbSScott Long 		DBG_COUNTER_INC(tx_sent);
36544c7070dbSScott Long 		bytes_sent += m->m_pkthdr.len;
365595246abbSSean Bruno 		mcast_sent += !!(m->m_flags & M_MCAST);
365695246abbSSean Bruno 		avail = TXQ_AVAIL(txq);
36574c7070dbSScott Long 
36584c7070dbSScott Long 		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
36594c7070dbSScott Long 		desc_used += (txq->ift_in_use - in_use_prev);
36604c7070dbSScott Long 		ETHER_BPF_MTAP(ifp, m);
366195246abbSSean Bruno 		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
36624c7070dbSScott Long 			break;
366395246abbSSean Bruno 		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
36644c7070dbSScott Long 	}
36654c7070dbSScott Long 
366695246abbSSean Bruno 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
366795246abbSSean Bruno 	ring = rang ? false  : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
366895246abbSSean Bruno 	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
36694c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
36704c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
36714c7070dbSScott Long 	if (mcast_sent)
36724c7070dbSScott Long 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3673da69b8f9SSean Bruno #ifdef INVARIANTS
3674da69b8f9SSean Bruno 	if (iflib_verbose_debug)
3675da69b8f9SSean Bruno 		printf("consumed=%d\n", consumed);
3676da69b8f9SSean Bruno #endif
36774c7070dbSScott Long 	return (consumed);
36784c7070dbSScott Long }
36794c7070dbSScott Long 
3680da69b8f9SSean Bruno static uint32_t
3681da69b8f9SSean Bruno iflib_txq_drain_always(struct ifmp_ring *r)
3682da69b8f9SSean Bruno {
3683da69b8f9SSean Bruno 	return (1);
3684da69b8f9SSean Bruno }
3685da69b8f9SSean Bruno 
3686da69b8f9SSean Bruno static uint32_t
3687da69b8f9SSean Bruno iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3688da69b8f9SSean Bruno {
3689da69b8f9SSean Bruno 	int i, avail;
3690da69b8f9SSean Bruno 	struct mbuf **mp;
3691da69b8f9SSean Bruno 	iflib_txq_t txq;
3692da69b8f9SSean Bruno 
3693da69b8f9SSean Bruno 	txq = r->cookie;
3694da69b8f9SSean Bruno 
3695da69b8f9SSean Bruno 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3696da69b8f9SSean Bruno 	CALLOUT_LOCK(txq);
3697da69b8f9SSean Bruno 	callout_stop(&txq->ift_timer);
3698da69b8f9SSean Bruno 	CALLOUT_UNLOCK(txq);
3699da69b8f9SSean Bruno 
3700da69b8f9SSean Bruno 	avail = IDXDIFF(pidx, cidx, r->size);
3701da69b8f9SSean Bruno 	for (i = 0; i < avail; i++) {
370295246abbSSean Bruno 		mp = _ring_peek_one(r, cidx, i, avail - i);
370395246abbSSean Bruno 		if (__predict_false(*mp == (struct mbuf *)txq))
370495246abbSSean Bruno 			continue;
3705da69b8f9SSean Bruno 		m_freem(*mp);
370664e6fc13SStephen Hurd 		DBG_COUNTER_INC(tx_frees);
3707da69b8f9SSean Bruno 	}
3708da69b8f9SSean Bruno 	MPASS(ifmp_ring_is_stalled(r) == 0);
3709da69b8f9SSean Bruno 	return (avail);
3710da69b8f9SSean Bruno }
3711da69b8f9SSean Bruno 
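/*
 * Discard every mbuf still queued on the transmit mp_ring by temporarily
 * installing drain handlers that free entries instead of transmitting them.
 */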
3712da69b8f9SSean Bruno static void
3713da69b8f9SSean Bruno iflib_ifmp_purge(iflib_txq_t txq)
3714da69b8f9SSean Bruno {
3715da69b8f9SSean Bruno 	struct ifmp_ring *r;
3716da69b8f9SSean Bruno 
371795246abbSSean Bruno 	r = txq->ift_br;
3718da69b8f9SSean Bruno 	r->drain = iflib_txq_drain_free;
3719da69b8f9SSean Bruno 	r->can_drain = iflib_txq_drain_always;
3720da69b8f9SSean Bruno 
3721da69b8f9SSean Bruno 	ifmp_ring_check_drainage(r, r->size);
3722da69b8f9SSean Bruno 
3723da69b8f9SSean Bruno 	r->drain = iflib_txq_drain;
3724da69b8f9SSean Bruno 	r->can_drain = iflib_txq_can_drain;
3725da69b8f9SSean Bruno }
3726da69b8f9SSean Bruno 
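/*
 * Per-queue transmit task.  After servicing netmap and ALTQ when they are
 * active, push any doorbell-pending work through the mp_ring and re-enable
 * the queue (or legacy) interrupt.
 */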
37274c7070dbSScott Long static void
372823ac9029SStephen Hurd _task_fn_tx(void *context)
37294c7070dbSScott Long {
37304c7070dbSScott Long 	iflib_txq_t txq = context;
37314c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
3732a6611c93SMarius Strobl #if defined(ALTQ) || defined(DEV_NETMAP)
3733a6611c93SMarius Strobl 	if_t ifp = ctx->ifc_ifp;
3734a6611c93SMarius Strobl #endif
3735fe51d4cdSStephen Hurd 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
37364c7070dbSScott Long 
37371248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
37381248952aSSean Bruno 	txq->ift_cpu_exec_count[curcpu]++;
37391248952aSSean Bruno #endif
37404c7070dbSScott Long 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
37414c7070dbSScott Long 		return;
374295dcf343SMarius Strobl #ifdef DEV_NETMAP
3743a6611c93SMarius Strobl 	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
37448a04b53dSKonstantin Belousov 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
37458a04b53dSKonstantin Belousov 		    BUS_DMASYNC_POSTREAD);
374695246abbSSean Bruno 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
3747a6611c93SMarius Strobl 			netmap_tx_irq(ifp, txq->ift_id);
374895246abbSSean Bruno 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
374995246abbSSean Bruno 		return;
375095246abbSSean Bruno 	}
375195dcf343SMarius Strobl #endif
3752b8ca4756SPatrick Kelsey #ifdef ALTQ
3753b8ca4756SPatrick Kelsey 	if (ALTQ_IS_ENABLED(&ifp->if_snd))
3754b8ca4756SPatrick Kelsey 		iflib_altq_if_start(ifp);
3755b8ca4756SPatrick Kelsey #endif
375695246abbSSean Bruno 	if (txq->ift_db_pending)
3757fe51d4cdSStephen Hurd 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
3758fe51d4cdSStephen Hurd 	else if (!abdicate)
3759fe51d4cdSStephen Hurd 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3760fe51d4cdSStephen Hurd 	/*
3761fe51d4cdSStephen Hurd 	 * When abdicating, we always need to check drainage, not just when we don't enqueue
3762fe51d4cdSStephen Hurd 	 */
3763fe51d4cdSStephen Hurd 	if (abdicate)
3764fe51d4cdSStephen Hurd 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
376595246abbSSean Bruno 	if (ctx->ifc_flags & IFC_LEGACY)
376695246abbSSean Bruno 		IFDI_INTR_ENABLE(ctx);
376795246abbSSean Bruno 	else {
37681ae4848cSMatt Macy #ifdef INVARIANTS
37691ae4848cSMatt Macy 		int rc =
37701ae4848cSMatt Macy #endif
37711ae4848cSMatt Macy 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
377295246abbSSean Bruno 		KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
377395246abbSSean Bruno 	}
37744c7070dbSScott Long }
37754c7070dbSScott Long 
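/*
 * Per-queue receive task.  Process up to the configured budget of packets
 * via iflib_rxeof(), re-enabling the RX (or legacy) interrupt when no work
 * remains and re-scheduling the task otherwise.
 */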
37764c7070dbSScott Long static void
377723ac9029SStephen Hurd _task_fn_rx(void *context)
37784c7070dbSScott Long {
37794c7070dbSScott Long 	iflib_rxq_t rxq = context;
37804c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
37814c7070dbSScott Long 	bool more;
3782f4d2154eSStephen Hurd 	uint16_t budget;
37834c7070dbSScott Long 
37841248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
37851248952aSSean Bruno 	rxq->ifr_cpu_exec_count[curcpu]++;
37861248952aSSean Bruno #endif
37874c7070dbSScott Long 	DBG_COUNTER_INC(task_fn_rxs);
37884c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
37894c7070dbSScott Long 		return;
3790d0d0ad0aSStephen Hurd 	more = true;
3791d0d0ad0aSStephen Hurd #ifdef DEV_NETMAP
3792d0d0ad0aSStephen Hurd 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
3793d0d0ad0aSStephen Hurd 		u_int work = 0;
3794d0d0ad0aSStephen Hurd 		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
3795d0d0ad0aSStephen Hurd 			more = false;
3796d0d0ad0aSStephen Hurd 		}
3797d0d0ad0aSStephen Hurd 	}
3798d0d0ad0aSStephen Hurd #endif
3799f4d2154eSStephen Hurd 	budget = ctx->ifc_sysctl_rx_budget;
3800f4d2154eSStephen Hurd 	if (budget == 0)
3801f4d2154eSStephen Hurd 		budget = 16;	/* XXX */
3802f4d2154eSStephen Hurd 	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
38034c7070dbSScott Long 		if (ctx->ifc_flags & IFC_LEGACY)
38044c7070dbSScott Long 			IFDI_INTR_ENABLE(ctx);
38054c7070dbSScott Long 		else {
38061ae4848cSMatt Macy #ifdef INVARIANTS
38071ae4848cSMatt Macy 			int rc =
38081ae4848cSMatt Macy #endif
38091ae4848cSMatt Macy 				IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
381023ac9029SStephen Hurd 			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
38111ae4848cSMatt Macy 			DBG_COUNTER_INC(rx_intr_enables);
38124c7070dbSScott Long 		}
38134c7070dbSScott Long 	}
38144c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
38154c7070dbSScott Long 		return;
38164c7070dbSScott Long 	if (more)
38174c7070dbSScott Long 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
38184c7070dbSScott Long }
38194c7070dbSScott Long 
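/*
 * Admin task.  Handle deferred watchdog resets, let the driver refresh its
 * admin/link state, re-arm the per-queue timers, and perform any interface
 * re-initialization requested via IFC_DO_RESET.
 */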
38204c7070dbSScott Long static void
382123ac9029SStephen Hurd _task_fn_admin(void *context)
38224c7070dbSScott Long {
38234c7070dbSScott Long 	if_ctx_t ctx = context;
38244c7070dbSScott Long 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
38254c7070dbSScott Long 	iflib_txq_t txq;
3826ab2e3f79SStephen Hurd 	int i;
382777c1fcecSEric Joyner 	bool oactive, running, do_reset, do_watchdog, in_detach;
3828dd7fbcf1SStephen Hurd 	uint32_t reset_on = hz / 2;
3829ab2e3f79SStephen Hurd 
38307b610b60SSean Bruno 	STATE_LOCK(ctx);
38317b610b60SSean Bruno 	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
38327b610b60SSean Bruno 	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
38337b610b60SSean Bruno 	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
38347b610b60SSean Bruno 	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
383577c1fcecSEric Joyner 	in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
38367b610b60SSean Bruno 	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
38377b610b60SSean Bruno 	STATE_UNLOCK(ctx);
38387b610b60SSean Bruno 
383977c1fcecSEric Joyner 	if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
384077c1fcecSEric Joyner 		return;
384177c1fcecSEric Joyner 	if (in_detach)
3842ab2e3f79SStephen Hurd 		return;
38434c7070dbSScott Long 
38444c7070dbSScott Long 	CTX_LOCK(ctx);
38454c7070dbSScott Long 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
38464c7070dbSScott Long 		CALLOUT_LOCK(txq);
38474c7070dbSScott Long 		callout_stop(&txq->ift_timer);
38484c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
38494c7070dbSScott Long 	}
38507b610b60SSean Bruno 	if (do_watchdog) {
38517b610b60SSean Bruno 		ctx->ifc_watchdog_events++;
38527b610b60SSean Bruno 		IFDI_WATCHDOG_RESET(ctx);
38537b610b60SSean Bruno 	}
3854d300df01SStephen Hurd 	IFDI_UPDATE_ADMIN_STATUS(ctx);
3855dd7fbcf1SStephen Hurd 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3856dd7fbcf1SStephen Hurd #ifdef DEV_NETMAP
3857dd7fbcf1SStephen Hurd 		reset_on = hz / 2;
3858dd7fbcf1SStephen Hurd 		if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
385995dcf343SMarius Strobl 			iflib_netmap_timer_adjust(ctx, txq, &reset_on);
3860dd7fbcf1SStephen Hurd #endif
3861dd7fbcf1SStephen Hurd 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
3862dd7fbcf1SStephen Hurd 	}
3863ab2e3f79SStephen Hurd 	IFDI_LINK_INTR_ENABLE(ctx);
38647b610b60SSean Bruno 	if (do_reset)
3865ab2e3f79SStephen Hurd 		iflib_if_init_locked(ctx);
38664c7070dbSScott Long 	CTX_UNLOCK(ctx);
38674c7070dbSScott Long 
3868ab2e3f79SStephen Hurd 	if (LINK_ACTIVE(ctx) == 0)
38694c7070dbSScott Long 		return;
38704c7070dbSScott Long 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
38714c7070dbSScott Long 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
38724c7070dbSScott Long }
38734c7070dbSScott Long 
38744c7070dbSScott Long 
38754c7070dbSScott Long static void
387623ac9029SStephen Hurd _task_fn_iov(void *context)
38774c7070dbSScott Long {
38784c7070dbSScott Long 	if_ctx_t ctx = context;
38794c7070dbSScott Long 
388077c1fcecSEric Joyner 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
388177c1fcecSEric Joyner 	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
38824c7070dbSScott Long 		return;
38834c7070dbSScott Long 
38844c7070dbSScott Long 	CTX_LOCK(ctx);
38854c7070dbSScott Long 	IFDI_VFLR_HANDLE(ctx);
38864c7070dbSScott Long 	CTX_UNLOCK(ctx);
38874c7070dbSScott Long }
38884c7070dbSScott Long 
38894c7070dbSScott Long static int
38904c7070dbSScott Long iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
38914c7070dbSScott Long {
38924c7070dbSScott Long 	int err;
38934c7070dbSScott Long 	if_int_delay_info_t info;
38944c7070dbSScott Long 	if_ctx_t ctx;
38954c7070dbSScott Long 
38964c7070dbSScott Long 	info = (if_int_delay_info_t)arg1;
38974c7070dbSScott Long 	ctx = info->iidi_ctx;
38984c7070dbSScott Long 	info->iidi_req = req;
38994c7070dbSScott Long 	info->iidi_oidp = oidp;
39004c7070dbSScott Long 	CTX_LOCK(ctx);
39014c7070dbSScott Long 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
39024c7070dbSScott Long 	CTX_UNLOCK(ctx);
39034c7070dbSScott Long 	return (err);
39044c7070dbSScott Long }
39054c7070dbSScott Long 
39064c7070dbSScott Long /*********************************************************************
39074c7070dbSScott Long  *
39084c7070dbSScott Long  *  IFNET FUNCTIONS
39094c7070dbSScott Long  *
39104c7070dbSScott Long  **********************************************************************/
39114c7070dbSScott Long 
39124c7070dbSScott Long static void
39134c7070dbSScott Long iflib_if_init_locked(if_ctx_t ctx)
39144c7070dbSScott Long {
39154c7070dbSScott Long 	iflib_stop(ctx);
39164c7070dbSScott Long 	iflib_init_locked(ctx);
39174c7070dbSScott Long }
39184c7070dbSScott Long 
39194c7070dbSScott Long 
39204c7070dbSScott Long static void
39214c7070dbSScott Long iflib_if_init(void *arg)
39224c7070dbSScott Long {
39234c7070dbSScott Long 	if_ctx_t ctx = arg;
39244c7070dbSScott Long 
39254c7070dbSScott Long 	CTX_LOCK(ctx);
39264c7070dbSScott Long 	iflib_if_init_locked(ctx);
39274c7070dbSScott Long 	CTX_UNLOCK(ctx);
39284c7070dbSScott Long }
39294c7070dbSScott Long 
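/*
 * ifnet if_transmit method.  Select a TX queue from the mbuf's flow hash
 * (always queue 0 when ALTQ is enabled) and enqueue the packet on that
 * queue's mp_ring, scheduling the TX task as dictated by the abdicate
 * sysctl.
 */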
39304c7070dbSScott Long static int
39314c7070dbSScott Long iflib_if_transmit(if_t ifp, struct mbuf *m)
39324c7070dbSScott Long {
39334c7070dbSScott Long 	if_ctx_t	ctx = if_getsoftc(ifp);
39344c7070dbSScott Long 
39354c7070dbSScott Long 	iflib_txq_t txq;
393623ac9029SStephen Hurd 	int err, qidx;
3937fe51d4cdSStephen Hurd 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
39384c7070dbSScott Long 
39394c7070dbSScott Long 	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
39404c7070dbSScott Long 		DBG_COUNTER_INC(tx_frees);
39414c7070dbSScott Long 		m_freem(m);
3942225eae1bSEric Joyner 		return (ENETDOWN);
39434c7070dbSScott Long 	}
39444c7070dbSScott Long 
394523ac9029SStephen Hurd 	MPASS(m->m_nextpkt == NULL);
3946b8ca4756SPatrick Kelsey 	/* ALTQ-enabled interfaces always use queue 0. */
39474c7070dbSScott Long 	qidx = 0;
3948b8ca4756SPatrick Kelsey 	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd))
39494c7070dbSScott Long 		qidx = QIDX(ctx, m);
39504c7070dbSScott Long 	/*
39514c7070dbSScott Long 	 * XXX calculate buf_ring based on flowid (divvy up bits?)
39524c7070dbSScott Long 	 */
39534c7070dbSScott Long 	txq = &ctx->ifc_txqs[qidx];
39544c7070dbSScott Long 
39554c7070dbSScott Long #ifdef DRIVER_BACKPRESSURE
39564c7070dbSScott Long 	if (txq->ift_closed) {
39574c7070dbSScott Long 		while (m != NULL) {
39584c7070dbSScott Long 			next = m->m_nextpkt;
39594c7070dbSScott Long 			m->m_nextpkt = NULL;
39604c7070dbSScott Long 			m_freem(m);
396164e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_frees);
39624c7070dbSScott Long 			m = next;
39634c7070dbSScott Long 		}
39644c7070dbSScott Long 		return (ENOBUFS);
39654c7070dbSScott Long 	}
39664c7070dbSScott Long #endif
396723ac9029SStephen Hurd #ifdef notyet
39684c7070dbSScott Long 	qidx = count = 0;
39694c7070dbSScott Long 	mp = marr;
39704c7070dbSScott Long 	next = m;
39714c7070dbSScott Long 	do {
39724c7070dbSScott Long 		count++;
39734c7070dbSScott Long 		next = next->m_nextpkt;
39744c7070dbSScott Long 	} while (next != NULL);
39754c7070dbSScott Long 
397616fb86abSConrad Meyer 	if (count > nitems(marr))
39774c7070dbSScott Long 		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
39784c7070dbSScott Long 			/* XXX check nextpkt */
39794c7070dbSScott Long 			m_freem(m);
39804c7070dbSScott Long 			/* XXX simplify for now */
39814c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
39824c7070dbSScott Long 			return (ENOBUFS);
39834c7070dbSScott Long 		}
39844c7070dbSScott Long 	for (next = m, i = 0; next != NULL; i++) {
39854c7070dbSScott Long 		mp[i] = next;
39864c7070dbSScott Long 		next = next->m_nextpkt;
39874c7070dbSScott Long 		mp[i]->m_nextpkt = NULL;
39884c7070dbSScott Long 	}
398923ac9029SStephen Hurd #endif
39904c7070dbSScott Long 	DBG_COUNTER_INC(tx_seen);
3991fe51d4cdSStephen Hurd 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
39924c7070dbSScott Long 
3993fe51d4cdSStephen Hurd 	if (abdicate)
3994ab2e3f79SStephen Hurd 		GROUPTASK_ENQUEUE(&txq->ift_task);
39951225d9daSStephen Hurd  	if (err) {
3996fe51d4cdSStephen Hurd 		if (!abdicate)
3997fe51d4cdSStephen Hurd 			GROUPTASK_ENQUEUE(&txq->ift_task);
39984c7070dbSScott Long 		/* backpressure support is forthcoming */
39994c7070dbSScott Long #ifdef DRIVER_BACKPRESSURE
40004c7070dbSScott Long 		txq->ift_closed = TRUE;
40014c7070dbSScott Long #endif
400295246abbSSean Bruno 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
400323ac9029SStephen Hurd 		m_freem(m);
400464e6fc13SStephen Hurd 		DBG_COUNTER_INC(tx_frees);
40054c7070dbSScott Long 	}
40064c7070dbSScott Long 
40074c7070dbSScott Long 	return (err);
40084c7070dbSScott Long }
40094c7070dbSScott Long 
4010b8ca4756SPatrick Kelsey #ifdef ALTQ
4011b8ca4756SPatrick Kelsey /*
4012b8ca4756SPatrick Kelsey  * The overall approach to integrating iflib with ALTQ is to continue to use
4013b8ca4756SPatrick Kelsey  * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
4014b8ca4756SPatrick Kelsey  * ring.  Technically, when using ALTQ, queueing to an intermediate mp_ring
4015b8ca4756SPatrick Kelsey  * is redundant/unnecessary, but doing so minimizes the amount of
4016b8ca4756SPatrick Kelsey  * ALTQ-specific code required in iflib.  It is assumed that the overhead of
4017b8ca4756SPatrick Kelsey  * redundantly queueing to an intermediate mp_ring is swamped by the
4018b8ca4756SPatrick Kelsey  * performance limitations inherent in using ALTQ.
4019b8ca4756SPatrick Kelsey  *
4020b8ca4756SPatrick Kelsey  * When ALTQ support is compiled in, all iflib drivers will use a transmit
4021b8ca4756SPatrick Kelsey  * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
4022b8ca4756SPatrick Kelsey  * given interface.  If ALTQ is enabled for an interface, then all
4023b8ca4756SPatrick Kelsey  * transmitted packets for that interface will be submitted to the ALTQ
4024b8ca4756SPatrick Kelsey  * subsystem via IFQ_ENQUEUE().  We don't use the legacy if_transmit()
4025b8ca4756SPatrick Kelsey  * implementation because it uses IFQ_HANDOFF(), which will duplicatively
4026b8ca4756SPatrick Kelsey  * update stats that the iflib machinery handles, and which is sensitive to
4027b8ca4756SPatrick Kelsey  * the disused IFF_DRV_OACTIVE flag.  Additionally, iflib_altq_if_start()
4028b8ca4756SPatrick Kelsey  * will be installed as the start routine for use by ALTQ facilities that
4029b8ca4756SPatrick Kelsey  * need to trigger queue drains on a scheduled basis.
4030b8ca4756SPatrick Kelsey  *
4031b8ca4756SPatrick Kelsey  */
4032b8ca4756SPatrick Kelsey static void
4033b8ca4756SPatrick Kelsey iflib_altq_if_start(if_t ifp)
4034b8ca4756SPatrick Kelsey {
4035b8ca4756SPatrick Kelsey 	struct ifaltq *ifq = &ifp->if_snd;
4036b8ca4756SPatrick Kelsey 	struct mbuf *m;
4037b8ca4756SPatrick Kelsey 
4038b8ca4756SPatrick Kelsey 	IFQ_LOCK(ifq);
4039b8ca4756SPatrick Kelsey 	IFQ_DEQUEUE_NOLOCK(ifq, m);
4040b8ca4756SPatrick Kelsey 	while (m != NULL) {
4041b8ca4756SPatrick Kelsey 		iflib_if_transmit(ifp, m);
4042b8ca4756SPatrick Kelsey 		IFQ_DEQUEUE_NOLOCK(ifq, m);
4043b8ca4756SPatrick Kelsey 	}
4044b8ca4756SPatrick Kelsey 	IFQ_UNLOCK(ifq);
4045b8ca4756SPatrick Kelsey }
4046b8ca4756SPatrick Kelsey 
4047b8ca4756SPatrick Kelsey static int
4048b8ca4756SPatrick Kelsey iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
4049b8ca4756SPatrick Kelsey {
4050b8ca4756SPatrick Kelsey 	int err;
4051b8ca4756SPatrick Kelsey 
4052b8ca4756SPatrick Kelsey 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
4053b8ca4756SPatrick Kelsey 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
4054b8ca4756SPatrick Kelsey 		if (err == 0)
4055b8ca4756SPatrick Kelsey 			iflib_altq_if_start(ifp);
4056b8ca4756SPatrick Kelsey 	} else
4057b8ca4756SPatrick Kelsey 		err = iflib_if_transmit(ifp, m);
4058b8ca4756SPatrick Kelsey 
4059b8ca4756SPatrick Kelsey 	return (err);
4060b8ca4756SPatrick Kelsey }
4061b8ca4756SPatrick Kelsey #endif /* ALTQ */
4062b8ca4756SPatrick Kelsey 
40634c7070dbSScott Long static void
40644c7070dbSScott Long iflib_if_qflush(if_t ifp)
40654c7070dbSScott Long {
40664c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
40674c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
40684c7070dbSScott Long 	int i;
40694c7070dbSScott Long 
40707b610b60SSean Bruno 	STATE_LOCK(ctx);
40714c7070dbSScott Long 	ctx->ifc_flags |= IFC_QFLUSH;
40727b610b60SSean Bruno 	STATE_UNLOCK(ctx);
40734c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
407495246abbSSean Bruno 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
40754c7070dbSScott Long 			iflib_txq_check_drain(txq, 0);
40767b610b60SSean Bruno 	STATE_LOCK(ctx);
40774c7070dbSScott Long 	ctx->ifc_flags &= ~IFC_QFLUSH;
40787b610b60SSean Bruno 	STATE_UNLOCK(ctx);
40794c7070dbSScott Long 
4080b8ca4756SPatrick Kelsey 	/*
4081b8ca4756SPatrick Kelsey 	 * When ALTQ is enabled, this will also take care of purging the
4082b8ca4756SPatrick Kelsey 	 * ALTQ queue(s).
4083b8ca4756SPatrick Kelsey 	 */
40844c7070dbSScott Long 	if_qflush(ifp);
40854c7070dbSScott Long }
40864c7070dbSScott Long 
40874c7070dbSScott Long 
40880c919c23SStephen Hurd #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
40890c919c23SStephen Hurd 		     IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
40900c919c23SStephen Hurd 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
40910c919c23SStephen Hurd 		     IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM)
40924c7070dbSScott Long 
40934c7070dbSScott Long static int
40944c7070dbSScott Long iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
40954c7070dbSScott Long {
40964c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
40974c7070dbSScott Long 	struct ifreq	*ifr = (struct ifreq *)data;
40984c7070dbSScott Long #if defined(INET) || defined(INET6)
40994c7070dbSScott Long 	struct ifaddr	*ifa = (struct ifaddr *)data;
41004c7070dbSScott Long #endif
41014c7070dbSScott Long 	bool		avoid_reset = FALSE;
41024c7070dbSScott Long 	int		err = 0, reinit = 0, bits;
41034c7070dbSScott Long 
41044c7070dbSScott Long 	switch (command) {
41054c7070dbSScott Long 	case SIOCSIFADDR:
41064c7070dbSScott Long #ifdef INET
41074c7070dbSScott Long 		if (ifa->ifa_addr->sa_family == AF_INET)
41084c7070dbSScott Long 			avoid_reset = TRUE;
41094c7070dbSScott Long #endif
41104c7070dbSScott Long #ifdef INET6
41114c7070dbSScott Long 		if (ifa->ifa_addr->sa_family == AF_INET6)
41124c7070dbSScott Long 			avoid_reset = TRUE;
41134c7070dbSScott Long #endif
41144c7070dbSScott Long 		/*
41154c7070dbSScott Long 		** Calling init results in link renegotiation,
41164c7070dbSScott Long 		** so we avoid doing it when possible.
41174c7070dbSScott Long 		*/
41184c7070dbSScott Long 		if (avoid_reset) {
41194c7070dbSScott Long 			if_setflagbits(ifp, IFF_UP,0);
41204c7070dbSScott Long 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
41214c7070dbSScott Long 				reinit = 1;
41224c7070dbSScott Long #ifdef INET
41234c7070dbSScott Long 			if (!(if_getflags(ifp) & IFF_NOARP))
41244c7070dbSScott Long 				arp_ifinit(ifp, ifa);
41254c7070dbSScott Long #endif
41264c7070dbSScott Long 		} else
41274c7070dbSScott Long 			err = ether_ioctl(ifp, command, data);
41284c7070dbSScott Long 		break;
41294c7070dbSScott Long 	case SIOCSIFMTU:
41304c7070dbSScott Long 		CTX_LOCK(ctx);
41314c7070dbSScott Long 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
41324c7070dbSScott Long 			CTX_UNLOCK(ctx);
41334c7070dbSScott Long 			break;
41344c7070dbSScott Long 		}
41354c7070dbSScott Long 		bits = if_getdrvflags(ifp);
41364c7070dbSScott Long 		/* stop the driver and free any clusters before proceeding */
41374c7070dbSScott Long 		iflib_stop(ctx);
41384c7070dbSScott Long 
41394c7070dbSScott Long 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
41407b610b60SSean Bruno 			STATE_LOCK(ctx);
41414c7070dbSScott Long 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
41424c7070dbSScott Long 				ctx->ifc_flags |= IFC_MULTISEG;
41434c7070dbSScott Long 			else
41444c7070dbSScott Long 				ctx->ifc_flags &= ~IFC_MULTISEG;
41457b610b60SSean Bruno 			STATE_UNLOCK(ctx);
41464c7070dbSScott Long 			err = if_setmtu(ifp, ifr->ifr_mtu);
41474c7070dbSScott Long 		}
41484c7070dbSScott Long 		iflib_init_locked(ctx);
41497b610b60SSean Bruno 		STATE_LOCK(ctx);
41504c7070dbSScott Long 		if_setdrvflags(ifp, bits);
41517b610b60SSean Bruno 		STATE_UNLOCK(ctx);
41524c7070dbSScott Long 		CTX_UNLOCK(ctx);
41534c7070dbSScott Long 		break;
41544c7070dbSScott Long 	case SIOCSIFFLAGS:
4155ab2e3f79SStephen Hurd 		CTX_LOCK(ctx);
4156ab2e3f79SStephen Hurd 		if (if_getflags(ifp) & IFF_UP) {
4157ab2e3f79SStephen Hurd 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4158ab2e3f79SStephen Hurd 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4159ab2e3f79SStephen Hurd 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4160ab2e3f79SStephen Hurd 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4161ab2e3f79SStephen Hurd 				}
4162ab2e3f79SStephen Hurd 			} else
4163ab2e3f79SStephen Hurd 				reinit = 1;
4164ab2e3f79SStephen Hurd 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4165ab2e3f79SStephen Hurd 			iflib_stop(ctx);
4166ab2e3f79SStephen Hurd 		}
4167ab2e3f79SStephen Hurd 		ctx->ifc_if_flags = if_getflags(ifp);
4168ab2e3f79SStephen Hurd 		CTX_UNLOCK(ctx);
41694c7070dbSScott Long 		break;
41704c7070dbSScott Long 	case SIOCADDMULTI:
41714c7070dbSScott Long 	case SIOCDELMULTI:
41724c7070dbSScott Long 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4173ab2e3f79SStephen Hurd 			CTX_LOCK(ctx);
4174ab2e3f79SStephen Hurd 			IFDI_INTR_DISABLE(ctx);
4175ab2e3f79SStephen Hurd 			IFDI_MULTI_SET(ctx);
4176ab2e3f79SStephen Hurd 			IFDI_INTR_ENABLE(ctx);
4177ab2e3f79SStephen Hurd 			CTX_UNLOCK(ctx);
41784c7070dbSScott Long 		}
41794c7070dbSScott Long 		break;
41804c7070dbSScott Long 	case SIOCSIFMEDIA:
41814c7070dbSScott Long 		CTX_LOCK(ctx);
41824c7070dbSScott Long 		IFDI_MEDIA_SET(ctx);
41834c7070dbSScott Long 		CTX_UNLOCK(ctx);
41844c7070dbSScott Long 		/* falls thru */
41854c7070dbSScott Long 	case SIOCGIFMEDIA:
4186a027c8e9SStephen Hurd 	case SIOCGIFXMEDIA:
41874c7070dbSScott Long 		err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
41884c7070dbSScott Long 		break;
41894c7070dbSScott Long 	case SIOCGI2C:
41904c7070dbSScott Long 	{
41914c7070dbSScott Long 		struct ifi2creq i2c;
41924c7070dbSScott Long 
4193541d96aaSBrooks Davis 		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
41944c7070dbSScott Long 		if (err != 0)
41954c7070dbSScott Long 			break;
41964c7070dbSScott Long 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
41974c7070dbSScott Long 			err = EINVAL;
41984c7070dbSScott Long 			break;
41994c7070dbSScott Long 		}
42004c7070dbSScott Long 		if (i2c.len > sizeof(i2c.data)) {
42014c7070dbSScott Long 			err = EINVAL;
42024c7070dbSScott Long 			break;
42034c7070dbSScott Long 		}
42044c7070dbSScott Long 
42054c7070dbSScott Long 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4206541d96aaSBrooks Davis 			err = copyout(&i2c, ifr_data_get_ptr(ifr),
4207541d96aaSBrooks Davis 			    sizeof(i2c));
42084c7070dbSScott Long 		break;
42094c7070dbSScott Long 	}
42104c7070dbSScott Long 	case SIOCSIFCAP:
42114c7070dbSScott Long 	{
42120c919c23SStephen Hurd 		int mask, setmask, oldmask;
42134c7070dbSScott Long 
42140c919c23SStephen Hurd 		oldmask = if_getcapenable(ifp);
42150c919c23SStephen Hurd 		mask = ifr->ifr_reqcap ^ oldmask;
42160c919c23SStephen Hurd 		mask &= ctx->ifc_softc_ctx.isc_capabilities;
42174c7070dbSScott Long 		setmask = 0;
42184c7070dbSScott Long #ifdef TCP_OFFLOAD
42194c7070dbSScott Long 		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
42204c7070dbSScott Long #endif
42214c7070dbSScott Long 		setmask |= (mask & IFCAP_FLAGS);
42220c919c23SStephen Hurd 		setmask |= (mask & IFCAP_WOL);
42234c7070dbSScott Long 
42240c919c23SStephen Hurd 		/*
4225a42546dfSStephen Hurd 		 * If any RX csum has changed, change all the ones that
4226a42546dfSStephen Hurd 		 * are supported by the driver.
42270c919c23SStephen Hurd 		 */
4228a42546dfSStephen Hurd 		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4229a42546dfSStephen Hurd 			setmask |= ctx->ifc_softc_ctx.isc_capabilities &
4230a42546dfSStephen Hurd 			    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4231a42546dfSStephen Hurd 		}
42320c919c23SStephen Hurd 
42334c7070dbSScott Long 		/*
42344c7070dbSScott Long 		 * We want to ensure that traffic has stopped before we change any of the flags.
42354c7070dbSScott Long 		 */
42364c7070dbSScott Long 		if (setmask) {
42374c7070dbSScott Long 			CTX_LOCK(ctx);
42384c7070dbSScott Long 			bits = if_getdrvflags(ifp);
42390c919c23SStephen Hurd 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
42404c7070dbSScott Long 				iflib_stop(ctx);
42417b610b60SSean Bruno 			STATE_LOCK(ctx);
42424c7070dbSScott Long 			if_togglecapenable(ifp, setmask);
42437b610b60SSean Bruno 			STATE_UNLOCK(ctx);
42440c919c23SStephen Hurd 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
42454c7070dbSScott Long 				iflib_init_locked(ctx);
42467b610b60SSean Bruno 			STATE_LOCK(ctx);
42474c7070dbSScott Long 			if_setdrvflags(ifp, bits);
42487b610b60SSean Bruno 			STATE_UNLOCK(ctx);
42494c7070dbSScott Long 			CTX_UNLOCK(ctx);
42504c7070dbSScott Long 		}
42510c919c23SStephen Hurd 		if_vlancap(ifp);
42524c7070dbSScott Long 		break;
42534c7070dbSScott Long 	}
42544c7070dbSScott Long 	case SIOCGPRIVATE_0:
42554c7070dbSScott Long 	case SIOCSDRVSPEC:
42564c7070dbSScott Long 	case SIOCGDRVSPEC:
42574c7070dbSScott Long 		CTX_LOCK(ctx);
42584c7070dbSScott Long 		err = IFDI_PRIV_IOCTL(ctx, command, data);
42594c7070dbSScott Long 		CTX_UNLOCK(ctx);
42604c7070dbSScott Long 		break;
42614c7070dbSScott Long 	default:
42624c7070dbSScott Long 		err = ether_ioctl(ifp, command, data);
42634c7070dbSScott Long 		break;
42644c7070dbSScott Long 	}
42654c7070dbSScott Long 	if (reinit)
42664c7070dbSScott Long 		iflib_if_init(ctx);
42674c7070dbSScott Long 	return (err);
42684c7070dbSScott Long }
42694c7070dbSScott Long 
42704c7070dbSScott Long static uint64_t
42714c7070dbSScott Long iflib_if_get_counter(if_t ifp, ift_counter cnt)
42724c7070dbSScott Long {
42734c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
42744c7070dbSScott Long 
42754c7070dbSScott Long 	return (IFDI_GET_COUNTER(ctx, cnt));
42764c7070dbSScott Long }
42774c7070dbSScott Long 
42784c7070dbSScott Long /*********************************************************************
42794c7070dbSScott Long  *
42804c7070dbSScott Long  *  OTHER FUNCTIONS EXPORTED TO THE STACK
42814c7070dbSScott Long  *
42824c7070dbSScott Long  **********************************************************************/
42834c7070dbSScott Long 
42844c7070dbSScott Long static void
42854c7070dbSScott Long iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
42864c7070dbSScott Long {
42874c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
42884c7070dbSScott Long 
42894c7070dbSScott Long 	if ((void *)ctx != arg)
42904c7070dbSScott Long 		return;
42914c7070dbSScott Long 
42924c7070dbSScott Long 	if ((vtag == 0) || (vtag > 4095))
42934c7070dbSScott Long 		return;
42944c7070dbSScott Long 
42954c7070dbSScott Long 	CTX_LOCK(ctx);
42964c7070dbSScott Long 	IFDI_VLAN_REGISTER(ctx, vtag);
42974c7070dbSScott Long 	/* Re-init to load the changes */
42984c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
429921e10b16SSean Bruno 		iflib_if_init_locked(ctx);
43004c7070dbSScott Long 	CTX_UNLOCK(ctx);
43014c7070dbSScott Long }
43024c7070dbSScott Long 
43034c7070dbSScott Long static void
43044c7070dbSScott Long iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
43054c7070dbSScott Long {
43064c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
43074c7070dbSScott Long 
43084c7070dbSScott Long 	if ((void *)ctx != arg)
43094c7070dbSScott Long 		return;
43104c7070dbSScott Long 
43114c7070dbSScott Long 	if ((vtag == 0) || (vtag > 4095))
43124c7070dbSScott Long 		return;
43134c7070dbSScott Long 
43144c7070dbSScott Long 	CTX_LOCK(ctx);
43154c7070dbSScott Long 	IFDI_VLAN_UNREGISTER(ctx, vtag);
43164c7070dbSScott Long 	/* Re-init to load the changes */
43174c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
431821e10b16SSean Bruno 		iflib_if_init_locked(ctx);
43194c7070dbSScott Long 	CTX_UNLOCK(ctx);
43204c7070dbSScott Long }
43214c7070dbSScott Long 
43224c7070dbSScott Long static void
43234c7070dbSScott Long iflib_led_func(void *arg, int onoff)
43244c7070dbSScott Long {
43254c7070dbSScott Long 	if_ctx_t ctx = arg;
43264c7070dbSScott Long 
43274c7070dbSScott Long 	CTX_LOCK(ctx);
43284c7070dbSScott Long 	IFDI_LED_FUNC(ctx, onoff);
43294c7070dbSScott Long 	CTX_UNLOCK(ctx);
43304c7070dbSScott Long }
43314c7070dbSScott Long 
43324c7070dbSScott Long /*********************************************************************
43334c7070dbSScott Long  *
43344c7070dbSScott Long  *  BUS FUNCTION DEFINITIONS
43354c7070dbSScott Long  *
43364c7070dbSScott Long  **********************************************************************/
43374c7070dbSScott Long 
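/*
 * Match the PCI IDs of the device against the driver's vendor info table
 * and, on a hit, set the device description and claim the device.
 */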
43384c7070dbSScott Long int
43394c7070dbSScott Long iflib_device_probe(device_t dev)
43404c7070dbSScott Long {
43414c7070dbSScott Long 	pci_vendor_info_t *ent;
43424c7070dbSScott Long 
43434c7070dbSScott Long 	uint16_t	pci_vendor_id, pci_device_id;
43444c7070dbSScott Long 	uint16_t	pci_subvendor_id, pci_subdevice_id;
43454c7070dbSScott Long 	uint16_t	pci_rev_id;
43464c7070dbSScott Long 	if_shared_ctx_t sctx;
43474c7070dbSScott Long 
43484c7070dbSScott Long 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
43494c7070dbSScott Long 		return (ENOTSUP);
43504c7070dbSScott Long 
43514c7070dbSScott Long 	pci_vendor_id = pci_get_vendor(dev);
43524c7070dbSScott Long 	pci_device_id = pci_get_device(dev);
43534c7070dbSScott Long 	pci_subvendor_id = pci_get_subvendor(dev);
43544c7070dbSScott Long 	pci_subdevice_id = pci_get_subdevice(dev);
43554c7070dbSScott Long 	pci_rev_id = pci_get_revid(dev);
43564c7070dbSScott Long 	if (sctx->isc_parse_devinfo != NULL)
43574c7070dbSScott Long 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
43584c7070dbSScott Long 
43594c7070dbSScott Long 	ent = sctx->isc_vendor_info;
43604c7070dbSScott Long 	while (ent->pvi_vendor_id != 0) {
43614c7070dbSScott Long 		if (pci_vendor_id != ent->pvi_vendor_id) {
43624c7070dbSScott Long 			ent++;
43634c7070dbSScott Long 			continue;
43644c7070dbSScott Long 		}
43654c7070dbSScott Long 		if ((pci_device_id == ent->pvi_device_id) &&
43664c7070dbSScott Long 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
43674c7070dbSScott Long 		     (ent->pvi_subvendor_id == 0)) &&
43684c7070dbSScott Long 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
43694c7070dbSScott Long 		     (ent->pvi_subdevice_id == 0)) &&
43704c7070dbSScott Long 		    ((pci_rev_id == ent->pvi_rev_id) ||
43714c7070dbSScott Long 		     (ent->pvi_rev_id == 0))) {
43724c7070dbSScott Long 
43734c7070dbSScott Long 			device_set_desc_copy(dev, ent->pvi_name);
43744c7070dbSScott Long 			/* This needs to be changed to zero if the bus probing code
43754c7070dbSScott Long 			 * ever stops re-probing on best match, because the sctx
43764c7070dbSScott Long 			 * may have its values overwritten by register calls
43774c7070dbSScott Long 			 * in subsequent probes.
43784c7070dbSScott Long 			 */
43794c7070dbSScott Long 			return (BUS_PROBE_DEFAULT);
43804c7070dbSScott Long 		}
43814c7070dbSScott Long 		ent++;
43824c7070dbSScott Long 	}
43834c7070dbSScott Long 	return (ENXIO);
43844c7070dbSScott Long }
43854c7070dbSScott Long 
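/*
 * Seed the softc context with default budget/queue-depth values, apply any
 * sysctl-requested queue and descriptor counts, fall back to the shared
 * context defaults, and clamp each descriptor ring size to the driver's
 * advertised minimum and maximum.
 */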
438609f6ff4fSMatt Macy static void
438709f6ff4fSMatt Macy iflib_reset_qvalues(if_ctx_t ctx)
43884c7070dbSScott Long {
438909f6ff4fSMatt Macy 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
439009f6ff4fSMatt Macy 	if_shared_ctx_t sctx = ctx->ifc_sctx;
439109f6ff4fSMatt Macy 	device_t dev = ctx->ifc_dev;
439246d0f824SMatt Macy 	int i;
43934c7070dbSScott Long 
439409f6ff4fSMatt Macy 	scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES;
439509f6ff4fSMatt Macy 	scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH;
439623ac9029SStephen Hurd 	/*
439723ac9029SStephen Hurd 	 * XXX sanity check that ntxd & nrxd are a power of 2
439823ac9029SStephen Hurd 	 */
439923ac9029SStephen Hurd 	if (ctx->ifc_sysctl_ntxqs != 0)
440023ac9029SStephen Hurd 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
440123ac9029SStephen Hurd 	if (ctx->ifc_sysctl_nrxqs != 0)
440223ac9029SStephen Hurd 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
440323ac9029SStephen Hurd 
440423ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
440523ac9029SStephen Hurd 		if (ctx->ifc_sysctl_ntxds[i] != 0)
440623ac9029SStephen Hurd 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
440723ac9029SStephen Hurd 		else
440823ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
440923ac9029SStephen Hurd 	}
441023ac9029SStephen Hurd 
441123ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
441223ac9029SStephen Hurd 		if (ctx->ifc_sysctl_nrxds[i] != 0)
441323ac9029SStephen Hurd 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
441423ac9029SStephen Hurd 		else
441523ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
441623ac9029SStephen Hurd 	}
441723ac9029SStephen Hurd 
441823ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
441923ac9029SStephen Hurd 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
442023ac9029SStephen Hurd 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
442123ac9029SStephen Hurd 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
442223ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
442323ac9029SStephen Hurd 		}
442423ac9029SStephen Hurd 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
442523ac9029SStephen Hurd 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
442623ac9029SStephen Hurd 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
442723ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
442823ac9029SStephen Hurd 		}
442923ac9029SStephen Hurd 	}
443023ac9029SStephen Hurd 
443123ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
443223ac9029SStephen Hurd 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
443323ac9029SStephen Hurd 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
443423ac9029SStephen Hurd 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
443523ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
443623ac9029SStephen Hurd 		}
443723ac9029SStephen Hurd 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
443823ac9029SStephen Hurd 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
443923ac9029SStephen Hurd 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
444023ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
444123ac9029SStephen Hurd 		}
444223ac9029SStephen Hurd 	}
444309f6ff4fSMatt Macy }
4444ab2e3f79SStephen Hurd 
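/*
 * Register an inbound ethernet pfil(9) head named after this interface and
 * attach it to every receive queue so the RX path can run packet filter
 * hooks.
 */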
44456d49b41eSAndrew Gallatin static void
44466d49b41eSAndrew Gallatin iflib_add_pfil(if_ctx_t ctx)
44476d49b41eSAndrew Gallatin {
44486d49b41eSAndrew Gallatin 	struct pfil_head *pfil;
44496d49b41eSAndrew Gallatin 	struct pfil_head_args pa;
44506d49b41eSAndrew Gallatin 	iflib_rxq_t rxq;
44516d49b41eSAndrew Gallatin 	int i;
44526d49b41eSAndrew Gallatin 
44536d49b41eSAndrew Gallatin 	pa.pa_version = PFIL_VERSION;
44546d49b41eSAndrew Gallatin 	pa.pa_flags = PFIL_IN;
44556d49b41eSAndrew Gallatin 	pa.pa_type = PFIL_TYPE_ETHERNET;
44566d49b41eSAndrew Gallatin 	pa.pa_headname = ctx->ifc_ifp->if_xname;
44576d49b41eSAndrew Gallatin 	pfil = pfil_head_register(&pa);
44586d49b41eSAndrew Gallatin 
44596d49b41eSAndrew Gallatin 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
44606d49b41eSAndrew Gallatin 		rxq->pfil = pfil;
44616d49b41eSAndrew Gallatin 	}
44626d49b41eSAndrew Gallatin }
44636d49b41eSAndrew Gallatin 
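/*
 * Detach the pfil(9) head from every receive queue and unregister it.
 */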
44646d49b41eSAndrew Gallatin static void
44656d49b41eSAndrew Gallatin iflib_rem_pfil(if_ctx_t ctx)
44666d49b41eSAndrew Gallatin {
44676d49b41eSAndrew Gallatin 	struct pfil_head *pfil;
44686d49b41eSAndrew Gallatin 	iflib_rxq_t rxq;
44696d49b41eSAndrew Gallatin 	int i;
44706d49b41eSAndrew Gallatin 
44716d49b41eSAndrew Gallatin 	rxq = ctx->ifc_rxqs;
44726d49b41eSAndrew Gallatin 	pfil = rxq->pfil;
44736d49b41eSAndrew Gallatin 	for (i = 0; i < NRXQSETS(ctx); i++, rxq++) {
44746d49b41eSAndrew Gallatin 		rxq->pfil = NULL;
44756d49b41eSAndrew Gallatin 	}
44766d49b41eSAndrew Gallatin 	pfil_head_unregister(pfil);
44776d49b41eSAndrew Gallatin }
44786d49b41eSAndrew Gallatin 
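/*
 * Return the CPU core offset at which this context's queues should start.
 * An explicitly configured ifc_sysctl_core_offset wins; otherwise look up
 * (or create) the bookkeeping entry for this context's CPU set and hand
 * out the next free offset, advancing it by the queue count so that
 * interfaces sharing a CPU set are spread across different cores.
 */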
4479*f154ece0SStephen Hurd static uint16_t
4480*f154ece0SStephen Hurd get_ctx_core_offset(if_ctx_t ctx)
4481*f154ece0SStephen Hurd {
4482*f154ece0SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4483*f154ece0SStephen Hurd 	struct cpu_offset *op;
4484*f154ece0SStephen Hurd 	uint16_t qc;
4485*f154ece0SStephen Hurd 	uint16_t ret = ctx->ifc_sysctl_core_offset;
4486*f154ece0SStephen Hurd 
4487*f154ece0SStephen Hurd 	if (ret != CORE_OFFSET_UNSPECIFIED)
4488*f154ece0SStephen Hurd 		return (ret);
4489*f154ece0SStephen Hurd 
4490*f154ece0SStephen Hurd 	if (ctx->ifc_sysctl_separate_txrx)
4491*f154ece0SStephen Hurd 		qc = scctx->isc_ntxqsets + scctx->isc_nrxqsets;
4492*f154ece0SStephen Hurd 	else
4493*f154ece0SStephen Hurd 		qc = max(scctx->isc_ntxqsets, scctx->isc_nrxqsets);
4494*f154ece0SStephen Hurd 
4495*f154ece0SStephen Hurd 	mtx_lock(&cpu_offset_mtx);
4496*f154ece0SStephen Hurd 	SLIST_FOREACH(op, &cpu_offsets, entries) {
4497*f154ece0SStephen Hurd 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
4498*f154ece0SStephen Hurd 			ret = op->offset;
4499*f154ece0SStephen Hurd 			op->offset += qc;
4500*f154ece0SStephen Hurd 			MPASS(op->refcount < UINT_MAX);
4501*f154ece0SStephen Hurd 			op->refcount++;
4502*f154ece0SStephen Hurd 			break;
4503*f154ece0SStephen Hurd 		}
4504*f154ece0SStephen Hurd 	}
4505*f154ece0SStephen Hurd 	if (ret == CORE_OFFSET_UNSPECIFIED) {
4506*f154ece0SStephen Hurd 		ret = 0;
4507*f154ece0SStephen Hurd 		op = malloc(sizeof(struct cpu_offset), M_IFLIB,
4508*f154ece0SStephen Hurd 		    M_NOWAIT | M_ZERO);
4509*f154ece0SStephen Hurd 		if (op == NULL) {
4510*f154ece0SStephen Hurd 			device_printf(ctx->ifc_dev,
4511*f154ece0SStephen Hurd 			    "allocation for cpu offset failed.\n");
4512*f154ece0SStephen Hurd 		} else {
4513*f154ece0SStephen Hurd 			op->offset = qc;
4514*f154ece0SStephen Hurd 			op->refcount = 1;
4515*f154ece0SStephen Hurd 			CPU_COPY(&ctx->ifc_cpus, &op->set);
4516*f154ece0SStephen Hurd 			SLIST_INSERT_HEAD(&cpu_offsets, op, entries);
4517*f154ece0SStephen Hurd 		}
4518*f154ece0SStephen Hurd 	}
4519*f154ece0SStephen Hurd 	mtx_unlock(&cpu_offset_mtx);
4520*f154ece0SStephen Hurd 
4521*f154ece0SStephen Hurd 	return (ret);
4522*f154ece0SStephen Hurd }
4523*f154ece0SStephen Hurd 
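/*
 * Drop this context's reference on its CPU-set offset entry and free the
 * entry once the last interface using that CPU set is gone.
 */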
4524*f154ece0SStephen Hurd static void
4525*f154ece0SStephen Hurd unref_ctx_core_offset(if_ctx_t ctx)
4526*f154ece0SStephen Hurd {
4527*f154ece0SStephen Hurd 	struct cpu_offset *op, *top;
4528*f154ece0SStephen Hurd 
4529*f154ece0SStephen Hurd 	mtx_lock(&cpu_offset_mtx);
4530*f154ece0SStephen Hurd 	SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) {
4531*f154ece0SStephen Hurd 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
4532*f154ece0SStephen Hurd 			MPASS(op->refcount > 0);
4533*f154ece0SStephen Hurd 			op->refcount--;
4534*f154ece0SStephen Hurd 			if (op->refcount == 0) {
4535*f154ece0SStephen Hurd 				SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries);
4536*f154ece0SStephen Hurd 				free(op, M_IFLIB);
4537*f154ece0SStephen Hurd 			}
4538*f154ece0SStephen Hurd 			break;
4539*f154ece0SStephen Hurd 		}
4540*f154ece0SStephen Hurd 	}
4541*f154ece0SStephen Hurd 	mtx_unlock(&cpu_offset_mtx);
4542*f154ece0SStephen Hurd }
4543*f154ece0SStephen Hurd 
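/*
 * Common attach path for iflib PCI drivers: allocate and register the
 * context and ifnet, bracket queue, interrupt, and netmap setup with
 * IFDI_ATTACH_PRE()/IFDI_ATTACH_POST(), and return the new context to the
 * caller through *ctxp.
 */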
454409f6ff4fSMatt Macy int
454509f6ff4fSMatt Macy iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
454609f6ff4fSMatt Macy {
454709f6ff4fSMatt Macy 	int err, rid, msix;
454809f6ff4fSMatt Macy 	if_ctx_t ctx;
454909f6ff4fSMatt Macy 	if_t ifp;
455009f6ff4fSMatt Macy 	if_softc_ctx_t scctx;
455109f6ff4fSMatt Macy 	int i;
455209f6ff4fSMatt Macy 	uint16_t main_txq;
455309f6ff4fSMatt Macy 	uint16_t main_rxq;
455409f6ff4fSMatt Macy 
455509f6ff4fSMatt Macy 
455609f6ff4fSMatt Macy 	ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
455709f6ff4fSMatt Macy 
455809f6ff4fSMatt Macy 	if (sc == NULL) {
455909f6ff4fSMatt Macy 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
456009f6ff4fSMatt Macy 		device_set_softc(dev, ctx);
456109f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
456209f6ff4fSMatt Macy 	}
456309f6ff4fSMatt Macy 
456409f6ff4fSMatt Macy 	ctx->ifc_sctx = sctx;
456509f6ff4fSMatt Macy 	ctx->ifc_dev = dev;
456609f6ff4fSMatt Macy 	ctx->ifc_softc = sc;
456709f6ff4fSMatt Macy 
456809f6ff4fSMatt Macy 	if ((err = iflib_register(ctx)) != 0) {
456909f6ff4fSMatt Macy 		device_printf(dev, "iflib_register failed %d\n", err);
45707f3eb9daSPatrick Kelsey 		goto fail_ctx_free;
457109f6ff4fSMatt Macy 	}
457209f6ff4fSMatt Macy 	iflib_add_device_sysctl_pre(ctx);
457309f6ff4fSMatt Macy 
457409f6ff4fSMatt Macy 	scctx = &ctx->ifc_softc_ctx;
457509f6ff4fSMatt Macy 	ifp = ctx->ifc_ifp;
457609f6ff4fSMatt Macy 
457709f6ff4fSMatt Macy 	iflib_reset_qvalues(ctx);
4578aa8a24d3SStephen Hurd 	CTX_LOCK(ctx);
4579ab2e3f79SStephen Hurd 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
45804c7070dbSScott Long 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
45817f3eb9daSPatrick Kelsey 		goto fail_unlock;
45824c7070dbSScott Long 	}
45831248952aSSean Bruno 	_iflib_pre_assert(scctx);
45841248952aSSean Bruno 	ctx->ifc_txrx = *scctx->isc_txrx;
45851248952aSSean Bruno 
45861248952aSSean Bruno #ifdef INVARIANTS
45877f87c040SMarius Strobl 	MPASS(scctx->isc_capabilities);
45887f87c040SMarius Strobl 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
45891248952aSSean Bruno 		MPASS(scctx->isc_tx_csum_flags);
45901248952aSSean Bruno #endif
45911248952aSSean Bruno 
45927f87c040SMarius Strobl 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS);
459318a660b3SSean Bruno 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
45941248952aSSean Bruno 
45951248952aSSean Bruno 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
45961248952aSSean Bruno 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
45971248952aSSean Bruno 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
45981248952aSSean Bruno 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
459923ac9029SStephen Hurd 
460095246abbSSean Bruno 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
460195246abbSSean Bruno 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
460223ac9029SStephen Hurd 
460323ac9029SStephen Hurd 	/* XXX change for per-queue sizes */
4604b97de13aSMarius Strobl 	device_printf(dev, "Using %d tx descriptors and %d rx descriptors\n",
460523ac9029SStephen Hurd 	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
460623ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
460723ac9029SStephen Hurd 		if (!powerof2(scctx->isc_nrxd[i])) {
460823ac9029SStephen Hurd 			/* round down instead? */
460923ac9029SStephen Hurd 			device_printf(dev, "# rx descriptors must be a power of 2\n");
461023ac9029SStephen Hurd 			err = EINVAL;
46117f3eb9daSPatrick Kelsey 			goto fail_iflib_detach;
461223ac9029SStephen Hurd 		}
461323ac9029SStephen Hurd 	}
461423ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
461523ac9029SStephen Hurd 		if (!powerof2(scctx->isc_ntxd[i])) {
461623ac9029SStephen Hurd 			device_printf(dev,
461723ac9029SStephen Hurd 			    "# tx descriptors must be a power of 2\n");
461823ac9029SStephen Hurd 			err = EINVAL;
46197f3eb9daSPatrick Kelsey 			goto fail_iflib_detach;
462023ac9029SStephen Hurd 		}
462123ac9029SStephen Hurd 	}
462223ac9029SStephen Hurd 
462323ac9029SStephen Hurd 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
462423ac9029SStephen Hurd 	    MAX_SINGLE_PACKET_FRACTION)
462523ac9029SStephen Hurd 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
462623ac9029SStephen Hurd 		    MAX_SINGLE_PACKET_FRACTION);
462723ac9029SStephen Hurd 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
462823ac9029SStephen Hurd 	    MAX_SINGLE_PACKET_FRACTION)
462923ac9029SStephen Hurd 		scctx->isc_tx_tso_segments_max = max(1,
463023ac9029SStephen Hurd 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
46314c7070dbSScott Long 
46324c7070dbSScott Long 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
46337f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
46347f87c040SMarius Strobl 		/*
46357f87c040SMarius Strobl 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
46367f87c040SMarius Strobl 		 * but some MACs do.
46377f87c040SMarius Strobl 		 */
46387f87c040SMarius Strobl 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
46397f87c040SMarius Strobl 		    IP_MAXPACKET));
46407f87c040SMarius Strobl 		/*
46417f87c040SMarius Strobl 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
46427f87c040SMarius Strobl 		 * into account.  In the worst case, each of these calls will
46437f87c040SMarius Strobl 		 * add another mbuf and, thus, the requirement for another DMA
46447f87c040SMarius Strobl 		 * segment.  So for best performance, it doesn't make sense to
46457f87c040SMarius Strobl 		 * advertise a maximum of TSO segments that typically will
46467f87c040SMarius Strobl 		 * require defragmentation in iflib_encap().
46477f87c040SMarius Strobl 		 */
46487f87c040SMarius Strobl 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
46497f87c040SMarius Strobl 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
46507f87c040SMarius Strobl 	}
46514c7070dbSScott Long 	if (scctx->isc_rss_table_size == 0)
46524c7070dbSScott Long 		scctx->isc_rss_table_size = 64;
465323ac9029SStephen Hurd 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4654da69b8f9SSean Bruno 
4655da69b8f9SSean Bruno 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4656da69b8f9SSean Bruno 	/* XXX format name */
4657f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
4658f855ec81SMarius Strobl 	    NULL, NULL, "admin");
4659e516b535SStephen Hurd 
4660772593dbSStephen Hurd 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
4661e516b535SStephen Hurd 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4662e516b535SStephen Hurd 		device_printf(dev, "Unable to fetch CPU list\n");
4663e516b535SStephen Hurd 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4664e516b535SStephen Hurd 	}
4665e516b535SStephen Hurd 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4666e516b535SStephen Hurd 
46674c7070dbSScott Long 	/*
4668b97de13aSMarius Strobl 	** Now set up MSI or MSI-X, should return us the number of supported
4669b97de13aSMarius Strobl 	** vectors (will be 1 for a legacy interrupt and MSI).
46704c7070dbSScott Long 	*/
46714c7070dbSScott Long 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
46724c7070dbSScott Long 		msix = scctx->isc_vectors;
46734c7070dbSScott Long 	} else if (scctx->isc_msix_bar != 0)
4674f7ae9a84SSean Bruno 	       /*
4675f7ae9a84SSean Bruno 		* The simple fact that isc_msix_bar is not 0 does not mean
4676f7ae9a84SSean Bruno 		* we have a good value there that is known to work.
4677f7ae9a84SSean Bruno 		*/
46784c7070dbSScott Long 		msix = iflib_msix_init(ctx);
46794c7070dbSScott Long 	else {
46804c7070dbSScott Long 		scctx->isc_vectors = 1;
46814c7070dbSScott Long 		scctx->isc_ntxqsets = 1;
46824c7070dbSScott Long 		scctx->isc_nrxqsets = 1;
46834c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_LEGACY;
46844c7070dbSScott Long 		msix = 0;
46854c7070dbSScott Long 	}
46864c7070dbSScott Long 	/* Get memory for the station queues */
46874c7070dbSScott Long 	if ((err = iflib_queues_alloc(ctx))) {
46884c7070dbSScott Long 		device_printf(dev, "Unable to allocate queue memory\n");
46897f3eb9daSPatrick Kelsey 		goto fail_intr_free;
46904c7070dbSScott Long 	}
46914c7070dbSScott Long 
4692ac88e6daSStephen Hurd 	if ((err = iflib_qset_structures_setup(ctx)))
46934c7070dbSScott Long 		goto fail_queues;
469469b7fc3eSSean Bruno 
4695bd84f700SSean Bruno 	/*
4696*f154ece0SStephen Hurd 	 * Now that we know how many queues there are, get the core offset.
4697*f154ece0SStephen Hurd 	 */
4698*f154ece0SStephen Hurd 	ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx);
4699*f154ece0SStephen Hurd 
4700*f154ece0SStephen Hurd 	/*
4701bd84f700SSean Bruno 	 * Group taskqueues aren't properly set up until SMP is started,
4702bd84f700SSean Bruno 	 * so we disable interrupts until we can handle them post
4703bd84f700SSean Bruno 	 * SI_SUB_SMP.
4704bd84f700SSean Bruno 	 *
4705bd84f700SSean Bruno 	 * XXX: disabling interrupts doesn't actually work, at least for
4706bd84f700SSean Bruno 	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
4707bd84f700SSean Bruno 	 * we do null handling and depend on this not causing too large an
4708bd84f700SSean Bruno 	 * interrupt storm.
4709bd84f700SSean Bruno 	 */
47101248952aSSean Bruno 	IFDI_INTR_DISABLE(ctx);
47114c7070dbSScott Long 	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
47124c7070dbSScott Long 		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
47137f3eb9daSPatrick Kelsey 		goto fail_queues;
47144c7070dbSScott Long 	}
47154c7070dbSScott Long 	if (msix <= 1) {
47164c7070dbSScott Long 		rid = 0;
47174c7070dbSScott Long 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
47184c7070dbSScott Long 			MPASS(msix == 1);
47194c7070dbSScott Long 			rid = 1;
47204c7070dbSScott Long 		}
472123ac9029SStephen Hurd 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
47224c7070dbSScott Long 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
47237f3eb9daSPatrick Kelsey 			goto fail_queues;
47244c7070dbSScott Long 		}
47254c7070dbSScott Long 	}
47267f87c040SMarius Strobl 
47271fd8c72cSKyle Evans 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
47287f87c040SMarius Strobl 
4729ab2e3f79SStephen Hurd 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
47304c7070dbSScott Long 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
47314c7070dbSScott Long 		goto fail_detach;
47324c7070dbSScott Long 	}
47337f87c040SMarius Strobl 
47347f87c040SMarius Strobl 	/*
47357f87c040SMarius Strobl 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
47367f87c040SMarius Strobl 	 * This must appear after the call to ether_ifattach() because
47377f87c040SMarius Strobl 	 * ether_ifattach() sets if_hdrlen to the default value.
47387f87c040SMarius Strobl 	 */
47397f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
47407f87c040SMarius Strobl 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
47417f87c040SMarius Strobl 
47424c7070dbSScott Long 	if ((err = iflib_netmap_attach(ctx))) {
47434c7070dbSScott Long 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
47444c7070dbSScott Long 		goto fail_detach;
47454c7070dbSScott Long 	}
47464c7070dbSScott Long 	*ctxp = ctx;
47474c7070dbSScott Long 
474894618825SMark Johnston 	NETDUMP_SET(ctx->ifc_ifp, iflib);
474994618825SMark Johnston 
475023ac9029SStephen Hurd 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
47514c7070dbSScott Long 	iflib_add_device_sysctl_post(ctx);
47526d49b41eSAndrew Gallatin 	iflib_add_pfil(ctx);
47534ecb427aSSean Bruno 	ctx->ifc_flags |= IFC_INIT_DONE;
4754aa8a24d3SStephen Hurd 	CTX_UNLOCK(ctx);
47554c7070dbSScott Long 	return (0);
475677c1fcecSEric Joyner 
47574c7070dbSScott Long fail_detach:
47584c7070dbSScott Long 	ether_ifdetach(ctx->ifc_ifp);
47594c7070dbSScott Long fail_intr_free:
47607f3eb9daSPatrick Kelsey 	iflib_free_intr_mem(ctx);
47614c7070dbSScott Long fail_queues:
47626108c013SStephen Hurd 	iflib_tx_structures_free(ctx);
47636108c013SStephen Hurd 	iflib_rx_structures_free(ctx);
47647f3eb9daSPatrick Kelsey fail_iflib_detach:
47654c7070dbSScott Long 	IFDI_DETACH(ctx);
47667f3eb9daSPatrick Kelsey fail_unlock:
4767aa8a24d3SStephen Hurd 	CTX_UNLOCK(ctx);
47687f3eb9daSPatrick Kelsey fail_ctx_free:
47697f3eb9daSPatrick Kelsey 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
47707f3eb9daSPatrick Kelsey 		free(ctx->ifc_softc, M_IFLIB);
47717f3eb9daSPatrick Kelsey 	free(ctx, M_IFLIB);
47724c7070dbSScott Long 	return (err);
47734c7070dbSScott Long }
47744c7070dbSScott Long 
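/*
 * Register a pseudo (cloned) interface.  The driver is attached via
 * IFDI_CLONEATTACH(), no MSI/MSI-X resources are set up, and the media
 * list defaults to autoselect.
 */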
47754c7070dbSScott Long int
477609f6ff4fSMatt Macy iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
477709f6ff4fSMatt Macy 					  struct iflib_cloneattach_ctx *clctx)
477809f6ff4fSMatt Macy {
477909f6ff4fSMatt Macy 	int err;
478009f6ff4fSMatt Macy 	if_ctx_t ctx;
478109f6ff4fSMatt Macy 	if_t ifp;
478209f6ff4fSMatt Macy 	if_softc_ctx_t scctx;
478309f6ff4fSMatt Macy 	int i;
478409f6ff4fSMatt Macy 	void *sc;
478509f6ff4fSMatt Macy 	uint16_t main_txq;
478609f6ff4fSMatt Macy 	uint16_t main_rxq;
478709f6ff4fSMatt Macy 
478809f6ff4fSMatt Macy 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
478909f6ff4fSMatt Macy 	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
479009f6ff4fSMatt Macy 	ctx->ifc_flags |= IFC_SC_ALLOCATED;
479109f6ff4fSMatt Macy 	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
479209f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_PSEUDO;
479309f6ff4fSMatt Macy 
479409f6ff4fSMatt Macy 	ctx->ifc_sctx = sctx;
479509f6ff4fSMatt Macy 	ctx->ifc_softc = sc;
479609f6ff4fSMatt Macy 	ctx->ifc_dev = dev;
479709f6ff4fSMatt Macy 
479809f6ff4fSMatt Macy 	if ((err = iflib_register(ctx)) != 0) {
479909f6ff4fSMatt Macy 		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
48007f3eb9daSPatrick Kelsey 		goto fail_ctx_free;
480109f6ff4fSMatt Macy 	}
480209f6ff4fSMatt Macy 	iflib_add_device_sysctl_pre(ctx);
480309f6ff4fSMatt Macy 
480409f6ff4fSMatt Macy 	scctx = &ctx->ifc_softc_ctx;
480509f6ff4fSMatt Macy 	ifp = ctx->ifc_ifp;
480609f6ff4fSMatt Macy 
480709f6ff4fSMatt Macy 	/*
480809f6ff4fSMatt Macy 	 * XXX sanity check that ntxd & nrxd are a power of 2
480909f6ff4fSMatt Macy 	 */
481009f6ff4fSMatt Macy 	iflib_reset_qvalues(ctx);
4811aac9c817SEric Joyner 	CTX_LOCK(ctx);
481209f6ff4fSMatt Macy 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
481309f6ff4fSMatt Macy 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4814aac9c817SEric Joyner 		goto fail_unlock;
481509f6ff4fSMatt Macy 	}
481609f6ff4fSMatt Macy 	if (sctx->isc_flags & IFLIB_GEN_MAC)
48171fd8c72cSKyle Evans 		ether_gen_addr(ifp, &ctx->ifc_mac);
481809f6ff4fSMatt Macy 	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
481909f6ff4fSMatt Macy 								clctx->cc_params)) != 0) {
482009f6ff4fSMatt Macy 		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
48217f3eb9daSPatrick Kelsey 		goto fail_ctx_free;
482209f6ff4fSMatt Macy 	}
482309f6ff4fSMatt Macy 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
482409f6ff4fSMatt Macy 	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
482509f6ff4fSMatt Macy 	ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO);
482609f6ff4fSMatt Macy 
482709f6ff4fSMatt Macy #ifdef INVARIANTS
48287f87c040SMarius Strobl 	MPASS(scctx->isc_capabilities);
48297f87c040SMarius Strobl 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
483009f6ff4fSMatt Macy 		MPASS(scctx->isc_tx_csum_flags);
483109f6ff4fSMatt Macy #endif
483209f6ff4fSMatt Macy 
48337f87c040SMarius Strobl 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
483409f6ff4fSMatt Macy 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
483509f6ff4fSMatt Macy 
483609f6ff4fSMatt Macy 	ifp->if_flags |= IFF_NOGROUP;
483709f6ff4fSMatt Macy 	if (sctx->isc_flags & IFLIB_PSEUDO) {
48381fd8c72cSKyle Evans 		ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
483909f6ff4fSMatt Macy 
484009f6ff4fSMatt Macy 		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
484109f6ff4fSMatt Macy 			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
484209f6ff4fSMatt Macy 			goto fail_detach;
484309f6ff4fSMatt Macy 		}
484409f6ff4fSMatt Macy 		*ctxp = ctx;
484509f6ff4fSMatt Macy 
48467f87c040SMarius Strobl 		/*
48477f87c040SMarius Strobl 		 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
48487f87c040SMarius Strobl 		 * This must appear after the call to ether_ifattach() because
48497f87c040SMarius Strobl 		 * ether_ifattach() sets if_hdrlen to the default value.
48507f87c040SMarius Strobl 		 */
48517f87c040SMarius Strobl 		if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
48527f87c040SMarius Strobl 			if_setifheaderlen(ifp,
48537f87c040SMarius Strobl 			    sizeof(struct ether_vlan_header));
48547f87c040SMarius Strobl 
485509f6ff4fSMatt Macy 		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
485609f6ff4fSMatt Macy 		iflib_add_device_sysctl_post(ctx);
485709f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_INIT_DONE;
		CTX_UNLOCK(ctx);
485809f6ff4fSMatt Macy 		return (0);
485909f6ff4fSMatt Macy 	}
486009f6ff4fSMatt Macy 	_iflib_pre_assert(scctx);
486109f6ff4fSMatt Macy 	ctx->ifc_txrx = *scctx->isc_txrx;
486209f6ff4fSMatt Macy 
486309f6ff4fSMatt Macy 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
486409f6ff4fSMatt Macy 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
486509f6ff4fSMatt Macy 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
486609f6ff4fSMatt Macy 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
486709f6ff4fSMatt Macy 
486809f6ff4fSMatt Macy 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
486909f6ff4fSMatt Macy 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
487009f6ff4fSMatt Macy 
487109f6ff4fSMatt Macy 	/* XXX change for per-queue sizes */
4872b97de13aSMarius Strobl 	device_printf(dev, "Using %d tx descriptors and %d rx descriptors\n",
487309f6ff4fSMatt Macy 	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
487409f6ff4fSMatt Macy 	for (i = 0; i < sctx->isc_nrxqs; i++) {
487509f6ff4fSMatt Macy 		if (!powerof2(scctx->isc_nrxd[i])) {
487609f6ff4fSMatt Macy 			/* round down instead? */
487709f6ff4fSMatt Macy 			device_printf(dev, "# rx descriptors must be a power of 2\n");
487809f6ff4fSMatt Macy 			err = EINVAL;
48797f3eb9daSPatrick Kelsey 			goto fail_iflib_detach;
488009f6ff4fSMatt Macy 		}
488109f6ff4fSMatt Macy 	}
488209f6ff4fSMatt Macy 	for (i = 0; i < sctx->isc_ntxqs; i++) {
488309f6ff4fSMatt Macy 		if (!powerof2(scctx->isc_ntxd[i])) {
488409f6ff4fSMatt Macy 			device_printf(dev,
488509f6ff4fSMatt Macy 			    "# tx descriptors must be a power of 2\n");
488609f6ff4fSMatt Macy 			err = EINVAL;
48877f3eb9daSPatrick Kelsey 			goto fail_iflib_detach;
488809f6ff4fSMatt Macy 		}
488909f6ff4fSMatt Macy 	}
489009f6ff4fSMatt Macy 
489109f6ff4fSMatt Macy 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
489209f6ff4fSMatt Macy 	    MAX_SINGLE_PACKET_FRACTION)
489309f6ff4fSMatt Macy 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
489409f6ff4fSMatt Macy 		    MAX_SINGLE_PACKET_FRACTION);
489509f6ff4fSMatt Macy 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
489609f6ff4fSMatt Macy 	    MAX_SINGLE_PACKET_FRACTION)
489709f6ff4fSMatt Macy 		scctx->isc_tx_tso_segments_max = max(1,
489809f6ff4fSMatt Macy 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
489909f6ff4fSMatt Macy 
490009f6ff4fSMatt Macy 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
49017f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
49027f87c040SMarius Strobl 		/*
49037f87c040SMarius Strobl 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
49047f87c040SMarius Strobl 		 * but some MACs do.
49057f87c040SMarius Strobl 		 */
49067f87c040SMarius Strobl 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
49077f87c040SMarius Strobl 		    IP_MAXPACKET));
49087f87c040SMarius Strobl 		/*
49097f87c040SMarius Strobl 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
49107f87c040SMarius Strobl 		 * into account.  In the worst case, each of these calls will
49117f87c040SMarius Strobl 		 * add another mbuf and, thus, the requirement for another DMA
49127f87c040SMarius Strobl 		 * segment.  So for best performance, it doesn't make sense to
49137f87c040SMarius Strobl 		 * advertise a maximum of TSO segments that typically will
49147f87c040SMarius Strobl 		 * require defragmentation in iflib_encap().
49157f87c040SMarius Strobl 		 */
49167f87c040SMarius Strobl 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
49177f87c040SMarius Strobl 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
49187f87c040SMarius Strobl 	}
491909f6ff4fSMatt Macy 	if (scctx->isc_rss_table_size == 0)
492009f6ff4fSMatt Macy 		scctx->isc_rss_table_size = 64;
492109f6ff4fSMatt Macy 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
492209f6ff4fSMatt Macy 
492309f6ff4fSMatt Macy 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
492409f6ff4fSMatt Macy 	/* XXX format name */
4925f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
4926f855ec81SMarius Strobl 	    NULL, NULL, "admin");
492709f6ff4fSMatt Macy 
492809f6ff4fSMatt Macy 	/* XXX can support > 1, but keep it simple for now */
492909f6ff4fSMatt Macy 	scctx->isc_intr = IFLIB_INTR_LEGACY;
493009f6ff4fSMatt Macy 
493109f6ff4fSMatt Macy 	/* Get memory for the station queues */
493209f6ff4fSMatt Macy 	if ((err = iflib_queues_alloc(ctx))) {
493309f6ff4fSMatt Macy 		device_printf(dev, "Unable to allocate queue memory\n");
49347f3eb9daSPatrick Kelsey 		goto fail_iflib_detach;
493509f6ff4fSMatt Macy 	}
493609f6ff4fSMatt Macy 
493709f6ff4fSMatt Macy 	if ((err = iflib_qset_structures_setup(ctx))) {
493809f6ff4fSMatt Macy 		device_printf(dev, "qset structure setup failed %d\n", err);
493909f6ff4fSMatt Macy 		goto fail_queues;
494009f6ff4fSMatt Macy 	}
49417f87c040SMarius Strobl 
494209f6ff4fSMatt Macy 	/*
494309f6ff4fSMatt Macy 	 * XXX What if anything do we want to do about interrupts?
494409f6ff4fSMatt Macy 	 */
49451fd8c72cSKyle Evans 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
494609f6ff4fSMatt Macy 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
494709f6ff4fSMatt Macy 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
494809f6ff4fSMatt Macy 		goto fail_detach;
494909f6ff4fSMatt Macy 	}
49507f87c040SMarius Strobl 
49517f87c040SMarius Strobl 	/*
49527f87c040SMarius Strobl 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
49537f87c040SMarius Strobl 	 * This must appear after the call to ether_ifattach() because
49547f87c040SMarius Strobl 	 * ether_ifattach() sets if_hdrlen to the default value.
49557f87c040SMarius Strobl 	 */
49567f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
49577f87c040SMarius Strobl 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
49587f87c040SMarius Strobl 
495909f6ff4fSMatt Macy 	/* XXX handle more than one queue */
496009f6ff4fSMatt Macy 	for (i = 0; i < scctx->isc_nrxqsets; i++)
496109f6ff4fSMatt Macy 		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
496209f6ff4fSMatt Macy 
496309f6ff4fSMatt Macy 	*ctxp = ctx;
496409f6ff4fSMatt Macy 
496509f6ff4fSMatt Macy 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
496609f6ff4fSMatt Macy 	iflib_add_device_sysctl_post(ctx);
496709f6ff4fSMatt Macy 	ctx->ifc_flags |= IFC_INIT_DONE;
4968aac9c817SEric Joyner 	CTX_UNLOCK(ctx);
496909f6ff4fSMatt Macy 	return (0);
497009f6ff4fSMatt Macy fail_detach:
497109f6ff4fSMatt Macy 	ether_ifdetach(ctx->ifc_ifp);
497209f6ff4fSMatt Macy fail_queues:
497309f6ff4fSMatt Macy 	iflib_tx_structures_free(ctx);
497409f6ff4fSMatt Macy 	iflib_rx_structures_free(ctx);
49757f3eb9daSPatrick Kelsey fail_iflib_detach:
497609f6ff4fSMatt Macy 	IFDI_DETACH(ctx);
4977aac9c817SEric Joyner fail_unlock:
4978aac9c817SEric Joyner 	CTX_UNLOCK(ctx);
49797f3eb9daSPatrick Kelsey fail_ctx_free:
49807f3eb9daSPatrick Kelsey 	free(ctx->ifc_softc, M_IFLIB);
49817f3eb9daSPatrick Kelsey 	free(ctx, M_IFLIB);
498209f6ff4fSMatt Macy 	return (err);
498309f6ff4fSMatt Macy }
498409f6ff4fSMatt Macy 
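/*
 * Undo iflib_pseudo_register(): detach from the network stack, tear down
 * the per-queue and admin tasks, and free the queue and context memory.
 */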
498509f6ff4fSMatt Macy int
498609f6ff4fSMatt Macy iflib_pseudo_deregister(if_ctx_t ctx)
498709f6ff4fSMatt Macy {
498809f6ff4fSMatt Macy 	if_t ifp = ctx->ifc_ifp;
498909f6ff4fSMatt Macy 	iflib_txq_t txq;
499009f6ff4fSMatt Macy 	iflib_rxq_t rxq;
499109f6ff4fSMatt Macy 	int i, j;
499209f6ff4fSMatt Macy 	struct taskqgroup *tqg;
499309f6ff4fSMatt Macy 	iflib_fl_t fl;
499409f6ff4fSMatt Macy 
499509f6ff4fSMatt Macy 	/* Unregister VLAN events */
499609f6ff4fSMatt Macy 	if (ctx->ifc_vlan_attach_event != NULL)
499709f6ff4fSMatt Macy 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
499809f6ff4fSMatt Macy 	if (ctx->ifc_vlan_detach_event != NULL)
499909f6ff4fSMatt Macy 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
500009f6ff4fSMatt Macy 
500109f6ff4fSMatt Macy 	ether_ifdetach(ifp);
500209f6ff4fSMatt Macy 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
500309f6ff4fSMatt Macy 	CTX_LOCK_DESTROY(ctx);
500409f6ff4fSMatt Macy 	/* XXX drain any dependent tasks */
500509f6ff4fSMatt Macy 	tqg = qgroup_if_io_tqg;
500609f6ff4fSMatt Macy 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
500709f6ff4fSMatt Macy 		callout_drain(&txq->ift_timer);
500809f6ff4fSMatt Macy 		if (txq->ift_task.gt_uniq != NULL)
500909f6ff4fSMatt Macy 			taskqgroup_detach(tqg, &txq->ift_task);
501009f6ff4fSMatt Macy 	}
501109f6ff4fSMatt Macy 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
501209f6ff4fSMatt Macy 		if (rxq->ifr_task.gt_uniq != NULL)
501309f6ff4fSMatt Macy 			taskqgroup_detach(tqg, &rxq->ifr_task);
501409f6ff4fSMatt Macy 
501509f6ff4fSMatt Macy 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
501609f6ff4fSMatt Macy 			free(fl->ifl_rx_bitmap, M_IFLIB);
501709f6ff4fSMatt Macy 	}
501809f6ff4fSMatt Macy 	tqg = qgroup_if_config_tqg;
501909f6ff4fSMatt Macy 	if (ctx->ifc_admin_task.gt_uniq != NULL)
502009f6ff4fSMatt Macy 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
502109f6ff4fSMatt Macy 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
502209f6ff4fSMatt Macy 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
502309f6ff4fSMatt Macy 
502409f6ff4fSMatt Macy 	if_free(ifp);
502509f6ff4fSMatt Macy 
502609f6ff4fSMatt Macy 	iflib_tx_structures_free(ctx);
502709f6ff4fSMatt Macy 	iflib_rx_structures_free(ctx);
502809f6ff4fSMatt Macy 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
502909f6ff4fSMatt Macy 		free(ctx->ifc_softc, M_IFLIB);
503009f6ff4fSMatt Macy 	free(ctx, M_IFLIB);
503109f6ff4fSMatt Macy 	return (0);
503209f6ff4fSMatt Macy }
503309f6ff4fSMatt Macy 
503409f6ff4fSMatt Macy int
50354c7070dbSScott Long iflib_device_attach(device_t dev)
50364c7070dbSScott Long {
50374c7070dbSScott Long 	if_ctx_t ctx;
50384c7070dbSScott Long 	if_shared_ctx_t sctx;
50394c7070dbSScott Long 
50404c7070dbSScott Long 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
50414c7070dbSScott Long 		return (ENOTSUP);
50424c7070dbSScott Long 
50434c7070dbSScott Long 	pci_enable_busmaster(dev);
50444c7070dbSScott Long 
50454c7070dbSScott Long 	return (iflib_device_register(dev, NULL, sctx, &ctx));
50464c7070dbSScott Long }
50474c7070dbSScott Long 
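/*
 * Tear down an attached interface: stop the hardware, detach from the
 * network stack and netmap, drain and detach the per-queue and admin
 * tasks, call IFDI_DETACH(), and release the queue, interrupt, and
 * context memory.
 */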
50484c7070dbSScott Long int
50494c7070dbSScott Long iflib_device_deregister(if_ctx_t ctx)
50504c7070dbSScott Long {
50514c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
50524c7070dbSScott Long 	iflib_txq_t txq;
50534c7070dbSScott Long 	iflib_rxq_t rxq;
50544c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
505587890dbaSSean Bruno 	int i, j;
50564c7070dbSScott Long 	struct taskqgroup *tqg;
505787890dbaSSean Bruno 	iflib_fl_t fl;
50584c7070dbSScott Long 
50594c7070dbSScott Long 	/* Make sure VLANS are not using driver */
50604c7070dbSScott Long 	if (if_vlantrunkinuse(ifp)) {
50614c7070dbSScott Long 		device_printf(dev, "VLAN in use, detach first\n");
50624c7070dbSScott Long 		return (EBUSY);
50634c7070dbSScott Long 	}
506477c1fcecSEric Joyner #ifdef PCI_IOV
506577c1fcecSEric Joyner 	if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
506677c1fcecSEric Joyner 		device_printf(dev, "SR-IOV in use; detach first.\n");
506777c1fcecSEric Joyner 		return (EBUSY);
506877c1fcecSEric Joyner 	}
506977c1fcecSEric Joyner #endif
507077c1fcecSEric Joyner 
507177c1fcecSEric Joyner 	STATE_LOCK(ctx);
507277c1fcecSEric Joyner 	ctx->ifc_flags |= IFC_IN_DETACH;
507377c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
50744c7070dbSScott Long 
50754c7070dbSScott Long 	CTX_LOCK(ctx);
50764c7070dbSScott Long 	iflib_stop(ctx);
50774c7070dbSScott Long 	CTX_UNLOCK(ctx);
50784c7070dbSScott Long 
50794c7070dbSScott Long 	/* Unregister VLAN events */
50804c7070dbSScott Long 	if (ctx->ifc_vlan_attach_event != NULL)
50814c7070dbSScott Long 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
50824c7070dbSScott Long 	if (ctx->ifc_vlan_detach_event != NULL)
50834c7070dbSScott Long 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
50844c7070dbSScott Long 
50854c7070dbSScott Long 	iflib_netmap_detach(ifp);
50864c7070dbSScott Long 	ether_ifdetach(ifp);
50876d49b41eSAndrew Gallatin 	iflib_rem_pfil(ctx);
50884c7070dbSScott Long 	if (ctx->ifc_led_dev != NULL)
50894c7070dbSScott Long 		led_destroy(ctx->ifc_led_dev);
50904c7070dbSScott Long 	/* XXX drain any dependent tasks */
5091ab2e3f79SStephen Hurd 	tqg = qgroup_if_io_tqg;
509223ac9029SStephen Hurd 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
50934c7070dbSScott Long 		callout_drain(&txq->ift_timer);
50944c7070dbSScott Long 		if (txq->ift_task.gt_uniq != NULL)
50954c7070dbSScott Long 			taskqgroup_detach(tqg, &txq->ift_task);
50964c7070dbSScott Long 	}
50974c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
50984c7070dbSScott Long 		if (rxq->ifr_task.gt_uniq != NULL)
50994c7070dbSScott Long 			taskqgroup_detach(tqg, &rxq->ifr_task);
510087890dbaSSean Bruno 
510187890dbaSSean Bruno 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
510287890dbaSSean Bruno 			free(fl->ifl_rx_bitmap, M_IFLIB);
51034c7070dbSScott Long 	}
5104ab2e3f79SStephen Hurd 	tqg = qgroup_if_config_tqg;
51054c7070dbSScott Long 	if (ctx->ifc_admin_task.gt_uniq != NULL)
51064c7070dbSScott Long 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
51074c7070dbSScott Long 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
51084c7070dbSScott Long 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
51096c3c3194SMatt Macy 	CTX_LOCK(ctx);
51104c7070dbSScott Long 	IFDI_DETACH(ctx);
51116c3c3194SMatt Macy 	CTX_UNLOCK(ctx);
51126c3c3194SMatt Macy 
51136c3c3194SMatt Macy 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
51146c3c3194SMatt Macy 	CTX_LOCK_DESTROY(ctx);
511523ac9029SStephen Hurd 	device_set_softc(ctx->ifc_dev, NULL);
511677c1fcecSEric Joyner 	iflib_free_intr_mem(ctx);
511777c1fcecSEric Joyner 
511877c1fcecSEric Joyner 	bus_generic_detach(dev);
511977c1fcecSEric Joyner 	if_free(ifp);
512077c1fcecSEric Joyner 
512177c1fcecSEric Joyner 	iflib_tx_structures_free(ctx);
512277c1fcecSEric Joyner 	iflib_rx_structures_free(ctx);
512377c1fcecSEric Joyner 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
512477c1fcecSEric Joyner 		free(ctx->ifc_softc, M_IFLIB);
5125*f154ece0SStephen Hurd 	unref_ctx_core_offset(ctx);
512677c1fcecSEric Joyner 	STATE_LOCK_DESTROY(ctx);
512777c1fcecSEric Joyner 	free(ctx, M_IFLIB);
512877c1fcecSEric Joyner 	return (0);
512977c1fcecSEric Joyner }
513077c1fcecSEric Joyner 
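/*
 * Release interrupt resources: the legacy IRQ when MSI-X is not in use,
 * the MSI/MSI-X messages when a legacy interrupt is not in use, and the
 * MSI-X table BAR mapping, if one was allocated.
 */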
513177c1fcecSEric Joyner static void
513277c1fcecSEric Joyner iflib_free_intr_mem(if_ctx_t ctx)
513377c1fcecSEric Joyner {
513477c1fcecSEric Joyner 
51354c7070dbSScott Long 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
51364c7070dbSScott Long 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
51374c7070dbSScott Long 	}
5138b97de13aSMarius Strobl 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
5139b97de13aSMarius Strobl 		pci_release_msi(ctx->ifc_dev);
5140b97de13aSMarius Strobl 	}
51414c7070dbSScott Long 	if (ctx->ifc_msix_mem != NULL) {
51424c7070dbSScott Long 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
5143b97de13aSMarius Strobl 		    rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
51444c7070dbSScott Long 		ctx->ifc_msix_mem = NULL;
51454c7070dbSScott Long 	}
51464c7070dbSScott Long }
51474c7070dbSScott Long 
51484c7070dbSScott Long int
51494c7070dbSScott Long iflib_device_detach(device_t dev)
51504c7070dbSScott Long {
51514c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
51524c7070dbSScott Long 
51534c7070dbSScott Long 	return (iflib_device_deregister(ctx));
51544c7070dbSScott Long }
51554c7070dbSScott Long 
51564c7070dbSScott Long int
51574c7070dbSScott Long iflib_device_suspend(device_t dev)
51584c7070dbSScott Long {
51594c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
51604c7070dbSScott Long 
51614c7070dbSScott Long 	CTX_LOCK(ctx);
51624c7070dbSScott Long 	IFDI_SUSPEND(ctx);
51634c7070dbSScott Long 	CTX_UNLOCK(ctx);
51644c7070dbSScott Long 
51654c7070dbSScott Long 	return (bus_generic_suspend(dev));
51664c7070dbSScott Long }

51674c7070dbSScott Long int
51684c7070dbSScott Long iflib_device_shutdown(device_t dev)
51694c7070dbSScott Long {
51704c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
51714c7070dbSScott Long 
51724c7070dbSScott Long 	CTX_LOCK(ctx);
51734c7070dbSScott Long 	IFDI_SHUTDOWN(ctx);
51744c7070dbSScott Long 	CTX_UNLOCK(ctx);
51754c7070dbSScott Long 
51764c7070dbSScott Long 	return (bus_generic_suspend(dev));
51774c7070dbSScott Long }
51784c7070dbSScott Long 
51794c7070dbSScott Long 
51804c7070dbSScott Long int
51814c7070dbSScott Long iflib_device_resume(device_t dev)
51824c7070dbSScott Long {
51834c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
51844c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
51854c7070dbSScott Long 
51864c7070dbSScott Long 	CTX_LOCK(ctx);
51874c7070dbSScott Long 	IFDI_RESUME(ctx);
5188cd28ea92SStephen Hurd 	iflib_if_init_locked(ctx);
51894c7070dbSScott Long 	CTX_UNLOCK(ctx);
51904c7070dbSScott Long 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
51914c7070dbSScott Long 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
51924c7070dbSScott Long 
51934c7070dbSScott Long 	return (bus_generic_resume(dev));
51944c7070dbSScott Long }
51954c7070dbSScott Long 
51964c7070dbSScott Long int
51974c7070dbSScott Long iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
51984c7070dbSScott Long {
51994c7070dbSScott Long 	int error;
52004c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52014c7070dbSScott Long 
52024c7070dbSScott Long 	CTX_LOCK(ctx);
52034c7070dbSScott Long 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
52044c7070dbSScott Long 	CTX_UNLOCK(ctx);
52054c7070dbSScott Long 
52064c7070dbSScott Long 	return (error);
52074c7070dbSScott Long }
52084c7070dbSScott Long 
52094c7070dbSScott Long void
52104c7070dbSScott Long iflib_device_iov_uninit(device_t dev)
52114c7070dbSScott Long {
52124c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52134c7070dbSScott Long 
52144c7070dbSScott Long 	CTX_LOCK(ctx);
52154c7070dbSScott Long 	IFDI_IOV_UNINIT(ctx);
52164c7070dbSScott Long 	CTX_UNLOCK(ctx);
52174c7070dbSScott Long }
52184c7070dbSScott Long 
52194c7070dbSScott Long int
52204c7070dbSScott Long iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
52214c7070dbSScott Long {
52224c7070dbSScott Long 	int error;
52234c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52244c7070dbSScott Long 
52254c7070dbSScott Long 	CTX_LOCK(ctx);
52264c7070dbSScott Long 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
52274c7070dbSScott Long 	CTX_UNLOCK(ctx);
52284c7070dbSScott Long 
52294c7070dbSScott Long 	return (error);
52304c7070dbSScott Long }
52314c7070dbSScott Long 
52324c7070dbSScott Long /*********************************************************************
52334c7070dbSScott Long  *
52344c7070dbSScott Long  *  MODULE FUNCTION DEFINITIONS
52354c7070dbSScott Long  *
52364c7070dbSScott Long  **********************************************************************/
52374c7070dbSScott Long 
5238ab2e3f79SStephen Hurd /*
5239ab2e3f79SStephen Hurd  * - Start a fast taskqueue thread for each core
5240ab2e3f79SStephen Hurd  * - Start a taskqueue for control operations
5241ab2e3f79SStephen Hurd  */
52424c7070dbSScott Long static int
52434c7070dbSScott Long iflib_module_init(void)
52444c7070dbSScott Long {
52454c7070dbSScott Long 	return (0);
52464c7070dbSScott Long }
52474c7070dbSScott Long 
52484c7070dbSScott Long static int
52494c7070dbSScott Long iflib_module_event_handler(module_t mod, int what, void *arg)
52504c7070dbSScott Long {
52514c7070dbSScott Long 	int err;
52524c7070dbSScott Long 
52534c7070dbSScott Long 	switch (what) {
52544c7070dbSScott Long 	case MOD_LOAD:
52554c7070dbSScott Long 		if ((err = iflib_module_init()) != 0)
52564c7070dbSScott Long 			return (err);
52574c7070dbSScott Long 		break;
52584c7070dbSScott Long 	case MOD_UNLOAD:
52594c7070dbSScott Long 		return (EBUSY);
52604c7070dbSScott Long 	default:
52614c7070dbSScott Long 		return (EOPNOTSUPP);
52624c7070dbSScott Long 	}
52634c7070dbSScott Long 
52644c7070dbSScott Long 	return (0);
52654c7070dbSScott Long }
52664c7070dbSScott Long 
52674c7070dbSScott Long /*********************************************************************
52684c7070dbSScott Long  *
52694c7070dbSScott Long  *  PUBLIC FUNCTION DEFINITIONS
52704c7070dbSScott Long  *     ordered as in iflib.h
52714c7070dbSScott Long  *
52724c7070dbSScott Long  **********************************************************************/
52734c7070dbSScott Long 
52744c7070dbSScott Long 
52754c7070dbSScott Long static void
52764c7070dbSScott Long _iflib_assert(if_shared_ctx_t sctx)
52774c7070dbSScott Long {
52784c7070dbSScott Long 	MPASS(sctx->isc_tx_maxsize);
52794c7070dbSScott Long 	MPASS(sctx->isc_tx_maxsegsize);
52804c7070dbSScott Long 
52814c7070dbSScott Long 	MPASS(sctx->isc_rx_maxsize);
52824c7070dbSScott Long 	MPASS(sctx->isc_rx_nsegments);
52834c7070dbSScott Long 	MPASS(sctx->isc_rx_maxsegsize);
52844c7070dbSScott Long 
528523ac9029SStephen Hurd 	MPASS(sctx->isc_nrxd_min[0]);
528623ac9029SStephen Hurd 	MPASS(sctx->isc_nrxd_max[0]);
528723ac9029SStephen Hurd 	MPASS(sctx->isc_nrxd_default[0]);
528823ac9029SStephen Hurd 	MPASS(sctx->isc_ntxd_min[0]);
528923ac9029SStephen Hurd 	MPASS(sctx->isc_ntxd_max[0]);
529023ac9029SStephen Hurd 	MPASS(sctx->isc_ntxd_default[0]);
52914c7070dbSScott Long }
52924c7070dbSScott Long 
52931248952aSSean Bruno static void
52941248952aSSean Bruno _iflib_pre_assert(if_softc_ctx_t scctx)
52951248952aSSean Bruno {
52961248952aSSean Bruno 
52971248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_encap);
52981248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_flush);
52991248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
53001248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_available);
53011248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
53021248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_refill);
53031248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_flush);
53041248952aSSean Bruno }
53052fe66646SSean Bruno 
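/*
 * Allocate the ifnet and wire up the context: initialize the context and
 * state locks, compile the driver's kobj method table, install the iflib
 * if_init/if_ioctl/if_transmit/if_qflush handlers, register the VLAN
 * config/unconfig event handlers, and initialize the media list.
 */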
53064c7070dbSScott Long static int
53074c7070dbSScott Long iflib_register(if_ctx_t ctx)
53084c7070dbSScott Long {
53094c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
53104c7070dbSScott Long 	driver_t *driver = sctx->isc_driver;
53114c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
53124c7070dbSScott Long 	if_t ifp;
53134c7070dbSScott Long 
53144c7070dbSScott Long 	_iflib_assert(sctx);
53154c7070dbSScott Long 
5316aa8a24d3SStephen Hurd 	CTX_LOCK_INIT(ctx);
53177b610b60SSean Bruno 	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
531877c1fcecSEric Joyner 	ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER);
53194c7070dbSScott Long 	if (ifp == NULL) {
53204c7070dbSScott Long 		device_printf(dev, "cannot allocate ifnet structure\n");
53214c7070dbSScott Long 		return (ENOMEM);
53224c7070dbSScott Long 	}
53234c7070dbSScott Long 
53244c7070dbSScott Long 	/*
53254c7070dbSScott Long 	 * Initialize our context's device specific methods
53264c7070dbSScott Long 	 */
53274c7070dbSScott Long 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
53284c7070dbSScott Long 	kobj_class_compile((kobj_class_t) driver);
53294c7070dbSScott Long 	driver->refs++;
53304c7070dbSScott Long 
53314c7070dbSScott Long 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
53324c7070dbSScott Long 	if_setsoftc(ifp, ctx);
53334c7070dbSScott Long 	if_setdev(ifp, dev);
53344c7070dbSScott Long 	if_setinitfn(ifp, iflib_if_init);
53354c7070dbSScott Long 	if_setioctlfn(ifp, iflib_if_ioctl);
5336b8ca4756SPatrick Kelsey #ifdef ALTQ
5337b8ca4756SPatrick Kelsey 	if_setstartfn(ifp, iflib_altq_if_start);
5338b8ca4756SPatrick Kelsey 	if_settransmitfn(ifp, iflib_altq_if_transmit);
53398f410865SPatrick Kelsey 	if_setsendqready(ifp);
5340b8ca4756SPatrick Kelsey #else
53414c7070dbSScott Long 	if_settransmitfn(ifp, iflib_if_transmit);
5342b8ca4756SPatrick Kelsey #endif
53434c7070dbSScott Long 	if_setqflushfn(ifp, iflib_if_qflush);
53444c7070dbSScott Long 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
53454c7070dbSScott Long 
53464c7070dbSScott Long 	ctx->ifc_vlan_attach_event =
53474c7070dbSScott Long 		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
53484c7070dbSScott Long 							  EVENTHANDLER_PRI_FIRST);
53494c7070dbSScott Long 	ctx->ifc_vlan_detach_event =
53504c7070dbSScott Long 		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
53514c7070dbSScott Long 							  EVENTHANDLER_PRI_FIRST);
53524c7070dbSScott Long 
53534c7070dbSScott Long 	ifmedia_init(&ctx->ifc_media, IFM_IMASK,
53544c7070dbSScott Long 					 iflib_media_change, iflib_media_status);
53554c7070dbSScott Long 
53564c7070dbSScott Long 	return (0);
53574c7070dbSScott Long }
53584c7070dbSScott Long 
53594c7070dbSScott Long 
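/*
 * Allocate the per-interface queue state: the TX and RX queue set arrays,
 * the DMA descriptor rings for each queue, the transmit buf_rings and
 * receive free lists, and then pass the descriptor ring addresses to the
 * driver via IFDI_TX_QUEUES_ALLOC() and IFDI_RX_QUEUES_ALLOC().
 */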
53604c7070dbSScott Long static int
53614c7070dbSScott Long iflib_queues_alloc(if_ctx_t ctx)
53624c7070dbSScott Long {
53634c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
536423ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
53654c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
536623ac9029SStephen Hurd 	int nrxqsets = scctx->isc_nrxqsets;
536723ac9029SStephen Hurd 	int ntxqsets = scctx->isc_ntxqsets;
53684c7070dbSScott Long 	iflib_txq_t txq;
53694c7070dbSScott Long 	iflib_rxq_t rxq;
53704c7070dbSScott Long 	iflib_fl_t fl = NULL;
537123ac9029SStephen Hurd 	int i, j, cpu, err, txconf, rxconf;
53724c7070dbSScott Long 	iflib_dma_info_t ifdip;
537323ac9029SStephen Hurd 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
537423ac9029SStephen Hurd 	uint32_t *txqsizes = scctx->isc_txqsizes;
53754c7070dbSScott Long 	uint8_t nrxqs = sctx->isc_nrxqs;
53764c7070dbSScott Long 	uint8_t ntxqs = sctx->isc_ntxqs;
53774c7070dbSScott Long 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
53784c7070dbSScott Long 	caddr_t *vaddrs;
53794c7070dbSScott Long 	uint64_t *paddrs;
53804c7070dbSScott Long 
538123ac9029SStephen Hurd 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
538223ac9029SStephen Hurd 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
53834c7070dbSScott Long 
53844c7070dbSScott Long 	/* Allocate the TX ring struct memory */
5385b89827a0SStephen Hurd 	if (!(ctx->ifc_txqs =
5386ac2fffa4SPedro F. Giffuni 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5387ac2fffa4SPedro F. Giffuni 	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
53884c7070dbSScott Long 		device_printf(dev, "Unable to allocate TX ring memory\n");
53894c7070dbSScott Long 		err = ENOMEM;
53904c7070dbSScott Long 		goto fail;
53914c7070dbSScott Long 	}
53924c7070dbSScott Long 
53934c7070dbSScott Long 	/* Now allocate the RX */
5394b89827a0SStephen Hurd 	if (!(ctx->ifc_rxqs =
5395ac2fffa4SPedro F. Giffuni 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5396ac2fffa4SPedro F. Giffuni 	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
53974c7070dbSScott Long 		device_printf(dev, "Unable to allocate RX ring memory\n");
53984c7070dbSScott Long 		err = ENOMEM;
53994c7070dbSScott Long 		goto rx_fail;
54004c7070dbSScott Long 	}
54014c7070dbSScott Long 
5402b89827a0SStephen Hurd 	txq = ctx->ifc_txqs;
5403b89827a0SStephen Hurd 	rxq = ctx->ifc_rxqs;
54044c7070dbSScott Long 
54054c7070dbSScott Long 	/*
54064c7070dbSScott Long 	 * XXX handle allocation failure
54074c7070dbSScott Long 	 */
540896c85efbSNathan Whitehorn 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
54094c7070dbSScott Long 		/* Set up some basics */
54104c7070dbSScott Long 
5411bfce461eSMarius Strobl 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
5412bfce461eSMarius Strobl 		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5413bfce461eSMarius Strobl 			device_printf(dev,
5414bfce461eSMarius Strobl 			    "Unable to allocate TX DMA info memory\n");
54154c7070dbSScott Long 			err = ENOMEM;
54160d0338afSConrad Meyer 			goto err_tx_desc;
54174c7070dbSScott Long 		}
54184c7070dbSScott Long 		txq->ift_ifdi = ifdip;
54194c7070dbSScott Long 		for (j = 0; j < ntxqs; j++, ifdip++) {
5420bfce461eSMarius Strobl 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
5421bfce461eSMarius Strobl 				device_printf(dev,
5422bfce461eSMarius Strobl 				    "Unable to allocate TX descriptors\n");
54234c7070dbSScott Long 				err = ENOMEM;
54244c7070dbSScott Long 				goto err_tx_desc;
54254c7070dbSScott Long 			}
542695246abbSSean Bruno 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
54274c7070dbSScott Long 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
54284c7070dbSScott Long 		}
54294c7070dbSScott Long 		txq->ift_ctx = ctx;
54304c7070dbSScott Long 		txq->ift_id = i;
543123ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
543223ac9029SStephen Hurd 			txq->ift_br_offset = 1;
543323ac9029SStephen Hurd 		} else {
543423ac9029SStephen Hurd 			txq->ift_br_offset = 0;
543523ac9029SStephen Hurd 		}
54364c7070dbSScott Long 		/* XXX fix this */
543796c85efbSNathan Whitehorn 		txq->ift_timer.c_cpu = cpu;
54384c7070dbSScott Long 
54394c7070dbSScott Long 		if (iflib_txsd_alloc(txq)) {
54404c7070dbSScott Long 			device_printf(dev, "Critical Failure setting up TX buffers\n");
54414c7070dbSScott Long 			err = ENOMEM;
54424c7070dbSScott Long 			goto err_tx_desc;
54434c7070dbSScott Long 		}
54444c7070dbSScott Long 
54454c7070dbSScott Long 		/* Initialize the TX lock */
54464c7070dbSScott Long 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
54474c7070dbSScott Long 		    device_get_nameunit(dev), txq->ift_id);
54484c7070dbSScott Long 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
54494c7070dbSScott Long 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
54504c7070dbSScott Long 
54514c7070dbSScott Long 		snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
54524c7070dbSScott Long 			 device_get_nameunit(dev), txq->ift_id);
54534c7070dbSScott Long 
545495246abbSSean Bruno 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
54554c7070dbSScott Long 				      iflib_txq_can_drain, M_IFLIB, M_WAITOK);
54564c7070dbSScott Long 		if (err) {
54574c7070dbSScott Long 			/* XXX free any allocated rings */
54584c7070dbSScott Long 			device_printf(dev, "Unable to allocate buf_ring\n");
54590d0338afSConrad Meyer 			goto err_tx_desc;
54604c7070dbSScott Long 		}
54614c7070dbSScott Long 	}
54624c7070dbSScott Long 
54634c7070dbSScott Long 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
54644c7070dbSScott Long 		/* Set up some basics */
54654c7070dbSScott Long 
5466bfce461eSMarius Strobl 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
5467bfce461eSMarius Strobl 		   M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5468bfce461eSMarius Strobl 			device_printf(dev,
5469bfce461eSMarius Strobl 			    "Unable to allocate RX DMA info memory\n");
54704c7070dbSScott Long 			err = ENOMEM;
54710d0338afSConrad Meyer 			goto err_tx_desc;
54724c7070dbSScott Long 		}
54734c7070dbSScott Long 
54744c7070dbSScott Long 		rxq->ifr_ifdi = ifdip;
547595246abbSSean Bruno 		/* XXX this needs to be changed if #rx queues != #tx queues */
547695246abbSSean Bruno 		rxq->ifr_ntxqirq = 1;
547795246abbSSean Bruno 		rxq->ifr_txqid[0] = i;
54784c7070dbSScott Long 		for (j = 0; j < nrxqs; j++, ifdip++) {
5479bfce461eSMarius Strobl 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
5480bfce461eSMarius Strobl 				device_printf(dev,
5481bfce461eSMarius Strobl 				    "Unable to allocate RX descriptors\n");
54824c7070dbSScott Long 				err = ENOMEM;
54834c7070dbSScott Long 				goto err_tx_desc;
54844c7070dbSScott Long 			}
54854c7070dbSScott Long 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
54864c7070dbSScott Long 		}
54874c7070dbSScott Long 		rxq->ifr_ctx = ctx;
54884c7070dbSScott Long 		rxq->ifr_id = i;
548923ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
549023ac9029SStephen Hurd 			rxq->ifr_fl_offset = 1;
54914c7070dbSScott Long 		} else {
549223ac9029SStephen Hurd 			rxq->ifr_fl_offset = 0;
54934c7070dbSScott Long 		}
54944c7070dbSScott Long 		rxq->ifr_nfl = nfree_lists;
54954c7070dbSScott Long 		if ((fl = (iflib_fl_t) malloc(sizeof(struct iflib_fl) *
5496ac2fffa4SPedro F. Giffuni 		    nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
54974c7070dbSScott Long 			device_printf(dev, "Unable to allocate free list memory\n");
54984c7070dbSScott Long 			err = ENOMEM;
54990d0338afSConrad Meyer 			goto err_tx_desc;
55004c7070dbSScott Long 		}
55014c7070dbSScott Long 		rxq->ifr_fl = fl;
55024c7070dbSScott Long 		for (j = 0; j < nfree_lists; j++) {
550395246abbSSean Bruno 			fl[j].ifl_rxq = rxq;
550495246abbSSean Bruno 			fl[j].ifl_id = j;
550595246abbSSean Bruno 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
550695246abbSSean Bruno 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
55074c7070dbSScott Long 		}
55084c7070dbSScott Long 		/* Allocate receive buffers for the ring */
55094c7070dbSScott Long 		if (iflib_rxsd_alloc(rxq)) {
55104c7070dbSScott Long 			device_printf(dev,
55114c7070dbSScott Long 			    "Critical Failure setting up receive buffers\n");
55124c7070dbSScott Long 			err = ENOMEM;
55134c7070dbSScott Long 			goto err_rx_desc;
55144c7070dbSScott Long 		}
551587890dbaSSean Bruno 
551687890dbaSSean Bruno 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
55173db348b5SMarius Strobl 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
55183db348b5SMarius Strobl 			    M_WAITOK);
55194c7070dbSScott Long 	}
55204c7070dbSScott Long 
55214c7070dbSScott Long 	/* TXQs */
55224c7070dbSScott Long 	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
55234c7070dbSScott Long 	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
55244c7070dbSScott Long 	for (i = 0; i < ntxqsets; i++) {
55254c7070dbSScott Long 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
55264c7070dbSScott Long 
55274c7070dbSScott Long 		for (j = 0; j < ntxqs; j++, di++) {
55284c7070dbSScott Long 			vaddrs[i*ntxqs + j] = di->idi_vaddr;
55294c7070dbSScott Long 			paddrs[i*ntxqs + j] = di->idi_paddr;
55304c7070dbSScott Long 		}
55314c7070dbSScott Long 	}
55324c7070dbSScott Long 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
5533bfce461eSMarius Strobl 		device_printf(ctx->ifc_dev,
5534bfce461eSMarius Strobl 		    "Unable to allocate device TX queue\n");
55354c7070dbSScott Long 		iflib_tx_structures_free(ctx);
55364c7070dbSScott Long 		free(vaddrs, M_IFLIB);
55374c7070dbSScott Long 		free(paddrs, M_IFLIB);
55384c7070dbSScott Long 		goto err_rx_desc;
55394c7070dbSScott Long 	}
55404c7070dbSScott Long 	free(vaddrs, M_IFLIB);
55414c7070dbSScott Long 	free(paddrs, M_IFLIB);
55424c7070dbSScott Long 
55434c7070dbSScott Long 	/* RXQs */
55444c7070dbSScott Long 	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
55454c7070dbSScott Long 	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
55464c7070dbSScott Long 	for (i = 0; i < nrxqsets; i++) {
55474c7070dbSScott Long 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
55484c7070dbSScott Long 
55494c7070dbSScott Long 		for (j = 0; j < nrxqs; j++, di++) {
55504c7070dbSScott Long 			vaddrs[i*nrxqs + j] = di->idi_vaddr;
55514c7070dbSScott Long 			paddrs[i*nrxqs + j] = di->idi_paddr;
55524c7070dbSScott Long 		}
55534c7070dbSScott Long 	}
55544c7070dbSScott Long 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
5555bfce461eSMarius Strobl 		device_printf(ctx->ifc_dev,
5556bfce461eSMarius Strobl 		    "Unable to allocate device RX queue\n");
55574c7070dbSScott Long 		iflib_tx_structures_free(ctx);
55584c7070dbSScott Long 		free(vaddrs, M_IFLIB);
55594c7070dbSScott Long 		free(paddrs, M_IFLIB);
55604c7070dbSScott Long 		goto err_rx_desc;
55614c7070dbSScott Long 	}
55624c7070dbSScott Long 	free(vaddrs, M_IFLIB);
55634c7070dbSScott Long 	free(paddrs, M_IFLIB);
55644c7070dbSScott Long 
55654c7070dbSScott Long 	return (0);
55664c7070dbSScott Long 
55674c7070dbSScott Long /* XXX handle allocation failure changes */
55684c7070dbSScott Long err_rx_desc:
55694c7070dbSScott Long err_tx_desc:
5570b89827a0SStephen Hurd rx_fail:
55714c7070dbSScott Long 	if (ctx->ifc_rxqs != NULL)
55724c7070dbSScott Long 		free(ctx->ifc_rxqs, M_IFLIB);
55734c7070dbSScott Long 	ctx->ifc_rxqs = NULL;
55744c7070dbSScott Long 	if (ctx->ifc_txqs != NULL)
55754c7070dbSScott Long 		free(ctx->ifc_txqs, M_IFLIB);
55764c7070dbSScott Long 	ctx->ifc_txqs = NULL;
55774c7070dbSScott Long fail:
55784c7070dbSScott Long 	return (err);
55794c7070dbSScott Long }
55804c7070dbSScott Long 
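/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/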
55814c7070dbSScott Long static int
55824c7070dbSScott Long iflib_tx_structures_setup(if_ctx_t ctx)
55834c7070dbSScott Long {
55844c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
55854c7070dbSScott Long 	int i;
55864c7070dbSScott Long 
55874c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
55884c7070dbSScott Long 		iflib_txq_setup(txq);
55894c7070dbSScott Long 
55904c7070dbSScott Long 	return (0);
55914c7070dbSScott Long }
55924c7070dbSScott Long 
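/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/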
55934c7070dbSScott Long static void
55944c7070dbSScott Long iflib_tx_structures_free(if_ctx_t ctx)
55954c7070dbSScott Long {
55964c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
55974d261ce2SStephen Hurd 	if_shared_ctx_t sctx = ctx->ifc_sctx;
55984c7070dbSScott Long 	int i, j;
55994c7070dbSScott Long 
56004c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
56014c7070dbSScott Long 		iflib_txq_destroy(txq);
56024d261ce2SStephen Hurd 		for (j = 0; j < sctx->isc_ntxqs; j++)
56034c7070dbSScott Long 			iflib_dma_free(&txq->ift_ifdi[j]);
56044c7070dbSScott Long 	}
56054c7070dbSScott Long 	free(ctx->ifc_txqs, M_IFLIB);
56064c7070dbSScott Long 	ctx->ifc_txqs = NULL;
56074c7070dbSScott Long 	IFDI_QUEUES_FREE(ctx);
56084c7070dbSScott Long }
56094c7070dbSScott Long 
56104c7070dbSScott Long /*********************************************************************
56114c7070dbSScott Long  *
56124c7070dbSScott Long  *  Initialize all receive rings.
56134c7070dbSScott Long  *
56144c7070dbSScott Long  **********************************************************************/
56154c7070dbSScott Long static int
56164c7070dbSScott Long iflib_rx_structures_setup(if_ctx_t ctx)
56174c7070dbSScott Long {
56184c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5619aaeb188aSBjoern A. Zeeb 	int q;
5620aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
5621aaeb188aSBjoern A. Zeeb 	int i, err;
5622aaeb188aSBjoern A. Zeeb #endif
56234c7070dbSScott Long 
56244c7070dbSScott Long 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5625aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
56264c7070dbSScott Long 		tcp_lro_free(&rxq->ifr_lc);
562723ac9029SStephen Hurd 		if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
562823ac9029SStephen Hurd 		    TCP_LRO_ENTRIES, min(1024,
562923ac9029SStephen Hurd 		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
56304c7070dbSScott Long 			device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
56314c7070dbSScott Long 			goto fail;
56324c7070dbSScott Long 		}
56334c7070dbSScott Long 		rxq->ifr_lro_enabled = TRUE;
5634aaeb188aSBjoern A. Zeeb #endif
56354c7070dbSScott Long 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
56364c7070dbSScott Long 	}
56374c7070dbSScott Long 	return (0);
5638aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
56394c7070dbSScott Long fail:
56404c7070dbSScott Long 	/*
56414c7070dbSScott Long 	 * Free the RX software descriptors allocated so far.  We only handle
56424c7070dbSScott Long 	 * the rings that completed setup; the failing ring will have cleaned
56434c7070dbSScott Long 	 * up after itself.  'q' failed, so it's the terminus.
56444c7070dbSScott Long 	 */
56454c7070dbSScott Long 	rxq = ctx->ifc_rxqs;
56464c7070dbSScott Long 	for (i = 0; i < q; ++i, rxq++) {
56474c7070dbSScott Long 		iflib_rx_sds_free(rxq);
56484c7070dbSScott Long 		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
56494c7070dbSScott Long 	}
56504c7070dbSScott Long 	return (err);
5651aaeb188aSBjoern A. Zeeb #endif
56524c7070dbSScott Long }
56534c7070dbSScott Long 
56544c7070dbSScott Long /*********************************************************************
56554c7070dbSScott Long  *
56564c7070dbSScott Long  *  Free all receive rings.
56574c7070dbSScott Long  *
56584c7070dbSScott Long  **********************************************************************/
56594c7070dbSScott Long static void
56604c7070dbSScott Long iflib_rx_structures_free(if_ctx_t ctx)
56614c7070dbSScott Long {
56624c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
56634c7070dbSScott Long 
566423ac9029SStephen Hurd 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
56654c7070dbSScott Long 		iflib_rx_sds_free(rxq);
56664c7070dbSScott Long 	}
566777c1fcecSEric Joyner 	free(ctx->ifc_rxqs, M_IFLIB);
566877c1fcecSEric Joyner 	ctx->ifc_rxqs = NULL;
56694c7070dbSScott Long }
56704c7070dbSScott Long 
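/*
 * Perform the software setup of both the TX and RX queue sets.
 */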
56714c7070dbSScott Long static int
56724c7070dbSScott Long iflib_qset_structures_setup(if_ctx_t ctx)
56734c7070dbSScott Long {
56744c7070dbSScott Long 	int err;
56754c7070dbSScott Long 
56766108c013SStephen Hurd 	/*
56776108c013SStephen Hurd 	 * It is expected that the caller takes care of freeing queues if this
56786108c013SStephen Hurd 	 * fails.
56796108c013SStephen Hurd 	 */
5680ac88e6daSStephen Hurd 	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
5681ac88e6daSStephen Hurd 		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
56824c7070dbSScott Long 		return (err);
5683ac88e6daSStephen Hurd 	}
56844c7070dbSScott Long 
56856108c013SStephen Hurd 	if ((err = iflib_rx_structures_setup(ctx)) != 0)
56864c7070dbSScott Long 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
56876108c013SStephen Hurd 
56884c7070dbSScott Long 	return (err);
56894c7070dbSScott Long }
56904c7070dbSScott Long 
56914c7070dbSScott Long int
56924c7070dbSScott Long iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
56933e0e6330SStephen Hurd 		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
56944c7070dbSScott Long {
56954c7070dbSScott Long 
56964c7070dbSScott Long 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
56974c7070dbSScott Long }
56984c7070dbSScott Long 
5699b103855eSStephen Hurd #ifdef SMP
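/*
 * Return the CPU ID of the (qid % CPU count)'th CPU in this context's
 * CPU set, wrapping around when qid exceeds the number of CPUs.
 */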
5700aa3c5dd8SSean Bruno static int
5701b103855eSStephen Hurd find_nth(if_ctx_t ctx, int qid)
57024c7070dbSScott Long {
5703b103855eSStephen Hurd 	cpuset_t cpus;
5704aa3c5dd8SSean Bruno 	int i, cpuid, eqid, count;
57054c7070dbSScott Long 
5706b103855eSStephen Hurd 	CPU_COPY(&ctx->ifc_cpus, &cpus);
5707b103855eSStephen Hurd 	count = CPU_COUNT(&cpus);
5708aa3c5dd8SSean Bruno 	eqid = qid % count;
57094c7070dbSScott Long 	/* Clear the first eqid set bits to reach the eqid'th CPU */
5710aa3c5dd8SSean Bruno 	for (i = 0; i < eqid; i++) {
5711b103855eSStephen Hurd 		cpuid = CPU_FFS(&cpus);
5712aa3c5dd8SSean Bruno 		MPASS(cpuid != 0);
5713b103855eSStephen Hurd 		CPU_CLR(cpuid-1, &cpus);
57144c7070dbSScott Long 	}
5715b103855eSStephen Hurd 	cpuid = CPU_FFS(&cpus);
5716aa3c5dd8SSean Bruno 	MPASS(cpuid != 0);
5717aa3c5dd8SSean Bruno 	return (cpuid-1);
57184c7070dbSScott Long }
57194c7070dbSScott Long 
5720b103855eSStephen Hurd #ifdef SCHED_ULE
5721b103855eSStephen Hurd extern struct cpu_group *cpu_top;              /* CPU topology */
5722b103855eSStephen Hurd 
5723b103855eSStephen Hurd static int
5724b103855eSStephen Hurd find_child_with_core(int cpu, struct cpu_group *grp)
5725b103855eSStephen Hurd {
5726b103855eSStephen Hurd 	int i;
5727b103855eSStephen Hurd 
5728b103855eSStephen Hurd 	if (grp->cg_children == 0)
5729b103855eSStephen Hurd 		return -1;
5730b103855eSStephen Hurd 
5731b103855eSStephen Hurd 	MPASS(grp->cg_child);
5732b103855eSStephen Hurd 	for (i = 0; i < grp->cg_children; i++) {
5733b103855eSStephen Hurd 		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
5734b103855eSStephen Hurd 			return i;
5735b103855eSStephen Hurd 	}
5736b103855eSStephen Hurd 
5737b103855eSStephen Hurd 	return -1;
5738b103855eSStephen Hurd }
5739b103855eSStephen Hurd 
5740b103855eSStephen Hurd /*
57410b75ac77SStephen Hurd  * Find the nth "close" core to the specified core.
57420b75ac77SStephen Hurd  * "close" is defined as the deepest level that shares
57430b75ac77SStephen Hurd  * at least an L2 cache.  With threads, this will be
5744*f154ece0SStephen Hurd  * threads on the same core.  If the shared cache is L3
57450b75ac77SStephen Hurd  * or higher, this simply returns the same core.
5746b103855eSStephen Hurd  */
5747b103855eSStephen Hurd static int
57480b75ac77SStephen Hurd find_close_core(int cpu, int core_offset)
5749b103855eSStephen Hurd {
5750b103855eSStephen Hurd 	struct cpu_group *grp;
5751b103855eSStephen Hurd 	int i;
57520b75ac77SStephen Hurd 	int fcpu;
5753b103855eSStephen Hurd 	cpuset_t cs;
5754b103855eSStephen Hurd 
5755b103855eSStephen Hurd 	grp = cpu_top;
5756b103855eSStephen Hurd 	if (grp == NULL)
5757b103855eSStephen Hurd 		return cpu;
5758b103855eSStephen Hurd 	i = 0;
5759b103855eSStephen Hurd 	while ((i = find_child_with_core(cpu, grp)) != -1) {
5760b103855eSStephen Hurd 		/* If the child only has one cpu, don't descend */
5761b103855eSStephen Hurd 		if (grp->cg_child[i].cg_count <= 1)
5762b103855eSStephen Hurd 			break;
5763b103855eSStephen Hurd 		grp = &grp->cg_child[i];
5764b103855eSStephen Hurd 	}
5765b103855eSStephen Hurd 
5766b103855eSStephen Hurd 	/* If they don't share at least an L2 cache, use the same CPU */
5767b103855eSStephen Hurd 	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
5768b103855eSStephen Hurd 		return cpu;
5769b103855eSStephen Hurd 
5770b103855eSStephen Hurd 	/* Now pick one */
5771b103855eSStephen Hurd 	CPU_COPY(&grp->cg_mask, &cs);
57720b75ac77SStephen Hurd 
57730b75ac77SStephen Hurd 	/* Add the selected CPU offset to core offset. */
57740b75ac77SStephen Hurd 	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
57750b75ac77SStephen Hurd 		if (fcpu - 1 == cpu)
57760b75ac77SStephen Hurd 			break;
57770b75ac77SStephen Hurd 		CPU_CLR(fcpu - 1, &cs);
57780b75ac77SStephen Hurd 	}
57790b75ac77SStephen Hurd 	MPASS(fcpu);
57800b75ac77SStephen Hurd 
57810b75ac77SStephen Hurd 	core_offset += i;
57820b75ac77SStephen Hurd 
57830b75ac77SStephen Hurd 	CPU_COPY(&grp->cg_mask, &cs);
57840b75ac77SStephen Hurd 	for (i = core_offset % grp->cg_count; i > 0; i--) {
5785b103855eSStephen Hurd 		MPASS(CPU_FFS(&cs));
5786b103855eSStephen Hurd 		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5787b103855eSStephen Hurd 	}
5788b103855eSStephen Hurd 	MPASS(CPU_FFS(&cs));
5789b103855eSStephen Hurd 	return CPU_FFS(&cs) - 1;
5790b103855eSStephen Hurd }
5791b103855eSStephen Hurd #else
5792b103855eSStephen Hurd static int
57930b75ac77SStephen Hurd find_close_core(int cpu, int core_offset __unused)
5794b103855eSStephen Hurd {
579597755e83SKonstantin Belousov 	return cpu;
5796b103855eSStephen Hurd }
5797b103855eSStephen Hurd #endif
5798b103855eSStephen Hurd 
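/*
 * Compute the core offset that find_close_core() should apply for the
 * given interrupt type and queue ID.
 */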
5799b103855eSStephen Hurd static int
58000b75ac77SStephen Hurd get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5801b103855eSStephen Hurd {
5802b103855eSStephen Hurd 	switch (type) {
5803b103855eSStephen Hurd 	case IFLIB_INTR_TX:
58040b75ac77SStephen Hurd 		/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
58050b75ac77SStephen Hurd 		/* XXX handle multiple RX threads per core and more than two cores per L2 group */
5806b103855eSStephen Hurd 		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5807b103855eSStephen Hurd 	case IFLIB_INTR_RX:
5808b103855eSStephen Hurd 	case IFLIB_INTR_RXTX:
58090b75ac77SStephen Hurd 		/* RX queues get the specified core */
5810b103855eSStephen Hurd 		return qid / CPU_COUNT(&ctx->ifc_cpus);
5811b103855eSStephen Hurd 	default:
5812b103855eSStephen Hurd 		return -1;
5813b103855eSStephen Hurd 	}
5814b103855eSStephen Hurd }
5815b103855eSStephen Hurd #else
58160b75ac77SStephen Hurd #define get_core_offset(ctx, type, qid)	CPU_FIRST()
58170b75ac77SStephen Hurd #define find_close_core(cpuid, tid)	CPU_FIRST()
5818b103855eSStephen Hurd #define find_nth(ctx, gid)		CPU_FIRST()
5819b103855eSStephen Hurd #endif
5820b103855eSStephen Hurd 
5821b103855eSStephen Hurd /* Helper shared by the irq and softirq setup paths to avoid copy/paste. */
5822b103855eSStephen Hurd static inline int
5823f855ec81SMarius Strobl iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
5824f855ec81SMarius Strobl     int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
5825f855ec81SMarius Strobl     const char *name)
5826b103855eSStephen Hurd {
5827f855ec81SMarius Strobl 	device_t dev;
5828*f154ece0SStephen Hurd 	int co, cpuid, err, tid;
5829b103855eSStephen Hurd 
5830f855ec81SMarius Strobl 	dev = ctx->ifc_dev;
5831*f154ece0SStephen Hurd 	co = ctx->ifc_sysctl_core_offset;
5832*f154ece0SStephen Hurd 	if (ctx->ifc_sysctl_separate_txrx && type == IFLIB_INTR_TX)
5833*f154ece0SStephen Hurd 		co += ctx->ifc_softc_ctx.isc_nrxqsets;
5834*f154ece0SStephen Hurd 	cpuid = find_nth(ctx, qid + co);
58350b75ac77SStephen Hurd 	tid = get_core_offset(ctx, type, qid);
5836b103855eSStephen Hurd 	MPASS(tid >= 0);
58370b75ac77SStephen Hurd 	cpuid = find_close_core(cpuid, tid);
5838f855ec81SMarius Strobl 	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev, irq->ii_res,
5839f855ec81SMarius Strobl 	    name);
5840b103855eSStephen Hurd 	if (err) {
5841f855ec81SMarius Strobl 		device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
5842b103855eSStephen Hurd 		return (err);
5843b103855eSStephen Hurd 	}
5844b103855eSStephen Hurd #ifdef notyet
5845b103855eSStephen Hurd 	if (cpuid > ctx->ifc_cpuid_highest)
5846b103855eSStephen Hurd 		ctx->ifc_cpuid_highest = cpuid;
5847b103855eSStephen Hurd #endif
5848b103855eSStephen Hurd 	return 0;
5849b103855eSStephen Hurd }
5850b103855eSStephen Hurd 
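/*
 * Allocate an interrupt of the requested type for a queue, install the
 * matching fast interrupt handler, and (except for admin interrupts)
 * attach the deferred grouptask to a suitable CPU.
 */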
58514c7070dbSScott Long int
58524c7070dbSScott Long iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
58534c7070dbSScott Long 			iflib_intr_type_t type, driver_filter_t *filter,
58543e0e6330SStephen Hurd 			void *filter_arg, int qid, const char *name)
58554c7070dbSScott Long {
5856f855ec81SMarius Strobl 	device_t dev;
58574c7070dbSScott Long 	struct grouptask *gtask;
58584c7070dbSScott Long 	struct taskqgroup *tqg;
58594c7070dbSScott Long 	iflib_filter_info_t info;
586023ac9029SStephen Hurd 	gtask_fn_t *fn;
5861b103855eSStephen Hurd 	int tqrid, err;
586295246abbSSean Bruno 	driver_filter_t *intr_fast;
58634c7070dbSScott Long 	void *q;
58644c7070dbSScott Long 
58654c7070dbSScott Long 	info = &ctx->ifc_filter_info;
5866add6f7d0SSean Bruno 	tqrid = rid;
58674c7070dbSScott Long 
58684c7070dbSScott Long 	switch (type) {
58694c7070dbSScott Long 	/* XXX merge tx/rx for netmap? */
58704c7070dbSScott Long 	case IFLIB_INTR_TX:
58714c7070dbSScott Long 		q = &ctx->ifc_txqs[qid];
58724c7070dbSScott Long 		info = &ctx->ifc_txqs[qid].ift_filter_info;
58734c7070dbSScott Long 		gtask = &ctx->ifc_txqs[qid].ift_task;
5874ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
58754c7070dbSScott Long 		fn = _task_fn_tx;
587695246abbSSean Bruno 		intr_fast = iflib_fast_intr;
5877da69b8f9SSean Bruno 		GROUPTASK_INIT(gtask, 0, fn, q);
58785ee36c68SStephen Hurd 		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
58794c7070dbSScott Long 		break;
58804c7070dbSScott Long 	case IFLIB_INTR_RX:
58814c7070dbSScott Long 		q = &ctx->ifc_rxqs[qid];
58824c7070dbSScott Long 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
58834c7070dbSScott Long 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5884ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
58854c7070dbSScott Long 		fn = _task_fn_rx;
5886ab2e3f79SStephen Hurd 		intr_fast = iflib_fast_intr;
588795246abbSSean Bruno 		GROUPTASK_INIT(gtask, 0, fn, q);
588895246abbSSean Bruno 		break;
588995246abbSSean Bruno 	case IFLIB_INTR_RXTX:
589095246abbSSean Bruno 		q = &ctx->ifc_rxqs[qid];
589195246abbSSean Bruno 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
589295246abbSSean Bruno 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5893ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
589495246abbSSean Bruno 		fn = _task_fn_rx;
589595246abbSSean Bruno 		intr_fast = iflib_fast_intr_rxtx;
5896da69b8f9SSean Bruno 		GROUPTASK_INIT(gtask, 0, fn, q);
58974c7070dbSScott Long 		break;
58984c7070dbSScott Long 	case IFLIB_INTR_ADMIN:
58994c7070dbSScott Long 		q = ctx;
5900da69b8f9SSean Bruno 		tqrid = -1;
59014c7070dbSScott Long 		info = &ctx->ifc_filter_info;
59024c7070dbSScott Long 		gtask = &ctx->ifc_admin_task;
5903ab2e3f79SStephen Hurd 		tqg = qgroup_if_config_tqg;
59044c7070dbSScott Long 		fn = _task_fn_admin;
590595246abbSSean Bruno 		intr_fast = iflib_fast_intr_ctx;
59064c7070dbSScott Long 		break;
59074c7070dbSScott Long 	default:
59084c7070dbSScott Long 		panic("unknown net intr type");
59094c7070dbSScott Long 	}
59104c7070dbSScott Long 
59114c7070dbSScott Long 	info->ifi_filter = filter;
59124c7070dbSScott Long 	info->ifi_filter_arg = filter_arg;
59134c7070dbSScott Long 	info->ifi_task = gtask;
591495246abbSSean Bruno 	info->ifi_ctx = q;
59154c7070dbSScott Long 
5916f855ec81SMarius Strobl 	dev = ctx->ifc_dev;
591795246abbSSean Bruno 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info,  name);
5918da69b8f9SSean Bruno 	if (err != 0) {
5919f855ec81SMarius Strobl 		device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
59204c7070dbSScott Long 		return (err);
5921da69b8f9SSean Bruno 	}
5922da69b8f9SSean Bruno 	if (type == IFLIB_INTR_ADMIN)
5923da69b8f9SSean Bruno 		return (0);
5924da69b8f9SSean Bruno 
59254c7070dbSScott Long 	if (tqrid != -1) {
5926f855ec81SMarius Strobl 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
5927f855ec81SMarius Strobl 		    q, name);
5928b103855eSStephen Hurd 		if (err)
5929b103855eSStephen Hurd 			return (err);
5930aa3c5dd8SSean Bruno 	} else {
5931f855ec81SMarius Strobl 		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
5932aa3c5dd8SSean Bruno 	}
59334c7070dbSScott Long 
59344c7070dbSScott Long 	return (0);
59354c7070dbSScott Long }
59364c7070dbSScott Long 
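/*
 * Set up the deferred grouptask for a queue that has no interrupt of
 * its own; if an irq is supplied, try to bind the task near the CPU
 * servicing that interrupt.
 */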
59374c7070dbSScott Long void
59383e0e6330SStephen Hurd iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
59394c7070dbSScott Long {
59404c7070dbSScott Long 	struct grouptask *gtask;
59414c7070dbSScott Long 	struct taskqgroup *tqg;
594223ac9029SStephen Hurd 	gtask_fn_t *fn;
59434c7070dbSScott Long 	void *q;
5944b103855eSStephen Hurd 	int err;
59454c7070dbSScott Long 
59464c7070dbSScott Long 	switch (type) {
59474c7070dbSScott Long 	case IFLIB_INTR_TX:
59484c7070dbSScott Long 		q = &ctx->ifc_txqs[qid];
59494c7070dbSScott Long 		gtask = &ctx->ifc_txqs[qid].ift_task;
5950ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
59514c7070dbSScott Long 		fn = _task_fn_tx;
59524c7070dbSScott Long 		break;
59534c7070dbSScott Long 	case IFLIB_INTR_RX:
59544c7070dbSScott Long 		q = &ctx->ifc_rxqs[qid];
59554c7070dbSScott Long 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5956ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
59574c7070dbSScott Long 		fn = _task_fn_rx;
59584c7070dbSScott Long 		break;
59594c7070dbSScott Long 	case IFLIB_INTR_IOV:
59604c7070dbSScott Long 		q = ctx;
59614c7070dbSScott Long 		gtask = &ctx->ifc_vflr_task;
5962ab2e3f79SStephen Hurd 		tqg = qgroup_if_config_tqg;
59634c7070dbSScott Long 		fn = _task_fn_iov;
59644c7070dbSScott Long 		break;
59654c7070dbSScott Long 	default:
59664c7070dbSScott Long 		panic("unknown net intr type");
59674c7070dbSScott Long 	}
59684c7070dbSScott Long 	GROUPTASK_INIT(gtask, 0, fn, q);
5969f855ec81SMarius Strobl 	if (irq != NULL) {
5970f855ec81SMarius Strobl 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
5971f855ec81SMarius Strobl 		    q, name);
5972b103855eSStephen Hurd 		if (err)
5973f855ec81SMarius Strobl 			taskqgroup_attach(tqg, gtask, q, ctx->ifc_dev,
5974f855ec81SMarius Strobl 			    irq->ii_res, name);
5975f855ec81SMarius Strobl 	} else {
5976f855ec81SMarius Strobl 		taskqgroup_attach(tqg, gtask, q, NULL, NULL, name);
5977b103855eSStephen Hurd 	}
5978b103855eSStephen Hurd }
59794c7070dbSScott Long 
59804c7070dbSScott Long void
59814c7070dbSScott Long iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
59824c7070dbSScott Long {
5983b97de13aSMarius Strobl 
59844c7070dbSScott Long 	if (irq->ii_tag)
59854c7070dbSScott Long 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
59864c7070dbSScott Long 
59874c7070dbSScott Long 	if (irq->ii_res)
5988b97de13aSMarius Strobl 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
5989b97de13aSMarius Strobl 		    rman_get_rid(irq->ii_res), irq->ii_res);
59904c7070dbSScott Long }
59914c7070dbSScott Long 
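/*
 * Set up the single shared interrupt used in legacy INTx/MSI mode and
 * attach the RX and TX grouptasks for queue 0 to it.
 */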
59924c7070dbSScott Long static int
59933e0e6330SStephen Hurd iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
59944c7070dbSScott Long {
59954c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
59964c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
59974c7070dbSScott Long 	if_irq_t irq = &ctx->ifc_legacy_irq;
59984c7070dbSScott Long 	iflib_filter_info_t info;
5999f855ec81SMarius Strobl 	device_t dev;
60004c7070dbSScott Long 	struct grouptask *gtask;
6001f855ec81SMarius Strobl 	struct resource *res;
60024c7070dbSScott Long 	struct taskqgroup *tqg;
600323ac9029SStephen Hurd 	gtask_fn_t *fn;
60044c7070dbSScott Long 	int tqrid;
60054c7070dbSScott Long 	void *q;
60064c7070dbSScott Long 	int err;
60074c7070dbSScott Long 
60084c7070dbSScott Long 	q = &ctx->ifc_rxqs[0];
60094c7070dbSScott Long 	info = &rxq[0].ifr_filter_info;
60104c7070dbSScott Long 	gtask = &rxq[0].ifr_task;
6011ab2e3f79SStephen Hurd 	tqg = qgroup_if_io_tqg;
60124c7070dbSScott Long 	tqrid = irq->ii_rid = *rid;
60134c7070dbSScott Long 	fn = _task_fn_rx;
60144c7070dbSScott Long 
60154c7070dbSScott Long 	ctx->ifc_flags |= IFC_LEGACY;
60164c7070dbSScott Long 	info->ifi_filter = filter;
60174c7070dbSScott Long 	info->ifi_filter_arg = filter_arg;
60184c7070dbSScott Long 	info->ifi_task = gtask;
60194ecb427aSSean Bruno 	info->ifi_ctx = ctx;
60204c7070dbSScott Long 
6021f855ec81SMarius Strobl 	dev = ctx->ifc_dev;
60224c7070dbSScott Long 	/* We allocate a single interrupt resource */
602395246abbSSean Bruno 	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
60244c7070dbSScott Long 		return (err);
60254c7070dbSScott Long 	GROUPTASK_INIT(gtask, 0, fn, q);
6026f855ec81SMarius Strobl 	res = irq->ii_res;
6027f855ec81SMarius Strobl 	taskqgroup_attach(tqg, gtask, q, dev, res, name);
60284c7070dbSScott Long 
60294c7070dbSScott Long 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
6030f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
6031f855ec81SMarius Strobl 	    "tx");
60324c7070dbSScott Long 	return (0);
60334c7070dbSScott Long }
60344c7070dbSScott Long 
60354c7070dbSScott Long void
60364c7070dbSScott Long iflib_led_create(if_ctx_t ctx)
60374c7070dbSScott Long {
60384c7070dbSScott Long 
60394c7070dbSScott Long 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
60404c7070dbSScott Long 	    device_get_nameunit(ctx->ifc_dev));
60414c7070dbSScott Long }
60424c7070dbSScott Long 
60434c7070dbSScott Long void
60444c7070dbSScott Long iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
60454c7070dbSScott Long {
60464c7070dbSScott Long 
60474c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
60484c7070dbSScott Long }
60494c7070dbSScott Long 
60504c7070dbSScott Long void
60514c7070dbSScott Long iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
60524c7070dbSScott Long {
60534c7070dbSScott Long 
60544c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
60554c7070dbSScott Long }
60564c7070dbSScott Long 
60574c7070dbSScott Long void
60584c7070dbSScott Long iflib_admin_intr_deferred(if_ctx_t ctx)
60594c7070dbSScott Long {
60601248952aSSean Bruno #ifdef INVARIANTS
60611248952aSSean Bruno 	struct grouptask *gtask;
606246fa0c25SEric Joyner 
60631248952aSSean Bruno 	gtask = &ctx->ifc_admin_task;
6064d0d0ad0aSStephen Hurd 	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
60651248952aSSean Bruno #endif
60664c7070dbSScott Long 
60674c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
60684c7070dbSScott Long }
60694c7070dbSScott Long 
60704c7070dbSScott Long void
60714c7070dbSScott Long iflib_iov_intr_deferred(if_ctx_t ctx)
60724c7070dbSScott Long {
60734c7070dbSScott Long 
60744c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
60754c7070dbSScott Long }
60764c7070dbSScott Long 
60774c7070dbSScott Long void
60784c7070dbSScott Long iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
60794c7070dbSScott Long {
60804c7070dbSScott Long 
6081f855ec81SMarius Strobl 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
6082f855ec81SMarius Strobl 	    name);
60834c7070dbSScott Long }
60844c7070dbSScott Long 
60854c7070dbSScott Long void
6086aa8a24d3SStephen Hurd iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
6087aa8a24d3SStephen Hurd 	const char *name)
60884c7070dbSScott Long {
60894c7070dbSScott Long 
60904c7070dbSScott Long 	GROUPTASK_INIT(gtask, 0, fn, ctx);
6091f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL,
6092f855ec81SMarius Strobl 	    name);
60934c7070dbSScott Long }
60944c7070dbSScott Long 
60954c7070dbSScott Long void
609623ac9029SStephen Hurd iflib_config_gtask_deinit(struct grouptask *gtask)
609723ac9029SStephen Hurd {
609823ac9029SStephen Hurd 
6099ab2e3f79SStephen Hurd 	taskqgroup_detach(qgroup_if_config_tqg, gtask);
610023ac9029SStephen Hurd }
610123ac9029SStephen Hurd 
610223ac9029SStephen Hurd void
610323ac9029SStephen Hurd iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
61044c7070dbSScott Long {
61054c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
61064c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
61074c7070dbSScott Long 
61084c7070dbSScott Long 	if_setbaudrate(ifp, baudrate);
61097b610b60SSean Bruno 	if (baudrate >= IF_Gbps(10)) {
61107b610b60SSean Bruno 		STATE_LOCK(ctx);
611195246abbSSean Bruno 		ctx->ifc_flags |= IFC_PREFETCH;
61127b610b60SSean Bruno 		STATE_UNLOCK(ctx);
61137b610b60SSean Bruno 	}
61144c7070dbSScott Long 	/* If link down, disable watchdog */
61154c7070dbSScott Long 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
61164c7070dbSScott Long 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
61174c7070dbSScott Long 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
61184c7070dbSScott Long 	}
61194c7070dbSScott Long 	ctx->ifc_link_state = link_state;
61204c7070dbSScott Long 	if_link_state_change(ifp, link_state);
61214c7070dbSScott Long }
61224c7070dbSScott Long 
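/*
 * Ask the driver how many TX descriptors the hardware has completed and
 * advance the queue's processed counters accordingly; returns the number
 * of newly completed descriptors.
 */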
61234c7070dbSScott Long static int
61244c7070dbSScott Long iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
61254c7070dbSScott Long {
61264c7070dbSScott Long 	int credits;
61271248952aSSean Bruno #ifdef INVARIANTS
61281248952aSSean Bruno 	int credits_pre = txq->ift_cidx_processed;
61291248952aSSean Bruno #endif
61304c7070dbSScott Long 
61314c7070dbSScott Long 	if (ctx->isc_txd_credits_update == NULL)
61324c7070dbSScott Long 		return (0);
61334c7070dbSScott Long 
61348a04b53dSKonstantin Belousov 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
61358a04b53dSKonstantin Belousov 	    BUS_DMASYNC_POSTREAD);
613695246abbSSean Bruno 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
61374c7070dbSScott Long 		return (0);
61384c7070dbSScott Long 
61394c7070dbSScott Long 	txq->ift_processed += credits;
61404c7070dbSScott Long 	txq->ift_cidx_processed += credits;
61414c7070dbSScott Long 
61421248952aSSean Bruno 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
61434c7070dbSScott Long 	if (txq->ift_cidx_processed >= txq->ift_size)
61444c7070dbSScott Long 		txq->ift_cidx_processed -= txq->ift_size;
61454c7070dbSScott Long 	return (credits);
61464c7070dbSScott Long }
61474c7070dbSScott Long 
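/*
 * Sync the free list descriptor rings and ask the driver how many RX
 * descriptors are available at cidx, bounded by budget.
 */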
61484c7070dbSScott Long static int
614995246abbSSean Bruno iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
61504c7070dbSScott Long {
615195dcf343SMarius Strobl 	iflib_fl_t fl;
615295dcf343SMarius Strobl 	u_int i;
61534c7070dbSScott Long 
615495dcf343SMarius Strobl 	for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
615595dcf343SMarius Strobl 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
615695dcf343SMarius Strobl 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
615723ac9029SStephen Hurd 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
615823ac9029SStephen Hurd 	    budget));
61594c7070dbSScott Long }
61604c7070dbSScott Long 
61614c7070dbSScott Long void
61624c7070dbSScott Long iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
61634c7070dbSScott Long 	const char *description, if_int_delay_info_t info,
61644c7070dbSScott Long 	int offset, int value)
61654c7070dbSScott Long {
61664c7070dbSScott Long 	info->iidi_ctx = ctx;
61674c7070dbSScott Long 	info->iidi_offset = offset;
61684c7070dbSScott Long 	info->iidi_value = value;
61694c7070dbSScott Long 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
61704c7070dbSScott Long 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
61714c7070dbSScott Long 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
61724c7070dbSScott Long 	    info, 0, iflib_sysctl_int_delay, "I", description);
61734c7070dbSScott Long }
61744c7070dbSScott Long 
6175aa8a24d3SStephen Hurd struct sx *
61764c7070dbSScott Long iflib_ctx_lock_get(if_ctx_t ctx)
61774c7070dbSScott Long {
61784c7070dbSScott Long 
6179aa8a24d3SStephen Hurd 	return (&ctx->ifc_ctx_sx);
61804c7070dbSScott Long }
61814c7070dbSScott Long 
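/*
 * Determine the interrupt mode (MSI-X, MSI, or legacy INTx) and the
 * number of TX/RX queue sets to use, within the limits imposed by the
 * hardware, RSS, and the iflib sysctls/tunables.
 */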
61824c7070dbSScott Long static int
61834c7070dbSScott Long iflib_msix_init(if_ctx_t ctx)
61844c7070dbSScott Long {
61854c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
61864c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
61874c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
61884c7070dbSScott Long 	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
61894c7070dbSScott Long 	int iflib_num_tx_queues, iflib_num_rx_queues;
61904c7070dbSScott Long 	int err, admincnt, bar;
61914c7070dbSScott Long 
6192d2735264SStephen Hurd 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
6193d2735264SStephen Hurd 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
619423ac9029SStephen Hurd 
6195b97de13aSMarius Strobl 	if (bootverbose)
6196b97de13aSMarius Strobl 		device_printf(dev, "msix_init qsets capped at %d\n",
6197b97de13aSMarius Strobl 		    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
61981248952aSSean Bruno 
61994c7070dbSScott Long 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
62004c7070dbSScott Long 	admincnt = sctx->isc_admin_intrcnt;
62014c7070dbSScott Long 	/* Override by tuneable */
6202ea351d3fSSean Bruno 	if (scctx->isc_disable_msix)
62034c7070dbSScott Long 		goto msi;
62044c7070dbSScott Long 
6205b97de13aSMarius Strobl 	/* First try MSI-X */
6206b97de13aSMarius Strobl 	if ((msgs = pci_msix_count(dev)) == 0) {
6207b97de13aSMarius Strobl 		if (bootverbose)
6208b97de13aSMarius Strobl 			device_printf(dev, "MSI-X not supported or disabled\n");
6209b97de13aSMarius Strobl 		goto msi;
6210b97de13aSMarius Strobl 	}
62114c7070dbSScott Long 	/*
62124c7070dbSScott Long 	 * bar == -1 => "trust me I know what I'm doing"
62134c7070dbSScott Long 	 * Some drivers are for hardware that is so shoddily
62144c7070dbSScott Long 	 * documented that no one knows which bars are which,
62154c7070dbSScott Long 	 * so the developer has to map all bars. This hack
6216b97de13aSMarius Strobl 	 * allows shoddy garbage to use MSI-X in this framework.
62174c7070dbSScott Long 	 */
62184c7070dbSScott Long 	if (bar != -1) {
62194c7070dbSScott Long 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
62204c7070dbSScott Long 	            SYS_RES_MEMORY, &bar, RF_ACTIVE);
62214c7070dbSScott Long 		if (ctx->ifc_msix_mem == NULL) {
6222b97de13aSMarius Strobl 			device_printf(dev, "Unable to map MSI-X table\n");
62234c7070dbSScott Long 			goto msi;
62244c7070dbSScott Long 		}
62254c7070dbSScott Long 	}
62264c7070dbSScott Long #if IFLIB_DEBUG
62274c7070dbSScott Long 	/* use only 1 qset in debug mode */
62284c7070dbSScott Long 	queuemsgs = min(msgs - admincnt, 1);
62294c7070dbSScott Long #else
62304c7070dbSScott Long 	queuemsgs = msgs - admincnt;
62314c7070dbSScott Long #endif
62324c7070dbSScott Long #ifdef RSS
62334c7070dbSScott Long 	queues = imin(queuemsgs, rss_getnumbuckets());
62344c7070dbSScott Long #else
62354c7070dbSScott Long 	queues = queuemsgs;
62364c7070dbSScott Long #endif
62374c7070dbSScott Long 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6238b97de13aSMarius Strobl 	if (bootverbose)
6239b97de13aSMarius Strobl 		device_printf(dev,
6240b97de13aSMarius Strobl 		    "intr CPUs: %d queue msgs: %d admincnt: %d\n",
62414c7070dbSScott Long 		    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
62424c7070dbSScott Long #ifdef  RSS
62434c7070dbSScott Long 	/* If we're doing RSS, clamp at the number of RSS buckets */
62444c7070dbSScott Long 	if (queues > rss_getnumbuckets())
62454c7070dbSScott Long 		queues = rss_getnumbuckets();
62464c7070dbSScott Long #endif
624723ac9029SStephen Hurd 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
624823ac9029SStephen Hurd 		rx_queues = iflib_num_rx_queues;
62494c7070dbSScott Long 	else
62504c7070dbSScott Long 		rx_queues = queues;
6251d2735264SStephen Hurd 
6252d2735264SStephen Hurd 	if (rx_queues > scctx->isc_nrxqsets)
6253d2735264SStephen Hurd 		rx_queues = scctx->isc_nrxqsets;
6254d2735264SStephen Hurd 
625523ac9029SStephen Hurd 	/*
625623ac9029SStephen Hurd 	 * We want this to be all logical CPUs by default
625723ac9029SStephen Hurd 	 */
62584c7070dbSScott Long 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
62594c7070dbSScott Long 		tx_queues = iflib_num_tx_queues;
62604c7070dbSScott Long 	else
626123ac9029SStephen Hurd 		tx_queues = mp_ncpus;
626223ac9029SStephen Hurd 
6263d2735264SStephen Hurd 	if (tx_queues > scctx->isc_ntxqsets)
6264d2735264SStephen Hurd 		tx_queues = scctx->isc_ntxqsets;
6265d2735264SStephen Hurd 
626623ac9029SStephen Hurd 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
626723ac9029SStephen Hurd #ifdef INVARIANTS
626823ac9029SStephen Hurd 		if (tx_queues != rx_queues)
626977c1fcecSEric Joyner 			device_printf(dev,
627077c1fcecSEric Joyner 			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
627123ac9029SStephen Hurd 			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
627223ac9029SStephen Hurd #endif
627323ac9029SStephen Hurd 		tx_queues = min(rx_queues, tx_queues);
627423ac9029SStephen Hurd 		rx_queues = min(rx_queues, tx_queues);
627523ac9029SStephen Hurd 	}
62764c7070dbSScott Long 
6277b97de13aSMarius Strobl 	device_printf(dev, "Using %d rx queues %d tx queues\n",
6278b97de13aSMarius Strobl 	    rx_queues, tx_queues);
62794c7070dbSScott Long 
6280ab2e3f79SStephen Hurd 	vectors = rx_queues + admincnt;
62814c7070dbSScott Long 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
6282b97de13aSMarius Strobl 		device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
6283b97de13aSMarius Strobl 		    vectors);
62844c7070dbSScott Long 		scctx->isc_vectors = vectors;
62854c7070dbSScott Long 		scctx->isc_nrxqsets = rx_queues;
62864c7070dbSScott Long 		scctx->isc_ntxqsets = tx_queues;
62874c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_MSIX;
628823ac9029SStephen Hurd 
62894c7070dbSScott Long 		return (vectors);
62904c7070dbSScott Long 	} else {
629177c1fcecSEric Joyner 		device_printf(dev,
6292b97de13aSMarius Strobl 		    "failed to allocate %d MSI-X vectors, err: %d - using MSI\n",
6293b97de13aSMarius Strobl 		    vectors, err);
6294e4defe55SMarius Strobl 		bus_release_resource(dev, SYS_RES_MEMORY, bar,
6295e4defe55SMarius Strobl 		    ctx->ifc_msix_mem);
6296e4defe55SMarius Strobl 		ctx->ifc_msix_mem = NULL;
62974c7070dbSScott Long 	}
62984c7070dbSScott Long msi:
62994c7070dbSScott Long 	vectors = pci_msi_count(dev);
63004c7070dbSScott Long 	scctx->isc_nrxqsets = 1;
63014c7070dbSScott Long 	scctx->isc_ntxqsets = 1;
63024c7070dbSScott Long 	scctx->isc_vectors = vectors;
63034c7070dbSScott Long 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
63044c7070dbSScott Long 		device_printf(dev, "Using an MSI interrupt\n");
63054c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_MSI;
63064c7070dbSScott Long 	} else {
6307e4defe55SMarius Strobl 		scctx->isc_vectors = 1;
63084c7070dbSScott Long 		device_printf(dev, "Using a Legacy interrupt\n");
63094c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_LEGACY;
63104c7070dbSScott Long 	}
63114c7070dbSScott Long 
63124c7070dbSScott Long 	return (vectors);
63134c7070dbSScott Long }
63144c7070dbSScott Long 
6315e4defe55SMarius Strobl static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
63164c7070dbSScott Long 
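/*
 * Sysctl handler that reports the soft TX ring (mp_ring) indexes and
 * state as a human-readable string.
 */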
63174c7070dbSScott Long static int
63184c7070dbSScott Long mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
63194c7070dbSScott Long {
63204c7070dbSScott Long 	int rc;
63214c7070dbSScott Long 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
63224c7070dbSScott Long 	struct sbuf *sb;
6323e4defe55SMarius Strobl 	const char *ring_state = "UNKNOWN";
63244c7070dbSScott Long 
63254c7070dbSScott Long 	/* XXX needed ? */
63264c7070dbSScott Long 	rc = sysctl_wire_old_buffer(req, 0);
63274c7070dbSScott Long 	MPASS(rc == 0);
63284c7070dbSScott Long 	if (rc != 0)
63294c7070dbSScott Long 		return (rc);
63304c7070dbSScott Long 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
63314c7070dbSScott Long 	MPASS(sb != NULL);
63324c7070dbSScott Long 	if (sb == NULL)
63334c7070dbSScott Long 		return (ENOMEM);
63344c7070dbSScott Long 	if (state[3] <= 3)
63354c7070dbSScott Long 		ring_state = ring_states[state[3]];
63364c7070dbSScott Long 
63374c7070dbSScott Long 	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
63384c7070dbSScott Long 		    state[0], state[1], state[2], ring_state);
63394c7070dbSScott Long 	rc = sbuf_finish(sb);
63404c7070dbSScott Long 	sbuf_delete(sb);
63414c7070dbSScott Long 	return (rc);
63424c7070dbSScott Long }
63434c7070dbSScott Long 
634423ac9029SStephen Hurd enum iflib_ndesc_handler {
634523ac9029SStephen Hurd 	IFLIB_NTXD_HANDLER,
634623ac9029SStephen Hurd 	IFLIB_NRXD_HANDLER,
634723ac9029SStephen Hurd };
63484c7070dbSScott Long 
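/*
 * Sysctl handler for the override_ntxds/override_nrxds tunables: report
 * the current per-queue descriptor counts and parse a comma- or
 * space-separated list of new values.
 */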
634923ac9029SStephen Hurd static int
635023ac9029SStephen Hurd mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
635123ac9029SStephen Hurd {
635223ac9029SStephen Hurd 	if_ctx_t ctx = (void *)arg1;
635323ac9029SStephen Hurd 	enum iflib_ndesc_handler type = arg2;
635423ac9029SStephen Hurd 	char buf[256] = {0};
635595246abbSSean Bruno 	qidx_t *ndesc;
635623ac9029SStephen Hurd 	char *p, *next;
635723ac9029SStephen Hurd 	int nqs, rc, i;
635823ac9029SStephen Hurd 
635923ac9029SStephen Hurd 	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
636023ac9029SStephen Hurd 
636123ac9029SStephen Hurd 	nqs = 8;
636223ac9029SStephen Hurd 	switch(type) {
636323ac9029SStephen Hurd 	case IFLIB_NTXD_HANDLER:
636423ac9029SStephen Hurd 		ndesc = ctx->ifc_sysctl_ntxds;
636523ac9029SStephen Hurd 		if (ctx->ifc_sctx)
636623ac9029SStephen Hurd 			nqs = ctx->ifc_sctx->isc_ntxqs;
636723ac9029SStephen Hurd 		break;
636823ac9029SStephen Hurd 	case IFLIB_NRXD_HANDLER:
636923ac9029SStephen Hurd 		ndesc = ctx->ifc_sysctl_nrxds;
637023ac9029SStephen Hurd 		if (ctx->ifc_sctx)
637123ac9029SStephen Hurd 			nqs = ctx->ifc_sctx->isc_nrxqs;
637223ac9029SStephen Hurd 		break;
63731ae4848cSMatt Macy 	default:
63741ae4848cSMatt Macy 		panic("unhandled type");
637523ac9029SStephen Hurd 	}
637623ac9029SStephen Hurd 	if (nqs == 0)
637723ac9029SStephen Hurd 		nqs = 8;
637823ac9029SStephen Hurd 
637923ac9029SStephen Hurd 	for (i = 0; i < 8; i++) {
638023ac9029SStephen Hurd 		if (i >= nqs)
638123ac9029SStephen Hurd 			break;
638223ac9029SStephen Hurd 		if (i)
638323ac9029SStephen Hurd 			strcat(buf, ",");
638423ac9029SStephen Hurd 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
638523ac9029SStephen Hurd 	}
638623ac9029SStephen Hurd 
638723ac9029SStephen Hurd 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
638823ac9029SStephen Hurd 	if (rc || req->newptr == NULL)
638923ac9029SStephen Hurd 		return rc;
639023ac9029SStephen Hurd 
639123ac9029SStephen Hurd 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
639223ac9029SStephen Hurd 	    i++, p = strsep(&next, " ,")) {
639323ac9029SStephen Hurd 		ndesc[i] = strtoul(p, NULL, 10);
639423ac9029SStephen Hurd 	}
639523ac9029SStephen Hurd 
639623ac9029SStephen Hurd 	return (rc);
639723ac9029SStephen Hurd }
63984c7070dbSScott Long 
63994c7070dbSScott Long #define NAME_BUFLEN 32
64004c7070dbSScott Long static void
64014c7070dbSScott Long iflib_add_device_sysctl_pre(if_ctx_t ctx)
64024c7070dbSScott Long {
64034c7070dbSScott Long         device_t dev = iflib_get_dev(ctx);
64044c7070dbSScott Long 	struct sysctl_oid_list *child, *oid_list;
64054c7070dbSScott Long 	struct sysctl_ctx_list *ctx_list;
64064c7070dbSScott Long 	struct sysctl_oid *node;
64074c7070dbSScott Long 
64084c7070dbSScott Long 	ctx_list = device_get_sysctl_ctx(dev);
64094c7070dbSScott Long 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
64104c7070dbSScott Long 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
64114c7070dbSScott Long 						      CTLFLAG_RD, NULL, "IFLIB fields");
64124c7070dbSScott Long 	oid_list = SYSCTL_CHILDREN(node);
64134c7070dbSScott Long 
641410a1e981SEric Joyner 	SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
641510a1e981SEric Joyner 		       CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version,
641623ac9029SStephen Hurd 		       "driver version");
641723ac9029SStephen Hurd 
64184c7070dbSScott Long 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
64194c7070dbSScott Long 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
64204c7070dbSScott Long 			"# of txqs to use, 0 => use default #");
64214c7070dbSScott Long 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
642223ac9029SStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
642323ac9029SStephen Hurd 			"# of rxqs to use, 0 => use default #");
642423ac9029SStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
642523ac9029SStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
642623ac9029SStephen Hurd                        "permit #txq != #rxq");
6427ea351d3fSSean Bruno 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6428ea351d3fSSean Bruno                       CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6429b97de13aSMarius Strobl                       "disable MSI-X (default 0)");
6430f4d2154eSStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6431f4d2154eSStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
6432f4d2154eSStephen Hurd                        "set the rx budget");
6433fe51d4cdSStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6434fe51d4cdSStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
6435fe51d4cdSStephen Hurd 		       "cause tx to abdicate instead of running to completion");
6436*f154ece0SStephen Hurd 	ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
6437*f154ece0SStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset",
6438*f154ece0SStephen Hurd 		       CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
6439*f154ece0SStephen Hurd 		       "offset to start using cores at");
6440*f154ece0SStephen Hurd 	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx",
6441*f154ece0SStephen Hurd 		       CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
6442*f154ece0SStephen Hurd 		       "use separate cores for TX and RX");
64434c7070dbSScott Long 
644423ac9029SStephen Hurd 	/* XXX change for per-queue sizes */
644523ac9029SStephen Hurd 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
644623ac9029SStephen Hurd 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
644723ac9029SStephen Hurd                        mp_ndesc_handler, "A",
644823ac9029SStephen Hurd                        "list of # of tx descriptors to use, 0 = use default #");
644923ac9029SStephen Hurd 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
645023ac9029SStephen Hurd 		       CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
645123ac9029SStephen Hurd                        mp_ndesc_handler, "A",
645223ac9029SStephen Hurd                        "list of # of rx descriptors to use, 0 = use default #");
64534c7070dbSScott Long }
64544c7070dbSScott Long 
64554c7070dbSScott Long static void
64564c7070dbSScott Long iflib_add_device_sysctl_post(if_ctx_t ctx)
64574c7070dbSScott Long {
64584c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
64594c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
64604c7070dbSScott Long         device_t dev = iflib_get_dev(ctx);
64614c7070dbSScott Long 	struct sysctl_oid_list *child;
64624c7070dbSScott Long 	struct sysctl_ctx_list *ctx_list;
64634c7070dbSScott Long 	iflib_fl_t fl;
64644c7070dbSScott Long 	iflib_txq_t txq;
64654c7070dbSScott Long 	iflib_rxq_t rxq;
64664c7070dbSScott Long 	int i, j;
64674c7070dbSScott Long 	char namebuf[NAME_BUFLEN];
64684c7070dbSScott Long 	char *qfmt;
64694c7070dbSScott Long 	struct sysctl_oid *queue_node, *fl_node, *node;
64704c7070dbSScott Long 	struct sysctl_oid_list *queue_list, *fl_list;
64714c7070dbSScott Long 	ctx_list = device_get_sysctl_ctx(dev);
64724c7070dbSScott Long 
64734c7070dbSScott Long 	node = ctx->ifc_sysctl_node;
64744c7070dbSScott Long 	child = SYSCTL_CHILDREN(node);
64754c7070dbSScott Long 
64764c7070dbSScott Long 	if (scctx->isc_ntxqsets > 100)
64774c7070dbSScott Long 		qfmt = "txq%03d";
64784c7070dbSScott Long 	else if (scctx->isc_ntxqsets > 10)
64794c7070dbSScott Long 		qfmt = "txq%02d";
64804c7070dbSScott Long 	else
64814c7070dbSScott Long 		qfmt = "txq%d";
64824c7070dbSScott Long 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
64834c7070dbSScott Long 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
64844c7070dbSScott Long 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
64854c7070dbSScott Long 					     CTLFLAG_RD, NULL, "Queue Name");
64864c7070dbSScott Long 		queue_list = SYSCTL_CHILDREN(queue_node);
64874c7070dbSScott Long #if MEMORY_LOGGING
64884c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
64894c7070dbSScott Long 				CTLFLAG_RD,
64904c7070dbSScott Long 				&txq->ift_dequeued, "total mbufs freed");
64914c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
64924c7070dbSScott Long 				CTLFLAG_RD,
64934c7070dbSScott Long 				&txq->ift_enqueued, "total mbufs enqueued");
64944c7070dbSScott Long #endif
64954c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
64964c7070dbSScott Long 				   CTLFLAG_RD,
64974c7070dbSScott Long 				   &txq->ift_mbuf_defrag, "# of times m_defrag was called");
64984c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
64994c7070dbSScott Long 				   CTLFLAG_RD,
65004c7070dbSScott Long 				   &txq->ift_pullups, "# of times m_pullup was called");
65014c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
65024c7070dbSScott Long 				   CTLFLAG_RD,
65034c7070dbSScott Long 				   &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
65044c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
65054c7070dbSScott Long 				   CTLFLAG_RD,
650623ac9029SStephen Hurd 				   &txq->ift_no_desc_avail, "# of times no descriptors were available");
65074c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
65084c7070dbSScott Long 				   CTLFLAG_RD,
65094c7070dbSScott Long 				   &txq->ift_map_failed, "# of times dma map failed");
65104c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
65114c7070dbSScott Long 				   CTLFLAG_RD,
65124c7070dbSScott Long 				   &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
65134c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
65144c7070dbSScott Long 				   CTLFLAG_RD,
65154c7070dbSScott Long 				   &txq->ift_no_tx_dma_setup, "# of times map failed for reasons other than EFBIG");
65164c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
65174c7070dbSScott Long 				   CTLFLAG_RD,
65184c7070dbSScott Long 				   &txq->ift_pidx, 1, "Producer Index");
65194c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
65204c7070dbSScott Long 				   CTLFLAG_RD,
65214c7070dbSScott Long 				   &txq->ift_cidx, 1, "Consumer Index");
65224c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
65234c7070dbSScott Long 				   CTLFLAG_RD,
65244c7070dbSScott Long 				   &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
65254c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
65264c7070dbSScott Long 				   CTLFLAG_RD,
65274c7070dbSScott Long 				   &txq->ift_in_use, 1, "descriptors in use");
65284c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
65294c7070dbSScott Long 				   CTLFLAG_RD,
65304c7070dbSScott Long 				   &txq->ift_processed, "descriptors processed for clean");
65314c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
65324c7070dbSScott Long 				   CTLFLAG_RD,
65334c7070dbSScott Long 				   &txq->ift_cleaned, "total cleaned");
65344c7070dbSScott Long 		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
653595246abbSSean Bruno 				CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
65364c7070dbSScott Long 				0, mp_ring_state_handler, "A", "soft ring state");
65374c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
653895246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->enqueues,
65394c7070dbSScott Long 				       "# of enqueues to the mp_ring for this queue");
65404c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
654195246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->drops,
65424c7070dbSScott Long 				       "# of drops in the mp_ring for this queue");
65434c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
654495246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->starts,
65454c7070dbSScott Long 				       "# of normal consumer starts in the mp_ring for this queue");
65464c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
654795246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->stalls,
65484c7070dbSScott Long 				       "# of consumer stalls in the mp_ring for this queue");
65494c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
655095246abbSSean Bruno 			       CTLFLAG_RD, &txq->ift_br->restarts,
65514c7070dbSScott Long 				       "# of consumer restarts in the mp_ring for this queue");
65524c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
655395246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->abdications,
65544c7070dbSScott Long 				       "# of consumer abdications in the mp_ring for this queue");
65554c7070dbSScott Long 	}
65564c7070dbSScott Long 
65574c7070dbSScott Long 	if (scctx->isc_nrxqsets > 100)
65584c7070dbSScott Long 		qfmt = "rxq%03d";
65594c7070dbSScott Long 	else if (scctx->isc_nrxqsets > 10)
65604c7070dbSScott Long 		qfmt = "rxq%02d";
65614c7070dbSScott Long 	else
65624c7070dbSScott Long 		qfmt = "rxq%d";
65634c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
65644c7070dbSScott Long 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
65654c7070dbSScott Long 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
65664c7070dbSScott Long 					     CTLFLAG_RD, NULL, "Queue Name");
65674c7070dbSScott Long 		queue_list = SYSCTL_CHILDREN(queue_node);
656823ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
65694c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
65704c7070dbSScott Long 				       CTLFLAG_RD,
65714c7070dbSScott Long 				       &rxq->ifr_cq_pidx, 1, "Producer Index");
65724c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
65734c7070dbSScott Long 				       CTLFLAG_RD,
65744c7070dbSScott Long 				       &rxq->ifr_cq_cidx, 1, "Consumer Index");
65754c7070dbSScott Long 		}
6576da69b8f9SSean Bruno 
65774c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
65784c7070dbSScott Long 			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
65794c7070dbSScott Long 			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
65804c7070dbSScott Long 						     CTLFLAG_RD, NULL, "freelist Name");
65814c7070dbSScott Long 			fl_list = SYSCTL_CHILDREN(fl_node);
65824c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
65834c7070dbSScott Long 				       CTLFLAG_RD,
65844c7070dbSScott Long 				       &fl->ifl_pidx, 1, "Producer Index");
65854c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
65864c7070dbSScott Long 				       CTLFLAG_RD,
65874c7070dbSScott Long 				       &fl->ifl_cidx, 1, "Consumer Index");
65884c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
65894c7070dbSScott Long 				       CTLFLAG_RD,
65904c7070dbSScott Long 				       &fl->ifl_credits, 1, "credits available");
65914c7070dbSScott Long #if MEMORY_LOGGING
65924c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
65934c7070dbSScott Long 					CTLFLAG_RD,
65944c7070dbSScott Long 					&fl->ifl_m_enqueued, "mbufs allocated");
65954c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
65964c7070dbSScott Long 					CTLFLAG_RD,
65974c7070dbSScott Long 					&fl->ifl_m_dequeued, "mbufs freed");
65984c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
65994c7070dbSScott Long 					CTLFLAG_RD,
66004c7070dbSScott Long 					&fl->ifl_cl_enqueued, "clusters allocated");
66014c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
66024c7070dbSScott Long 					CTLFLAG_RD,
66034c7070dbSScott Long 					&fl->ifl_cl_dequeued, "clusters freed");
66044c7070dbSScott Long #endif
66054c7070dbSScott Long 
66064c7070dbSScott Long 		}
66074c7070dbSScott Long 	}
66084c7070dbSScott Long 
66094c7070dbSScott Long }
661095246abbSSean Bruno 
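/*
 * Request that the interface be reinitialized.  Only the IFC_DO_RESET flag
 * is latched here, under the state lock; the actual reinitialization is
 * carried out later by iflib's admin task.  A driver may call this, for
 * example, from a watchdog or error interrupt path that cannot block.
 */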
661177c1fcecSEric Joyner void
661277c1fcecSEric Joyner iflib_request_reset(if_ctx_t ctx)
661377c1fcecSEric Joyner {
661477c1fcecSEric Joyner 
661577c1fcecSEric Joyner 	STATE_LOCK(ctx);
661677c1fcecSEric Joyner 	ctx->ifc_flags |= IFC_DO_RESET;
661777c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
661877c1fcecSEric Joyner }
661977c1fcecSEric Joyner 
662095246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
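/*
 * The 14-byte Ethernet header leaves the IP header misaligned on
 * strict-alignment architectures.  For frames that fit in a cluster, shift
 * the frame up ETHER_HDR_LEN bytes so the payload becomes 32-bit aligned;
 * otherwise split the Ethernet header off into its own mbuf and chain the
 * remainder of the packet behind it.
 */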
662195246abbSSean Bruno static struct mbuf *
662295246abbSSean Bruno iflib_fixup_rx(struct mbuf *m)
662395246abbSSean Bruno {
662495246abbSSean Bruno 	struct mbuf *n;
662595246abbSSean Bruno 
662695246abbSSean Bruno 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
662795246abbSSean Bruno 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
662895246abbSSean Bruno 		m->m_data += ETHER_HDR_LEN;
662995246abbSSean Bruno 		n = m;
663095246abbSSean Bruno 	} else {
663195246abbSSean Bruno 		MGETHDR(n, M_NOWAIT, MT_DATA);
663295246abbSSean Bruno 		if (n == NULL) {
663395246abbSSean Bruno 			m_freem(m);
663495246abbSSean Bruno 			return (NULL);
663595246abbSSean Bruno 		}
663695246abbSSean Bruno 		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
663795246abbSSean Bruno 		m->m_data += ETHER_HDR_LEN;
663895246abbSSean Bruno 		m->m_len -= ETHER_HDR_LEN;
663995246abbSSean Bruno 		n->m_len = ETHER_HDR_LEN;
664095246abbSSean Bruno 		M_MOVE_PKTHDR(n, m);
664195246abbSSean Bruno 		n->m_next = m;
664295246abbSSean Bruno 	}
664395246abbSSean Bruno 	return (n);
664495246abbSSean Bruno }
664595246abbSSean Bruno #endif
664694618825SMark Johnston 
664794618825SMark Johnston #ifdef NETDUMP
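/*
 * Report the receive geometry used while dumping: the number of RX queues
 * and the ring size and cluster size of the first free list.
 */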
664894618825SMark Johnston static void
664994618825SMark Johnston iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
665094618825SMark Johnston {
665194618825SMark Johnston 	if_ctx_t ctx;
665294618825SMark Johnston 
665394618825SMark Johnston 	ctx = if_getsoftc(ifp);
665494618825SMark Johnston 	CTX_LOCK(ctx);
665594618825SMark Johnston 	*nrxr = NRXQSETS(ctx);
665694618825SMark Johnston 	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
665794618825SMark Johnston 	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
665894618825SMark Johnston 	CTX_UNLOCK(ctx);
665994618825SMark Johnston }
666094618825SMark Johnston 
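/*
 * Handle a netdump state change.  When dumping starts, cache the mbuf
 * cluster zone for each free list and disable TX batching so queued
 * packets are handed to the hardware immediately.
 */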
666194618825SMark Johnston static void
666294618825SMark Johnston iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
666394618825SMark Johnston {
666494618825SMark Johnston 	if_ctx_t ctx;
666594618825SMark Johnston 	if_softc_ctx_t scctx;
666694618825SMark Johnston 	iflib_fl_t fl;
666794618825SMark Johnston 	iflib_rxq_t rxq;
666894618825SMark Johnston 	int i, j;
666994618825SMark Johnston 
667094618825SMark Johnston 	ctx = if_getsoftc(ifp);
667194618825SMark Johnston 	scctx = &ctx->ifc_softc_ctx;
667294618825SMark Johnston 
667394618825SMark Johnston 	switch (event) {
667494618825SMark Johnston 	case NETDUMP_START:
667594618825SMark Johnston 		for (i = 0; i < scctx->isc_nrxqsets; i++) {
667694618825SMark Johnston 			rxq = &ctx->ifc_rxqs[i];
667794618825SMark Johnston 			for (j = 0; j < rxq->ifr_nfl; j++) {
667894618825SMark Johnston 				fl = rxq->ifr_fl;
667994618825SMark Johnston 				fl = &rxq->ifr_fl[j];
668094618825SMark Johnston 			}
668194618825SMark Johnston 		}
668294618825SMark Johnston 		iflib_no_tx_batch = 1;
668394618825SMark Johnston 		break;
668494618825SMark Johnston 	default:
668594618825SMark Johnston 		break;
668694618825SMark Johnston 	}
668794618825SMark Johnston }
668894618825SMark Johnston 
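/*
 * Transmit a single packet over the first TX queue.  Netdump runs in a
 * polled context, so the doorbell is rung directly after the packet is
 * encapsulated.  Fails with EBUSY unless the interface is running and not
 * marked OACTIVE.
 */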
668994618825SMark Johnston static int
669094618825SMark Johnston iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
669194618825SMark Johnston {
669294618825SMark Johnston 	if_ctx_t ctx;
669394618825SMark Johnston 	iflib_txq_t txq;
669494618825SMark Johnston 	int error;
669594618825SMark Johnston 
669694618825SMark Johnston 	ctx = if_getsoftc(ifp);
669794618825SMark Johnston 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
669894618825SMark Johnston 	    IFF_DRV_RUNNING)
669994618825SMark Johnston 		return (EBUSY);
670094618825SMark Johnston 
670194618825SMark Johnston 	txq = &ctx->ifc_txqs[0];
670294618825SMark Johnston 	error = iflib_encap(txq, &m);
670394618825SMark Johnston 	if (error == 0)
670494618825SMark Johnston 		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
670594618825SMark Johnston 	return (error);
670694618825SMark Johnston }
670794618825SMark Johnston 
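/*
 * Poll the interface on behalf of netdump: reclaim completed descriptors
 * on the first TX queue and process a small fixed budget of received
 * packets on every RX queue.
 */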
670894618825SMark Johnston static int
670994618825SMark Johnston iflib_netdump_poll(struct ifnet *ifp, int count)
671094618825SMark Johnston {
671194618825SMark Johnston 	if_ctx_t ctx;
671294618825SMark Johnston 	if_softc_ctx_t scctx;
671394618825SMark Johnston 	iflib_txq_t txq;
671494618825SMark Johnston 	int i;
671594618825SMark Johnston 
671694618825SMark Johnston 	ctx = if_getsoftc(ifp);
671794618825SMark Johnston 	scctx = &ctx->ifc_softc_ctx;
671894618825SMark Johnston 
671994618825SMark Johnston 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
672094618825SMark Johnston 	    IFF_DRV_RUNNING)
672194618825SMark Johnston 		return (EBUSY);
672294618825SMark Johnston 
672394618825SMark Johnston 	txq = &ctx->ifc_txqs[0];
672494618825SMark Johnston 	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
672594618825SMark Johnston 
672694618825SMark Johnston 	for (i = 0; i < scctx->isc_nrxqsets; i++)
672794618825SMark Johnston 		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
672894618825SMark Johnston 	return (0);
672994618825SMark Johnston }
673094618825SMark Johnston #endif /* NETDUMP */
6731