xref: /freebsd/sys/net/iflib.c (revision 9e9b738ac5dc5b5bca1a4de3b03fd4d072261adc)
14c7070dbSScott Long /*-
27b610b60SSean Bruno  * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
34c7070dbSScott Long  * All rights reserved.
44c7070dbSScott Long  *
54c7070dbSScott Long  * Redistribution and use in source and binary forms, with or without
64c7070dbSScott Long  * modification, are permitted provided that the following conditions are met:
74c7070dbSScott Long  *
84c7070dbSScott Long  *  1. Redistributions of source code must retain the above copyright notice,
94c7070dbSScott Long  *     this list of conditions and the following disclaimer.
104c7070dbSScott Long  *
114c7070dbSScott Long  *  2. Neither the name of Matthew Macy nor the names of its
124c7070dbSScott Long  *     contributors may be used to endorse or promote products derived from
134c7070dbSScott Long  *     this software without specific prior written permission.
144c7070dbSScott Long  *
154c7070dbSScott Long  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
164c7070dbSScott Long  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
174c7070dbSScott Long  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
184c7070dbSScott Long  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
194c7070dbSScott Long  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
204c7070dbSScott Long  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
214c7070dbSScott Long  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
224c7070dbSScott Long  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
234c7070dbSScott Long  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
244c7070dbSScott Long  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
254c7070dbSScott Long  * POSSIBILITY OF SUCH DAMAGE.
264c7070dbSScott Long  */
274c7070dbSScott Long 
284c7070dbSScott Long #include <sys/cdefs.h>
294c7070dbSScott Long __FBSDID("$FreeBSD$");
304c7070dbSScott Long 
31aaeb188aSBjoern A. Zeeb #include "opt_inet.h"
32aaeb188aSBjoern A. Zeeb #include "opt_inet6.h"
33aaeb188aSBjoern A. Zeeb #include "opt_acpi.h"
34b103855eSStephen Hurd #include "opt_sched.h"
35aaeb188aSBjoern A. Zeeb 
364c7070dbSScott Long #include <sys/param.h>
374c7070dbSScott Long #include <sys/types.h>
384c7070dbSScott Long #include <sys/bus.h>
394c7070dbSScott Long #include <sys/eventhandler.h>
404c7070dbSScott Long #include <sys/kernel.h>
414c7070dbSScott Long #include <sys/lock.h>
424c7070dbSScott Long #include <sys/mutex.h>
434c7070dbSScott Long #include <sys/module.h>
444c7070dbSScott Long #include <sys/kobj.h>
454c7070dbSScott Long #include <sys/rman.h>
464c7070dbSScott Long #include <sys/sbuf.h>
474c7070dbSScott Long #include <sys/smp.h>
484c7070dbSScott Long #include <sys/socket.h>
4909f6ff4fSMatt Macy #include <sys/sockio.h>
504c7070dbSScott Long #include <sys/sysctl.h>
514c7070dbSScott Long #include <sys/syslog.h>
524c7070dbSScott Long #include <sys/taskqueue.h>
5323ac9029SStephen Hurd #include <sys/limits.h>
544c7070dbSScott Long 
554c7070dbSScott Long #include <net/if.h>
564c7070dbSScott Long #include <net/if_var.h>
574c7070dbSScott Long #include <net/if_types.h>
584c7070dbSScott Long #include <net/if_media.h>
594c7070dbSScott Long #include <net/bpf.h>
604c7070dbSScott Long #include <net/ethernet.h>
614c7070dbSScott Long #include <net/mp_ring.h>
627790c8c1SConrad Meyer #include <net/debugnet.h>
636d49b41eSAndrew Gallatin #include <net/pfil.h>
6435e4e998SStephen Hurd #include <net/vnet.h>
654c7070dbSScott Long 
664c7070dbSScott Long #include <netinet/in.h>
674c7070dbSScott Long #include <netinet/in_pcb.h>
684c7070dbSScott Long #include <netinet/tcp_lro.h>
694c7070dbSScott Long #include <netinet/in_systm.h>
704c7070dbSScott Long #include <netinet/if_ether.h>
714c7070dbSScott Long #include <netinet/ip.h>
724c7070dbSScott Long #include <netinet/ip6.h>
734c7070dbSScott Long #include <netinet/tcp.h>
7435e4e998SStephen Hurd #include <netinet/ip_var.h>
7535e4e998SStephen Hurd #include <netinet6/ip6_var.h>
764c7070dbSScott Long 
774c7070dbSScott Long #include <machine/bus.h>
784c7070dbSScott Long #include <machine/in_cksum.h>
794c7070dbSScott Long 
804c7070dbSScott Long #include <vm/vm.h>
814c7070dbSScott Long #include <vm/pmap.h>
824c7070dbSScott Long 
834c7070dbSScott Long #include <dev/led/led.h>
844c7070dbSScott Long #include <dev/pci/pcireg.h>
854c7070dbSScott Long #include <dev/pci/pcivar.h>
864c7070dbSScott Long #include <dev/pci/pci_private.h>
874c7070dbSScott Long 
884c7070dbSScott Long #include <net/iflib.h>
8909f6ff4fSMatt Macy #include <net/iflib_private.h>
904c7070dbSScott Long 
914c7070dbSScott Long #include "ifdi_if.h"
924c7070dbSScott Long 
9377c1fcecSEric Joyner #ifdef PCI_IOV
9477c1fcecSEric Joyner #include <dev/pci/pci_iov.h>
9577c1fcecSEric Joyner #endif
9677c1fcecSEric Joyner 
9787890dbaSSean Bruno #include <sys/bitstring.h>
984c7070dbSScott Long /*
9995246abbSSean Bruno  * Enable accounting of every mbuf as it comes into and goes out of
10095246abbSSean Bruno  * iflib's software descriptor references
1014c7070dbSScott Long  */
1024c7070dbSScott Long #define MEMORY_LOGGING 0
1034c7070dbSScott Long /*
1044c7070dbSScott Long  * Enable mbuf vectors for compressing long mbuf chains
1054c7070dbSScott Long  */
1064c7070dbSScott Long 
1074c7070dbSScott Long /*
1084c7070dbSScott Long  * NB:
1094c7070dbSScott Long  * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
1104c7070dbSScott Long  *   we prefetch needs to be determined by the time spent in m_free vis-à-vis
1114c7070dbSScott Long  *   the cost of a prefetch. This will of course vary based on the workload:
1124c7070dbSScott Long  *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
1134c7070dbSScott Long  *        is quite expensive, thus suggesting very little prefetch.
1144c7070dbSScott Long  *      - small packet forwarding which is just returning a single mbuf to
1154c7070dbSScott Long  *        UMA will typically be very fast vis-à-vis the cost of a memory
1164c7070dbSScott Long  *        access.
1174c7070dbSScott Long  */
1184c7070dbSScott Long 
1194c7070dbSScott Long 
1204c7070dbSScott Long /*
1214c7070dbSScott Long  * File organization:
1224c7070dbSScott Long  *  - private structures
1234c7070dbSScott Long  *  - iflib private utility functions
1244c7070dbSScott Long  *  - ifnet functions
1254c7070dbSScott Long  *  - vlan registry and other exported functions
1264c7070dbSScott Long  *  - iflib public core functions
1274c7070dbSScott Long  *
1284c7070dbSScott Long  *
1294c7070dbSScott Long  */
13009f6ff4fSMatt Macy MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");
1314c7070dbSScott Long 
132fb1a29b4SHans Petter Selasky #define	IFLIB_RXEOF_MORE (1U << 0)
133fb1a29b4SHans Petter Selasky #define	IFLIB_RXEOF_EMPTY (2U << 0)
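/*
 * Editor's note: these bits are returned by the RX end-of-frame path;
 * presumably MORE means the budget was exhausted with packets still
 * pending and EMPTY means a free list ran out of buffers, letting the
 * caller reschedule the RX task or kick the refill watchdog.
 */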
134fb1a29b4SHans Petter Selasky 
1354c7070dbSScott Long struct iflib_txq;
1364c7070dbSScott Long typedef struct iflib_txq *iflib_txq_t;
1374c7070dbSScott Long struct iflib_rxq;
1384c7070dbSScott Long typedef struct iflib_rxq *iflib_rxq_t;
1394c7070dbSScott Long struct iflib_fl;
1404c7070dbSScott Long typedef struct iflib_fl *iflib_fl_t;
1414c7070dbSScott Long 
1424ecb427aSSean Bruno struct iflib_ctx;
1434ecb427aSSean Bruno 
1442d873474SStephen Hurd static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
145dd7fbcf1SStephen Hurd static void iflib_timer(void *arg);
1462d873474SStephen Hurd 
1474c7070dbSScott Long typedef struct iflib_filter_info {
1484c7070dbSScott Long 	driver_filter_t *ifi_filter;
1494c7070dbSScott Long 	void *ifi_filter_arg;
1504c7070dbSScott Long 	struct grouptask *ifi_task;
15195246abbSSean Bruno 	void *ifi_ctx;
1524c7070dbSScott Long } *iflib_filter_info_t;
1534c7070dbSScott Long 
1544c7070dbSScott Long struct iflib_ctx {
1554c7070dbSScott Long 	KOBJ_FIELDS;
1564c7070dbSScott Long 	/*
1574c7070dbSScott Long 	 * Pointer to hardware driver's softc
1584c7070dbSScott Long 	 */
1594c7070dbSScott Long 	void *ifc_softc;
1604c7070dbSScott Long 	device_t ifc_dev;
1614c7070dbSScott Long 	if_t ifc_ifp;
1624c7070dbSScott Long 
1634c7070dbSScott Long 	cpuset_t ifc_cpus;
1644c7070dbSScott Long 	if_shared_ctx_t ifc_sctx;
1654c7070dbSScott Long 	struct if_softc_ctx ifc_softc_ctx;
1664c7070dbSScott Long 
167aa8a24d3SStephen Hurd 	struct sx ifc_ctx_sx;
1687b610b60SSean Bruno 	struct mtx ifc_state_mtx;
1694c7070dbSScott Long 
1704c7070dbSScott Long 	iflib_txq_t ifc_txqs;
1714c7070dbSScott Long 	iflib_rxq_t ifc_rxqs;
1724c7070dbSScott Long 	uint32_t ifc_if_flags;
1734c7070dbSScott Long 	uint32_t ifc_flags;
1744c7070dbSScott Long 	uint32_t ifc_max_fl_buf_size;
1751b9d9394SEric Joyner 	uint32_t ifc_rx_mbuf_sz;
1764c7070dbSScott Long 
1774c7070dbSScott Long 	int ifc_link_state;
1784c7070dbSScott Long 	int ifc_watchdog_events;
1794c7070dbSScott Long 	struct cdev *ifc_led_dev;
1804c7070dbSScott Long 	struct resource *ifc_msix_mem;
1814c7070dbSScott Long 
1824c7070dbSScott Long 	struct if_irq ifc_legacy_irq;
1834c7070dbSScott Long 	struct grouptask ifc_admin_task;
1844c7070dbSScott Long 	struct grouptask ifc_vflr_task;
1854c7070dbSScott Long 	struct iflib_filter_info ifc_filter_info;
1864c7070dbSScott Long 	struct ifmedia	ifc_media;
187e2621d96SMatt Macy 	struct ifmedia	*ifc_mediap;
1884c7070dbSScott Long 
1894c7070dbSScott Long 	struct sysctl_oid *ifc_sysctl_node;
1904c7070dbSScott Long 	uint16_t ifc_sysctl_ntxqs;
1914c7070dbSScott Long 	uint16_t ifc_sysctl_nrxqs;
19223ac9029SStephen Hurd 	uint16_t ifc_sysctl_qs_eq_override;
193f4d2154eSStephen Hurd 	uint16_t ifc_sysctl_rx_budget;
194fe51d4cdSStephen Hurd 	uint16_t ifc_sysctl_tx_abdicate;
195f154ece0SStephen Hurd 	uint16_t ifc_sysctl_core_offset;
196f154ece0SStephen Hurd #define	CORE_OFFSET_UNSPECIFIED	0xffff
197f154ece0SStephen Hurd 	uint8_t  ifc_sysctl_separate_txrx;
19823ac9029SStephen Hurd 
19995246abbSSean Bruno 	qidx_t ifc_sysctl_ntxds[8];
20095246abbSSean Bruno 	qidx_t ifc_sysctl_nrxds[8];
2014c7070dbSScott Long 	struct if_txrx ifc_txrx;
2024c7070dbSScott Long #define isc_txd_encap  ifc_txrx.ift_txd_encap
2034c7070dbSScott Long #define isc_txd_flush  ifc_txrx.ift_txd_flush
2044c7070dbSScott Long #define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
2054c7070dbSScott Long #define isc_rxd_available ifc_txrx.ift_rxd_available
2064c7070dbSScott Long #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
2074c7070dbSScott Long #define isc_rxd_refill ifc_txrx.ift_rxd_refill
2084c7070dbSScott Long #define isc_rxd_flush ifc_txrx.ift_rxd_flush
2094c7070dbSScott Long #define isc_rxd_refill ifc_txrx.ift_rxd_refill
2104c7070dbSScott Long #define isc_rxd_refill ifc_txrx.ift_rxd_refill
2114c7070dbSScott Long #define isc_legacy_intr ifc_txrx.ift_legacy_intr
2124c7070dbSScott Long 	eventhandler_tag ifc_vlan_attach_event;
2134c7070dbSScott Long 	eventhandler_tag ifc_vlan_detach_event;
2141fd8c72cSKyle Evans 	struct ether_addr ifc_mac;
2154c7070dbSScott Long };
2164c7070dbSScott Long 
2174c7070dbSScott Long void *
2184c7070dbSScott Long iflib_get_softc(if_ctx_t ctx)
2194c7070dbSScott Long {
2204c7070dbSScott Long 
2214c7070dbSScott Long 	return (ctx->ifc_softc);
2224c7070dbSScott Long }
2234c7070dbSScott Long 
2244c7070dbSScott Long device_t
2254c7070dbSScott Long iflib_get_dev(if_ctx_t ctx)
2264c7070dbSScott Long {
2274c7070dbSScott Long 
2284c7070dbSScott Long 	return (ctx->ifc_dev);
2294c7070dbSScott Long }
2304c7070dbSScott Long 
2314c7070dbSScott Long if_t
2324c7070dbSScott Long iflib_get_ifp(if_ctx_t ctx)
2334c7070dbSScott Long {
2344c7070dbSScott Long 
2354c7070dbSScott Long 	return (ctx->ifc_ifp);
2364c7070dbSScott Long }
2374c7070dbSScott Long 
2384c7070dbSScott Long struct ifmedia *
2394c7070dbSScott Long iflib_get_media(if_ctx_t ctx)
2404c7070dbSScott Long {
2414c7070dbSScott Long 
242e2621d96SMatt Macy 	return (ctx->ifc_mediap);
2434c7070dbSScott Long }
2444c7070dbSScott Long 
24509f6ff4fSMatt Macy uint32_t
24609f6ff4fSMatt Macy iflib_get_flags(if_ctx_t ctx)
24709f6ff4fSMatt Macy {
24809f6ff4fSMatt Macy 	return (ctx->ifc_flags);
24909f6ff4fSMatt Macy }
25009f6ff4fSMatt Macy 
25109f6ff4fSMatt Macy void
2524c7070dbSScott Long iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
2534c7070dbSScott Long {
2544c7070dbSScott Long 
2551fd8c72cSKyle Evans 	bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
2564c7070dbSScott Long }
2574c7070dbSScott Long 
2584c7070dbSScott Long if_softc_ctx_t
2594c7070dbSScott Long iflib_get_softc_ctx(if_ctx_t ctx)
2604c7070dbSScott Long {
2614c7070dbSScott Long 
2624c7070dbSScott Long 	return (&ctx->ifc_softc_ctx);
2634c7070dbSScott Long }
2644c7070dbSScott Long 
2654c7070dbSScott Long if_shared_ctx_t
2664c7070dbSScott Long iflib_get_sctx(if_ctx_t ctx)
2674c7070dbSScott Long {
2684c7070dbSScott Long 
2694c7070dbSScott Long 	return (ctx->ifc_sctx);
2704c7070dbSScott Long }
2714c7070dbSScott Long 
27295246abbSSean Bruno #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
2734c7070dbSScott Long #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
2745e888388SSean Bruno #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
2754c7070dbSScott Long 
2764c7070dbSScott Long #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
2774c7070dbSScott Long #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)
2784c7070dbSScott Long 
279e035717eSSean Bruno typedef struct iflib_sw_rx_desc_array {
280e035717eSSean Bruno 	bus_dmamap_t	*ifsd_map;         /* bus_dma maps for packet */
281e035717eSSean Bruno 	struct mbuf	**ifsd_m;           /* pkthdr mbufs */
282e035717eSSean Bruno 	caddr_t		*ifsd_cl;          /* direct cluster pointer for rx */
283fbec776dSAndrew Gallatin 	bus_addr_t	*ifsd_ba;          /* bus addr of cluster for rx */
284e035717eSSean Bruno } iflib_rxsd_array_t;
2854c7070dbSScott Long 
2864c7070dbSScott Long typedef struct iflib_sw_tx_desc_array {
2874c7070dbSScott Long 	bus_dmamap_t    *ifsd_map;         /* bus_dma maps for packet */
2888a04b53dSKonstantin Belousov 	bus_dmamap_t	*ifsd_tso_map;     /* bus_dma maps for TSO packet */
2894c7070dbSScott Long 	struct mbuf    **ifsd_m;           /* pkthdr mbufs */
29095246abbSSean Bruno } if_txsd_vec_t;
2914c7070dbSScott Long 
2924c7070dbSScott Long /* magic number that should be high enough for any hardware */
2934c7070dbSScott Long #define IFLIB_MAX_TX_SEGS		128
29495246abbSSean Bruno #define IFLIB_RX_COPY_THRESH		128
2954c7070dbSScott Long #define IFLIB_MAX_RX_REFRESH		32
29695246abbSSean Bruno /* The minimum descriptors per second before we start coalescing */
29795246abbSSean Bruno #define IFLIB_MIN_DESC_SEC		16384
29895246abbSSean Bruno #define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
2994c7070dbSScott Long #define IFLIB_QUEUE_IDLE		0
3004c7070dbSScott Long #define IFLIB_QUEUE_HUNG		1
3014c7070dbSScott Long #define IFLIB_QUEUE_WORKING		2
30295246abbSSean Bruno /* maximum number of txqs that can share an rx interrupt */
30395246abbSSean Bruno #define IFLIB_MAX_TX_SHARED_INTR	4
3044c7070dbSScott Long 
30595246abbSSean Bruno /* this should really scale with ring size - this is a fairly arbitrary value */
30695246abbSSean Bruno #define TX_BATCH_SIZE			32
3074c7070dbSScott Long 
3084c7070dbSScott Long #define IFLIB_RESTART_BUDGET		8
3094c7070dbSScott Long 
3104c7070dbSScott Long #define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
3114c7070dbSScott Long 				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
3124c7070dbSScott Long 				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
3131722eeacSMarius Strobl 
3144c7070dbSScott Long struct iflib_txq {
31595246abbSSean Bruno 	qidx_t		ift_in_use;
31695246abbSSean Bruno 	qidx_t		ift_cidx;
31795246abbSSean Bruno 	qidx_t		ift_cidx_processed;
31895246abbSSean Bruno 	qidx_t		ift_pidx;
3194c7070dbSScott Long 	uint8_t		ift_gen;
32023ac9029SStephen Hurd 	uint8_t		ift_br_offset;
32195246abbSSean Bruno 	uint16_t	ift_npending;
32295246abbSSean Bruno 	uint16_t	ift_db_pending;
32395246abbSSean Bruno 	uint16_t	ift_rs_pending;
3244c7070dbSScott Long 	/* implicit pad */
32595246abbSSean Bruno 	uint8_t		ift_txd_size[8];
3264c7070dbSScott Long 	uint64_t	ift_processed;
3274c7070dbSScott Long 	uint64_t	ift_cleaned;
32895246abbSSean Bruno 	uint64_t	ift_cleaned_prev;
3294c7070dbSScott Long #if MEMORY_LOGGING
3304c7070dbSScott Long 	uint64_t	ift_enqueued;
3314c7070dbSScott Long 	uint64_t	ift_dequeued;
3324c7070dbSScott Long #endif
3334c7070dbSScott Long 	uint64_t	ift_no_tx_dma_setup;
3344c7070dbSScott Long 	uint64_t	ift_no_desc_avail;
3354c7070dbSScott Long 	uint64_t	ift_mbuf_defrag_failed;
3364c7070dbSScott Long 	uint64_t	ift_mbuf_defrag;
3374c7070dbSScott Long 	uint64_t	ift_map_failed;
3384c7070dbSScott Long 	uint64_t	ift_txd_encap_efbig;
3394c7070dbSScott Long 	uint64_t	ift_pullups;
340dd7fbcf1SStephen Hurd 	uint64_t	ift_last_timer_tick;
3414c7070dbSScott Long 
3424c7070dbSScott Long 	struct mtx	ift_mtx;
3434c7070dbSScott Long 	struct mtx	ift_db_mtx;
3444c7070dbSScott Long 
3454c7070dbSScott Long 	/* constant values */
3464c7070dbSScott Long 	if_ctx_t	ift_ctx;
34795246abbSSean Bruno 	struct ifmp_ring        *ift_br;
3484c7070dbSScott Long 	struct grouptask	ift_task;
34995246abbSSean Bruno 	qidx_t		ift_size;
3504c7070dbSScott Long 	uint16_t	ift_id;
3514c7070dbSScott Long 	struct callout	ift_timer;
3524c7070dbSScott Long 
35395246abbSSean Bruno 	if_txsd_vec_t	ift_sds;
3544c7070dbSScott Long 	uint8_t		ift_qstatus;
3554c7070dbSScott Long 	uint8_t		ift_closed;
35695246abbSSean Bruno 	uint8_t		ift_update_freq;
3574c7070dbSScott Long 	struct iflib_filter_info ift_filter_info;
358bfce461eSMarius Strobl 	bus_dma_tag_t	ift_buf_tag;
359bfce461eSMarius Strobl 	bus_dma_tag_t	ift_tso_buf_tag;
3604c7070dbSScott Long 	iflib_dma_info_t	ift_ifdi;
3614c7070dbSScott Long #define MTX_NAME_LEN 16
3624c7070dbSScott Long 	char                    ift_mtx_name[MTX_NAME_LEN];
3634c7070dbSScott Long 	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]  __aligned(CACHE_LINE_SIZE);
3641248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
3651248952aSSean Bruno 	uint64_t ift_cpu_exec_count[256];
3661248952aSSean Bruno #endif
3674c7070dbSScott Long } __aligned(CACHE_LINE_SIZE);
3684c7070dbSScott Long 
3694c7070dbSScott Long struct iflib_fl {
37095246abbSSean Bruno 	qidx_t		ifl_cidx;
37195246abbSSean Bruno 	qidx_t		ifl_pidx;
37295246abbSSean Bruno 	qidx_t		ifl_credits;
3734c7070dbSScott Long 	uint8_t		ifl_gen;
37495246abbSSean Bruno 	uint8_t		ifl_rxd_size;
3754c7070dbSScott Long #if MEMORY_LOGGING
3764c7070dbSScott Long 	uint64_t	ifl_m_enqueued;
3774c7070dbSScott Long 	uint64_t	ifl_m_dequeued;
3784c7070dbSScott Long 	uint64_t	ifl_cl_enqueued;
3794c7070dbSScott Long 	uint64_t	ifl_cl_dequeued;
3804c7070dbSScott Long #endif
3814c7070dbSScott Long 	/* implicit pad */
38287890dbaSSean Bruno 	bitstr_t 	*ifl_rx_bitmap;
38387890dbaSSean Bruno 	qidx_t		ifl_fragidx;
3844c7070dbSScott Long 	/* constant */
38595246abbSSean Bruno 	qidx_t		ifl_size;
3864c7070dbSScott Long 	uint16_t	ifl_buf_size;
3874c7070dbSScott Long 	uint16_t	ifl_cltype;
3884c7070dbSScott Long 	uma_zone_t	ifl_zone;
389e035717eSSean Bruno 	iflib_rxsd_array_t	ifl_sds;
3904c7070dbSScott Long 	iflib_rxq_t	ifl_rxq;
3914c7070dbSScott Long 	uint8_t		ifl_id;
392bfce461eSMarius Strobl 	bus_dma_tag_t	ifl_buf_tag;
3934c7070dbSScott Long 	iflib_dma_info_t	ifl_ifdi;
3944c7070dbSScott Long 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
3954c7070dbSScott Long 	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
39695246abbSSean Bruno 	qidx_t	ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
3974c7070dbSScott Long }  __aligned(CACHE_LINE_SIZE);
3984c7070dbSScott Long 
39995246abbSSean Bruno static inline qidx_t
40095246abbSSean Bruno get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
4014c7070dbSScott Long {
40295246abbSSean Bruno 	qidx_t used;
4034c7070dbSScott Long 
4044c7070dbSScott Long 	if (pidx > cidx)
4054c7070dbSScott Long 		used = pidx - cidx;
4064c7070dbSScott Long 	else if (pidx < cidx)
4074c7070dbSScott Long 		used = size - cidx + pidx;
4084c7070dbSScott Long 	else if (gen == 0 && pidx == cidx)
4094c7070dbSScott Long 		used = 0;
4104c7070dbSScott Long 	else if (gen == 1 && pidx == cidx)
4114c7070dbSScott Long 		used = size;
4124c7070dbSScott Long 	else
4134c7070dbSScott Long 		panic("bad state");
4144c7070dbSScott Long 
4154c7070dbSScott Long 	return (used);
4164c7070dbSScott Long }
4174c7070dbSScott Long 
4184c7070dbSScott Long #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
4194c7070dbSScott Long 
4204c7070dbSScott Long #define IDXDIFF(head, tail, wrap) \
4214c7070dbSScott Long 	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
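/*
 * Worked example (editor's note): with ift_size = 1024, ift_cidx = 1000
 * and ift_pidx = 8 the producer has wrapped, so get_inuse() returns
 * 1024 - 1000 + 8 = 32 in-use slots, TXQ_AVAIL() reports 992 free
 * descriptors, and IDXDIFF(8, 1000, 1024) yields the same 32.
 */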
4224c7070dbSScott Long 
4234c7070dbSScott Long struct iflib_rxq {
4244c7070dbSScott Long 	if_ctx_t	ifr_ctx;
4254c7070dbSScott Long 	iflib_fl_t	ifr_fl;
4264c7070dbSScott Long 	uint64_t	ifr_rx_irq;
4276d49b41eSAndrew Gallatin 	struct pfil_head	*pfil;
4281722eeacSMarius Strobl 	/*
4291722eeacSMarius Strobl 	 * If there is a separate completion queue (IFLIB_HAS_RXCQ), this is
4301722eeacSMarius Strobl 	 * the completion queue consumer index.  Otherwise it's unused.
4311722eeacSMarius Strobl 	 */
4321722eeacSMarius Strobl 	qidx_t		ifr_cq_cidx;
4334c7070dbSScott Long 	uint16_t	ifr_id;
4344c7070dbSScott Long 	uint8_t		ifr_nfl;
43595246abbSSean Bruno 	uint8_t		ifr_ntxqirq;
43695246abbSSean Bruno 	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
4371722eeacSMarius Strobl 	uint8_t		ifr_fl_offset;
4384c7070dbSScott Long 	struct lro_ctrl			ifr_lc;
4394c7070dbSScott Long 	struct grouptask        ifr_task;
440fb1a29b4SHans Petter Selasky 	struct callout		ifr_watchdog;
4414c7070dbSScott Long 	struct iflib_filter_info ifr_filter_info;
4424c7070dbSScott Long 	iflib_dma_info_t		ifr_ifdi;
443ab2e3f79SStephen Hurd 
4444c7070dbSScott Long 	/* dynamically allocate if any drivers need a value substantially larger than this */
4454c7070dbSScott Long 	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
4461248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
4471248952aSSean Bruno 	uint64_t ifr_cpu_exec_count[256];
4481248952aSSean Bruno #endif
4494c7070dbSScott Long }  __aligned(CACHE_LINE_SIZE);
4504c7070dbSScott Long 
45195246abbSSean Bruno typedef struct if_rxsd {
45295246abbSSean Bruno 	caddr_t *ifsd_cl;
45395246abbSSean Bruno 	iflib_fl_t ifsd_fl;
45495246abbSSean Bruno 	qidx_t ifsd_cidx;
45595246abbSSean Bruno } *if_rxsd_t;
45695246abbSSean Bruno 
45795246abbSSean Bruno /* multiple of word size */
45895246abbSSean Bruno #ifdef __LP64__
459ab2e3f79SStephen Hurd #define PKT_INFO_SIZE	6
46095246abbSSean Bruno #define RXD_INFO_SIZE	5
46195246abbSSean Bruno #define PKT_TYPE uint64_t
46295246abbSSean Bruno #else
463ab2e3f79SStephen Hurd #define PKT_INFO_SIZE	11
46495246abbSSean Bruno #define RXD_INFO_SIZE	8
46595246abbSSean Bruno #define PKT_TYPE uint32_t
46695246abbSSean Bruno #endif
46795246abbSSean Bruno #define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
46895246abbSSean Bruno #define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
46995246abbSSean Bruno 
47095246abbSSean Bruno typedef struct if_pkt_info_pad {
47195246abbSSean Bruno 	PKT_TYPE pkt_val[PKT_INFO_SIZE];
47295246abbSSean Bruno } *if_pkt_info_pad_t;
47395246abbSSean Bruno typedef struct if_rxd_info_pad {
47495246abbSSean Bruno 	PKT_TYPE rxd_val[RXD_INFO_SIZE];
47595246abbSSean Bruno } *if_rxd_info_pad_t;
47695246abbSSean Bruno 
47795246abbSSean Bruno CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
47895246abbSSean Bruno CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
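/*
 * Editor's note: the compile-time asserts above guarantee that the *_pad
 * overlays are exactly as large as the real structures, so the word-wise
 * zeroing helpers below cover every byte without a memset() call.  As a
 * worked example, on LP64 RXD_INFO_SIZE is 5, so RXD_LOOP_BOUND is
 * (5 / 4) * 4 = 4: rxd_info_zero() clears four words in its unrolled
 * loop and the remaining fifth word in the __LP64__ tail.
 */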
47995246abbSSean Bruno 
48095246abbSSean Bruno 
48195246abbSSean Bruno static inline void
48295246abbSSean Bruno pkt_info_zero(if_pkt_info_t pi)
48395246abbSSean Bruno {
48495246abbSSean Bruno 	if_pkt_info_pad_t pi_pad;
48595246abbSSean Bruno 
48695246abbSSean Bruno 	pi_pad = (if_pkt_info_pad_t)pi;
48795246abbSSean Bruno 	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
48895246abbSSean Bruno 	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
48995246abbSSean Bruno #ifndef __LP64__
490ab2e3f79SStephen Hurd 	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
491ab2e3f79SStephen Hurd 	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
49295246abbSSean Bruno #endif
49395246abbSSean Bruno }
49495246abbSSean Bruno 
49509f6ff4fSMatt Macy static device_method_t iflib_pseudo_methods[] = {
49609f6ff4fSMatt Macy 	DEVMETHOD(device_attach, noop_attach),
49709f6ff4fSMatt Macy 	DEVMETHOD(device_detach, iflib_pseudo_detach),
49809f6ff4fSMatt Macy 	DEVMETHOD_END
49909f6ff4fSMatt Macy };
50009f6ff4fSMatt Macy 
50109f6ff4fSMatt Macy driver_t iflib_pseudodriver = {
50209f6ff4fSMatt Macy 	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
50309f6ff4fSMatt Macy };
50409f6ff4fSMatt Macy 
50595246abbSSean Bruno static inline void
50695246abbSSean Bruno rxd_info_zero(if_rxd_info_t ri)
50795246abbSSean Bruno {
50895246abbSSean Bruno 	if_rxd_info_pad_t ri_pad;
50995246abbSSean Bruno 	int i;
51095246abbSSean Bruno 
51195246abbSSean Bruno 	ri_pad = (if_rxd_info_pad_t)ri;
51295246abbSSean Bruno 	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
51395246abbSSean Bruno 		ri_pad->rxd_val[i] = 0;
51495246abbSSean Bruno 		ri_pad->rxd_val[i+1] = 0;
51595246abbSSean Bruno 		ri_pad->rxd_val[i+2] = 0;
51695246abbSSean Bruno 		ri_pad->rxd_val[i+3] = 0;
51795246abbSSean Bruno 	}
51895246abbSSean Bruno #ifdef __LP64__
51995246abbSSean Bruno 	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
52095246abbSSean Bruno #endif
52195246abbSSean Bruno }
52295246abbSSean Bruno 
5234c7070dbSScott Long /*
5244c7070dbSScott Long  * Only allow a single packet to take up at most 1/nth of the tx ring
5254c7070dbSScott Long  */
5264c7070dbSScott Long #define MAX_SINGLE_PACKET_FRACTION 12
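/*
 * Editor's note: as a worked example, with a 1024-descriptor TX ring this
 * fraction limits a single packet to 1024 / 12 = 85 descriptors.
 */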
5274c7070dbSScott Long #define IF_BAD_DMA (bus_addr_t)-1
5284c7070dbSScott Long 
5294c7070dbSScott Long #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
5304c7070dbSScott Long 
531aa8a24d3SStephen Hurd #define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
532aa8a24d3SStephen Hurd #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
533aa8a24d3SStephen Hurd #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
534aa8a24d3SStephen Hurd #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)
5354c7070dbSScott Long 
5367b610b60SSean Bruno #define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
5377b610b60SSean Bruno #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
5387b610b60SSean Bruno #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
5397b610b60SSean Bruno #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)
5407b610b60SSean Bruno 
5414c7070dbSScott Long #define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
5424c7070dbSScott Long #define CALLOUT_UNLOCK(txq) 	mtx_unlock(&txq->ift_mtx)
5434c7070dbSScott Long 
54477c1fcecSEric Joyner void
54577c1fcecSEric Joyner iflib_set_detach(if_ctx_t ctx)
54677c1fcecSEric Joyner {
54777c1fcecSEric Joyner 	STATE_LOCK(ctx);
54877c1fcecSEric Joyner 	ctx->ifc_flags |= IFC_IN_DETACH;
54977c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
55077c1fcecSEric Joyner }
5514c7070dbSScott Long 
5524c7070dbSScott Long /* Our boot-time initialization hook */
5534c7070dbSScott Long static int	iflib_module_event_handler(module_t, int, void *);
5544c7070dbSScott Long 
5554c7070dbSScott Long static moduledata_t iflib_moduledata = {
5564c7070dbSScott Long 	"iflib",
5574c7070dbSScott Long 	iflib_module_event_handler,
5584c7070dbSScott Long 	NULL
5594c7070dbSScott Long };
5604c7070dbSScott Long 
5614c7070dbSScott Long DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
5624c7070dbSScott Long MODULE_VERSION(iflib, 1);
5634c7070dbSScott Long 
5644c7070dbSScott Long MODULE_DEPEND(iflib, pci, 1, 1, 1);
5654c7070dbSScott Long MODULE_DEPEND(iflib, ether, 1, 1, 1);
5664c7070dbSScott Long 
567ab2e3f79SStephen Hurd TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
568ab2e3f79SStephen Hurd TASKQGROUP_DEFINE(if_config_tqg, 1, 1);
569ab2e3f79SStephen Hurd 
5704c7070dbSScott Long #ifndef IFLIB_DEBUG_COUNTERS
5714c7070dbSScott Long #ifdef INVARIANTS
5724c7070dbSScott Long #define IFLIB_DEBUG_COUNTERS 1
5734c7070dbSScott Long #else
5744c7070dbSScott Long #define IFLIB_DEBUG_COUNTERS 0
5754c7070dbSScott Long #endif /* !INVARIANTS */
5764c7070dbSScott Long #endif
5774c7070dbSScott Long 
5787029da5cSPawel Biernacki static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
579ab2e3f79SStephen Hurd     "iflib driver parameters");
580ab2e3f79SStephen Hurd 
5814c7070dbSScott Long /*
5824c7070dbSScott Long  * XXX need to ensure that this can't accidentally cause the head to be moved backwards
5834c7070dbSScott Long  */
5844c7070dbSScott Long static int iflib_min_tx_latency = 0;
5854c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
586da69b8f9SSean Bruno 		   &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
58795246abbSSean Bruno static int iflib_no_tx_batch = 0;
58895246abbSSean Bruno SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
58995246abbSSean Bruno 		   &iflib_no_tx_batch, 0, "disable transmit batching at the possible expense of throughput");
5904c7070dbSScott Long 
5914c7070dbSScott Long 
5924c7070dbSScott Long #if IFLIB_DEBUG_COUNTERS
5934c7070dbSScott Long 
5944c7070dbSScott Long static int iflib_tx_seen;
5954c7070dbSScott Long static int iflib_tx_sent;
5964c7070dbSScott Long static int iflib_tx_encap;
5974c7070dbSScott Long static int iflib_rx_allocs;
5984c7070dbSScott Long static int iflib_fl_refills;
5994c7070dbSScott Long static int iflib_fl_refills_large;
6004c7070dbSScott Long static int iflib_tx_frees;
6014c7070dbSScott Long 
6024c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
6031722eeacSMarius Strobl 		   &iflib_tx_seen, 0, "# TX mbufs seen");
6044c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
6051722eeacSMarius Strobl 		   &iflib_tx_sent, 0, "# TX mbufs sent");
6064c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
6071722eeacSMarius Strobl 		   &iflib_tx_encap, 0, "# TX mbufs encapped");
6084c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
6091722eeacSMarius Strobl 		   &iflib_tx_frees, 0, "# TX frees");
6104c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
6111722eeacSMarius Strobl 		   &iflib_rx_allocs, 0, "# RX allocations");
6124c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
6134c7070dbSScott Long 		   &iflib_fl_refills, 0, "# refills");
6144c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
6154c7070dbSScott Long 		   &iflib_fl_refills_large, 0, "# large refills");
6164c7070dbSScott Long 
6174c7070dbSScott Long 
6184c7070dbSScott Long static int iflib_txq_drain_flushing;
6194c7070dbSScott Long static int iflib_txq_drain_oactive;
6204c7070dbSScott Long static int iflib_txq_drain_notready;
6214c7070dbSScott Long 
6224c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
6234c7070dbSScott Long 		   &iflib_txq_drain_flushing, 0, "# drain flushes");
6244c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
6254c7070dbSScott Long 		   &iflib_txq_drain_oactive, 0, "# drain oactives");
6264c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
6274c7070dbSScott Long 		   &iflib_txq_drain_notready, 0, "# drain notready");
6284c7070dbSScott Long 
6294c7070dbSScott Long 
6304c7070dbSScott Long static int iflib_encap_load_mbuf_fail;
631d14c853bSStephen Hurd static int iflib_encap_pad_mbuf_fail;
6324c7070dbSScott Long static int iflib_encap_txq_avail_fail;
6334c7070dbSScott Long static int iflib_encap_txd_encap_fail;
6344c7070dbSScott Long 
6354c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
6364c7070dbSScott Long 		   &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
637d14c853bSStephen Hurd SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
638d14c853bSStephen Hurd 		   &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
6394c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
6404c7070dbSScott Long 		   &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
6414c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
6424c7070dbSScott Long 		   &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
6434c7070dbSScott Long 
6444c7070dbSScott Long static int iflib_task_fn_rxs;
6454c7070dbSScott Long static int iflib_rx_intr_enables;
6464c7070dbSScott Long static int iflib_fast_intrs;
6474c7070dbSScott Long static int iflib_rx_unavail;
6484c7070dbSScott Long static int iflib_rx_ctx_inactive;
6494c7070dbSScott Long static int iflib_rx_if_input;
6504c7070dbSScott Long static int iflib_rxd_flush;
6514c7070dbSScott Long 
6524c7070dbSScott Long static int iflib_verbose_debug;
6534c7070dbSScott Long 
6544c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
6554c7070dbSScott Long 		   &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
6564c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
6571722eeacSMarius Strobl 		   &iflib_rx_intr_enables, 0, "# RX intr enables");
6584c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
6594c7070dbSScott Long 		   &iflib_fast_intrs, 0, "# fast_intr calls");
6604c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
6614c7070dbSScott Long 		   &iflib_rx_unavail, 0, "# times rxeof called with no available data");
6624c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
6634c7070dbSScott Long 		   &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
6644c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
6654c7070dbSScott Long 		   &iflib_rx_if_input, 0, "# times rxeof called if_input");
6664c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
6674c7070dbSScott Long 	         &iflib_rxd_flush, 0, "# times rxd_flush called");
6684c7070dbSScott Long SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
6694c7070dbSScott Long 		   &iflib_verbose_debug, 0, "enable verbose debugging");
6704c7070dbSScott Long 
6714c7070dbSScott Long #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
672da69b8f9SSean Bruno static void
673da69b8f9SSean Bruno iflib_debug_reset(void)
674da69b8f9SSean Bruno {
675da69b8f9SSean Bruno 	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
676da69b8f9SSean Bruno 		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
677da69b8f9SSean Bruno 		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
67864e6fc13SStephen Hurd 		iflib_txq_drain_notready =
679d14c853bSStephen Hurd 		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
680d14c853bSStephen Hurd 		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
681d14c853bSStephen Hurd 		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
68264e6fc13SStephen Hurd 		iflib_rx_unavail =
68364e6fc13SStephen Hurd 		iflib_rx_ctx_inactive = iflib_rx_if_input =
6846d49b41eSAndrew Gallatin 		iflib_rxd_flush = 0;
685da69b8f9SSean Bruno }
6864c7070dbSScott Long 
6874c7070dbSScott Long #else
6884c7070dbSScott Long #define DBG_COUNTER_INC(name)
689da69b8f9SSean Bruno static void iflib_debug_reset(void) {}
6904c7070dbSScott Long #endif
6914c7070dbSScott Long 
6924c7070dbSScott Long #define IFLIB_DEBUG 0
6934c7070dbSScott Long 
6944c7070dbSScott Long static void iflib_tx_structures_free(if_ctx_t ctx);
6954c7070dbSScott Long static void iflib_rx_structures_free(if_ctx_t ctx);
6964c7070dbSScott Long static int iflib_queues_alloc(if_ctx_t ctx);
6974c7070dbSScott Long static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
69895246abbSSean Bruno static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
6994c7070dbSScott Long static int iflib_qset_structures_setup(if_ctx_t ctx);
7004c7070dbSScott Long static int iflib_msix_init(if_ctx_t ctx);
7013e0e6330SStephen Hurd static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
7024c7070dbSScott Long static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
7034c7070dbSScott Long static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
704b8ca4756SPatrick Kelsey #ifdef ALTQ
705b8ca4756SPatrick Kelsey static void iflib_altq_if_start(if_t ifp);
706b8ca4756SPatrick Kelsey static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
707b8ca4756SPatrick Kelsey #endif
7084c7070dbSScott Long static int iflib_register(if_ctx_t);
70956614414SEric Joyner static void iflib_deregister(if_ctx_t);
7101558015eSEric Joyner static void iflib_unregister_vlan_handlers(if_ctx_t ctx);
7114c7070dbSScott Long static void iflib_init_locked(if_ctx_t ctx);
7124c7070dbSScott Long static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
7134c7070dbSScott Long static void iflib_add_device_sysctl_post(if_ctx_t ctx);
714da69b8f9SSean Bruno static void iflib_ifmp_purge(iflib_txq_t txq);
7151248952aSSean Bruno static void _iflib_pre_assert(if_softc_ctx_t scctx);
71695246abbSSean Bruno static void iflib_if_init_locked(if_ctx_t ctx);
71777c1fcecSEric Joyner static void iflib_free_intr_mem(if_ctx_t ctx);
71895246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
71995246abbSSean Bruno static struct mbuf * iflib_fixup_rx(struct mbuf *m);
72095246abbSSean Bruno #endif
7214c7070dbSScott Long 
722f154ece0SStephen Hurd static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
723f154ece0SStephen Hurd     SLIST_HEAD_INITIALIZER(cpu_offsets);
724f154ece0SStephen Hurd struct cpu_offset {
725f154ece0SStephen Hurd 	SLIST_ENTRY(cpu_offset) entries;
726f154ece0SStephen Hurd 	cpuset_t	set;
727f154ece0SStephen Hurd 	unsigned int	refcount;
728f154ece0SStephen Hurd 	uint16_t	offset;
729f154ece0SStephen Hurd };
730f154ece0SStephen Hurd static struct mtx cpu_offset_mtx;
731f154ece0SStephen Hurd MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
732f154ece0SStephen Hurd     MTX_DEF);
733f154ece0SStephen Hurd 
7347790c8c1SConrad Meyer DEBUGNET_DEFINE(iflib);
73594618825SMark Johnston 
7364c7070dbSScott Long #ifdef DEV_NETMAP
7374c7070dbSScott Long #include <sys/selinfo.h>
7384c7070dbSScott Long #include <net/netmap.h>
7394c7070dbSScott Long #include <dev/netmap/netmap_kern.h>
7404c7070dbSScott Long 
7414c7070dbSScott Long MODULE_DEPEND(iflib, netmap, 1, 1, 1);
7424c7070dbSScott Long 
7432d873474SStephen Hurd static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);
7442d873474SStephen Hurd 
7454c7070dbSScott Long /*
7464c7070dbSScott Long  * device-specific sysctl variables:
7474c7070dbSScott Long  *
74891d546a0SConrad Meyer  * iflib_crcstrip: 0: keep CRC in rx frames, 1: strip it (default).
7494c7070dbSScott Long  *	During regular operations the CRC is stripped, but on some
7504c7070dbSScott Long  *	hardware reception of frames not a multiple of 64 bytes is slower,
7514c7070dbSScott Long  *	so using crcstrip=0 helps in benchmarks.
7524c7070dbSScott Long  *
75391d546a0SConrad Meyer  * iflib_rx_miss, iflib_rx_miss_bufs:
7544c7070dbSScott Long  *	count packets that might be missed due to lost interrupts.
7554c7070dbSScott Long  */
7564c7070dbSScott Long SYSCTL_DECL(_dev_netmap);
7574c7070dbSScott Long /*
7584c7070dbSScott Long  * The xl driver by default strips CRCs and we do not override it.
7594c7070dbSScott Long  */
7604c7070dbSScott Long 
7614c7070dbSScott Long int iflib_crcstrip = 1;
7624c7070dbSScott Long SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
7631722eeacSMarius Strobl     CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames");
7644c7070dbSScott Long 
7654c7070dbSScott Long int iflib_rx_miss, iflib_rx_miss_bufs;
7664c7070dbSScott Long SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
7671722eeacSMarius Strobl     CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr");
76891d546a0SConrad Meyer SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
7691722eeacSMarius Strobl     CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs");
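/*
 * Editor's usage note (not part of the original source): these knobs live
 * under dev.netmap, e.g. "sysctl dev.netmap.iflib_crcstrip=0" before a
 * benchmark run, and "sysctl dev.netmap.iflib_rx_miss
 * dev.netmap.iflib_rx_miss_bufs" to read the missed-interrupt counters.
 */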
7704c7070dbSScott Long 
7714c7070dbSScott Long /*
7724c7070dbSScott Long  * Register/unregister. We are already under netmap lock.
7734c7070dbSScott Long  * Only called on the first register or the last unregister.
7744c7070dbSScott Long  */
7754c7070dbSScott Long static int
7764c7070dbSScott Long iflib_netmap_register(struct netmap_adapter *na, int onoff)
7774c7070dbSScott Long {
7781722eeacSMarius Strobl 	if_t ifp = na->ifp;
7794c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
78095246abbSSean Bruno 	int status;
7814c7070dbSScott Long 
7824c7070dbSScott Long 	CTX_LOCK(ctx);
7834c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
7844c7070dbSScott Long 
7854c7070dbSScott Long 	/* Tell the stack that the interface is no longer active */
7864c7070dbSScott Long 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
7874c7070dbSScott Long 
7884c7070dbSScott Long 	if (!CTX_IS_VF(ctx))
7891248952aSSean Bruno 		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
7904c7070dbSScott Long 
7914c7070dbSScott Long 	/* enable or disable flags and callbacks in na and ifp */
7924c7070dbSScott Long 	if (onoff) {
7934c7070dbSScott Long 		nm_set_native_flags(na);
7944c7070dbSScott Long 	} else {
7954c7070dbSScott Long 		nm_clear_native_flags(na);
7964c7070dbSScott Long 	}
79795246abbSSean Bruno 	iflib_stop(ctx);
79895246abbSSean Bruno 	iflib_init_locked(ctx);
7991248952aSSean Bruno 	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
80095246abbSSean Bruno 	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
80195246abbSSean Bruno 	if (status)
80295246abbSSean Bruno 		nm_clear_native_flags(na);
8034c7070dbSScott Long 	CTX_UNLOCK(ctx);
80495246abbSSean Bruno 	return (status);
8054c7070dbSScott Long }
8064c7070dbSScott Long 
8072d873474SStephen Hurd static int
8082d873474SStephen Hurd netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
8092d873474SStephen Hurd {
8102d873474SStephen Hurd 	struct netmap_adapter *na = kring->na;
8112d873474SStephen Hurd 	u_int const lim = kring->nkr_num_slots - 1;
8122d873474SStephen Hurd 	u_int head = kring->rhead;
8132d873474SStephen Hurd 	struct netmap_ring *ring = kring->ring;
8142d873474SStephen Hurd 	bus_dmamap_t *map;
8152d873474SStephen Hurd 	struct if_rxd_update iru;
8162d873474SStephen Hurd 	if_ctx_t ctx = rxq->ifr_ctx;
8172d873474SStephen Hurd 	iflib_fl_t fl = &rxq->ifr_fl[0];
8182d873474SStephen Hurd 	uint32_t refill_pidx, nic_i;
81964e6fc13SStephen Hurd #if IFLIB_DEBUG_COUNTERS
82064e6fc13SStephen Hurd 	int rf_count = 0;
82164e6fc13SStephen Hurd #endif
8222d873474SStephen Hurd 
8232d873474SStephen Hurd 	if (nm_i == head && __predict_true(!init))
8242d873474SStephen Hurd 		return 0;
8252d873474SStephen Hurd 	iru_init(&iru, rxq, 0 /* flid */);
8262d873474SStephen Hurd 	map = fl->ifl_sds.ifsd_map;
8272d873474SStephen Hurd 	refill_pidx = netmap_idx_k2n(kring, nm_i);
8282d873474SStephen Hurd 	/*
8292d873474SStephen Hurd 	 * IMPORTANT: we must leave one free slot in the ring,
8302d873474SStephen Hurd 	 * so move head back by one unit
8312d873474SStephen Hurd 	 */
8322d873474SStephen Hurd 	head = nm_prev(head, lim);
8331ae4848cSMatt Macy 	nic_i = UINT_MAX;
83464e6fc13SStephen Hurd 	DBG_COUNTER_INC(fl_refills);
8352d873474SStephen Hurd 	while (nm_i != head) {
83664e6fc13SStephen Hurd #if IFLIB_DEBUG_COUNTERS
83764e6fc13SStephen Hurd 		if (++rf_count == 9)
83864e6fc13SStephen Hurd 			DBG_COUNTER_INC(fl_refills_large);
83964e6fc13SStephen Hurd #endif
8402d873474SStephen Hurd 		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
8412d873474SStephen Hurd 			struct netmap_slot *slot = &ring->slot[nm_i];
8422d873474SStephen Hurd 			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
8432d873474SStephen Hurd 			uint32_t nic_i_dma = refill_pidx;
8442d873474SStephen Hurd 			nic_i = netmap_idx_k2n(kring, nm_i);
8452d873474SStephen Hurd 
8462d873474SStephen Hurd 			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);
8472d873474SStephen Hurd 
8482d873474SStephen Hurd 			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
8492d873474SStephen Hurd 			        return netmap_ring_reinit(kring);
8502d873474SStephen Hurd 
8512d873474SStephen Hurd 			fl->ifl_vm_addrs[tmp_pidx] = addr;
85295dcf343SMarius Strobl 			if (__predict_false(init)) {
85395dcf343SMarius Strobl 				netmap_load_map(na, fl->ifl_buf_tag,
85495dcf343SMarius Strobl 				    map[nic_i], addr);
85595dcf343SMarius Strobl 			} else if (slot->flags & NS_BUF_CHANGED) {
8562d873474SStephen Hurd 				/* buffer has changed, reload map */
85795dcf343SMarius Strobl 				netmap_reload_map(na, fl->ifl_buf_tag,
85895dcf343SMarius Strobl 				    map[nic_i], addr);
8592d873474SStephen Hurd 			}
8602d873474SStephen Hurd 			slot->flags &= ~NS_BUF_CHANGED;
8612d873474SStephen Hurd 
8622d873474SStephen Hurd 			nm_i = nm_next(nm_i, lim);
8632d873474SStephen Hurd 			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
8642d873474SStephen Hurd 			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
8652d873474SStephen Hurd 				continue;
8662d873474SStephen Hurd 
8672d873474SStephen Hurd 			iru.iru_pidx = refill_pidx;
8682d873474SStephen Hurd 			iru.iru_count = tmp_pidx+1;
8692d873474SStephen Hurd 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
8702d873474SStephen Hurd 			refill_pidx = nic_i;
8712d873474SStephen Hurd 			for (int n = 0; n < iru.iru_count; n++) {
87295dcf343SMarius Strobl 				bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i_dma],
8732d873474SStephen Hurd 						BUS_DMASYNC_PREREAD);
8742d873474SStephen Hurd 				/* XXX - change this to not use the netmap func */
8752d873474SStephen Hurd 				nic_i_dma = nm_next(nic_i_dma, lim);
8762d873474SStephen Hurd 			}
8772d873474SStephen Hurd 		}
8782d873474SStephen Hurd 	}
8792d873474SStephen Hurd 	kring->nr_hwcur = head;
8802d873474SStephen Hurd 
8812d873474SStephen Hurd 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
8822d873474SStephen Hurd 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
88364e6fc13SStephen Hurd 	if (__predict_true(nic_i != UINT_MAX)) {
8842d873474SStephen Hurd 		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
88564e6fc13SStephen Hurd 		DBG_COUNTER_INC(rxd_flush);
88664e6fc13SStephen Hurd 	}
8872d873474SStephen Hurd 	return (0);
8882d873474SStephen Hurd }
8892d873474SStephen Hurd 
8904c7070dbSScott Long /*
8914c7070dbSScott Long  * Reconcile kernel and user view of the transmit ring.
8924c7070dbSScott Long  *
8934c7070dbSScott Long  * All information is in the kring.
8944c7070dbSScott Long  * Userspace wants to send packets up to the one before kring->rhead,
8954c7070dbSScott Long  * kernel knows kring->nr_hwcur is the first unsent packet.
8964c7070dbSScott Long  *
8974c7070dbSScott Long  * Here we push packets out (as many as possible), and possibly
8984c7070dbSScott Long  * reclaim buffers from previously completed transmission.
8994c7070dbSScott Long  *
9004c7070dbSScott Long  * The caller (netmap) guarantees that there is only one instance
9014c7070dbSScott Long  * running at any time. Any interference with other driver
9024c7070dbSScott Long  * methods should be handled by the individual drivers.
9034c7070dbSScott Long  */
9044c7070dbSScott Long static int
9054c7070dbSScott Long iflib_netmap_txsync(struct netmap_kring *kring, int flags)
9064c7070dbSScott Long {
9074c7070dbSScott Long 	struct netmap_adapter *na = kring->na;
9081722eeacSMarius Strobl 	if_t ifp = na->ifp;
9094c7070dbSScott Long 	struct netmap_ring *ring = kring->ring;
910dd7fbcf1SStephen Hurd 	u_int nm_i;	/* index into the netmap kring */
9114c7070dbSScott Long 	u_int nic_i;	/* index into the NIC ring */
9124c7070dbSScott Long 	u_int n;
9134c7070dbSScott Long 	u_int const lim = kring->nkr_num_slots - 1;
9144c7070dbSScott Long 	u_int const head = kring->rhead;
9154c7070dbSScott Long 	struct if_pkt_info pi;
9164c7070dbSScott Long 
9174c7070dbSScott Long 	/*
9184c7070dbSScott Long 	 * interrupts on every tx packet are expensive so request
9194c7070dbSScott Long 	 * them every half ring, or where NS_REPORT is set
9204c7070dbSScott Long 	 */
9214c7070dbSScott Long 	u_int report_frequency = kring->nkr_num_slots >> 1;
9224c7070dbSScott Long 	/* device-specific */
9234c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
9244c7070dbSScott Long 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
9254c7070dbSScott Long 
92695dcf343SMarius Strobl 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
9274c7070dbSScott Long 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9284c7070dbSScott Long 
9294c7070dbSScott Long 	/*
9304c7070dbSScott Long 	 * First part: process new packets to send.
931dd7fbcf1SStephen Hurd 	 * nm_i is the current index in the netmap kring,
9324c7070dbSScott Long 	 * nic_i is the corresponding index in the NIC ring.
9334c7070dbSScott Long 	 *
9344c7070dbSScott Long 	 * If we have packets to send (nm_i != head)
9354c7070dbSScott Long 	 * iterate over the netmap ring, fetch length and update
9364c7070dbSScott Long 	 * the corresponding slot in the NIC ring. Some drivers also
9374c7070dbSScott Long 	 * need to update the buffer's physical address in the NIC slot
9384c7070dbSScott Long 	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
9394c7070dbSScott Long 	 *
9404c7070dbSScott Long 	 * The netmap_reload_map() call is especially expensive,
9414c7070dbSScott Long 	 * even when (as in this case) the tag is 0, so do it only
9424c7070dbSScott Long 	 * when the buffer has actually changed.
9434c7070dbSScott Long 	 *
9444c7070dbSScott Long 	 * If possible do not set the report/intr bit on all slots,
9454c7070dbSScott Long 	 * but only a few times per ring or when NS_REPORT is set.
9464c7070dbSScott Long 	 *
9474c7070dbSScott Long 	 * Finally, on 10G and faster drivers, it might be useful
9484c7070dbSScott Long 	 * to prefetch the next slot and txr entry.
9494c7070dbSScott Long 	 */
9504c7070dbSScott Long 
951dd7fbcf1SStephen Hurd 	nm_i = kring->nr_hwcur;
9525ee36c68SStephen Hurd 	if (nm_i != head) {	/* we have new packets to send */
95395246abbSSean Bruno 		pkt_info_zero(&pi);
95495246abbSSean Bruno 		pi.ipi_segs = txq->ift_segs;
95595246abbSSean Bruno 		pi.ipi_qsidx = kring->ring_id;
9564c7070dbSScott Long 		nic_i = netmap_idx_k2n(kring, nm_i);
9574c7070dbSScott Long 
9584c7070dbSScott Long 		__builtin_prefetch(&ring->slot[nm_i]);
9594c7070dbSScott Long 		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
9604c7070dbSScott Long 		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
9614c7070dbSScott Long 
9624c7070dbSScott Long 		for (n = 0; nm_i != head; n++) {
9634c7070dbSScott Long 			struct netmap_slot *slot = &ring->slot[nm_i];
9644c7070dbSScott Long 			u_int len = slot->len;
9650a1b74a3SSean Bruno 			uint64_t paddr;
9664c7070dbSScott Long 			void *addr = PNMB(na, slot, &paddr);
9674c7070dbSScott Long 			int flags = (slot->flags & NS_REPORT ||
9684c7070dbSScott Long 				nic_i == 0 || nic_i == report_frequency) ?
9694c7070dbSScott Long 				IPI_TX_INTR : 0;
9704c7070dbSScott Long 
9714c7070dbSScott Long 			/* device-specific */
97295246abbSSean Bruno 			pi.ipi_len = len;
97395246abbSSean Bruno 			pi.ipi_segs[0].ds_addr = paddr;
97495246abbSSean Bruno 			pi.ipi_segs[0].ds_len = len;
97595246abbSSean Bruno 			pi.ipi_nsegs = 1;
97695246abbSSean Bruno 			pi.ipi_ndescs = 0;
9774c7070dbSScott Long 			pi.ipi_pidx = nic_i;
9784c7070dbSScott Long 			pi.ipi_flags = flags;
9794c7070dbSScott Long 
9804c7070dbSScott Long 			/* Fill the slot in the NIC ring. */
9814c7070dbSScott Long 			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
98264e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_encap);
9834c7070dbSScott Long 
9844c7070dbSScott Long 			/* prefetch for next round */
9854c7070dbSScott Long 			__builtin_prefetch(&ring->slot[nm_i + 1]);
9864c7070dbSScott Long 			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
9874c7070dbSScott Long 			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
9884c7070dbSScott Long 
9894c7070dbSScott Long 			NM_CHECK_ADDR_LEN(na, addr, len);
9904c7070dbSScott Long 
9914c7070dbSScott Long 			if (slot->flags & NS_BUF_CHANGED) {
9924c7070dbSScott Long 				/* buffer has changed, reload map */
993bfce461eSMarius Strobl 				netmap_reload_map(na, txq->ift_buf_tag,
994bfce461eSMarius Strobl 				    txq->ift_sds.ifsd_map[nic_i], addr);
9954c7070dbSScott Long 			}
9964c7070dbSScott Long 			/* make sure changes to the buffer are synced */
99795dcf343SMarius Strobl 			bus_dmamap_sync(txq->ift_buf_tag,
99895dcf343SMarius Strobl 			    txq->ift_sds.ifsd_map[nic_i],
9994c7070dbSScott Long 			    BUS_DMASYNC_PREWRITE);
100095dcf343SMarius Strobl 
100195246abbSSean Bruno 			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
10024c7070dbSScott Long 			nm_i = nm_next(nm_i, lim);
10034c7070dbSScott Long 			nic_i = nm_next(nic_i, lim);
10044c7070dbSScott Long 		}
1005dd7fbcf1SStephen Hurd 		kring->nr_hwcur = nm_i;
10064c7070dbSScott Long 
10074c7070dbSScott Long 		/* synchronize the NIC ring */
100895dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
10094c7070dbSScott Long 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10104c7070dbSScott Long 
10114c7070dbSScott Long 		/* (re)start the tx unit up to slot nic_i (excluded) */
10124c7070dbSScott Long 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
10134c7070dbSScott Long 	}
10144c7070dbSScott Long 
10154c7070dbSScott Long 	/*
10164c7070dbSScott Long 	 * Second part: reclaim buffers for completed transmissions.
10175ee36c68SStephen Hurd 	 *
10185ee36c68SStephen Hurd 	 * If there are unclaimed buffers, attempt to reclaim them.
10195ee36c68SStephen Hurd 	 * If none are reclaimed, and TX IRQs are not in use, do an initial
10205ee36c68SStephen Hurd 	 * minimal delay, then trigger the tx handler which will spin in the
10215ee36c68SStephen Hurd 	 * group task queue.
10224c7070dbSScott Long 	 */
1023dd7fbcf1SStephen Hurd 	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
10244c7070dbSScott Long 		if (iflib_tx_credits_update(ctx, txq)) {
10254c7070dbSScott Long 			/* some tx completed, increment avail */
10264c7070dbSScott Long 			nic_i = txq->ift_cidx_processed;
10274c7070dbSScott Long 			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
10284c7070dbSScott Long 		}
10295ee36c68SStephen Hurd 	}
1030dd7fbcf1SStephen Hurd 	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
1031dd7fbcf1SStephen Hurd 		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
1032dd7fbcf1SStephen Hurd 			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
1033dd7fbcf1SStephen Hurd 			    iflib_timer, txq, txq->ift_timer.c_cpu);
10345ee36c68SStephen Hurd 	}
10354c7070dbSScott Long 	return (0);
10364c7070dbSScott Long }
10374c7070dbSScott Long 
10384c7070dbSScott Long /*
10394c7070dbSScott Long  * Reconcile kernel and user view of the receive ring.
10404c7070dbSScott Long  * Same as for the txsync, this routine must be efficient.
10414c7070dbSScott Long  * The caller guarantees a single invocation, but races against
10424c7070dbSScott Long  * the rest of the driver should be handled here.
10434c7070dbSScott Long  *
10444c7070dbSScott Long  * On call, kring->rhead is the first packet that userspace wants
10454c7070dbSScott Long  * to keep, and kring->rcur is the wakeup point.
10464c7070dbSScott Long  * The kernel has previously reported packets up to kring->rtail.
10474c7070dbSScott Long  *
10484c7070dbSScott Long  * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
10494c7070dbSScott Long  * of whether or not we received an interrupt.
10504c7070dbSScott Long  */
10514c7070dbSScott Long static int
10524c7070dbSScott Long iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
10534c7070dbSScott Long {
10544c7070dbSScott Long 	struct netmap_adapter *na = kring->na;
10554c7070dbSScott Long 	struct netmap_ring *ring = kring->ring;
10561722eeacSMarius Strobl 	if_t ifp = na->ifp;
105795dcf343SMarius Strobl 	iflib_fl_t fl;
105895246abbSSean Bruno 	uint32_t nm_i;	/* index into the netmap ring */
10592d873474SStephen Hurd 	uint32_t nic_i;	/* index into the NIC ring */
10604c7070dbSScott Long 	u_int i, n;
10614c7070dbSScott Long 	u_int const lim = kring->nkr_num_slots - 1;
1062dd7fbcf1SStephen Hurd 	u_int const head = kring->rhead;
10634c7070dbSScott Long 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
1064ab2e3f79SStephen Hurd 	struct if_rxd_info ri;
106595246abbSSean Bruno 
10664c7070dbSScott Long 	if_ctx_t ctx = ifp->if_softc;
10674c7070dbSScott Long 	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
10684c7070dbSScott Long 	if (head > lim)
10694c7070dbSScott Long 		return netmap_ring_reinit(kring);
10704c7070dbSScott Long 
107195dcf343SMarius Strobl 	/*
107295dcf343SMarius Strobl 	 * XXX netmap_fl_refill() only ever (re)fills free list 0 so far.
107395dcf343SMarius Strobl 	 */
107495dcf343SMarius Strobl 
107595246abbSSean Bruno 	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
107695dcf343SMarius Strobl 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
10774c7070dbSScott Long 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
107895246abbSSean Bruno 	}
107995dcf343SMarius Strobl 
10804c7070dbSScott Long 	/*
10814c7070dbSScott Long 	 * First part: import newly received packets.
10824c7070dbSScott Long 	 *
10834c7070dbSScott Long 	 * nm_i is the index of the next free slot in the netmap ring,
10844c7070dbSScott Long 	 * nic_i is the index of the next received packet in the NIC ring,
10854c7070dbSScott Long 	 * and they may differ in case if_init() has been called while
10864c7070dbSScott Long 	 * in netmap mode. For the receive ring we have
10874c7070dbSScott Long 	 *
10884c7070dbSScott Long 	 *	nic_i = rxr->next_check;
10894c7070dbSScott Long 	 *	nm_i = kring->nr_hwtail (previous)
10904c7070dbSScott Long 	 * and
10914c7070dbSScott Long 	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
10924c7070dbSScott Long 	 *
10934c7070dbSScott Long 	 * rxr->next_check is set to 0 on a ring reinit
10944c7070dbSScott Long 	 */
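	/*
	 * For example, with ring_size = 1024 and nkr_hwofs = 3
	 * (hypothetical values), nic_i = 1022 maps to
	 * nm_i = (1022 + 3) % 1024 = 1.
	 */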
10954c7070dbSScott Long 	if (netmap_no_pendintr || force_update) {
10964c7070dbSScott Long 		int crclen = iflib_crcstrip ? 0 : 4;
10974c7070dbSScott Long 		int error, avail;
10984c7070dbSScott Long 
10992d873474SStephen Hurd 		for (i = 0; i < rxq->ifr_nfl; i++) {
11002d873474SStephen Hurd 			fl = &rxq->ifr_fl[i];
11014c7070dbSScott Long 			nic_i = fl->ifl_cidx;
11024c7070dbSScott Long 			nm_i = netmap_idx_n2k(kring, nic_i);
110395dcf343SMarius Strobl 			avail = ctx->isc_rxd_available(ctx->ifc_softc,
110495dcf343SMarius Strobl 			    rxq->ifr_id, nic_i, USHRT_MAX);
11054c7070dbSScott Long 			for (n = 0; avail > 0; n++, avail--) {
1106ab2e3f79SStephen Hurd 				rxd_info_zero(&ri);
1107ab2e3f79SStephen Hurd 				ri.iri_frags = rxq->ifr_frags;
1108ab2e3f79SStephen Hurd 				ri.iri_qsidx = kring->ring_id;
1109ab2e3f79SStephen Hurd 				ri.iri_ifp = ctx->ifc_ifp;
1110ab2e3f79SStephen Hurd 				ri.iri_cidx = nic_i;
111195246abbSSean Bruno 
1112ab2e3f79SStephen Hurd 				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
1113ab2e3f79SStephen Hurd 				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
11147cb7c6e3SNavdeep Parhar 				ring->slot[nm_i].flags = 0;
111595dcf343SMarius Strobl 				bus_dmamap_sync(fl->ifl_buf_tag,
1116e035717eSSean Bruno 				    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
11174c7070dbSScott Long 				nm_i = nm_next(nm_i, lim);
11184c7070dbSScott Long 				nic_i = nm_next(nic_i, lim);
11194c7070dbSScott Long 			}
11204c7070dbSScott Long 			if (n) { /* update the state variables */
11214c7070dbSScott Long 				if (netmap_no_pendintr && !force_update) {
11224c7070dbSScott Long 					/* diagnostics */
11234c7070dbSScott Long 					iflib_rx_miss++;
11244c7070dbSScott Long 					iflib_rx_miss_bufs += n;
11254c7070dbSScott Long 				}
11264c7070dbSScott Long 				fl->ifl_cidx = nic_i;
1127dd7fbcf1SStephen Hurd 				kring->nr_hwtail = nm_i;
11284c7070dbSScott Long 			}
11294c7070dbSScott Long 			kring->nr_kflags &= ~NKR_PENDINTR;
11304c7070dbSScott Long 		}
11314c7070dbSScott Long 	}
11324c7070dbSScott Long 	/*
11334c7070dbSScott Long 	 * Second part: skip past packets that userspace has released.
11344c7070dbSScott Long 	 * (kring->nr_hwcur to head excluded),
11354c7070dbSScott Long 	 * and make the buffers available for reception.
11364c7070dbSScott Long 	 * As usual nm_i is the index in the netmap ring,
11374c7070dbSScott Long 	 * nic_i is the index in the NIC ring, and
11384c7070dbSScott Long 	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
11394c7070dbSScott Long 	 */
11404c7070dbSScott Long 	/* XXX not sure how this will work with multiple free lists */
1141dd7fbcf1SStephen Hurd 	nm_i = kring->nr_hwcur;
114295246abbSSean Bruno 
11432d873474SStephen Hurd 	return (netmap_fl_refill(rxq, kring, nm_i, false));
11444c7070dbSScott Long }
11454c7070dbSScott Long 
114695246abbSSean Bruno static void
114795246abbSSean Bruno iflib_netmap_intr(struct netmap_adapter *na, int onoff)
114895246abbSSean Bruno {
11491722eeacSMarius Strobl 	if_ctx_t ctx = na->ifp->if_softc;
115095246abbSSean Bruno 
1151ab2e3f79SStephen Hurd 	CTX_LOCK(ctx);
115295246abbSSean Bruno 	if (onoff) {
115395246abbSSean Bruno 		IFDI_INTR_ENABLE(ctx);
115495246abbSSean Bruno 	} else {
115595246abbSSean Bruno 		IFDI_INTR_DISABLE(ctx);
115695246abbSSean Bruno 	}
1157ab2e3f79SStephen Hurd 	CTX_UNLOCK(ctx);
115895246abbSSean Bruno }
115995246abbSSean Bruno 
116095246abbSSean Bruno 
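/*
 * Fill in a struct netmap_adapter from the context's queue and
 * descriptor counts and register the interface with netmap.
 */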
11614c7070dbSScott Long static int
11624c7070dbSScott Long iflib_netmap_attach(if_ctx_t ctx)
11634c7070dbSScott Long {
11644c7070dbSScott Long 	struct netmap_adapter na;
116523ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
11664c7070dbSScott Long 
11674c7070dbSScott Long 	bzero(&na, sizeof(na));
11684c7070dbSScott Long 
11694c7070dbSScott Long 	na.ifp = ctx->ifc_ifp;
11704c7070dbSScott Long 	na.na_flags = NAF_BDG_MAYSLEEP;
11714c7070dbSScott Long 	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
11724c7070dbSScott Long 	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
11734c7070dbSScott Long 
117423ac9029SStephen Hurd 	na.num_tx_desc = scctx->isc_ntxd[0];
117523ac9029SStephen Hurd 	na.num_rx_desc = scctx->isc_nrxd[0];
11764c7070dbSScott Long 	na.nm_txsync = iflib_netmap_txsync;
11774c7070dbSScott Long 	na.nm_rxsync = iflib_netmap_rxsync;
11784c7070dbSScott Long 	na.nm_register = iflib_netmap_register;
117995246abbSSean Bruno 	na.nm_intr = iflib_netmap_intr;
11804c7070dbSScott Long 	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
11814c7070dbSScott Long 	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
11824c7070dbSScott Long 	return (netmap_attach(&na));
11834c7070dbSScott Long }
11844c7070dbSScott Long 
11854c7070dbSScott Long static void
11864c7070dbSScott Long iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
11874c7070dbSScott Long {
11884c7070dbSScott Long 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
11894c7070dbSScott Long 	struct netmap_slot *slot;
11904c7070dbSScott Long 
11914c7070dbSScott Long 	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1192e099b90bSPedro F. Giffuni 	if (slot == NULL)
11934c7070dbSScott Long 		return;
119423ac9029SStephen Hurd 	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
11954c7070dbSScott Long 
11964c7070dbSScott Long 		/*
11974c7070dbSScott Long 		 * In netmap mode, set the map for the packet buffer.
11984c7070dbSScott Long 		 * NOTE: Some drivers (not this one) also need to set
11994c7070dbSScott Long 		 * the physical buffer address in the NIC ring.
12004c7070dbSScott Long 		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
12014c7070dbSScott Long 		 * netmap slot index, si
12024c7070dbSScott Long 		 */
12032ff91c17SVincenzo Maffione 		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
1204bfce461eSMarius Strobl 		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
1205bfce461eSMarius Strobl 		    NMB(na, slot + si));
12064c7070dbSScott Long 	}
12074c7070dbSScott Long }
12082d873474SStephen Hurd 
12094c7070dbSScott Long static void
12104c7070dbSScott Long iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
12114c7070dbSScott Long {
12124c7070dbSScott Long 	struct netmap_adapter *na = NA(ctx->ifc_ifp);
12132ff91c17SVincenzo Maffione 	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
12144c7070dbSScott Long 	struct netmap_slot *slot;
12152d873474SStephen Hurd 	uint32_t nm_i;
12164c7070dbSScott Long 
12174c7070dbSScott Long 	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
1218e099b90bSPedro F. Giffuni 	if (slot == NULL)
12194c7070dbSScott Long 		return;
12202d873474SStephen Hurd 	nm_i = netmap_idx_n2k(kring, 0);
12212d873474SStephen Hurd 	netmap_fl_refill(rxq, kring, nm_i, true);
12224c7070dbSScott Long }
12234c7070dbSScott Long 
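/*
 * If the netmap TX kring still has unreclaimed slots, harvest any new
 * completions and, when per-queue TX interrupts are unavailable, ask the
 * caller (via *reset_on) to re-arm its timer in roughly 1 ms.
 */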
1224dd7fbcf1SStephen Hurd static void
122595dcf343SMarius Strobl iflib_netmap_timer_adjust(if_ctx_t ctx, iflib_txq_t txq, uint32_t *reset_on)
1226dd7fbcf1SStephen Hurd {
1227dd7fbcf1SStephen Hurd 	struct netmap_kring *kring;
122895dcf343SMarius Strobl 	uint16_t txqid;
1229dd7fbcf1SStephen Hurd 
123095dcf343SMarius Strobl 	txqid = txq->ift_id;
1231dd7fbcf1SStephen Hurd 	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];
1232dd7fbcf1SStephen Hurd 
1233dd7fbcf1SStephen Hurd 	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
123495dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
123595dcf343SMarius Strobl 		    BUS_DMASYNC_POSTREAD);
1236dd7fbcf1SStephen Hurd 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
1237dd7fbcf1SStephen Hurd 			netmap_tx_irq(ctx->ifc_ifp, txqid);
1238dd7fbcf1SStephen Hurd 		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
1239dd7fbcf1SStephen Hurd 			if (hz < 2000)
1240dd7fbcf1SStephen Hurd 				*reset_on = 1;
1241dd7fbcf1SStephen Hurd 			else
1242dd7fbcf1SStephen Hurd 				*reset_on = hz / 1000;
1243dd7fbcf1SStephen Hurd 		}
1244dd7fbcf1SStephen Hurd 	}
1245dd7fbcf1SStephen Hurd }
1246dd7fbcf1SStephen Hurd 
12474c7070dbSScott Long #define iflib_netmap_detach(ifp) netmap_detach(ifp)
12484c7070dbSScott Long 
12494c7070dbSScott Long #else
12504c7070dbSScott Long #define iflib_netmap_txq_init(ctx, txq)
12514c7070dbSScott Long #define iflib_netmap_rxq_init(ctx, rxq)
12524c7070dbSScott Long #define iflib_netmap_detach(ifp)
12534c7070dbSScott Long 
12544c7070dbSScott Long #define iflib_netmap_attach(ctx) (0)
12554c7070dbSScott Long #define netmap_rx_irq(ifp, qid, budget) (0)
125695246abbSSean Bruno #define netmap_tx_irq(ifp, qid) do {} while (0)
125795dcf343SMarius Strobl #define iflib_netmap_timer_adjust(ctx, txq, reset_on)
12584c7070dbSScott Long #endif
12594c7070dbSScott Long 
12604c7070dbSScott Long #if defined(__i386__) || defined(__amd64__)
12614c7070dbSScott Long static __inline void
12624c7070dbSScott Long prefetch(void *x)
12634c7070dbSScott Long {
12644c7070dbSScott Long 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
12654c7070dbSScott Long }
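/*
 * Prefetch two consecutive cache lines starting at x; the second prefetch
 * is compiled out when a single cache line already spans 128 bytes.
 */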
12663429c02fSStephen Hurd static __inline void
12673429c02fSStephen Hurd prefetch2cachelines(void *x)
12683429c02fSStephen Hurd {
12693429c02fSStephen Hurd 	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
12703429c02fSStephen Hurd #if (CACHE_LINE_SIZE < 128)
12713429c02fSStephen Hurd 	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
12723429c02fSStephen Hurd #endif
12733429c02fSStephen Hurd }
12744c7070dbSScott Long #else
12754c7070dbSScott Long #define prefetch(x)
12763429c02fSStephen Hurd #define prefetch2cachelines(x)
12774c7070dbSScott Long #endif
12784c7070dbSScott Long 
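/*
 * Point an if_rxd_update at free list @flid's staging arrays so a batch
 * of refilled buffers can be handed to the driver's isc_rxd_refill.
 */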
12794c7070dbSScott Long static void
128010e0d938SStephen Hurd iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
128110e0d938SStephen Hurd {
128210e0d938SStephen Hurd 	iflib_fl_t fl;
128310e0d938SStephen Hurd 
128410e0d938SStephen Hurd 	fl = &rxq->ifr_fl[flid];
128510e0d938SStephen Hurd 	iru->iru_paddrs = fl->ifl_bus_addrs;
128610e0d938SStephen Hurd 	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
128710e0d938SStephen Hurd 	iru->iru_idxs = fl->ifl_rxd_idxs;
128810e0d938SStephen Hurd 	iru->iru_qsidx = rxq->ifr_id;
128910e0d938SStephen Hurd 	iru->iru_buf_size = fl->ifl_buf_size;
129010e0d938SStephen Hurd 	iru->iru_flidx = fl->ifl_id;
129110e0d938SStephen Hurd }
129210e0d938SStephen Hurd 
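/* bus_dmamap_load() callback: record the single segment's bus address. */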
129310e0d938SStephen Hurd static void
12944c7070dbSScott Long _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
12954c7070dbSScott Long {
12964c7070dbSScott Long 	if (err)
12974c7070dbSScott Long 		return;
12984c7070dbSScott Long 	*(bus_addr_t *) arg = segs[0].ds_addr;
12994c7070dbSScott Long }
13004c7070dbSScott Long 
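/*
 * Allocate a single physically contiguous, @align-aligned DMA-able region
 * of @size bytes and record its tag, map, kernel virtual address, and bus
 * address in @dma; any intermediate resources are released on failure.
 */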
13014c7070dbSScott Long int
13028f82136aSPatrick Kelsey iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
13034c7070dbSScott Long {
13044c7070dbSScott Long 	int err;
13054c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
13064c7070dbSScott Long 
13074c7070dbSScott Long 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
13088f82136aSPatrick Kelsey 				align, 0,		/* alignment, bounds */
13094c7070dbSScott Long 				BUS_SPACE_MAXADDR,	/* lowaddr */
13104c7070dbSScott Long 				BUS_SPACE_MAXADDR,	/* highaddr */
13114c7070dbSScott Long 				NULL, NULL,		/* filter, filterarg */
13124c7070dbSScott Long 				size,			/* maxsize */
13134c7070dbSScott Long 				1,			/* nsegments */
13144c7070dbSScott Long 				size,			/* maxsegsize */
13154c7070dbSScott Long 				BUS_DMA_ALLOCNOW,	/* flags */
13164c7070dbSScott Long 				NULL,			/* lockfunc */
13174c7070dbSScott Long 				NULL,			/* lockarg */
13184c7070dbSScott Long 				&dma->idi_tag);
13194c7070dbSScott Long 	if (err) {
13204c7070dbSScott Long 		device_printf(dev,
13214c7070dbSScott Long 		    "%s: bus_dma_tag_create failed: %d\n",
13224c7070dbSScott Long 		    __func__, err);
13234c7070dbSScott Long 		goto fail_0;
13244c7070dbSScott Long 	}
13254c7070dbSScott Long 
13264c7070dbSScott Long 	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
13274c7070dbSScott Long 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
13284c7070dbSScott Long 	if (err) {
13294c7070dbSScott Long 		device_printf(dev,
13304c7070dbSScott Long 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
13314c7070dbSScott Long 		    __func__, (uintmax_t)size, err);
13324c7070dbSScott Long 		goto fail_1;
13334c7070dbSScott Long 	}
13344c7070dbSScott Long 
13354c7070dbSScott Long 	dma->idi_paddr = IF_BAD_DMA;
13364c7070dbSScott Long 	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
13374c7070dbSScott Long 	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
13384c7070dbSScott Long 	if (err || dma->idi_paddr == IF_BAD_DMA) {
13394c7070dbSScott Long 		device_printf(dev,
13404c7070dbSScott Long 		    "%s: bus_dmamap_load failed: %d\n",
13414c7070dbSScott Long 		    __func__, err);
13424c7070dbSScott Long 		goto fail_2;
13434c7070dbSScott Long 	}
13444c7070dbSScott Long 
13454c7070dbSScott Long 	dma->idi_size = size;
13464c7070dbSScott Long 	return (0);
13474c7070dbSScott Long 
13484c7070dbSScott Long fail_2:
13494c7070dbSScott Long 	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
13504c7070dbSScott Long fail_1:
13514c7070dbSScott Long 	bus_dma_tag_destroy(dma->idi_tag);
13524c7070dbSScott Long fail_0:
13534c7070dbSScott Long 	dma->idi_tag = NULL;
13544c7070dbSScott Long 
13554c7070dbSScott Long 	return (err);
13564c7070dbSScott Long }
13574c7070dbSScott Long 
13584c7070dbSScott Long int
13598f82136aSPatrick Kelsey iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
13608f82136aSPatrick Kelsey {
13618f82136aSPatrick Kelsey 	if_shared_ctx_t sctx = ctx->ifc_sctx;
13628f82136aSPatrick Kelsey 
13638f82136aSPatrick Kelsey 	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));
13648f82136aSPatrick Kelsey 
13658f82136aSPatrick Kelsey 	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
13668f82136aSPatrick Kelsey }
13678f82136aSPatrick Kelsey 
13688f82136aSPatrick Kelsey int
13694c7070dbSScott Long iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
13704c7070dbSScott Long {
13714c7070dbSScott Long 	int i, err;
13724c7070dbSScott Long 	iflib_dma_info_t *dmaiter;
13734c7070dbSScott Long 
13744c7070dbSScott Long 	dmaiter = dmalist;
13754c7070dbSScott Long 	for (i = 0; i < count; i++, dmaiter++) {
13764c7070dbSScott Long 		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
13774c7070dbSScott Long 			break;
13784c7070dbSScott Long 	}
13794c7070dbSScott Long 	if (err)
13804c7070dbSScott Long 		iflib_dma_free_multi(dmalist, i);
13814c7070dbSScott Long 	return (err);
13824c7070dbSScott Long }
13834c7070dbSScott Long 
13844c7070dbSScott Long void
13854c7070dbSScott Long iflib_dma_free(iflib_dma_info_t dma)
13864c7070dbSScott Long {
13874c7070dbSScott Long 	if (dma->idi_tag == NULL)
13884c7070dbSScott Long 		return;
13894c7070dbSScott Long 	if (dma->idi_paddr != IF_BAD_DMA) {
13904c7070dbSScott Long 		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
13914c7070dbSScott Long 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
13924c7070dbSScott Long 		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
13934c7070dbSScott Long 		dma->idi_paddr = IF_BAD_DMA;
13944c7070dbSScott Long 	}
13954c7070dbSScott Long 	if (dma->idi_vaddr != NULL) {
13964c7070dbSScott Long 		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
13974c7070dbSScott Long 		dma->idi_vaddr = NULL;
13984c7070dbSScott Long 	}
13994c7070dbSScott Long 	bus_dma_tag_destroy(dma->idi_tag);
14004c7070dbSScott Long 	dma->idi_tag = NULL;
14014c7070dbSScott Long }
14024c7070dbSScott Long 
14034c7070dbSScott Long void
14044c7070dbSScott Long iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
14054c7070dbSScott Long {
14064c7070dbSScott Long 	int i;
14074c7070dbSScott Long 	iflib_dma_info_t *dmaiter = dmalist;
14084c7070dbSScott Long 
14094c7070dbSScott Long 	for (i = 0; i < count; i++, dmaiter++)
14104c7070dbSScott Long 		iflib_dma_free(*dmaiter);
14114c7070dbSScott Long }
14124c7070dbSScott Long 
1413bd84f700SSean Bruno #ifdef EARLY_AP_STARTUP
1414bd84f700SSean Bruno static const int iflib_started = 1;
1415bd84f700SSean Bruno #else
1416bd84f700SSean Bruno /*
1417bd84f700SSean Bruno  * We used to abuse the smp_started flag to decide if the queues have been
1418bd84f700SSean Bruno  * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
1419bd84f700SSean Bruno  * That gave bad races, since the SYSINIT() runs strictly after smp_started
1420bd84f700SSean Bruno  * is set.  Run a SYSINIT() strictly after that to just set a usable
1421bd84f700SSean Bruno  * completion flag.
1422bd84f700SSean Bruno  */
1423bd84f700SSean Bruno 
1424bd84f700SSean Bruno static int iflib_started;
1425bd84f700SSean Bruno 
1426bd84f700SSean Bruno static void
1427bd84f700SSean Bruno iflib_record_started(void *arg)
1428bd84f700SSean Bruno {
1429bd84f700SSean Bruno 	iflib_started = 1;
1430bd84f700SSean Bruno }
1431bd84f700SSean Bruno 
1432bd84f700SSean Bruno SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
1433bd84f700SSean Bruno 	iflib_record_started, NULL);
1434bd84f700SSean Bruno #endif
1435bd84f700SSean Bruno 
14364c7070dbSScott Long static int
14374c7070dbSScott Long iflib_fast_intr(void *arg)
14384c7070dbSScott Long {
14394c7070dbSScott Long 	iflib_filter_info_t info = arg;
14404c7070dbSScott Long 	struct grouptask *gtask = info->ifi_task;
1441ca62461bSStephen Hurd 	int result;
1442ca62461bSStephen Hurd 
144395246abbSSean Bruno 	if (!iflib_started)
1444ca62461bSStephen Hurd 		return (FILTER_STRAY);
144595246abbSSean Bruno 
144695246abbSSean Bruno 	DBG_COUNTER_INC(fast_intrs);
1447ca62461bSStephen Hurd 	if (info->ifi_filter != NULL) {
1448ca62461bSStephen Hurd 		result = info->ifi_filter(info->ifi_filter_arg);
1449ca62461bSStephen Hurd 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1450ca62461bSStephen Hurd 			return (result);
1451ca62461bSStephen Hurd 	}
145295246abbSSean Bruno 
145395246abbSSean Bruno 	GROUPTASK_ENQUEUE(gtask);
145495246abbSSean Bruno 	return (FILTER_HANDLED);
145595246abbSSean Bruno }
145695246abbSSean Bruno 
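/*
 * Fast interrupt handler used when TX completions are serviced from an RX
 * queue's interrupt: for each TX queue serviced here, schedule its task if
 * new completions are pending (otherwise re-enable its interrupt), then
 * schedule the RX task if descriptors are available or re-enable the RX
 * interrupt if not.
 */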
145795246abbSSean Bruno static int
145895246abbSSean Bruno iflib_fast_intr_rxtx(void *arg)
145995246abbSSean Bruno {
146095246abbSSean Bruno 	iflib_filter_info_t info = arg;
146195246abbSSean Bruno 	struct grouptask *gtask = info->ifi_task;
146295dcf343SMarius Strobl 	if_ctx_t ctx;
146395246abbSSean Bruno 	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
146495dcf343SMarius Strobl 	iflib_txq_t txq;
146595dcf343SMarius Strobl 	void *sc;
1466ca62461bSStephen Hurd 	int i, cidx, result;
146795dcf343SMarius Strobl 	qidx_t txqid;
14683d10e9edSMarius Strobl 	bool intr_enable, intr_legacy;
146995246abbSSean Bruno 
147095246abbSSean Bruno 	if (!iflib_started)
1471ca62461bSStephen Hurd 		return (FILTER_STRAY);
147295246abbSSean Bruno 
147395246abbSSean Bruno 	DBG_COUNTER_INC(fast_intrs);
1474ca62461bSStephen Hurd 	if (info->ifi_filter != NULL) {
1475ca62461bSStephen Hurd 		result = info->ifi_filter(info->ifi_filter_arg);
1476ca62461bSStephen Hurd 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1477ca62461bSStephen Hurd 			return (result);
1478ca62461bSStephen Hurd 	}
147995246abbSSean Bruno 
148095dcf343SMarius Strobl 	ctx = rxq->ifr_ctx;
148195dcf343SMarius Strobl 	sc = ctx->ifc_softc;
14823d10e9edSMarius Strobl 	intr_enable = false;
14833d10e9edSMarius Strobl 	intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY);
14841ae4848cSMatt Macy 	MPASS(rxq->ifr_ntxqirq);
148595246abbSSean Bruno 	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
148695dcf343SMarius Strobl 		txqid = rxq->ifr_txqid[i];
148795dcf343SMarius Strobl 		txq = &ctx->ifc_txqs[txqid];
148895dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
14898a04b53dSKonstantin Belousov 		    BUS_DMASYNC_POSTREAD);
149095dcf343SMarius Strobl 		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
14913d10e9edSMarius Strobl 			if (intr_legacy)
14923d10e9edSMarius Strobl 				intr_enable = true;
14933d10e9edSMarius Strobl 			else
149495246abbSSean Bruno 				IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
149595246abbSSean Bruno 			continue;
149695246abbSSean Bruno 		}
149795dcf343SMarius Strobl 		GROUPTASK_ENQUEUE(&txq->ift_task);
149895246abbSSean Bruno 	}
149995246abbSSean Bruno 	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
150095246abbSSean Bruno 		cidx = rxq->ifr_cq_cidx;
150195246abbSSean Bruno 	else
150295246abbSSean Bruno 		cidx = rxq->ifr_fl[0].ifl_cidx;
150395246abbSSean Bruno 	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
150495246abbSSean Bruno 		GROUPTASK_ENQUEUE(gtask);
150564e6fc13SStephen Hurd 	else {
15063d10e9edSMarius Strobl 		if (intr_legacy)
15073d10e9edSMarius Strobl 			intr_enable = true;
15083d10e9edSMarius Strobl 		else
150995246abbSSean Bruno 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
151064e6fc13SStephen Hurd 		DBG_COUNTER_INC(rx_intr_enables);
151164e6fc13SStephen Hurd 	}
15123d10e9edSMarius Strobl 	if (intr_enable)
15133d10e9edSMarius Strobl 		IFDI_INTR_ENABLE(ctx);
151495246abbSSean Bruno 	return (FILTER_HANDLED);
151595246abbSSean Bruno }
151695246abbSSean Bruno 
151795246abbSSean Bruno 
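/*
 * Fast interrupt handler that simply runs the driver's filter, if any,
 * and schedules the associated group task.
 */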
151895246abbSSean Bruno static int
151995246abbSSean Bruno iflib_fast_intr_ctx(void *arg)
152095246abbSSean Bruno {
152195246abbSSean Bruno 	iflib_filter_info_t info = arg;
152295246abbSSean Bruno 	struct grouptask *gtask = info->ifi_task;
1523ca62461bSStephen Hurd 	int result;
15244c7070dbSScott Long 
1525bd84f700SSean Bruno 	if (!iflib_started)
1526ca62461bSStephen Hurd 		return (FILTER_STRAY);
15271248952aSSean Bruno 
15284c7070dbSScott Long 	DBG_COUNTER_INC(fast_intrs);
1529ca62461bSStephen Hurd 	if (info->ifi_filter != NULL) {
1530ca62461bSStephen Hurd 		result = info->ifi_filter(info->ifi_filter_arg);
1531ca62461bSStephen Hurd 		if ((result & FILTER_SCHEDULE_THREAD) == 0)
1532ca62461bSStephen Hurd 			return (result);
1533ca62461bSStephen Hurd 	}
15344c7070dbSScott Long 
15354c7070dbSScott Long 	GROUPTASK_ENQUEUE(gtask);
15364c7070dbSScott Long 	return (FILTER_HANDLED);
15374c7070dbSScott Long }
15384c7070dbSScott Long 
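/*
 * Allocate the bus IRQ resource for @rid and hook up either a filter or a
 * handler (never both), describing the vector with @name when provided.
 */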
15394c7070dbSScott Long static int
15404c7070dbSScott Long _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
15414c7070dbSScott Long 		 driver_filter_t filter, driver_intr_t handler, void *arg,
15423e0e6330SStephen Hurd 		 const char *name)
15434c7070dbSScott Long {
15444c7070dbSScott Long 	struct resource *res;
15452b2fc973SSean Bruno 	void *tag = NULL;
15464c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
1547d49e83eaSMarius Strobl 	int flags, i, rc;
15484c7070dbSScott Long 
15492b2fc973SSean Bruno 	flags = RF_ACTIVE;
15502b2fc973SSean Bruno 	if (ctx->ifc_flags & IFC_LEGACY)
15512b2fc973SSean Bruno 		flags |= RF_SHAREABLE;
15524c7070dbSScott Long 	MPASS(rid < 512);
1553d49e83eaSMarius Strobl 	i = rid;
1554d49e83eaSMarius Strobl 	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, flags);
15554c7070dbSScott Long 	if (res == NULL) {
15564c7070dbSScott Long 		device_printf(dev,
15574c7070dbSScott Long 		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
15584c7070dbSScott Long 		return (ENOMEM);
15594c7070dbSScott Long 	}
15604c7070dbSScott Long 	irq->ii_res = res;
15614c7070dbSScott Long 	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
15624c7070dbSScott Long 	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
15634c7070dbSScott Long 						filter, handler, arg, &tag);
15644c7070dbSScott Long 	if (rc != 0) {
15654c7070dbSScott Long 		device_printf(dev,
15664c7070dbSScott Long 		    "failed to setup interrupt for rid %d, name %s: %d\n",
15674c7070dbSScott Long 					  rid, name ? name : "unknown", rc);
15684c7070dbSScott Long 		return (rc);
15694c7070dbSScott Long 	} else if (name)
1570f454e7ebSJohn Baldwin 		bus_describe_intr(dev, res, tag, "%s", name);
15714c7070dbSScott Long 
15724c7070dbSScott Long 	irq->ii_tag = tag;
15734c7070dbSScott Long 	return (0);
15744c7070dbSScott Long }
15754c7070dbSScott Long 
15764c7070dbSScott Long /*********************************************************************
15774c7070dbSScott Long  *
1578bfce461eSMarius Strobl  *  Allocate DMA resources for TX buffers as well as memory for the TX
1579bfce461eSMarius Strobl  *  mbuf map.  TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in an
1580bfce461eSMarius Strobl  *  iflib_sw_tx_desc_array structure, storing all the information that
1581bfce461eSMarius Strobl  *  is needed to transmit a packet on the wire.  This is called only
1582bfce461eSMarius Strobl  *  once at attach, setup is done every reset.
15834c7070dbSScott Long  *
15844c7070dbSScott Long  **********************************************************************/
15854c7070dbSScott Long static int
15864c7070dbSScott Long iflib_txsd_alloc(iflib_txq_t txq)
15874c7070dbSScott Long {
15884c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
15894c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
15904c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
15914c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
15927f87c040SMarius Strobl 	bus_size_t tsomaxsize;
15934c7070dbSScott Long 	int err, nsegments, ntsosegments;
15948a04b53dSKonstantin Belousov 	bool tso;
15954c7070dbSScott Long 
15964c7070dbSScott Long 	nsegments = scctx->isc_tx_nsegments;
15974c7070dbSScott Long 	ntsosegments = scctx->isc_tx_tso_segments_max;
15987f87c040SMarius Strobl 	tsomaxsize = scctx->isc_tx_tso_size_max;
15997f87c040SMarius Strobl 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
16007f87c040SMarius Strobl 		tsomaxsize += sizeof(struct ether_vlan_header);
160123ac9029SStephen Hurd 	MPASS(scctx->isc_ntxd[0] > 0);
160223ac9029SStephen Hurd 	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
16034c7070dbSScott Long 	MPASS(nsegments > 0);
16047f87c040SMarius Strobl 	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
16054c7070dbSScott Long 		MPASS(ntsosegments > 0);
16067f87c040SMarius Strobl 		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
16077f87c040SMarius Strobl 	}
16087f87c040SMarius Strobl 
16094c7070dbSScott Long 	/*
1610bfce461eSMarius Strobl 	 * Set up DMA tags for TX buffers.
16114c7070dbSScott Long 	 */
16124c7070dbSScott Long 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
16134c7070dbSScott Long 			       1, 0,			/* alignment, bounds */
16144c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* lowaddr */
16154c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* highaddr */
16164c7070dbSScott Long 			       NULL, NULL,		/* filter, filterarg */
16174c7070dbSScott Long 			       sctx->isc_tx_maxsize,		/* maxsize */
16184c7070dbSScott Long 			       nsegments,	/* nsegments */
16194c7070dbSScott Long 			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
16204c7070dbSScott Long 			       0,			/* flags */
16214c7070dbSScott Long 			       NULL,			/* lockfunc */
16224c7070dbSScott Long 			       NULL,			/* lockfuncarg */
1623bfce461eSMarius Strobl 			       &txq->ift_buf_tag))) {
16244c7070dbSScott Long 		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
16259d0a88deSDimitry Andric 		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
16269d0a88deSDimitry Andric 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
16274c7070dbSScott Long 		goto fail;
16284c7070dbSScott Long 	}
16298a04b53dSKonstantin Belousov 	tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
16308a04b53dSKonstantin Belousov 	if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
16314c7070dbSScott Long 			       1, 0,			/* alignment, bounds */
16324c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* lowaddr */
16334c7070dbSScott Long 			       BUS_SPACE_MAXADDR,	/* highaddr */
16344c7070dbSScott Long 			       NULL, NULL,		/* filter, filterarg */
16357f87c040SMarius Strobl 			       tsomaxsize,		/* maxsize */
16364c7070dbSScott Long 			       ntsosegments,	/* nsegments */
16377f87c040SMarius Strobl 			       sctx->isc_tso_maxsegsize,/* maxsegsize */
16384c7070dbSScott Long 			       0,			/* flags */
16394c7070dbSScott Long 			       NULL,			/* lockfunc */
16404c7070dbSScott Long 			       NULL,			/* lockfuncarg */
1641bfce461eSMarius Strobl 			       &txq->ift_tso_buf_tag))) {
1642bfce461eSMarius Strobl 		device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
1643bfce461eSMarius Strobl 		    err);
16444c7070dbSScott Long 		goto fail;
16454c7070dbSScott Long 	}
1646bfce461eSMarius Strobl 
1647bfce461eSMarius Strobl 	/* Allocate memory for the TX mbuf map. */
16484c7070dbSScott Long 	if (!(txq->ift_sds.ifsd_m =
1649ac2fffa4SPedro F. Giffuni 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
1650ac2fffa4SPedro F. Giffuni 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1651bfce461eSMarius Strobl 		device_printf(dev, "Unable to allocate TX mbuf map memory\n");
16524c7070dbSScott Long 		err = ENOMEM;
16534c7070dbSScott Long 		goto fail;
16544c7070dbSScott Long 	}
16554c7070dbSScott Long 
1656bfce461eSMarius Strobl 	/*
1657bfce461eSMarius Strobl 	 * Create the DMA maps for TX buffers.
1658bfce461eSMarius Strobl 	 */
16598a04b53dSKonstantin Belousov 	if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
16608a04b53dSKonstantin Belousov 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
16618a04b53dSKonstantin Belousov 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1662bfce461eSMarius Strobl 		device_printf(dev,
1663bfce461eSMarius Strobl 		    "Unable to allocate TX buffer DMA map memory\n");
16644c7070dbSScott Long 		err = ENOMEM;
16654c7070dbSScott Long 		goto fail;
16664c7070dbSScott Long 	}
16678a04b53dSKonstantin Belousov 	if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
16688a04b53dSKonstantin Belousov 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
16698a04b53dSKonstantin Belousov 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
1670bfce461eSMarius Strobl 		device_printf(dev,
1671bfce461eSMarius Strobl 		    "Unable to allocate TSO TX buffer map memory\n");
16728a04b53dSKonstantin Belousov 		err = ENOMEM;
16738a04b53dSKonstantin Belousov 		goto fail;
16748a04b53dSKonstantin Belousov 	}
167523ac9029SStephen Hurd 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1676bfce461eSMarius Strobl 		err = bus_dmamap_create(txq->ift_buf_tag, 0,
16778a04b53dSKonstantin Belousov 		    &txq->ift_sds.ifsd_map[i]);
16784c7070dbSScott Long 		if (err != 0) {
16794c7070dbSScott Long 			device_printf(dev, "Unable to create TX DMA map\n");
16804c7070dbSScott Long 			goto fail;
16814c7070dbSScott Long 		}
16828a04b53dSKonstantin Belousov 		if (!tso)
16838a04b53dSKonstantin Belousov 			continue;
1684bfce461eSMarius Strobl 		err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
16858a04b53dSKonstantin Belousov 		    &txq->ift_sds.ifsd_tso_map[i]);
16868a04b53dSKonstantin Belousov 		if (err != 0) {
16878a04b53dSKonstantin Belousov 			device_printf(dev, "Unable to create TSO TX DMA map\n");
16888a04b53dSKonstantin Belousov 			goto fail;
16898a04b53dSKonstantin Belousov 		}
16904c7070dbSScott Long 	}
16914c7070dbSScott Long 	return (0);
16924c7070dbSScott Long fail:
16934c7070dbSScott Long 	/* We free it all; this handles the case where allocation failed partway through. */
16944c7070dbSScott Long 	iflib_tx_structures_free(ctx);
16954c7070dbSScott Long 	return (err);
16964c7070dbSScott Long }
16974c7070dbSScott Long 
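/* Tear down the per-slot TX DMA maps (regular and TSO) for descriptor @i. */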
16984c7070dbSScott Long static void
16994c7070dbSScott Long iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
17004c7070dbSScott Long {
17014c7070dbSScott Long 	bus_dmamap_t map;
17024c7070dbSScott Long 
1703db8e8f1eSEric Joyner 	if (txq->ift_sds.ifsd_map != NULL) {
17044c7070dbSScott Long 		map = txq->ift_sds.ifsd_map[i];
1705bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
1706bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_buf_tag, map);
1707bfce461eSMarius Strobl 		bus_dmamap_destroy(txq->ift_buf_tag, map);
17084c7070dbSScott Long 		txq->ift_sds.ifsd_map[i] = NULL;
17094c7070dbSScott Long 	}
17108a04b53dSKonstantin Belousov 
1711db8e8f1eSEric Joyner 	if (txq->ift_sds.ifsd_tso_map != NULL) {
17128a04b53dSKonstantin Belousov 		map = txq->ift_sds.ifsd_tso_map[i];
1713bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_tso_buf_tag, map,
17148a04b53dSKonstantin Belousov 		    BUS_DMASYNC_POSTWRITE);
1715bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_tso_buf_tag, map);
1716bfce461eSMarius Strobl 		bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
17178a04b53dSKonstantin Belousov 		txq->ift_sds.ifsd_tso_map[i] = NULL;
17188a04b53dSKonstantin Belousov 	}
17194c7070dbSScott Long }
17204c7070dbSScott Long 
17214c7070dbSScott Long static void
17224c7070dbSScott Long iflib_txq_destroy(iflib_txq_t txq)
17234c7070dbSScott Long {
17244c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
17254c7070dbSScott Long 
172623ac9029SStephen Hurd 	for (int i = 0; i < txq->ift_size; i++)
17274c7070dbSScott Long 		iflib_txsd_destroy(ctx, txq, i);
1728244e7cffSEric Joyner 
1729244e7cffSEric Joyner 	if (txq->ift_br != NULL) {
1730244e7cffSEric Joyner 		ifmp_ring_free(txq->ift_br);
1731244e7cffSEric Joyner 		txq->ift_br = NULL;
1732244e7cffSEric Joyner 	}
1733244e7cffSEric Joyner 
1734244e7cffSEric Joyner 	mtx_destroy(&txq->ift_mtx);
1735244e7cffSEric Joyner 
17364c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL) {
17374c7070dbSScott Long 		free(txq->ift_sds.ifsd_map, M_IFLIB);
17384c7070dbSScott Long 		txq->ift_sds.ifsd_map = NULL;
17394c7070dbSScott Long 	}
17408a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL) {
17418a04b53dSKonstantin Belousov 		free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
17428a04b53dSKonstantin Belousov 		txq->ift_sds.ifsd_tso_map = NULL;
17438a04b53dSKonstantin Belousov 	}
17444c7070dbSScott Long 	if (txq->ift_sds.ifsd_m != NULL) {
17454c7070dbSScott Long 		free(txq->ift_sds.ifsd_m, M_IFLIB);
17464c7070dbSScott Long 		txq->ift_sds.ifsd_m = NULL;
17474c7070dbSScott Long 	}
1748bfce461eSMarius Strobl 	if (txq->ift_buf_tag != NULL) {
1749bfce461eSMarius Strobl 		bus_dma_tag_destroy(txq->ift_buf_tag);
1750bfce461eSMarius Strobl 		txq->ift_buf_tag = NULL;
17514c7070dbSScott Long 	}
1752bfce461eSMarius Strobl 	if (txq->ift_tso_buf_tag != NULL) {
1753bfce461eSMarius Strobl 		bus_dma_tag_destroy(txq->ift_tso_buf_tag);
1754bfce461eSMarius Strobl 		txq->ift_tso_buf_tag = NULL;
17554c7070dbSScott Long 	}
1756244e7cffSEric Joyner 	if (txq->ift_ifdi != NULL) {
1757244e7cffSEric Joyner 		free(txq->ift_ifdi, M_IFLIB);
1758244e7cffSEric Joyner 	}
17594c7070dbSScott Long }
17604c7070dbSScott Long 
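/*
 * Unload the DMA maps for TX slot @i and free the mbuf attached to it,
 * if any.
 */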
17614c7070dbSScott Long static void
17624c7070dbSScott Long iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
17634c7070dbSScott Long {
17644c7070dbSScott Long 	struct mbuf **mp;
17654c7070dbSScott Long 
17664c7070dbSScott Long 	mp = &txq->ift_sds.ifsd_m[i];
17674c7070dbSScott Long 	if (*mp == NULL)
17684c7070dbSScott Long 		return;
17694c7070dbSScott Long 
17704c7070dbSScott Long 	if (txq->ift_sds.ifsd_map != NULL) {
1771bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_buf_tag,
17728a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
1773bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
17748a04b53dSKonstantin Belousov 	}
17758a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL) {
1776bfce461eSMarius Strobl 		bus_dmamap_sync(txq->ift_tso_buf_tag,
17778a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
1778bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_tso_buf_tag,
17798a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_tso_map[i]);
17804c7070dbSScott Long 	}
178123ac9029SStephen Hurd 	m_free(*mp);
17824c7070dbSScott Long 	DBG_COUNTER_INC(tx_frees);
17834c7070dbSScott Long 	*mp = NULL;
17844c7070dbSScott Long }
17854c7070dbSScott Long 
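/*
 * Reset a TX queue's software indices, zero its descriptor rings, and let
 * the driver reprogram the hardware via IFDI_TXQ_SETUP; unlike
 * iflib_txsd_alloc(), this runs on every reset, not just once at attach.
 */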
17864c7070dbSScott Long static int
17874c7070dbSScott Long iflib_txq_setup(iflib_txq_t txq)
17884c7070dbSScott Long {
17894c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
179023ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
17914d261ce2SStephen Hurd 	if_shared_ctx_t sctx = ctx->ifc_sctx;
17924c7070dbSScott Long 	iflib_dma_info_t di;
17934c7070dbSScott Long 	int i;
17944c7070dbSScott Long 
17954c7070dbSScott Long 	/* Set number of descriptors available */
17964c7070dbSScott Long 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
179795246abbSSean Bruno 	/* XXX make configurable */
179895246abbSSean Bruno 	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
17994c7070dbSScott Long 
18004c7070dbSScott Long 	/* Reset indices */
180195246abbSSean Bruno 	txq->ift_cidx_processed = 0;
180295246abbSSean Bruno 	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
180323ac9029SStephen Hurd 	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
18044c7070dbSScott Long 
18054d261ce2SStephen Hurd 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
18064c7070dbSScott Long 		bzero((void *)di->idi_vaddr, di->idi_size);
18074c7070dbSScott Long 
18084c7070dbSScott Long 	IFDI_TXQ_SETUP(ctx, txq->ift_id);
18094d261ce2SStephen Hurd 	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
18104c7070dbSScott Long 		bus_dmamap_sync(di->idi_tag, di->idi_map,
18114c7070dbSScott Long 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
18124c7070dbSScott Long 	return (0);
18134c7070dbSScott Long }
18144c7070dbSScott Long 
18154c7070dbSScott Long /*********************************************************************
18164c7070dbSScott Long  *
1817bfce461eSMarius Strobl  *  Allocate DMA resources for RX buffers as well as memory for the RX
1818bfce461eSMarius Strobl  *  mbuf map, direct RX cluster pointer map and RX cluster bus address
1819bfce461eSMarius Strobl  *  map.  RX DMA map, RX mbuf map, direct RX cluster pointer map and
1820bfce461eSMarius Strobl  *  RX cluster map are kept in an iflib_sw_rx_desc_array structure.
1821bfce461eSMarius Strobl  *  Since we use one entry in iflib_sw_rx_desc_array per received
1822bfce461eSMarius Strobl  *  packet, the maximum number of entries we'll need is equal to the
1823bfce461eSMarius Strobl  *  number of hardware receive descriptors that we've allocated.
18244c7070dbSScott Long  *
18254c7070dbSScott Long  **********************************************************************/
18264c7070dbSScott Long static int
18274c7070dbSScott Long iflib_rxsd_alloc(iflib_rxq_t rxq)
18284c7070dbSScott Long {
18294c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
18304c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
183123ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
18324c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
18334c7070dbSScott Long 	iflib_fl_t fl;
18344c7070dbSScott Long 	int			err;
18354c7070dbSScott Long 
183623ac9029SStephen Hurd 	MPASS(scctx->isc_nrxd[0] > 0);
183723ac9029SStephen Hurd 	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
18384c7070dbSScott Long 
18394c7070dbSScott Long 	fl = rxq->ifr_fl;
18404c7070dbSScott Long 	for (int i = 0; i <  rxq->ifr_nfl; i++, fl++) {
184123ac9029SStephen Hurd 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
1842bfce461eSMarius Strobl 		/* Set up DMA tag for RX buffers. */
18434c7070dbSScott Long 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
18444c7070dbSScott Long 					 1, 0,			/* alignment, bounds */
18454c7070dbSScott Long 					 BUS_SPACE_MAXADDR,	/* lowaddr */
18464c7070dbSScott Long 					 BUS_SPACE_MAXADDR,	/* highaddr */
18474c7070dbSScott Long 					 NULL, NULL,		/* filter, filterarg */
18484c7070dbSScott Long 					 sctx->isc_rx_maxsize,	/* maxsize */
18494c7070dbSScott Long 					 sctx->isc_rx_nsegments,	/* nsegments */
18504c7070dbSScott Long 					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
18514c7070dbSScott Long 					 0,			/* flags */
18524c7070dbSScott Long 					 NULL,			/* lockfunc */
18534c7070dbSScott Long 					 NULL,			/* lockarg */
1854bfce461eSMarius Strobl 					 &fl->ifl_buf_tag);
18554c7070dbSScott Long 		if (err) {
1856bfce461eSMarius Strobl 			device_printf(dev,
1857bfce461eSMarius Strobl 			    "Unable to allocate RX DMA tag: %d\n", err);
18584c7070dbSScott Long 			goto fail;
18594c7070dbSScott Long 		}
1860bfce461eSMarius Strobl 
1861bfce461eSMarius Strobl 		/* Allocate memory for the RX mbuf map. */
1862e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_m =
1863ac2fffa4SPedro F. Giffuni 		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
1864ac2fffa4SPedro F. Giffuni 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1865bfce461eSMarius Strobl 			device_printf(dev,
1866bfce461eSMarius Strobl 			    "Unable to allocate RX mbuf map memory\n");
1867e035717eSSean Bruno 			err = ENOMEM;
1868e035717eSSean Bruno 			goto fail;
1869e035717eSSean Bruno 		}
1870bfce461eSMarius Strobl 
1871bfce461eSMarius Strobl 		/* Allocate memory for the direct RX cluster pointer map. */
1872e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_cl =
1873ac2fffa4SPedro F. Giffuni 		      (caddr_t *) malloc(sizeof(caddr_t) *
1874ac2fffa4SPedro F. Giffuni 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1875bfce461eSMarius Strobl 			device_printf(dev,
1876bfce461eSMarius Strobl 			    "Unable to allocate RX cluster map memory\n");
1877e035717eSSean Bruno 			err = ENOMEM;
1878e035717eSSean Bruno 			goto fail;
1879e035717eSSean Bruno 		}
18804c7070dbSScott Long 
1881bfce461eSMarius Strobl 		/* Allocate memory for the RX cluster bus address map. */
1882fbec776dSAndrew Gallatin 		if (!(fl->ifl_sds.ifsd_ba =
1883fbec776dSAndrew Gallatin 		      (bus_addr_t *) malloc(sizeof(bus_addr_t) *
1884fbec776dSAndrew Gallatin 					      scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1885bfce461eSMarius Strobl 			device_printf(dev,
1886bfce461eSMarius Strobl 			    "Unable to allocate RX bus address map memory\n");
1887fbec776dSAndrew Gallatin 			err = ENOMEM;
1888fbec776dSAndrew Gallatin 			goto fail;
1889fbec776dSAndrew Gallatin 		}
1890e035717eSSean Bruno 
1891bfce461eSMarius Strobl 		/*
1892bfce461eSMarius Strobl 		 * Create the DMA maps for RX buffers.
1893bfce461eSMarius Strobl 		 */
1894e035717eSSean Bruno 		if (!(fl->ifl_sds.ifsd_map =
1895ac2fffa4SPedro F. Giffuni 		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1896bfce461eSMarius Strobl 			device_printf(dev,
1897bfce461eSMarius Strobl 			    "Unable to allocate RX buffer DMA map memory\n");
1898e035717eSSean Bruno 			err = ENOMEM;
1899e035717eSSean Bruno 			goto fail;
1900e035717eSSean Bruno 		}
1901e035717eSSean Bruno 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
1902bfce461eSMarius Strobl 			err = bus_dmamap_create(fl->ifl_buf_tag, 0,
1903bfce461eSMarius Strobl 			    &fl->ifl_sds.ifsd_map[i]);
1904e035717eSSean Bruno 			if (err != 0) {
190595246abbSSean Bruno 				device_printf(dev, "Unable to create RX buffer DMA map\n");
19064c7070dbSScott Long 				goto fail;
19074c7070dbSScott Long 			}
19084c7070dbSScott Long 		}
1909835809f9SSean Bruno 	}
19104c7070dbSScott Long 	return (0);
19114c7070dbSScott Long 
19124c7070dbSScott Long fail:
19134c7070dbSScott Long 	iflib_rx_structures_free(ctx);
19144c7070dbSScott Long 	return (err);
19154c7070dbSScott Long }
19164c7070dbSScott Long 
19174c7070dbSScott Long 
19184c7070dbSScott Long /*
19194c7070dbSScott Long  * Internal service routines
19204c7070dbSScott Long  */
19214c7070dbSScott Long 
19224c7070dbSScott Long struct rxq_refill_cb_arg {
19234c7070dbSScott Long 	int               error;
19244c7070dbSScott Long 	bus_dma_segment_t seg;
19254c7070dbSScott Long 	int               nseg;
19264c7070dbSScott Long };
19274c7070dbSScott Long 
19284c7070dbSScott Long static void
19294c7070dbSScott Long _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
19304c7070dbSScott Long {
19314c7070dbSScott Long 	struct rxq_refill_cb_arg *cb_arg = arg;
19324c7070dbSScott Long 
19334c7070dbSScott Long 	cb_arg->error = error;
19344c7070dbSScott Long 	cb_arg->seg = segs[0];
19354c7070dbSScott Long 	cb_arg->nseg = nseg;
19364c7070dbSScott Long }
19374c7070dbSScott Long 
19384c7070dbSScott Long /**
19391722eeacSMarius Strobl  * _iflib_fl_refill - refill an rxq free-buffer list
19404c7070dbSScott Long  * @ctx: the iflib context
19411722eeacSMarius Strobl  * @fl: the free list to refill
19421722eeacSMarius Strobl  * @count: the number of new buffers to allocate
19434c7070dbSScott Long  *
19441722eeacSMarius Strobl  * (Re)populate an rxq free-buffer list with up to @count new packet buffers.
19451722eeacSMarius Strobl  * The caller must ensure that @count does not exceed the queue's capacity.
19464c7070dbSScott Long  */
1947fb1a29b4SHans Petter Selasky static uint8_t
19484c7070dbSScott Long _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
19494c7070dbSScott Long {
195095246abbSSean Bruno 	struct if_rxd_update iru;
1951fbec776dSAndrew Gallatin 	struct rxq_refill_cb_arg cb_arg;
19523db348b5SMarius Strobl 	struct mbuf *m;
19533db348b5SMarius Strobl 	caddr_t cl, *sd_cl;
19543db348b5SMarius Strobl 	struct mbuf **sd_m;
1955e035717eSSean Bruno 	bus_dmamap_t *sd_map;
1956fbec776dSAndrew Gallatin 	bus_addr_t bus_addr, *sd_ba;
19573db348b5SMarius Strobl 	int err, frag_idx, i, idx, n, pidx;
1958a1b799caSStephen Hurd 	qidx_t credits;
19594c7070dbSScott Long 
1960e035717eSSean Bruno 	sd_m = fl->ifl_sds.ifsd_m;
1961e035717eSSean Bruno 	sd_map = fl->ifl_sds.ifsd_map;
1962e035717eSSean Bruno 	sd_cl = fl->ifl_sds.ifsd_cl;
1963fbec776dSAndrew Gallatin 	sd_ba = fl->ifl_sds.ifsd_ba;
19643db348b5SMarius Strobl 	pidx = fl->ifl_pidx;
1965e035717eSSean Bruno 	idx = pidx;
19663db348b5SMarius Strobl 	frag_idx = fl->ifl_fragidx;
1967a1b799caSStephen Hurd 	credits = fl->ifl_credits;
1968e035717eSSean Bruno 
19693db348b5SMarius Strobl 	i = 0;
19704c7070dbSScott Long 	n = count;
19714c7070dbSScott Long 	MPASS(n > 0);
1972a1b799caSStephen Hurd 	MPASS(credits + n <= fl->ifl_size);
19734c7070dbSScott Long 
19744c7070dbSScott Long 	if (pidx < fl->ifl_cidx)
19754c7070dbSScott Long 		MPASS(pidx + n <= fl->ifl_cidx);
1976a1b799caSStephen Hurd 	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
19774c7070dbSScott Long 		MPASS(fl->ifl_gen == 0);
19784c7070dbSScott Long 	if (pidx > fl->ifl_cidx)
19794c7070dbSScott Long 		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
19804c7070dbSScott Long 
19814c7070dbSScott Long 	DBG_COUNTER_INC(fl_refills);
19824c7070dbSScott Long 	if (n > 8)
19834c7070dbSScott Long 		DBG_COUNTER_INC(fl_refills_large);
19842d873474SStephen Hurd 	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
19854c7070dbSScott Long 	while (n--) {
19864c7070dbSScott Long 		/*
19874c7070dbSScott Long 		 * We allocate an uninitialized mbuf + cluster; the mbuf is
19884c7070dbSScott Long 		 * initialized after rx.
19894c7070dbSScott Long 		 *
19904c7070dbSScott Long 		 * If the cluster is still set, we know a minimum-sized packet was received.
19914c7070dbSScott Long 		 */
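		/*
		 * Find the next unused RX slot at or after frag_idx,
		 * wrapping to the start of the bitmap if none remain
		 * past it.
		 */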
19923db348b5SMarius Strobl 		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
19933db348b5SMarius Strobl 		    &frag_idx);
19943db348b5SMarius Strobl 		if (frag_idx < 0)
199587890dbaSSean Bruno 			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
19963db348b5SMarius Strobl 		MPASS(frag_idx >= 0);
199787890dbaSSean Bruno 		if ((cl = sd_cl[frag_idx]) == NULL) {
1998fbec776dSAndrew Gallatin 			if ((cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
19994c7070dbSScott Long 				break;
20004c7070dbSScott Long 
20014c7070dbSScott Long 			cb_arg.error = 0;
200295246abbSSean Bruno 			MPASS(sd_map != NULL);
2003bfce461eSMarius Strobl 			err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
20048a04b53dSKonstantin Belousov 			    cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
20058a04b53dSKonstantin Belousov 			    BUS_DMA_NOWAIT);
20064c7070dbSScott Long 			if (err != 0 || cb_arg.error) {
20074c7070dbSScott Long 				/*
20084c7070dbSScott Long 				 * !zone_pack ?
20094c7070dbSScott Long 				 */
20104c7070dbSScott Long 				if (fl->ifl_zone == zone_pack)
20114c7070dbSScott Long 					uma_zfree(fl->ifl_zone, cl);
2012fbec776dSAndrew Gallatin 				break;
20134c7070dbSScott Long 			}
20144c7070dbSScott Long 
2015fbec776dSAndrew Gallatin 			sd_ba[frag_idx] =  bus_addr = cb_arg.seg.ds_addr;
201687890dbaSSean Bruno 			sd_cl[frag_idx] = cl;
2017fbec776dSAndrew Gallatin #if MEMORY_LOGGING
2018fbec776dSAndrew Gallatin 			fl->ifl_cl_enqueued++;
2019fbec776dSAndrew Gallatin #endif
2020fbec776dSAndrew Gallatin 		} else {
2021fbec776dSAndrew Gallatin 			bus_addr = sd_ba[frag_idx];
2022fbec776dSAndrew Gallatin 		}
202395dcf343SMarius Strobl 		bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
202495dcf343SMarius Strobl 		    BUS_DMASYNC_PREREAD);
2025fbec776dSAndrew Gallatin 
20266d49b41eSAndrew Gallatin 		if (sd_m[frag_idx] == NULL) {
2027fbec776dSAndrew Gallatin 			if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
2028fbec776dSAndrew Gallatin 				break;
2029fbec776dSAndrew Gallatin 			}
203087890dbaSSean Bruno 			sd_m[frag_idx] = m;
20316d49b41eSAndrew Gallatin 		}
20323db348b5SMarius Strobl 		bit_set(fl->ifl_rx_bitmap, frag_idx);
2033fbec776dSAndrew Gallatin #if MEMORY_LOGGING
2034fbec776dSAndrew Gallatin 		fl->ifl_m_enqueued++;
2035fbec776dSAndrew Gallatin #endif
2036fbec776dSAndrew Gallatin 
2037fbec776dSAndrew Gallatin 		DBG_COUNTER_INC(rx_allocs);
203887890dbaSSean Bruno 		fl->ifl_rxd_idxs[i] = frag_idx;
20394c7070dbSScott Long 		fl->ifl_bus_addrs[i] = bus_addr;
20404c7070dbSScott Long 		fl->ifl_vm_addrs[i] = cl;
2041a1b799caSStephen Hurd 		credits++;
20424c7070dbSScott Long 		i++;
2043a1b799caSStephen Hurd 		MPASS(credits <= fl->ifl_size);
2044e035717eSSean Bruno 		if (++idx == fl->ifl_size) {
20454c7070dbSScott Long 			fl->ifl_gen = 1;
2046e035717eSSean Bruno 			idx = 0;
20474c7070dbSScott Long 		}
20484c7070dbSScott Long 		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
204995246abbSSean Bruno 			iru.iru_pidx = pidx;
205095246abbSSean Bruno 			iru.iru_count = i;
205195246abbSSean Bruno 			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
20524c7070dbSScott Long 			i = 0;
2053e035717eSSean Bruno 			pidx = idx;
2054fa5416a8SSean Bruno 			fl->ifl_pidx = idx;
2055a1b799caSStephen Hurd 			fl->ifl_credits = credits;
205687890dbaSSean Bruno 		}
20574c7070dbSScott Long 	}
2058fbec776dSAndrew Gallatin 
2059a1b799caSStephen Hurd 	if (i) {
2060a1b799caSStephen Hurd 		iru.iru_pidx = pidx;
2061a1b799caSStephen Hurd 		iru.iru_count = i;
2062a1b799caSStephen Hurd 		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
2063a1b799caSStephen Hurd 		fl->ifl_pidx = idx;
2064a1b799caSStephen Hurd 		fl->ifl_credits = credits;
2065a1b799caSStephen Hurd 	}
20664c7070dbSScott Long 	DBG_COUNTER_INC(rxd_flush);
20674c7070dbSScott Long 	if (fl->ifl_pidx == 0)
20684c7070dbSScott Long 		pidx = fl->ifl_size - 1;
20694c7070dbSScott Long 	else
20704c7070dbSScott Long 		pidx = fl->ifl_pidx - 1;
207195246abbSSean Bruno 
207295246abbSSean Bruno 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
207395246abbSSean Bruno 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
20744c7070dbSScott Long 	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
2075*9e9b738aSPatrick Kelsey 	fl->ifl_fragidx = frag_idx + 1;
2076*9e9b738aSPatrick Kelsey 	if (fl->ifl_fragidx == fl->ifl_size)
2077*9e9b738aSPatrick Kelsey 		fl->ifl_fragidx = 0;
2078fb1a29b4SHans Petter Selasky 
2079fb1a29b4SHans Petter Selasky 	return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY);
20804c7070dbSScott Long }
20814c7070dbSScott Long 
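/*
 * Refill at most @max free-list slots, bounded by the number currently
 * reclaimable; returns IFLIB_RXEOF_EMPTY if buffer allocation ran short.
 */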
2082fb1a29b4SHans Petter Selasky static __inline uint8_t
20834c7070dbSScott Long __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
20844c7070dbSScott Long {
20854c7070dbSScott Long 	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
20864c7070dbSScott Long 	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
20874c7070dbSScott Long #ifdef INVARIANTS
20884c7070dbSScott Long 	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
20894c7070dbSScott Long #endif
20904c7070dbSScott Long 
20914c7070dbSScott Long 	MPASS(fl->ifl_credits <= fl->ifl_size);
20924c7070dbSScott Long 	MPASS(reclaimable == delta);
20934c7070dbSScott Long 
20944c7070dbSScott Long 	if (reclaimable > 0)
2095fb1a29b4SHans Petter Selasky 		return (_iflib_fl_refill(ctx, fl, min(max, reclaimable)));
2096fb1a29b4SHans Petter Selasky 	return (0);
20974c7070dbSScott Long }
20984c7070dbSScott Long 
209977c1fcecSEric Joyner uint8_t
210077c1fcecSEric Joyner iflib_in_detach(if_ctx_t ctx)
210177c1fcecSEric Joyner {
210277c1fcecSEric Joyner 	bool in_detach;
21031722eeacSMarius Strobl 
210477c1fcecSEric Joyner 	STATE_LOCK(ctx);
210577c1fcecSEric Joyner 	in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
210677c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
210777c1fcecSEric Joyner 	return (in_detach);
210877c1fcecSEric Joyner }
210977c1fcecSEric Joyner 
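/*
 * Unload and free every cluster and mbuf currently held by free list @fl,
 * then reset its indices and zero its descriptor ring.
 */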
21104c7070dbSScott Long static void
21114c7070dbSScott Long iflib_fl_bufs_free(iflib_fl_t fl)
21124c7070dbSScott Long {
21134c7070dbSScott Long 	iflib_dma_info_t idi = fl->ifl_ifdi;
21148a04b53dSKonstantin Belousov 	bus_dmamap_t sd_map;
21154c7070dbSScott Long 	uint32_t i;
21164c7070dbSScott Long 
21174c7070dbSScott Long 	for (i = 0; i < fl->ifl_size; i++) {
2118e035717eSSean Bruno 		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
2119e035717eSSean Bruno 		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
21204c7070dbSScott Long 
2121fbec776dSAndrew Gallatin 		if (*sd_cl != NULL) {
21228a04b53dSKonstantin Belousov 			sd_map = fl->ifl_sds.ifsd_map[i];
2123bfce461eSMarius Strobl 			bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
21248a04b53dSKonstantin Belousov 			    BUS_DMASYNC_POSTREAD);
2125bfce461eSMarius Strobl 			bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
2126fbec776dSAndrew Gallatin 			if (*sd_cl != NULL)
2127fbec776dSAndrew Gallatin 				uma_zfree(fl->ifl_zone, *sd_cl);
2128e035717eSSean Bruno 			if (*sd_m != NULL) {
2129e035717eSSean Bruno 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
2130e035717eSSean Bruno 				uma_zfree(zone_mbuf, *sd_m);
2131e035717eSSean Bruno 			}
21324c7070dbSScott Long 		} else {
2133e035717eSSean Bruno 			MPASS(*sd_cl == NULL);
2134e035717eSSean Bruno 			MPASS(*sd_m == NULL);
21354c7070dbSScott Long 		}
21364c7070dbSScott Long #if MEMORY_LOGGING
21374c7070dbSScott Long 		fl->ifl_m_dequeued++;
21384c7070dbSScott Long 		fl->ifl_cl_dequeued++;
21394c7070dbSScott Long #endif
2140e035717eSSean Bruno 		*sd_cl = NULL;
2141e035717eSSean Bruno 		*sd_m = NULL;
21424c7070dbSScott Long 	}
214395246abbSSean Bruno #ifdef INVARIANTS
214495246abbSSean Bruno 	for (i = 0; i < fl->ifl_size; i++) {
214595246abbSSean Bruno 		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
214695246abbSSean Bruno 		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
214795246abbSSean Bruno 	}
214895246abbSSean Bruno #endif
21494c7070dbSScott Long 	/*
21504c7070dbSScott Long 	 * Reset free list values
21514c7070dbSScott Long 	 */
215287890dbaSSean Bruno 	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
21534c7070dbSScott Long 	bzero(idi->idi_vaddr, idi->idi_size);
21544c7070dbSScott Long }
21554c7070dbSScott Long 
21564c7070dbSScott Long /*********************************************************************
21574c7070dbSScott Long  *
21581722eeacSMarius Strobl  *  Initialize a free list and its buffers.
21594c7070dbSScott Long  *
21604c7070dbSScott Long  **********************************************************************/
21614c7070dbSScott Long static int
21624c7070dbSScott Long iflib_fl_setup(iflib_fl_t fl)
21634c7070dbSScott Long {
21644c7070dbSScott Long 	iflib_rxq_t rxq = fl->ifl_rxq;
21654c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
21664c7070dbSScott Long 
21677274b2f6SStephen Hurd 	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
21684c7070dbSScott Long 	/*
21694c7070dbSScott Long 	** Free current RX buffer structs and their mbufs
21704c7070dbSScott Long 	*/
21714c7070dbSScott Long 	iflib_fl_bufs_free(fl);
21724c7070dbSScott Long 	/* Now replenish the mbufs */
21734c7070dbSScott Long 	MPASS(fl->ifl_credits == 0);
21741b9d9394SEric Joyner 	fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz;
21754c7070dbSScott Long 	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
21764c7070dbSScott Long 		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
21774c7070dbSScott Long 	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
21784c7070dbSScott Long 	fl->ifl_zone = m_getzone(fl->ifl_buf_size);
21794c7070dbSScott Long 
21814c7070dbSScott Long 	/* Avoid pre-allocating zillions of clusters to an idle card,
21824c7070dbSScott Long 	 * potentially speeding up attach.
21834c7070dbSScott Long 	 */
2184fb1a29b4SHans Petter Selasky 	(void) _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
21854c7070dbSScott Long 	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
21864c7070dbSScott Long 	if (min(128, fl->ifl_size) != fl->ifl_credits)
21874c7070dbSScott Long 		return (ENOBUFS);
21884c7070dbSScott Long 	/*
21894c7070dbSScott Long 	 * handle failure
21904c7070dbSScott Long 	 */
21914c7070dbSScott Long 	MPASS(rxq != NULL);
21924c7070dbSScott Long 	MPASS(fl->ifl_ifdi != NULL);
21934c7070dbSScott Long 	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
21944c7070dbSScott Long 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
21954c7070dbSScott Long 	return (0);
21964c7070dbSScott Long }
21974c7070dbSScott Long 
21984c7070dbSScott Long /*********************************************************************
21994c7070dbSScott Long  *
22004c7070dbSScott Long  *  Free receive ring data structures
22014c7070dbSScott Long  *
22024c7070dbSScott Long  **********************************************************************/
22034c7070dbSScott Long static void
22044c7070dbSScott Long iflib_rx_sds_free(iflib_rxq_t rxq)
22054c7070dbSScott Long {
22064c7070dbSScott Long 	iflib_fl_t fl;
22078a04b53dSKonstantin Belousov 	int i, j;
22084c7070dbSScott Long 
22094c7070dbSScott Long 	if (rxq->ifr_fl != NULL) {
22104c7070dbSScott Long 		for (i = 0; i < rxq->ifr_nfl; i++) {
22114c7070dbSScott Long 			fl = &rxq->ifr_fl[i];
2212bfce461eSMarius Strobl 			if (fl->ifl_buf_tag != NULL) {
22138a04b53dSKonstantin Belousov 				if (fl->ifl_sds.ifsd_map != NULL) {
221477102fd6SAndrew Gallatin 					for (j = 0; j < fl->ifl_size; j++) {
22158a04b53dSKonstantin Belousov 						bus_dmamap_sync(
2216bfce461eSMarius Strobl 						    fl->ifl_buf_tag,
221777102fd6SAndrew Gallatin 						    fl->ifl_sds.ifsd_map[j],
22188a04b53dSKonstantin Belousov 						    BUS_DMASYNC_POSTREAD);
22198a04b53dSKonstantin Belousov 						bus_dmamap_unload(
2220bfce461eSMarius Strobl 						    fl->ifl_buf_tag,
222177102fd6SAndrew Gallatin 						    fl->ifl_sds.ifsd_map[j]);
2222db8e8f1eSEric Joyner 						bus_dmamap_destroy(
2223db8e8f1eSEric Joyner 						    fl->ifl_buf_tag,
2224db8e8f1eSEric Joyner 						    fl->ifl_sds.ifsd_map[j]);
22258a04b53dSKonstantin Belousov 					}
22268a04b53dSKonstantin Belousov 				}
2227bfce461eSMarius Strobl 				bus_dma_tag_destroy(fl->ifl_buf_tag);
2228bfce461eSMarius Strobl 				fl->ifl_buf_tag = NULL;
22294c7070dbSScott Long 			}
2230e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
2231e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
2232fbec776dSAndrew Gallatin 			free(fl->ifl_sds.ifsd_ba, M_IFLIB);
2233e035717eSSean Bruno 			free(fl->ifl_sds.ifsd_map, M_IFLIB);
2234e035717eSSean Bruno 			fl->ifl_sds.ifsd_m = NULL;
2235e035717eSSean Bruno 			fl->ifl_sds.ifsd_cl = NULL;
2236fbec776dSAndrew Gallatin 			fl->ifl_sds.ifsd_ba = NULL;
2237e035717eSSean Bruno 			fl->ifl_sds.ifsd_map = NULL;
22384c7070dbSScott Long 		}
22394c7070dbSScott Long 		free(rxq->ifr_fl, M_IFLIB);
22404c7070dbSScott Long 		rxq->ifr_fl = NULL;
2241244e7cffSEric Joyner 		free(rxq->ifr_ifdi, M_IFLIB);
2242244e7cffSEric Joyner 		rxq->ifr_ifdi = NULL;
22431722eeacSMarius Strobl 		rxq->ifr_cq_cidx = 0;
22444c7070dbSScott Long 	}
22454c7070dbSScott Long }
22464c7070dbSScott Long 
22474c7070dbSScott Long /*
22481722eeacSMarius Strobl  * Timer routine
22494c7070dbSScott Long  */
22504c7070dbSScott Long static void
22514c7070dbSScott Long iflib_timer(void *arg)
22524c7070dbSScott Long {
2253ab2e3f79SStephen Hurd 	iflib_txq_t txq = arg;
22544c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
2255ab2e3f79SStephen Hurd 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
2256dd7fbcf1SStephen Hurd 	uint64_t this_tick = ticks;
2257dd7fbcf1SStephen Hurd 	uint32_t reset_on = hz / 2;
22584c7070dbSScott Long 
22594c7070dbSScott Long 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
22604c7070dbSScott Long 		return;
22611722eeacSMarius Strobl 
22624c7070dbSScott Long 	/*
22634c7070dbSScott Long 	** Check on the state of the TX queue(s); this
22644c7070dbSScott Long 	** can be done without the lock because it's RO
22654c7070dbSScott Long 	** and the HUNG state will be static if set.
22664c7070dbSScott Long 	*/
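	/* Run the hang/progress checks at most once every hz/2 ticks per queue. */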
2267dd7fbcf1SStephen Hurd 	if (this_tick - txq->ift_last_timer_tick >= hz / 2) {
2268dd7fbcf1SStephen Hurd 		txq->ift_last_timer_tick = this_tick;
2269ab2e3f79SStephen Hurd 		IFDI_TIMER(ctx, txq->ift_id);
2270ab2e3f79SStephen Hurd 		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2271ab2e3f79SStephen Hurd 		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2272ab2e3f79SStephen Hurd 		     (sctx->isc_pause_frames == 0)))
2273ab2e3f79SStephen Hurd 			goto hung;
2274a9693502SSean Bruno 
2275f6afed72SEric Joyner 		if (txq->ift_qstatus != IFLIB_QUEUE_IDLE &&
2276f6afed72SEric Joyner 		    ifmp_ring_is_stalled(txq->ift_br)) {
2277f6afed72SEric Joyner 			KASSERT(ctx->ifc_link_state == LINK_STATE_UP, ("queue can't be marked as hung if interface is down"));
2278ab2e3f79SStephen Hurd 			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2279f6afed72SEric Joyner 		}
2280ab2e3f79SStephen Hurd 		txq->ift_cleaned_prev = txq->ift_cleaned;
2281dd7fbcf1SStephen Hurd 	}
2282dd7fbcf1SStephen Hurd #ifdef DEV_NETMAP
2283dd7fbcf1SStephen Hurd 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
228495dcf343SMarius Strobl 		iflib_netmap_timer_adjust(ctx, txq, &reset_on);
2285dd7fbcf1SStephen Hurd #endif
2286ab2e3f79SStephen Hurd 	/* handle any laggards */
2287ab2e3f79SStephen Hurd 	if (txq->ift_db_pending)
2288ab2e3f79SStephen Hurd 		GROUPTASK_ENQUEUE(&txq->ift_task);
2289a9693502SSean Bruno 
2290ab2e3f79SStephen Hurd 	sctx->isc_pause_frames = 0;
2291d300df01SStephen Hurd 	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
2292dd7fbcf1SStephen Hurd 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
2293ab2e3f79SStephen Hurd 	return;
22941722eeacSMarius Strobl 
2295ab2e3f79SStephen Hurd  hung:
22961722eeacSMarius Strobl 	device_printf(ctx->ifc_dev,
22971722eeacSMarius Strobl 	    "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n",
2298ab2e3f79SStephen Hurd 	    txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
22997b610b60SSean Bruno 	STATE_LOCK(ctx);
23007b610b60SSean Bruno 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
23017b610b60SSean Bruno 	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
2302940f62d6SEric Joyner 	iflib_admin_intr_deferred(ctx);
230346fa0c25SEric Joyner 	STATE_UNLOCK(ctx);
23044c7070dbSScott Long }
23054c7070dbSScott Long 
23064c7070dbSScott Long static void
23071b9d9394SEric Joyner iflib_calc_rx_mbuf_sz(if_ctx_t ctx)
23081b9d9394SEric Joyner {
23091b9d9394SEric Joyner 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
23101b9d9394SEric Joyner 
23111b9d9394SEric Joyner 	/*
23121b9d9394SEric Joyner 	 * XXX don't set the max_frame_size to a value larger
23131b9d9394SEric Joyner 	 * than the hardware can handle.
23141b9d9394SEric Joyner 	 */
23151b9d9394SEric Joyner 	if (sctx->isc_max_frame_size <= MCLBYTES)
23161b9d9394SEric Joyner 		ctx->ifc_rx_mbuf_sz = MCLBYTES;
23171b9d9394SEric Joyner 	else
23181b9d9394SEric Joyner 		ctx->ifc_rx_mbuf_sz = MJUMPAGESIZE;
23191b9d9394SEric Joyner }
23201b9d9394SEric Joyner 
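/*
 * Accessor for the Rx buffer size chosen by iflib_calc_rx_mbuf_sz(); drivers
 * can query this (typically while programming their receive rings) to size
 * their hardware receive buffers.
 */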
23211b9d9394SEric Joyner uint32_t
23221b9d9394SEric Joyner iflib_get_rx_mbuf_sz(if_ctx_t ctx)
23231b9d9394SEric Joyner {
23241722eeacSMarius Strobl 
23251b9d9394SEric Joyner 	return (ctx->ifc_rx_mbuf_sz);
23261b9d9394SEric Joyner }
23271b9d9394SEric Joyner 
23281b9d9394SEric Joyner static void
23294c7070dbSScott Long iflib_init_locked(if_ctx_t ctx)
23304c7070dbSScott Long {
23314c7070dbSScott Long 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
23321248952aSSean Bruno 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
23334c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
23344c7070dbSScott Long 	iflib_fl_t fl;
23354c7070dbSScott Long 	iflib_txq_t txq;
23364c7070dbSScott Long 	iflib_rxq_t rxq;
2337ab2e3f79SStephen Hurd 	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
23384c7070dbSScott Long 
23394c7070dbSScott Long 	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
23404c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
23414c7070dbSScott Long 
23421248952aSSean Bruno 	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
23431248952aSSean Bruno 	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
23444c7070dbSScott Long 	/* Set hardware offload abilities */
23454c7070dbSScott Long 	if_clearhwassist(ifp);
23464c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
23471248952aSSean Bruno 		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
23484c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
23491248952aSSean Bruno 		if_sethwassistbits(ifp,  tx_ip6_csum_flags, 0);
23504c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TSO4)
23514c7070dbSScott Long 		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
23524c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_TSO6)
23534c7070dbSScott Long 		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
23544c7070dbSScott Long 
23554c7070dbSScott Long 	for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
23564c7070dbSScott Long 		CALLOUT_LOCK(txq);
23574c7070dbSScott Long 		callout_stop(&txq->ift_timer);
23584c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
23594c7070dbSScott Long 		iflib_netmap_txq_init(ctx, txq);
23604c7070dbSScott Long 	}
23611b9d9394SEric Joyner 
23621b9d9394SEric Joyner 	/*
23631b9d9394SEric Joyner 	 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so
23641b9d9394SEric Joyner 	 * that drivers can use the value when setting up the hardware receive
23651b9d9394SEric Joyner 	 * buffers.
23661b9d9394SEric Joyner 	 */
23671b9d9394SEric Joyner 	iflib_calc_rx_mbuf_sz(ctx);
23681b9d9394SEric Joyner 
236923ac9029SStephen Hurd #ifdef INVARIANTS
237023ac9029SStephen Hurd 	i = if_getdrvflags(ifp);
237123ac9029SStephen Hurd #endif
23724c7070dbSScott Long 	IFDI_INIT(ctx);
237323ac9029SStephen Hurd 	MPASS(if_getdrvflags(ifp) == i);
23744c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
237595246abbSSean Bruno 		/* XXX this should really be done on a per-queue basis */
2376d0d0ad0aSStephen Hurd 		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
2377d0d0ad0aSStephen Hurd 			MPASS(rxq->ifr_id == i);
2378d0d0ad0aSStephen Hurd 			iflib_netmap_rxq_init(ctx, rxq);
237995246abbSSean Bruno 			continue;
2380d0d0ad0aSStephen Hurd 		}
23814c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
23824c7070dbSScott Long 			if (iflib_fl_setup(fl)) {
23833d10e9edSMarius Strobl 				device_printf(ctx->ifc_dev,
23843d10e9edSMarius Strobl 				    "setting up free list %d failed - "
23853d10e9edSMarius Strobl 				    "check cluster settings\n", j);
23864c7070dbSScott Long 				goto done;
23874c7070dbSScott Long 			}
23884c7070dbSScott Long 		}
23894c7070dbSScott Long 	}
23904c7070dbSScott Long done:
23914c7070dbSScott Long 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
23924c7070dbSScott Long 	IFDI_INTR_ENABLE(ctx);
23934c7070dbSScott Long 	txq = ctx->ifc_txqs;
23944c7070dbSScott Long 	for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
2395ab2e3f79SStephen Hurd 		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
2396ab2e3f79SStephen Hurd 			txq->ift_timer.c_cpu);
23974c7070dbSScott Long }
23984c7070dbSScott Long 
23994c7070dbSScott Long static int
24004c7070dbSScott Long iflib_media_change(if_t ifp)
24014c7070dbSScott Long {
24024c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
24034c7070dbSScott Long 	int err;
24044c7070dbSScott Long 
24054c7070dbSScott Long 	CTX_LOCK(ctx);
24064c7070dbSScott Long 	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
24074c7070dbSScott Long 		iflib_init_locked(ctx);
24084c7070dbSScott Long 	CTX_UNLOCK(ctx);
24094c7070dbSScott Long 	return (err);
24104c7070dbSScott Long }
24114c7070dbSScott Long 
24124c7070dbSScott Long static void
24134c7070dbSScott Long iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
24144c7070dbSScott Long {
24154c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
24164c7070dbSScott Long 
24174c7070dbSScott Long 	CTX_LOCK(ctx);
2418ab2e3f79SStephen Hurd 	IFDI_UPDATE_ADMIN_STATUS(ctx);
24194c7070dbSScott Long 	IFDI_MEDIA_STATUS(ctx, ifmr);
24204c7070dbSScott Long 	CTX_UNLOCK(ctx);
24214c7070dbSScott Long }
24224c7070dbSScott Long 
242309f6ff4fSMatt Macy void
24244c7070dbSScott Long iflib_stop(if_ctx_t ctx)
24254c7070dbSScott Long {
24264c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
24274c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
24284c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
24294d261ce2SStephen Hurd 	if_shared_ctx_t sctx = ctx->ifc_sctx;
24304c7070dbSScott Long 	iflib_dma_info_t di;
24314c7070dbSScott Long 	iflib_fl_t fl;
24324c7070dbSScott Long 	int i, j;
24334c7070dbSScott Long 
24344c7070dbSScott Long 	/* Tell the stack that the interface is no longer active */
24354c7070dbSScott Long 	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
24364c7070dbSScott Long 
24374c7070dbSScott Long 	IFDI_INTR_DISABLE(ctx);
2438ab2e3f79SStephen Hurd 	DELAY(1000);
2439da69b8f9SSean Bruno 	IFDI_STOP(ctx);
2440ab2e3f79SStephen Hurd 	DELAY(1000);
24414c7070dbSScott Long 
2442da69b8f9SSean Bruno 	iflib_debug_reset();
24434c7070dbSScott Long 	/* Wait for current tx queue users to exit to disarm watchdog timer. */
24444c7070dbSScott Long 	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
24454c7070dbSScott Long 		/* make sure all transmitters have completed before proceeding XXX */
24464c7070dbSScott Long 
2447226fb85dSStephen Hurd 		CALLOUT_LOCK(txq);
2448226fb85dSStephen Hurd 		callout_stop(&txq->ift_timer);
2449226fb85dSStephen Hurd 		CALLOUT_UNLOCK(txq);
2450226fb85dSStephen Hurd 
24514c7070dbSScott Long 		/* clean any enqueued buffers */
2452da69b8f9SSean Bruno 		iflib_ifmp_purge(txq);
24534c7070dbSScott Long 		/* Free any existing tx buffers. */
245423ac9029SStephen Hurd 		for (j = 0; j < txq->ift_size; j++) {
24554c7070dbSScott Long 			iflib_txsd_free(ctx, txq, j);
24564c7070dbSScott Long 		}
2457ab2e3f79SStephen Hurd 		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2458ab2e3f79SStephen Hurd 		txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
24594c7070dbSScott Long 		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
24604c7070dbSScott Long 		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2461ab2e3f79SStephen Hurd 		txq->ift_pullups = 0;
246295246abbSSean Bruno 		ifmp_ring_reset_stats(txq->ift_br);
24634d261ce2SStephen Hurd 		for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
24644c7070dbSScott Long 			bzero((void *)di->idi_vaddr, di->idi_size);
24654c7070dbSScott Long 	}
24664c7070dbSScott Long 	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
24674c7070dbSScott Long 		/* make sure all transmitters have completed before proceeding XXX */
24684c7070dbSScott Long 
24691722eeacSMarius Strobl 		rxq->ifr_cq_cidx = 0;
24704d261ce2SStephen Hurd 		for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
24714c7070dbSScott Long 			bzero((void *)di->idi_vaddr, di->idi_size);
24724c7070dbSScott Long 		/* also resets the free lists' pidx/cidx */
24734c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
24744c7070dbSScott Long 			iflib_fl_bufs_free(fl);
24754c7070dbSScott Long 	}
24764c7070dbSScott Long }
24774c7070dbSScott Long 
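/*
 * Return a pointer to the next cache line of RX descriptor memory after the
 * descriptor at cidx (via CACHE_PTR_NEXT), wrapping to the start of the
 * ring; used by prefetch_pkts() to warm the cache.
 */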
247895246abbSSean Bruno static inline caddr_t
247995246abbSSean Bruno calc_next_rxd(iflib_fl_t fl, int cidx)
248095246abbSSean Bruno {
248195246abbSSean Bruno 	qidx_t size;
248295246abbSSean Bruno 	int nrxd;
248395246abbSSean Bruno 	caddr_t start, end, cur, next;
248495246abbSSean Bruno 
248595246abbSSean Bruno 	nrxd = fl->ifl_size;
248695246abbSSean Bruno 	size = fl->ifl_rxd_size;
248795246abbSSean Bruno 	start = fl->ifl_ifdi->idi_vaddr;
248895246abbSSean Bruno 
248995246abbSSean Bruno 	if (__predict_false(size == 0))
249095246abbSSean Bruno 		return (start);
249195246abbSSean Bruno 	cur = start + size*cidx;
249295246abbSSean Bruno 	end = start + size*nrxd;
249395246abbSSean Bruno 	next = CACHE_PTR_NEXT(cur);
249495246abbSSean Bruno 	return (next < end ? next : start);
249595246abbSSean Bruno }
249695246abbSSean Bruno 
2497e035717eSSean Bruno static inline void
2498e035717eSSean Bruno prefetch_pkts(iflib_fl_t fl, int cidx)
2499e035717eSSean Bruno {
2500e035717eSSean Bruno 	int nextptr;
2501e035717eSSean Bruno 	int nrxd = fl->ifl_size;
250295246abbSSean Bruno 	caddr_t next_rxd;
250395246abbSSean Bruno 
2505e035717eSSean Bruno 	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2506e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
2507e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
250895246abbSSean Bruno 	next_rxd = calc_next_rxd(fl, cidx);
250995246abbSSean Bruno 	prefetch(next_rxd);
2510e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2511e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2512e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2513e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2514e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2515e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2516e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2517e035717eSSean Bruno 	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
2518e035717eSSean Bruno }
2519e035717eSSean Bruno 
25206d49b41eSAndrew Gallatin static struct mbuf *
25216d49b41eSAndrew Gallatin rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd,
25226d49b41eSAndrew Gallatin     int *pf_rv, if_rxd_info_t ri)
25234c7070dbSScott Long {
2524e035717eSSean Bruno 	bus_dmamap_t map;
25254c7070dbSScott Long 	iflib_fl_t fl;
25266d49b41eSAndrew Gallatin 	caddr_t payload;
25276d49b41eSAndrew Gallatin 	struct mbuf *m;
25286d49b41eSAndrew Gallatin 	int flid, cidx, len, next;
25294c7070dbSScott Long 
253095246abbSSean Bruno 	map = NULL;
25314c7070dbSScott Long 	flid = irf->irf_flid;
25324c7070dbSScott Long 	cidx = irf->irf_idx;
25334c7070dbSScott Long 	fl = &rxq->ifr_fl[flid];
253495246abbSSean Bruno 	sd->ifsd_fl = fl;
253595246abbSSean Bruno 	sd->ifsd_cidx = cidx;
25366d49b41eSAndrew Gallatin 	m = fl->ifl_sds.ifsd_m[cidx];
253795246abbSSean Bruno 	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
25384c7070dbSScott Long 	fl->ifl_credits--;
25394c7070dbSScott Long #if MEMORY_LOGGING
25404c7070dbSScott Long 	fl->ifl_m_dequeued++;
25414c7070dbSScott Long #endif
254295246abbSSean Bruno 	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
2543e035717eSSean Bruno 		prefetch_pkts(fl, cidx);
2544e035717eSSean Bruno 	next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2545e035717eSSean Bruno 	prefetch(&fl->ifl_sds.ifsd_map[next]);
2546e035717eSSean Bruno 	map = fl->ifl_sds.ifsd_map[cidx];
2547e035717eSSean Bruno 	next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
25484c7070dbSScott Long 
25494c7070dbSScott Long 	/* not a valid assert if bxe really does SGE from non-contiguous elements */
25504c7070dbSScott Long 	MPASS(fl->ifl_cidx == cidx);
2551bfce461eSMarius Strobl 	bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
25526d49b41eSAndrew Gallatin 
25536d49b41eSAndrew Gallatin 	if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL) {
25546d49b41eSAndrew Gallatin 		payload  = *sd->ifsd_cl;
25556d49b41eSAndrew Gallatin 		payload +=  ri->iri_pad;
25566d49b41eSAndrew Gallatin 		len = ri->iri_len - ri->iri_pad;
25576d49b41eSAndrew Gallatin 		*pf_rv = pfil_run_hooks(rxq->pfil, payload, ri->iri_ifp,
25586d49b41eSAndrew Gallatin 		    len | PFIL_MEMPTR | PFIL_IN, NULL);
25596d49b41eSAndrew Gallatin 		switch (*pf_rv) {
25606d49b41eSAndrew Gallatin 		case PFIL_DROPPED:
25616d49b41eSAndrew Gallatin 		case PFIL_CONSUMED:
25626d49b41eSAndrew Gallatin 			/*
25636d49b41eSAndrew Gallatin 			 * The filter ate it.  Everything is recycled.
25646d49b41eSAndrew Gallatin 			 */
25656d49b41eSAndrew Gallatin 			m = NULL;
25666d49b41eSAndrew Gallatin 			unload = 0;
25676d49b41eSAndrew Gallatin 			break;
25686d49b41eSAndrew Gallatin 		case PFIL_REALLOCED:
25696d49b41eSAndrew Gallatin 			/*
25706d49b41eSAndrew Gallatin 			 * The filter copied it.  Everything is recycled.
25716d49b41eSAndrew Gallatin 			 */
25726d49b41eSAndrew Gallatin 			m = pfil_mem2mbuf(payload);
25736d49b41eSAndrew Gallatin 			unload = 0;
25746d49b41eSAndrew Gallatin 			break;
25756d49b41eSAndrew Gallatin 		case PFIL_PASS:
25766d49b41eSAndrew Gallatin 			/*
25776d49b41eSAndrew Gallatin 			 * Filter said it was OK, so receive like
25786d49b41eSAndrew Gallatin 			 * normal
25796d49b41eSAndrew Gallatin 			 */
25806d49b41eSAndrew Gallatin 			fl->ifl_sds.ifsd_m[cidx] = NULL;
25816d49b41eSAndrew Gallatin 			break;
25826d49b41eSAndrew Gallatin 		default:
25836d49b41eSAndrew Gallatin 			MPASS(0);
25846d49b41eSAndrew Gallatin 		}
25856d49b41eSAndrew Gallatin 	} else {
25866d49b41eSAndrew Gallatin 		fl->ifl_sds.ifsd_m[cidx] = NULL;
25876d49b41eSAndrew Gallatin 		*pf_rv = PFIL_PASS;
25886d49b41eSAndrew Gallatin 	}
25896d49b41eSAndrew Gallatin 
25904c7070dbSScott Long 	if (unload)
2591bfce461eSMarius Strobl 		bus_dmamap_unload(fl->ifl_buf_tag, map);
259295246abbSSean Bruno 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
259395246abbSSean Bruno 	if (__predict_false(fl->ifl_cidx == 0))
25944c7070dbSScott Long 		fl->ifl_gen = 0;
259587890dbaSSean Bruno 	bit_clear(fl->ifl_rx_bitmap, cidx);
25966d49b41eSAndrew Gallatin 	return (m);
25974c7070dbSScott Long }
25984c7070dbSScott Long 
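/*
 * Build a single mbuf chain from the receive fragments described by ri: the
 * first fragment becomes the packet header mbuf and subsequent fragments are
 * linked via m_next, with each cluster attached through m_cljset().
 * Zero-length fragments and packets consumed or dropped by a pfil hook are
 * recycled instead of being chained.
 */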
25994c7070dbSScott Long static struct mbuf *
26006d49b41eSAndrew Gallatin assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv)
26014c7070dbSScott Long {
260295246abbSSean Bruno 	struct mbuf *m, *mh, *mt;
260395246abbSSean Bruno 	caddr_t cl;
26046d49b41eSAndrew Gallatin 	int  *pf_rv_ptr, flags, i, padlen;
26056d49b41eSAndrew Gallatin 	bool consumed;
26064c7070dbSScott Long 
26074c7070dbSScott Long 	i = 0;
260823ac9029SStephen Hurd 	mh = NULL;
26096d49b41eSAndrew Gallatin 	consumed = false;
26106d49b41eSAndrew Gallatin 	*pf_rv = PFIL_PASS;
26116d49b41eSAndrew Gallatin 	pf_rv_ptr = pf_rv;
26124c7070dbSScott Long 	do {
26136d49b41eSAndrew Gallatin 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd,
26146d49b41eSAndrew Gallatin 		    pf_rv_ptr, ri);
26154c7070dbSScott Long 
261695246abbSSean Bruno 		MPASS(*sd->ifsd_cl != NULL);
261723ac9029SStephen Hurd 
26186d49b41eSAndrew Gallatin 		/*
26196d49b41eSAndrew Gallatin 		 * Exclude zero-length frags & frags from
26206d49b41eSAndrew Gallatin 		 * packets the filter has consumed or dropped
26216d49b41eSAndrew Gallatin 		 */
26226d49b41eSAndrew Gallatin 		if (ri->iri_frags[i].irf_len == 0 || consumed ||
26236d49b41eSAndrew Gallatin 		    *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) {
26246d49b41eSAndrew Gallatin 			if (mh == NULL) {
26256d49b41eSAndrew Gallatin 				/* everything saved here */
26266d49b41eSAndrew Gallatin 				consumed = true;
26276d49b41eSAndrew Gallatin 				pf_rv_ptr = NULL;
262823ac9029SStephen Hurd 				continue;
262923ac9029SStephen Hurd 			}
26306d49b41eSAndrew Gallatin 			/* XXX we can save the cluster here, but not the mbuf */
26316d49b41eSAndrew Gallatin 			m_init(m, M_NOWAIT, MT_DATA, 0);
26326d49b41eSAndrew Gallatin 			m_free(m);
26336d49b41eSAndrew Gallatin 			continue;
26346d49b41eSAndrew Gallatin 		}
263523ac9029SStephen Hurd 		if (mh == NULL) {
26364c7070dbSScott Long 			flags = M_PKTHDR|M_EXT;
26374c7070dbSScott Long 			mh = mt = m;
26384c7070dbSScott Long 			padlen = ri->iri_pad;
26394c7070dbSScott Long 		} else {
26404c7070dbSScott Long 			flags = M_EXT;
26414c7070dbSScott Long 			mt->m_next = m;
26424c7070dbSScott Long 			mt = m;
26434c7070dbSScott Long 			/* assuming padding is only on the first fragment */
26444c7070dbSScott Long 			padlen = 0;
26454c7070dbSScott Long 		}
264695246abbSSean Bruno 		cl = *sd->ifsd_cl;
264795246abbSSean Bruno 		*sd->ifsd_cl = NULL;
26484c7070dbSScott Long 
26494c7070dbSScott Long 		/* Can these two be made one ? */
26504c7070dbSScott Long 		m_init(m, M_NOWAIT, MT_DATA, flags);
265195246abbSSean Bruno 		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
26524c7070dbSScott Long 		/*
26534c7070dbSScott Long 		 * These must follow m_init and m_cljset
26544c7070dbSScott Long 		 */
26554c7070dbSScott Long 		m->m_data += padlen;
26564c7070dbSScott Long 		ri->iri_len -= padlen;
265723ac9029SStephen Hurd 		m->m_len = ri->iri_frags[i].irf_len;
26584c7070dbSScott Long 	} while (++i < ri->iri_nfrags);
26594c7070dbSScott Long 
26604c7070dbSScott Long 	return (mh);
26614c7070dbSScott Long }
26624c7070dbSScott Long 
26634c7070dbSScott Long /*
26644c7070dbSScott Long  * Process one software descriptor
26654c7070dbSScott Long  */
26664c7070dbSScott Long static struct mbuf *
26674c7070dbSScott Long iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
26684c7070dbSScott Long {
266995246abbSSean Bruno 	struct if_rxsd sd;
26704c7070dbSScott Long 	struct mbuf *m;
26716d49b41eSAndrew Gallatin 	int pf_rv;
26724c7070dbSScott Long 
26734c7070dbSScott Long 	/* should I merge this back in now that the two paths are basically duplicated? */
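	/*
	 * A single fragment small enough to fit in the mbuf's internal data
	 * area is copied, leaving the receive cluster in the free list for
	 * reuse; larger or multi-fragment packets get their cluster(s)
	 * attached via assemble_segments().
	 */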
267423ac9029SStephen Hurd 	if (ri->iri_nfrags == 1 &&
267518628b74SMark Johnston 	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
26766d49b41eSAndrew Gallatin 		m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd,
26776d49b41eSAndrew Gallatin 		    &pf_rv, ri);
26786d49b41eSAndrew Gallatin 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
26796d49b41eSAndrew Gallatin 			return (m);
26806d49b41eSAndrew Gallatin 		if (pf_rv == PFIL_PASS) {
26814c7070dbSScott Long 			m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
268295246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
268395246abbSSean Bruno 			if (!IP_ALIGNED(m))
268495246abbSSean Bruno 				m->m_data += 2;
268595246abbSSean Bruno #endif
268695246abbSSean Bruno 			memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
268723ac9029SStephen Hurd 			m->m_len = ri->iri_frags[0].irf_len;
26886d49b41eSAndrew Gallatin 		}
26894c7070dbSScott Long 	} else {
26906d49b41eSAndrew Gallatin 		m = assemble_segments(rxq, ri, &sd, &pf_rv);
26916d49b41eSAndrew Gallatin 		if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED)
26926d49b41eSAndrew Gallatin 			return (m);
26934c7070dbSScott Long 	}
26944c7070dbSScott Long 	m->m_pkthdr.len = ri->iri_len;
26954c7070dbSScott Long 	m->m_pkthdr.rcvif = ri->iri_ifp;
26964c7070dbSScott Long 	m->m_flags |= ri->iri_flags;
26974c7070dbSScott Long 	m->m_pkthdr.ether_vtag = ri->iri_vtag;
26984c7070dbSScott Long 	m->m_pkthdr.flowid = ri->iri_flowid;
26994c7070dbSScott Long 	M_HASHTYPE_SET(m, ri->iri_rsstype);
27004c7070dbSScott Long 	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
27014c7070dbSScott Long 	m->m_pkthdr.csum_data = ri->iri_csum_data;
27024c7070dbSScott Long 	return (m);
27034c7070dbSScott Long }
27044c7070dbSScott Long 
270535e4e998SStephen Hurd #if defined(INET6) || defined(INET)
2706fe1bcadaSStephen Hurd static void
2707fe1bcadaSStephen Hurd iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
2708fe1bcadaSStephen Hurd {
2709fe1bcadaSStephen Hurd 	CURVNET_SET(lc->ifp->if_vnet);
2710fe1bcadaSStephen Hurd #if defined(INET6)
2711188adcb7SMarko Zec 	*v6 = V_ip6_forwarding;
2712fe1bcadaSStephen Hurd #endif
2713fe1bcadaSStephen Hurd #if defined(INET)
2714188adcb7SMarko Zec 	*v4 = V_ipforwarding;
2715fe1bcadaSStephen Hurd #endif
2716fe1bcadaSStephen Hurd 	CURVNET_RESTORE();
2717fe1bcadaSStephen Hurd }
2718fe1bcadaSStephen Hurd 
271935e4e998SStephen Hurd /*
272035e4e998SStephen Hurd  * Returns true if it's possible this packet could be LROed.
272135e4e998SStephen Hurd  * If it returns false, it is guaranteed that tcp_lro_rx()
272235e4e998SStephen Hurd  * would not return zero.
272335e4e998SStephen Hurd  */
272435e4e998SStephen Hurd static bool
2725fe1bcadaSStephen Hurd iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
272635e4e998SStephen Hurd {
272735e4e998SStephen Hurd 	struct ether_header *eh;
272835e4e998SStephen Hurd 
272935e4e998SStephen Hurd 	eh = mtod(m, struct ether_header *);
27306aee0bfaSMarko Zec 	switch (eh->ether_type) {
2731abec4724SSean Bruno #if defined(INET6)
27326aee0bfaSMarko Zec 		case htons(ETHERTYPE_IPV6):
27336aee0bfaSMarko Zec 			return (!v6_forwarding);
2734abec4724SSean Bruno #endif
2735abec4724SSean Bruno #if defined (INET)
27366aee0bfaSMarko Zec 		case htons(ETHERTYPE_IP):
27376aee0bfaSMarko Zec 			return (!v4_forwarding);
2738abec4724SSean Bruno #endif
273935e4e998SStephen Hurd 	}
274035e4e998SStephen Hurd 
274135e4e998SStephen Hurd 	return false;
274235e4e998SStephen Hurd }
2743fe1bcadaSStephen Hurd #else
2744fe1bcadaSStephen Hurd static void
2745fe1bcadaSStephen Hurd iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
2746fe1bcadaSStephen Hurd {
2747fe1bcadaSStephen Hurd }
274835e4e998SStephen Hurd #endif
274935e4e998SStephen Hurd 
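/*
 * Deferred RX watchdog: simply requeue the queue's RX group task so that
 * iflib_rxeof() gets another pass at the ring.
 */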
2750fb1a29b4SHans Petter Selasky static void
2751fb1a29b4SHans Petter Selasky _task_fn_rx_watchdog(void *context)
2752fb1a29b4SHans Petter Selasky {
2753fb1a29b4SHans Petter Selasky 	iflib_rxq_t rxq = context;
2754fb1a29b4SHans Petter Selasky 
2755fb1a29b4SHans Petter Selasky 	GROUPTASK_ENQUEUE(&rxq->ifr_task);
2756fb1a29b4SHans Petter Selasky }
2757fb1a29b4SHans Petter Selasky 
2758fb1a29b4SHans Petter Selasky static uint8_t
275995246abbSSean Bruno iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
27604c7070dbSScott Long {
27611722eeacSMarius Strobl 	if_t ifp;
27624c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
27634c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
276423ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
27654c7070dbSScott Long 	int avail, i;
276695246abbSSean Bruno 	qidx_t *cidxp;
27674c7070dbSScott Long 	struct if_rxd_info ri;
27684c7070dbSScott Long 	int err, budget_left, rx_bytes, rx_pkts;
27694c7070dbSScott Long 	iflib_fl_t fl;
27704c7070dbSScott Long 	int lro_enabled;
2771f6cb0deaSMatt Macy 	bool v4_forwarding, v6_forwarding, lro_possible;
2772fb1a29b4SHans Petter Selasky 	uint8_t retval = 0;
277395246abbSSean Bruno 
27744c7070dbSScott Long 	/*
27754c7070dbSScott Long 	 * XXX early demux data packets so that if_input processing only handles
27764c7070dbSScott Long 	 * acks in interrupt context
27774c7070dbSScott Long 	 */
277820f63282SStephen Hurd 	struct mbuf *m, *mh, *mt, *mf;
27794c7070dbSScott Long 
27800b8df657SGleb Smirnoff 	NET_EPOCH_ASSERT();
27810b8df657SGleb Smirnoff 
2782f6cb0deaSMatt Macy 	lro_possible = v4_forwarding = v6_forwarding = false;
278395246abbSSean Bruno 	ifp = ctx->ifc_ifp;
27844c7070dbSScott Long 	mh = mt = NULL;
27854c7070dbSScott Long 	MPASS(budget > 0);
27864c7070dbSScott Long 	rx_pkts	= rx_bytes = 0;
278723ac9029SStephen Hurd 	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
27884c7070dbSScott Long 		cidxp = &rxq->ifr_cq_cidx;
27894c7070dbSScott Long 	else
27904c7070dbSScott Long 		cidxp = &rxq->ifr_fl[0].ifl_cidx;
279123ac9029SStephen Hurd 	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
27924c7070dbSScott Long 		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2793fb1a29b4SHans Petter Selasky 			retval |= __iflib_fl_refill_lt(ctx, fl, budget + 8);
27944c7070dbSScott Long 		DBG_COUNTER_INC(rx_unavail);
2795fb1a29b4SHans Petter Selasky 		return (retval);
27964c7070dbSScott Long 	}
27974c7070dbSScott Long 
27986d49b41eSAndrew Gallatin 	/* pfil needs the vnet to be set */
27996d49b41eSAndrew Gallatin 	CURVNET_SET_QUIET(ifp->if_vnet);
28008b8d9093SMarius Strobl 	for (budget_left = budget; budget_left > 0 && avail > 0;) {
28014c7070dbSScott Long 		if (__predict_false(!CTX_ACTIVE(ctx))) {
28024c7070dbSScott Long 			DBG_COUNTER_INC(rx_ctx_inactive);
28034c7070dbSScott Long 			break;
28044c7070dbSScott Long 		}
28054c7070dbSScott Long 		/*
28064c7070dbSScott Long 		 * Reset client set fields to their default values
28074c7070dbSScott Long 		 */
280895246abbSSean Bruno 		rxd_info_zero(&ri);
28094c7070dbSScott Long 		ri.iri_qsidx = rxq->ifr_id;
28104c7070dbSScott Long 		ri.iri_cidx = *cidxp;
281195246abbSSean Bruno 		ri.iri_ifp = ifp;
28124c7070dbSScott Long 		ri.iri_frags = rxq->ifr_frags;
28134c7070dbSScott Long 		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
28144c7070dbSScott Long 
281595246abbSSean Bruno 		if (err)
281695246abbSSean Bruno 			goto err;
28176d49b41eSAndrew Gallatin 		rx_pkts += 1;
28186d49b41eSAndrew Gallatin 		rx_bytes += ri.iri_len;
281923ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
282023ac9029SStephen Hurd 			*cidxp = ri.iri_cidx;
282123ac9029SStephen Hurd 			/* Update our consumer index */
282295246abbSSean Bruno 			/* XXX NB: shurd - check if this is still safe */
28231722eeacSMarius Strobl 			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0])
282423ac9029SStephen Hurd 				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
28254c7070dbSScott Long 			/* was this only a completion queue message? */
28264c7070dbSScott Long 			if (__predict_false(ri.iri_nfrags == 0))
28274c7070dbSScott Long 				continue;
28284c7070dbSScott Long 		}
28294c7070dbSScott Long 		MPASS(ri.iri_nfrags != 0);
28304c7070dbSScott Long 		MPASS(ri.iri_len != 0);
28314c7070dbSScott Long 
28324c7070dbSScott Long 		/* will advance the cidx on the corresponding free lists */
28334c7070dbSScott Long 		m = iflib_rxd_pkt_get(rxq, &ri);
28348b8d9093SMarius Strobl 		avail--;
28358b8d9093SMarius Strobl 		budget_left--;
28364c7070dbSScott Long 		if (avail == 0 && budget_left)
283723ac9029SStephen Hurd 			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
28384c7070dbSScott Long 
28396d49b41eSAndrew Gallatin 		if (__predict_false(m == NULL))
28404c7070dbSScott Long 			continue;
28416d49b41eSAndrew Gallatin 
28424c7070dbSScott Long 		/* imm_pkt: -- cxgb */
28434c7070dbSScott Long 		if (mh == NULL)
28444c7070dbSScott Long 			mh = mt = m;
28454c7070dbSScott Long 		else {
28464c7070dbSScott Long 			mt->m_nextpkt = m;
28474c7070dbSScott Long 			mt = m;
28484c7070dbSScott Long 		}
28494c7070dbSScott Long 	}
28506d49b41eSAndrew Gallatin 	CURVNET_RESTORE();
28514c7070dbSScott Long 	/* make sure that we can refill faster than drain */
28524c7070dbSScott Long 	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
2853fb1a29b4SHans Petter Selasky 		retval |= __iflib_fl_refill_lt(ctx, fl, budget + 8);
28544c7070dbSScott Long 
28554c7070dbSScott Long 	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
2856fe1bcadaSStephen Hurd 	if (lro_enabled)
2857fe1bcadaSStephen Hurd 		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
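	/*
	 * mf/mt accumulate packets that bypass LRO so they can be handed to
	 * if_input() in a single batch below.
	 */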
285820f63282SStephen Hurd 	mt = mf = NULL;
28594c7070dbSScott Long 	while (mh != NULL) {
28604c7070dbSScott Long 		m = mh;
28614c7070dbSScott Long 		mh = mh->m_nextpkt;
28624c7070dbSScott Long 		m->m_nextpkt = NULL;
286395246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
286495246abbSSean Bruno 		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
286595246abbSSean Bruno 			continue;
286695246abbSSean Bruno #endif
28674c7070dbSScott Long 		rx_bytes += m->m_pkthdr.len;
28684c7070dbSScott Long 		rx_pkts++;
2869aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
287035e4e998SStephen Hurd 		if (lro_enabled) {
287135e4e998SStephen Hurd 			if (!lro_possible) {
2872fe1bcadaSStephen Hurd 				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
287335e4e998SStephen Hurd 				if (lro_possible && mf != NULL) {
287435e4e998SStephen Hurd 					ifp->if_input(ifp, mf);
287535e4e998SStephen Hurd 					DBG_COUNTER_INC(rx_if_input);
287635e4e998SStephen Hurd 					mt = mf = NULL;
287735e4e998SStephen Hurd 				}
287835e4e998SStephen Hurd 			}
287925ac1dd5SStephen Hurd 			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
288025ac1dd5SStephen Hurd 			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
288135e4e998SStephen Hurd 				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
28824c7070dbSScott Long 					continue;
288320f63282SStephen Hurd 			}
288425ac1dd5SStephen Hurd 		}
2885aaeb188aSBjoern A. Zeeb #endif
288635e4e998SStephen Hurd 		if (lro_possible) {
288735e4e998SStephen Hurd 			ifp->if_input(ifp, m);
288835e4e998SStephen Hurd 			DBG_COUNTER_INC(rx_if_input);
288935e4e998SStephen Hurd 			continue;
289035e4e998SStephen Hurd 		}
289135e4e998SStephen Hurd 
289235e4e998SStephen Hurd 		if (mf == NULL)
289335e4e998SStephen Hurd 			mf = m;
289420f63282SStephen Hurd 		if (mt != NULL)
289520f63282SStephen Hurd 			mt->m_nextpkt = m;
289620f63282SStephen Hurd 		mt = m;
289720f63282SStephen Hurd 	}
289820f63282SStephen Hurd 	if (mf != NULL) {
289920f63282SStephen Hurd 		ifp->if_input(ifp, mf);
29004c7070dbSScott Long 		DBG_COUNTER_INC(rx_if_input);
29014c7070dbSScott Long 	}
290223ac9029SStephen Hurd 
29034c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
29044c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
29054c7070dbSScott Long 
29064c7070dbSScott Long 	/*
29074c7070dbSScott Long 	 * Flush any outstanding LRO work
29084c7070dbSScott Long 	 */
2909aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
291023ac9029SStephen Hurd 	tcp_lro_flush_all(&rxq->ifr_lc);
2911aaeb188aSBjoern A. Zeeb #endif
2912fb1a29b4SHans Petter Selasky 	if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0)
2913fb1a29b4SHans Petter Selasky 		retval |= IFLIB_RXEOF_MORE;
2914fb1a29b4SHans Petter Selasky 	return (retval);
291595246abbSSean Bruno err:
29167b610b60SSean Bruno 	STATE_LOCK(ctx);
2917ab2e3f79SStephen Hurd 	ctx->ifc_flags |= IFC_DO_RESET;
2918940f62d6SEric Joyner 	iflib_admin_intr_deferred(ctx);
291946fa0c25SEric Joyner 	STATE_UNLOCK(ctx);
2920fb1a29b4SHans Petter Selasky 	return (0);
292195246abbSSean Bruno }
292295246abbSSean Bruno 
292395246abbSSean Bruno #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
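/*
 * Scale how many enqueued descriptors may accumulate before the hardware is
 * notified, based on how full the ring is: a ring at or below 1/8 occupancy
 * gets immediate notification (0 deferred), while fuller rings defer up to
 * TXD_NOTIFY_COUNT() descriptors.
 */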
292495246abbSSean Bruno static inline qidx_t
292595246abbSSean Bruno txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
292695246abbSSean Bruno {
292795246abbSSean Bruno 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
292895246abbSSean Bruno 	qidx_t minthresh = txq->ift_size / 8;
292995246abbSSean Bruno 	if (in_use > 4*minthresh)
293095246abbSSean Bruno 		return (notify_count);
293195246abbSSean Bruno 	if (in_use > 2*minthresh)
293295246abbSSean Bruno 		return (notify_count >> 1);
293395246abbSSean Bruno 	if (in_use > minthresh)
293495246abbSSean Bruno 		return (notify_count >> 3);
293595246abbSSean Bruno 	return (0);
293695246abbSSean Bruno }
293795246abbSSean Bruno 
293895246abbSSean Bruno static inline qidx_t
293995246abbSSean Bruno txq_max_rs_deferred(iflib_txq_t txq)
294095246abbSSean Bruno {
294195246abbSSean Bruno 	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
294295246abbSSean Bruno 	qidx_t minthresh = txq->ift_size / 8;
294395246abbSSean Bruno 	if (txq->ift_in_use > 4*minthresh)
294495246abbSSean Bruno 		return (notify_count);
294595246abbSSean Bruno 	if (txq->ift_in_use > 2*minthresh)
294695246abbSSean Bruno 		return (notify_count >> 1);
294795246abbSSean Bruno 	if (txq->ift_in_use > minthresh)
294895246abbSSean Bruno 		return (notify_count >> 2);
29492b2fc973SSean Bruno 	return (2);
29504c7070dbSScott Long }
29514c7070dbSScott Long 
29524c7070dbSScott Long #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
29534c7070dbSScott Long #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
295495246abbSSean Bruno 
295595246abbSSean Bruno #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
295695246abbSSean Bruno #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
295723ac9029SStephen Hurd #define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
29584c7070dbSScott Long 
295995246abbSSean Bruno /* forward compatibility for cxgb */
296095246abbSSean Bruno #define FIRST_QSET(ctx) 0
296195246abbSSean Bruno #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
296295246abbSSean Bruno #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
296395246abbSSean Bruno #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
296495246abbSSean Bruno #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
296595246abbSSean Bruno 
296695246abbSSean Bruno /* XXX we should be setting this to something other than zero */
296795246abbSSean Bruno #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
29687474544bSMarius Strobl #define	MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
29697474544bSMarius Strobl     (ctx)->ifc_softc_ctx.isc_tx_nsegments)
297095246abbSSean Bruno 
297195246abbSSean Bruno static inline bool
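/*
 * Ring the TX doorbell if explicitly requested (ring != 0) or once enough
 * descriptors have accumulated (ift_db_pending >= TXQ_MAX_DB_DEFERRED()).
 * The descriptor ring is synced before the flush; returns whether the
 * doorbell was actually written.
 */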
297295246abbSSean Bruno iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
29734c7070dbSScott Long {
297495246abbSSean Bruno 	qidx_t dbval, max;
297595246abbSSean Bruno 	bool rang;
29764c7070dbSScott Long 
297795246abbSSean Bruno 	rang = false;
297895246abbSSean Bruno 	max = TXQ_MAX_DB_DEFERRED(txq, in_use);
297995246abbSSean Bruno 	if (ring || txq->ift_db_pending >= max) {
29804c7070dbSScott Long 		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
298195dcf343SMarius Strobl 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
298295dcf343SMarius Strobl 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
29834c7070dbSScott Long 		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
29844c7070dbSScott Long 		txq->ift_db_pending = txq->ift_npending = 0;
298595246abbSSean Bruno 		rang = true;
29864c7070dbSScott Long 	}
298795246abbSSean Bruno 	return (rang);
29884c7070dbSScott Long }
29894c7070dbSScott Long 
29904c7070dbSScott Long #ifdef PKT_DEBUG
29914c7070dbSScott Long static void
29924c7070dbSScott Long print_pkt(if_pkt_info_t pi)
29934c7070dbSScott Long {
29944c7070dbSScott Long 	printf("pi len:  %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
29954c7070dbSScott Long 	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
29964c7070dbSScott Long 	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
29974c7070dbSScott Long 	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
29984c7070dbSScott Long 	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
29994c7070dbSScott Long 	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
30004c7070dbSScott Long }
30014c7070dbSScott Long #endif
30024c7070dbSScott Long 
30034c7070dbSScott Long #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
3004a06424ddSEric Joyner #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
30054c7070dbSScott Long #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
3006a06424ddSEric Joyner #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
30074c7070dbSScott Long 
30084c7070dbSScott Long static int
30094c7070dbSScott Long iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
30104c7070dbSScott Long {
3011ab2e3f79SStephen Hurd 	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
30124c7070dbSScott Long 	struct ether_vlan_header *eh;
3013c9a49a4fSMarius Strobl 	struct mbuf *m;
30144c7070dbSScott Long 
30158b8d9093SMarius Strobl 	m = *mp;
3016ab2e3f79SStephen Hurd 	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
3017ab2e3f79SStephen Hurd 	    M_WRITABLE(m) == 0) {
3018ab2e3f79SStephen Hurd 		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
3019ab2e3f79SStephen Hurd 			return (ENOMEM);
3020ab2e3f79SStephen Hurd 		} else {
3021ab2e3f79SStephen Hurd 			m_freem(*mp);
302264e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_frees);
30238b8d9093SMarius Strobl 			*mp = m;
3024ab2e3f79SStephen Hurd 		}
3025ab2e3f79SStephen Hurd 	}
30261248952aSSean Bruno 
30274c7070dbSScott Long 	/*
30284c7070dbSScott Long 	 * Determine where frame payload starts.
30294c7070dbSScott Long 	 * Jump over vlan headers if already present,
30304c7070dbSScott Long 	 * helpful for QinQ too.
30314c7070dbSScott Long 	 */
30324c7070dbSScott Long 	if (__predict_false(m->m_len < sizeof(*eh))) {
30334c7070dbSScott Long 		txq->ift_pullups++;
30344c7070dbSScott Long 		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
30354c7070dbSScott Long 			return (ENOMEM);
30364c7070dbSScott Long 	}
30374c7070dbSScott Long 	eh = mtod(m, struct ether_vlan_header *);
30384c7070dbSScott Long 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
30394c7070dbSScott Long 		pi->ipi_etype = ntohs(eh->evl_proto);
30404c7070dbSScott Long 		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
30414c7070dbSScott Long 	} else {
30424c7070dbSScott Long 		pi->ipi_etype = ntohs(eh->evl_encap_proto);
30434c7070dbSScott Long 		pi->ipi_ehdrlen = ETHER_HDR_LEN;
30444c7070dbSScott Long 	}
30454c7070dbSScott Long 
30464c7070dbSScott Long 	switch (pi->ipi_etype) {
30474c7070dbSScott Long #ifdef INET
30484c7070dbSScott Long 	case ETHERTYPE_IP:
30494c7070dbSScott Long 	{
3050c9a49a4fSMarius Strobl 		struct mbuf *n;
30514c7070dbSScott Long 		struct ip *ip = NULL;
30524c7070dbSScott Long 		struct tcphdr *th = NULL;
30534c7070dbSScott Long 		int minthlen;
30544c7070dbSScott Long 
30554c7070dbSScott Long 		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
30564c7070dbSScott Long 		if (__predict_false(m->m_len < minthlen)) {
30574c7070dbSScott Long 			/*
30584c7070dbSScott Long 			 * if this code bloat is causing too much of a hit
30594c7070dbSScott Long 			 * move it to a separate function and mark it noinline
30604c7070dbSScott Long 			 */
30614c7070dbSScott Long 			if (m->m_len == pi->ipi_ehdrlen) {
30624c7070dbSScott Long 				n = m->m_next;
30634c7070dbSScott Long 				MPASS(n);
30644c7070dbSScott Long 				if (n->m_len >= sizeof(*ip))  {
30654c7070dbSScott Long 					ip = (struct ip *)n->m_data;
30664c7070dbSScott Long 					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
30674c7070dbSScott Long 						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
30684c7070dbSScott Long 				} else {
30694c7070dbSScott Long 					txq->ift_pullups++;
30704c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
30714c7070dbSScott Long 						return (ENOMEM);
30724c7070dbSScott Long 					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
30734c7070dbSScott Long 				}
30744c7070dbSScott Long 			} else {
30754c7070dbSScott Long 				txq->ift_pullups++;
30764c7070dbSScott Long 				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
30774c7070dbSScott Long 					return (ENOMEM);
30784c7070dbSScott Long 				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
30794c7070dbSScott Long 				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
30804c7070dbSScott Long 					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
30814c7070dbSScott Long 			}
30824c7070dbSScott Long 		} else {
30834c7070dbSScott Long 			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
30844c7070dbSScott Long 			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
30854c7070dbSScott Long 				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
30864c7070dbSScott Long 		}
30874c7070dbSScott Long 		pi->ipi_ip_hlen = ip->ip_hl << 2;
30884c7070dbSScott Long 		pi->ipi_ipproto = ip->ip_p;
30894c7070dbSScott Long 		pi->ipi_flags |= IPI_TX_IPV4;
30904c7070dbSScott Long 
3091a06424ddSEric Joyner 		/* TCP checksum offload may require TCP header length */
3092a06424ddSEric Joyner 		if (IS_TX_OFFLOAD4(pi)) {
3093a06424ddSEric Joyner 			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
30944c7070dbSScott Long 				if (__predict_false(th == NULL)) {
30954c7070dbSScott Long 					txq->ift_pullups++;
30964c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
30974c7070dbSScott Long 						return (ENOMEM);
30984c7070dbSScott Long 					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
30994c7070dbSScott Long 				}
31004c7070dbSScott Long 				pi->ipi_tcp_hflags = th->th_flags;
31014c7070dbSScott Long 				pi->ipi_tcp_hlen = th->th_off << 2;
31024c7070dbSScott Long 				pi->ipi_tcp_seq = th->th_seq;
31034c7070dbSScott Long 			}
3104a06424ddSEric Joyner 			if (IS_TSO4(pi)) {
31054c7070dbSScott Long 				if (__predict_false(ip->ip_p != IPPROTO_TCP))
31064c7070dbSScott Long 					return (ENXIO);
31078d4ceb9cSStephen Hurd 				/*
31088d4ceb9cSStephen Hurd 				 * TSO always requires hardware checksum offload.
31098d4ceb9cSStephen Hurd 				 */
31108d4ceb9cSStephen Hurd 				pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
31114c7070dbSScott Long 				th->th_sum = in_pseudo(ip->ip_src.s_addr,
31124c7070dbSScott Long 						       ip->ip_dst.s_addr, htons(IPPROTO_TCP));
31134c7070dbSScott Long 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
31141248952aSSean Bruno 				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
31151248952aSSean Bruno 					ip->ip_sum = 0;
31161248952aSSean Bruno 					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
31171248952aSSean Bruno 				}
31184c7070dbSScott Long 			}
3119a06424ddSEric Joyner 		}
31208d4ceb9cSStephen Hurd 		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
31218d4ceb9cSStephen Hurd 			ip->ip_sum = 0;
31228d4ceb9cSStephen Hurd 
31234c7070dbSScott Long 		break;
31244c7070dbSScott Long 	}
31254c7070dbSScott Long #endif
31264c7070dbSScott Long #ifdef INET6
31274c7070dbSScott Long 	case ETHERTYPE_IPV6:
31284c7070dbSScott Long 	{
31294c7070dbSScott Long 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
31304c7070dbSScott Long 		struct tcphdr *th;
31314c7070dbSScott Long 		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
31324c7070dbSScott Long 
31334c7070dbSScott Long 		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
313464e6fc13SStephen Hurd 			txq->ift_pullups++;
31354c7070dbSScott Long 			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
31364c7070dbSScott Long 				return (ENOMEM);
31374c7070dbSScott Long 		}
31384c7070dbSScott Long 		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
31394c7070dbSScott Long 
31404c7070dbSScott Long 		/* XXX-BZ this will go badly in case of ext hdrs. */
31414c7070dbSScott Long 		pi->ipi_ipproto = ip6->ip6_nxt;
31424c7070dbSScott Long 		pi->ipi_flags |= IPI_TX_IPV6;
31434c7070dbSScott Long 
3144a06424ddSEric Joyner 		/* TCP checksum offload may require TCP header length */
3145a06424ddSEric Joyner 		if (IS_TX_OFFLOAD6(pi)) {
31464c7070dbSScott Long 			if (pi->ipi_ipproto == IPPROTO_TCP) {
31474c7070dbSScott Long 				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3148a06424ddSEric Joyner 					txq->ift_pullups++;
31494c7070dbSScott Long 					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
31504c7070dbSScott Long 						return (ENOMEM);
31514c7070dbSScott Long 				}
31524c7070dbSScott Long 				pi->ipi_tcp_hflags = th->th_flags;
31534c7070dbSScott Long 				pi->ipi_tcp_hlen = th->th_off << 2;
3154a06424ddSEric Joyner 				pi->ipi_tcp_seq = th->th_seq;
31554c7070dbSScott Long 			}
3156a06424ddSEric Joyner 			if (IS_TSO6(pi)) {
31574c7070dbSScott Long 				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
31584c7070dbSScott Long 					return (ENXIO);
31594c7070dbSScott Long 				/*
31608d4ceb9cSStephen Hurd 				 * TSO always requires hardware checksum offload.
31614c7070dbSScott Long 				 */
3162a06424ddSEric Joyner 				pi->ipi_csum_flags |= CSUM_IP6_TCP;
31634c7070dbSScott Long 				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
31644c7070dbSScott Long 				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
31654c7070dbSScott Long 			}
3166a06424ddSEric Joyner 		}
31674c7070dbSScott Long 		break;
31684c7070dbSScott Long 	}
31694c7070dbSScott Long #endif
31704c7070dbSScott Long 	default:
31714c7070dbSScott Long 		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
31724c7070dbSScott Long 		pi->ipi_ip_hlen = 0;
31734c7070dbSScott Long 		break;
31744c7070dbSScott Long 	}
31754c7070dbSScott Long 	*mp = m;
31761248952aSSean Bruno 
31774c7070dbSScott Long 	return (0);
31784c7070dbSScott Long }
31794c7070dbSScott Long 
31804c7070dbSScott Long /*
31814c7070dbSScott Long  * If dodgy hardware rejects the scatter gather chain we've handed it,
318223ac9029SStephen Hurd  * we'll need to remove the mbuf chain from ifsg_m[] before we can add the
318323ac9029SStephen Hurd  * m_defrag'd mbufs.
31844c7070dbSScott Long  */
31854c7070dbSScott Long static __noinline struct mbuf *
318623ac9029SStephen Hurd iflib_remove_mbuf(iflib_txq_t txq)
31874c7070dbSScott Long {
3188fbec776dSAndrew Gallatin 	int ntxd, pidx;
3189fbec776dSAndrew Gallatin 	struct mbuf *m, **ifsd_m;
31904c7070dbSScott Long 
31914c7070dbSScott Long 	ifsd_m = txq->ift_sds.ifsd_m;
319223ac9029SStephen Hurd 	ntxd = txq->ift_size;
3193fbec776dSAndrew Gallatin 	pidx = txq->ift_pidx & (ntxd - 1);
3194fbec776dSAndrew Gallatin 	ifsd_m = txq->ift_sds.ifsd_m;
3195fbec776dSAndrew Gallatin 	m = ifsd_m[pidx];
31964c7070dbSScott Long 	ifsd_m[pidx] = NULL;
3197bfce461eSMarius Strobl 	bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
31988a04b53dSKonstantin Belousov 	if (txq->ift_sds.ifsd_tso_map != NULL)
3199bfce461eSMarius Strobl 		bus_dmamap_unload(txq->ift_tso_buf_tag,
32008a04b53dSKonstantin Belousov 		    txq->ift_sds.ifsd_tso_map[pidx]);
32014c7070dbSScott Long #if MEMORY_LOGGING
32024c7070dbSScott Long 	txq->ift_dequeued++;
32034c7070dbSScott Long #endif
3204fbec776dSAndrew Gallatin 	return (m);
32054c7070dbSScott Long }
32064c7070dbSScott Long 
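/*
 * TX counterpart of calc_next_rxd(): return the next cache line of TX
 * descriptor memory after cidx, wrapping at the end of the ring; used for
 * prefetching in iflib_encap().
 */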
320795246abbSSean Bruno static inline caddr_t
320895246abbSSean Bruno calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
320995246abbSSean Bruno {
321095246abbSSean Bruno 	qidx_t size;
321195246abbSSean Bruno 	int ntxd;
321295246abbSSean Bruno 	caddr_t start, end, cur, next;
321395246abbSSean Bruno 
321495246abbSSean Bruno 	ntxd = txq->ift_size;
321595246abbSSean Bruno 	size = txq->ift_txd_size[qid];
321695246abbSSean Bruno 	start = txq->ift_ifdi[qid].idi_vaddr;
321795246abbSSean Bruno 
321895246abbSSean Bruno 	if (__predict_false(size == 0))
321995246abbSSean Bruno 		return (start);
322095246abbSSean Bruno 	cur = start + size*cidx;
322195246abbSSean Bruno 	end = start + size*ntxd;
322295246abbSSean Bruno 	next = CACHE_PTR_NEXT(cur);
322395246abbSSean Bruno 	return (next < end ? next : start);
322495246abbSSean Bruno }
322595246abbSSean Bruno 
3226d14c853bSStephen Hurd /*
3227d14c853bSStephen Hurd  * Pad an mbuf to ensure a minimum ethernet frame size.
3228d14c853bSStephen Hurd  * min_frame_size is the frame size (less CRC) to pad the mbuf to
3229d14c853bSStephen Hurd  */
3230d14c853bSStephen Hurd static __noinline int
3231a15fbbb8SStephen Hurd iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
3232d14c853bSStephen Hurd {
3233d14c853bSStephen Hurd 	/*
3234d14c853bSStephen Hurd 	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
3235d14c853bSStephen Hurd 	 * an ARP message is the smallest common payload I can think of.
3236d14c853bSStephen Hurd 	 */
3237d14c853bSStephen Hurd 	static char pad[18];	/* just zeros */
3238d14c853bSStephen Hurd 	int n;
3239a15fbbb8SStephen Hurd 	struct mbuf *new_head;
3240d14c853bSStephen Hurd 
3241a15fbbb8SStephen Hurd 	if (!M_WRITABLE(*m_head)) {
3242a15fbbb8SStephen Hurd 		new_head = m_dup(*m_head, M_NOWAIT);
3243a15fbbb8SStephen Hurd 		if (new_head == NULL) {
324404993890SStephen Hurd 			m_freem(*m_head);
3245a15fbbb8SStephen Hurd 			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
324606c47d48SStephen Hurd 			DBG_COUNTER_INC(encap_pad_mbuf_fail);
324764e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_frees);
3248a15fbbb8SStephen Hurd 			return (ENOMEM);
3249a15fbbb8SStephen Hurd 		}
3250a15fbbb8SStephen Hurd 		m_freem(*m_head);
3251a15fbbb8SStephen Hurd 		*m_head = new_head;
3252a15fbbb8SStephen Hurd 	}
3253a15fbbb8SStephen Hurd 
3254a15fbbb8SStephen Hurd 	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
3255d14c853bSStephen Hurd 	     n > 0; n -= sizeof(pad))
3256a15fbbb8SStephen Hurd 		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
3257d14c853bSStephen Hurd 			break;
3258d14c853bSStephen Hurd 
3259d14c853bSStephen Hurd 	if (n > 0) {
3260a15fbbb8SStephen Hurd 		m_freem(*m_head);
3261d14c853bSStephen Hurd 		device_printf(dev, "cannot pad short frame\n");
3262d14c853bSStephen Hurd 		DBG_COUNTER_INC(encap_pad_mbuf_fail);
326364e6fc13SStephen Hurd 		DBG_COUNTER_INC(tx_frees);
3264d14c853bSStephen Hurd 		return (ENOBUFS);
3265d14c853bSStephen Hurd 	}
3266d14c853bSStephen Hurd 
3267d14c853bSStephen Hurd 	return (0);
3268d14c853bSStephen Hurd }
3269d14c853bSStephen Hurd 
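/*
 * Map the mbuf chain for DMA, fill in an if_pkt_info describing it, and hand
 * it to the driver's txd_encap routine.  Handles short-frame padding, mbuf
 * defragmentation on EFBIG, and TX descriptor accounting.
 */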
32704c7070dbSScott Long static int
32714c7070dbSScott Long iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
32724c7070dbSScott Long {
32734c7070dbSScott Long 	if_ctx_t		ctx;
32744c7070dbSScott Long 	if_shared_ctx_t		sctx;
32754c7070dbSScott Long 	if_softc_ctx_t		scctx;
3276bfce461eSMarius Strobl 	bus_dma_tag_t		buf_tag;
32774c7070dbSScott Long 	bus_dma_segment_t	*segs;
3278fbec776dSAndrew Gallatin 	struct mbuf		*m_head, **ifsd_m;
327995246abbSSean Bruno 	void			*next_txd;
32804c7070dbSScott Long 	bus_dmamap_t		map;
32814c7070dbSScott Long 	struct if_pkt_info	pi;
32824c7070dbSScott Long 	int remap = 0;
32834c7070dbSScott Long 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
32844c7070dbSScott Long 
32854c7070dbSScott Long 	ctx = txq->ift_ctx;
32864c7070dbSScott Long 	sctx = ctx->ifc_sctx;
32874c7070dbSScott Long 	scctx = &ctx->ifc_softc_ctx;
32884c7070dbSScott Long 	segs = txq->ift_segs;
328923ac9029SStephen Hurd 	ntxd = txq->ift_size;
32904c7070dbSScott Long 	m_head = *m_headp;
32914c7070dbSScott Long 	map = NULL;
32924c7070dbSScott Long 
32934c7070dbSScott Long 	/*
32944c7070dbSScott Long 	 * If we're doing TSO, the next descriptor to clean may be quite far ahead.
32954c7070dbSScott Long 	 */
32964c7070dbSScott Long 	cidx = txq->ift_cidx;
32974c7070dbSScott Long 	pidx = txq->ift_pidx;
329895246abbSSean Bruno 	if (ctx->ifc_flags & IFC_PREFETCH) {
32994c7070dbSScott Long 		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
330095246abbSSean Bruno 		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
330195246abbSSean Bruno 			next_txd = calc_next_txd(txq, cidx, 0);
330295246abbSSean Bruno 			prefetch(next_txd);
330395246abbSSean Bruno 		}
33044c7070dbSScott Long 
33054c7070dbSScott Long 		/* prefetch the next cache line of mbuf pointers and flags */
33064c7070dbSScott Long 		prefetch(&txq->ift_sds.ifsd_m[next]);
33074c7070dbSScott Long 		prefetch(&txq->ift_sds.ifsd_map[next]);
33084c7070dbSScott Long 		next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
33094c7070dbSScott Long 	}
3311fbec776dSAndrew Gallatin 	ifsd_m = txq->ift_sds.ifsd_m;
33124c7070dbSScott Long 
33134c7070dbSScott Long 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3314bfce461eSMarius Strobl 		buf_tag = txq->ift_tso_buf_tag;
33154c7070dbSScott Long 		max_segs = scctx->isc_tx_tso_segments_max;
33168a04b53dSKonstantin Belousov 		map = txq->ift_sds.ifsd_tso_map[pidx];
3317bfce461eSMarius Strobl 		MPASS(buf_tag != NULL);
33187f87c040SMarius Strobl 		MPASS(max_segs > 0);
33194c7070dbSScott Long 	} else {
3320bfce461eSMarius Strobl 		buf_tag = txq->ift_buf_tag;
33214c7070dbSScott Long 		max_segs = scctx->isc_tx_nsegments;
33228a04b53dSKonstantin Belousov 		map = txq->ift_sds.ifsd_map[pidx];
33234c7070dbSScott Long 	}
3324d14c853bSStephen Hurd 	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
3325d14c853bSStephen Hurd 	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
3326a15fbbb8SStephen Hurd 		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
332764e6fc13SStephen Hurd 		if (err) {
332864e6fc13SStephen Hurd 			DBG_COUNTER_INC(encap_txd_encap_fail);
3329d14c853bSStephen Hurd 			return (err);
3330d14c853bSStephen Hurd 		}
333164e6fc13SStephen Hurd 	}
3332a15fbbb8SStephen Hurd 	m_head = *m_headp;
333395246abbSSean Bruno 
333495246abbSSean Bruno 	pkt_info_zero(&pi);
3335ab2e3f79SStephen Hurd 	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
3336ab2e3f79SStephen Hurd 	pi.ipi_pidx = pidx;
3337ab2e3f79SStephen Hurd 	pi.ipi_qsidx = txq->ift_id;
33383429c02fSStephen Hurd 	pi.ipi_len = m_head->m_pkthdr.len;
33393429c02fSStephen Hurd 	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
33401722eeacSMarius Strobl 	pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? m_head->m_pkthdr.ether_vtag : 0;
33414c7070dbSScott Long 
33424c7070dbSScott Long 	/* deliberate bitwise OR to make one condition */
33434c7070dbSScott Long 	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
334464e6fc13SStephen Hurd 		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) {
334564e6fc13SStephen Hurd 			DBG_COUNTER_INC(encap_txd_encap_fail);
33464c7070dbSScott Long 			return (err);
334764e6fc13SStephen Hurd 		}
33484c7070dbSScott Long 		m_head = *m_headp;
33494c7070dbSScott Long 	}
33504c7070dbSScott Long 
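	/*
	 * Attempt the DMA load; on EFBIG we try m_collapse() once and then
	 * m_defrag() once (tracked via remap) before giving up.  A driver
	 * encap failure with EFBIG re-enters at the defrag label below.
	 */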
33514c7070dbSScott Long retry:
3352bfce461eSMarius Strobl 	err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
3353fbec776dSAndrew Gallatin 	    BUS_DMA_NOWAIT);
33544c7070dbSScott Long defrag:
33554c7070dbSScott Long 	if (__predict_false(err)) {
33564c7070dbSScott Long 		switch (err) {
33574c7070dbSScott Long 		case EFBIG:
33584c7070dbSScott Long 			/* try collapse once and defrag once */
3359f7594707SAndrew Gallatin 			if (remap == 0) {
33604c7070dbSScott Long 				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
3361f7594707SAndrew Gallatin 				/* try defrag if collapsing fails */
3362f7594707SAndrew Gallatin 				if (m_head == NULL)
3363f7594707SAndrew Gallatin 					remap++;
3364f7594707SAndrew Gallatin 			}
336564e6fc13SStephen Hurd 			if (remap == 1) {
336664e6fc13SStephen Hurd 				txq->ift_mbuf_defrag++;
33674c7070dbSScott Long 				m_head = m_defrag(*m_headp, M_NOWAIT);
336864e6fc13SStephen Hurd 			}
33693e8d1baeSEric Joyner 			/*
33703e8d1baeSEric Joyner 			 * remap should never be >1 unless bus_dmamap_load_mbuf_sg
33713e8d1baeSEric Joyner 			 * failed to map an mbuf that was run through m_defrag
33723e8d1baeSEric Joyner 			 */
33733e8d1baeSEric Joyner 			MPASS(remap <= 1);
33743e8d1baeSEric Joyner 			if (__predict_false(m_head == NULL || remap > 1))
33754c7070dbSScott Long 				goto defrag_failed;
33763e8d1baeSEric Joyner 			remap++;
33774c7070dbSScott Long 			*m_headp = m_head;
33784c7070dbSScott Long 			goto retry;
33794c7070dbSScott Long 			break;
33804c7070dbSScott Long 		case ENOMEM:
33814c7070dbSScott Long 			txq->ift_no_tx_dma_setup++;
33824c7070dbSScott Long 			break;
33834c7070dbSScott Long 		default:
33844c7070dbSScott Long 			txq->ift_no_tx_dma_setup++;
33854c7070dbSScott Long 			m_freem(*m_headp);
33864c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
33874c7070dbSScott Long 			*m_headp = NULL;
33884c7070dbSScott Long 			break;
33894c7070dbSScott Long 		}
33904c7070dbSScott Long 		txq->ift_map_failed++;
33914c7070dbSScott Long 		DBG_COUNTER_INC(encap_load_mbuf_fail);
339264e6fc13SStephen Hurd 		DBG_COUNTER_INC(encap_txd_encap_fail);
33934c7070dbSScott Long 		return (err);
33944c7070dbSScott Long 	}
3395fbec776dSAndrew Gallatin 	ifsd_m[pidx] = m_head;
33964c7070dbSScott Long 	/*
33974c7070dbSScott Long 	 * XXX assumes a 1 to 1 relationship between segments and
33984c7070dbSScott Long 	 *        descriptors - this does not hold true on all drivers, e.g.
33994c7070dbSScott Long 	 *        cxgb
34004c7070dbSScott Long 	 */
34014c7070dbSScott Long 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
34024c7070dbSScott Long 		txq->ift_no_desc_avail++;
3403bfce461eSMarius Strobl 		bus_dmamap_unload(buf_tag, map);
34044c7070dbSScott Long 		DBG_COUNTER_INC(encap_txq_avail_fail);
340564e6fc13SStephen Hurd 		DBG_COUNTER_INC(encap_txd_encap_fail);
340623ac9029SStephen Hurd 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
34074c7070dbSScott Long 			GROUPTASK_ENQUEUE(&txq->ift_task);
34084c7070dbSScott Long 		return (ENOBUFS);
34094c7070dbSScott Long 	}
341095246abbSSean Bruno 	/*
341195246abbSSean Bruno 	 * On Intel cards we can greatly reduce the number of TX interrupts
341295246abbSSean Bruno 	 * we see by only setting report status on every Nth descriptor.
341395246abbSSean Bruno 	 * However, this also means that the driver will need to keep track
341495246abbSSean Bruno 	 * of the descriptors that RS was set on to check them for the DD bit.
341595246abbSSean Bruno 	 */
341695246abbSSean Bruno 	txq->ift_rs_pending += nsegs + 1;
341795246abbSSean Bruno 	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
34181f7ce05dSAndrew Gallatin 	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
341995246abbSSean Bruno 		pi.ipi_flags |= IPI_TX_INTR;
342095246abbSSean Bruno 		txq->ift_rs_pending = 0;
342195246abbSSean Bruno 	}
342295246abbSSean Bruno 
34234c7070dbSScott Long 	pi.ipi_segs = segs;
34244c7070dbSScott Long 	pi.ipi_nsegs = nsegs;
34254c7070dbSScott Long 
342623ac9029SStephen Hurd 	MPASS(pidx >= 0 && pidx < txq->ift_size);
34274c7070dbSScott Long #ifdef PKT_DEBUG
34284c7070dbSScott Long 	print_pkt(&pi);
34294c7070dbSScott Long #endif
34304c7070dbSScott Long 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
343195dcf343SMarius Strobl 		bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
34324c7070dbSScott Long 		DBG_COUNTER_INC(tx_encap);
343395246abbSSean Bruno 		MPASS(pi.ipi_new_pidx < txq->ift_size);
34344c7070dbSScott Long 
34354c7070dbSScott Long 		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
34364c7070dbSScott Long 		if (pi.ipi_new_pidx < pi.ipi_pidx) {
343723ac9029SStephen Hurd 			ndesc += txq->ift_size;
34384c7070dbSScott Long 			txq->ift_gen = 1;
34394c7070dbSScott Long 		}
34401248952aSSean Bruno 		/*
34411248952aSSean Bruno 		 * Drivers may need as many as two sentinel descriptors.
34431248952aSSean Bruno 		 */
34441248952aSSean Bruno 		MPASS(ndesc <= pi.ipi_nsegs + 2);
34454c7070dbSScott Long 		MPASS(pi.ipi_new_pidx != pidx);
34464c7070dbSScott Long 		MPASS(ndesc > 0);
34474c7070dbSScott Long 		txq->ift_in_use += ndesc;
344895246abbSSean Bruno 
34494c7070dbSScott Long 		/*
34504c7070dbSScott Long 		 * We update the last software descriptor again here because there may
34514c7070dbSScott Long 		 * be a sentinel and/or there may be more mbufs than segments
34524c7070dbSScott Long 		 */
34534c7070dbSScott Long 		txq->ift_pidx = pi.ipi_new_pidx;
34544c7070dbSScott Long 		txq->ift_npending += pi.ipi_ndescs;
3455f7594707SAndrew Gallatin 	} else {
345623ac9029SStephen Hurd 		*m_headp = m_head = iflib_remove_mbuf(txq);
3457f7594707SAndrew Gallatin 		if (err == EFBIG) {
34584c7070dbSScott Long 			txq->ift_txd_encap_efbig++;
3459f7594707SAndrew Gallatin 			if (remap < 2) {
3460f7594707SAndrew Gallatin 				remap = 1;
34614c7070dbSScott Long 				goto defrag;
3462f7594707SAndrew Gallatin 			}
3463f7594707SAndrew Gallatin 		}
3464f7594707SAndrew Gallatin 		goto defrag_failed;
3465f7594707SAndrew Gallatin 	}
346664e6fc13SStephen Hurd 	/*
346764e6fc13SStephen Hurd 	 * err can't possibly be non-zero here, so we don't need to test it
346864e6fc13SStephen Hurd 	 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
346964e6fc13SStephen Hurd 	 */
34704c7070dbSScott Long 	return (err);
34714c7070dbSScott Long 
34724c7070dbSScott Long defrag_failed:
34734c7070dbSScott Long 	txq->ift_mbuf_defrag_failed++;
34744c7070dbSScott Long 	txq->ift_map_failed++;
34754c7070dbSScott Long 	m_freem(*m_headp);
34764c7070dbSScott Long 	DBG_COUNTER_INC(tx_frees);
34774c7070dbSScott Long 	*m_headp = NULL;
347864e6fc13SStephen Hurd 	DBG_COUNTER_INC(encap_txd_encap_fail);
34794c7070dbSScott Long 	return (ENOMEM);
34804c7070dbSScott Long }
34814c7070dbSScott Long 
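/*
 * Unload the DMA maps and free the mbufs for the next n consumed TX
 * descriptors, starting at ift_cidx and wrapping around the ring.
 */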
34824c7070dbSScott Long static void
34834c7070dbSScott Long iflib_tx_desc_free(iflib_txq_t txq, int n)
34844c7070dbSScott Long {
34854c7070dbSScott Long 	uint32_t qsize, cidx, mask, gen;
34864c7070dbSScott Long 	struct mbuf *m, **ifsd_m;
348795246abbSSean Bruno 	bool do_prefetch;
34884c7070dbSScott Long 
34894c7070dbSScott Long 	cidx = txq->ift_cidx;
34904c7070dbSScott Long 	gen = txq->ift_gen;
349123ac9029SStephen Hurd 	qsize = txq->ift_size;
34924c7070dbSScott Long 	mask = qsize-1;
34934c7070dbSScott Long 	ifsd_m = txq->ift_sds.ifsd_m;
349495246abbSSean Bruno 	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
34954c7070dbSScott Long 
349694618825SMark Johnston 	while (n-- > 0) {
349795246abbSSean Bruno 		if (do_prefetch) {
34984c7070dbSScott Long 			prefetch(ifsd_m[(cidx + 3) & mask]);
34994c7070dbSScott Long 			prefetch(ifsd_m[(cidx + 4) & mask]);
350095246abbSSean Bruno 		}
35014c7070dbSScott Long 		if ((m = ifsd_m[cidx]) != NULL) {
3502fbec776dSAndrew Gallatin 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
35038a04b53dSKonstantin Belousov 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
3504bfce461eSMarius Strobl 				bus_dmamap_sync(txq->ift_tso_buf_tag,
35058a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_tso_map[cidx],
35068a04b53dSKonstantin Belousov 				    BUS_DMASYNC_POSTWRITE);
3507bfce461eSMarius Strobl 				bus_dmamap_unload(txq->ift_tso_buf_tag,
35088a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_tso_map[cidx]);
35098a04b53dSKonstantin Belousov 			} else {
3510bfce461eSMarius Strobl 				bus_dmamap_sync(txq->ift_buf_tag,
35118a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_map[cidx],
35128a04b53dSKonstantin Belousov 				    BUS_DMASYNC_POSTWRITE);
3513bfce461eSMarius Strobl 				bus_dmamap_unload(txq->ift_buf_tag,
35148a04b53dSKonstantin Belousov 				    txq->ift_sds.ifsd_map[cidx]);
35158a04b53dSKonstantin Belousov 			}
35164c7070dbSScott Long 			/* XXX we don't support any drivers that batch packets yet */
35174c7070dbSScott Long 			MPASS(m->m_nextpkt == NULL);
35185c5ca36cSSean Bruno 			m_freem(m);
35194c7070dbSScott Long 			ifsd_m[cidx] = NULL;
35204c7070dbSScott Long #if MEMORY_LOGGING
35214c7070dbSScott Long 			txq->ift_dequeued++;
35224c7070dbSScott Long #endif
35234c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
35244c7070dbSScott Long 		}
35254c7070dbSScott Long 		if (__predict_false(++cidx == qsize)) {
35264c7070dbSScott Long 			cidx = 0;
35274c7070dbSScott Long 			gen = 0;
35284c7070dbSScott Long 		}
35294c7070dbSScott Long 	}
35304c7070dbSScott Long 	txq->ift_cidx = cidx;
35314c7070dbSScott Long 	txq->ift_gen = gen;
35324c7070dbSScott Long }
35334c7070dbSScott Long 
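/*
 * Update the count of completed TX descriptors from the driver and, once
 * more than thresh descriptors are reclaimable, free them and return the
 * number reclaimed; returns 0 while below the threshold.
 */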
35344c7070dbSScott Long static __inline int
35354c7070dbSScott Long iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
35364c7070dbSScott Long {
35374c7070dbSScott Long 	int reclaim;
35384c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
35394c7070dbSScott Long 
35404c7070dbSScott Long 	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
35414c7070dbSScott Long 	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
35424c7070dbSScott Long 
35434c7070dbSScott Long 	/*
35444c7070dbSScott Long 	 * Need a rate-limiting check so that this isn't called every time
35454c7070dbSScott Long 	 */
35464c7070dbSScott Long 	iflib_tx_credits_update(ctx, txq);
35474c7070dbSScott Long 	reclaim = DESC_RECLAIMABLE(txq);
35484c7070dbSScott Long 
35494c7070dbSScott Long 	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
35504c7070dbSScott Long #ifdef INVARIANTS
35514c7070dbSScott Long 		if (iflib_verbose_debug) {
35524c7070dbSScott Long 			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
35534c7070dbSScott Long 			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
35544c7070dbSScott Long 			       reclaim, thresh);
35554c7070dbSScott Long 
35564c7070dbSScott Long 		}
35574c7070dbSScott Long #endif
35584c7070dbSScott Long 		return (0);
35594c7070dbSScott Long 	}
35604c7070dbSScott Long 	iflib_tx_desc_free(txq, reclaim);
35614c7070dbSScott Long 	txq->ift_cleaned += reclaim;
35624c7070dbSScott Long 	txq->ift_in_use -= reclaim;
35634c7070dbSScott Long 
35644c7070dbSScott Long 	return (reclaim);
35654c7070dbSScott Long }
35664c7070dbSScott Long 
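/*
 * Return a pointer to the mp_ring slot at cidx + offset, prefetching that
 * mbuf and, when more items remain, the ones in the following slots.
 */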
35674c7070dbSScott Long static struct mbuf **
356895246abbSSean Bruno _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
35694c7070dbSScott Long {
357095246abbSSean Bruno 	int next, size;
357195246abbSSean Bruno 	struct mbuf **items;
35724c7070dbSScott Long 
357395246abbSSean Bruno 	size = r->size;
357495246abbSSean Bruno 	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
357595246abbSSean Bruno 	items = __DEVOLATILE(struct mbuf **, &r->items[0]);
357695246abbSSean Bruno 
357795246abbSSean Bruno 	prefetch(items[(cidx + offset) & (size-1)]);
357895246abbSSean Bruno 	if (remaining > 1) {
35793429c02fSStephen Hurd 		prefetch2cachelines(&items[next]);
35803429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
35813429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
35823429c02fSStephen Hurd 		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
358395246abbSSean Bruno 	}
358495246abbSSean Bruno 	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
35854c7070dbSScott Long }
35864c7070dbSScott Long 
35874c7070dbSScott Long static void
35884c7070dbSScott Long iflib_txq_check_drain(iflib_txq_t txq, int budget)
35894c7070dbSScott Long {
35904c7070dbSScott Long 
359195246abbSSean Bruno 	ifmp_ring_check_drainage(txq->ift_br, budget);
35924c7070dbSScott Long }
35934c7070dbSScott Long 
35944c7070dbSScott Long static uint32_t
35954c7070dbSScott Long iflib_txq_can_drain(struct ifmp_ring *r)
35964c7070dbSScott Long {
35974c7070dbSScott Long 	iflib_txq_t txq = r->cookie;
35984c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
35994c7070dbSScott Long 
360095dcf343SMarius Strobl 	if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2)
360195dcf343SMarius Strobl 		return (1);
36028a04b53dSKonstantin Belousov 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
36038a04b53dSKonstantin Belousov 	    BUS_DMASYNC_POSTREAD);
360495dcf343SMarius Strobl 	return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
360595dcf343SMarius Strobl 	    false));
36064c7070dbSScott Long }
36074c7070dbSScott Long 
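/*
 * mp_ring drain callback: reclaim completed descriptors, then encap as many
 * queued mbufs as descriptor space allows, tapping BPF, ringing the doorbell
 * as needed, and updating the interface counters.
 */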
36084c7070dbSScott Long static uint32_t
36094c7070dbSScott Long iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
36104c7070dbSScott Long {
36114c7070dbSScott Long 	iflib_txq_t txq = r->cookie;
36124c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
36131722eeacSMarius Strobl 	if_t ifp = ctx->ifc_ifp;
3614c2c5d1e7SMarius Strobl 	struct mbuf *m, **mp;
3615c2c5d1e7SMarius Strobl 	int avail, bytes_sent, consumed, count, err, i, in_use_prev;
3616c2c5d1e7SMarius Strobl 	int mcast_sent, pkt_sent, reclaimed, txq_avail;
3617c2c5d1e7SMarius Strobl 	bool do_prefetch, rang, ring;
36184c7070dbSScott Long 
36194c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
36204c7070dbSScott Long 			    !LINK_ACTIVE(ctx))) {
36214c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_notready);
36224c7070dbSScott Long 		return (0);
36234c7070dbSScott Long 	}
362495246abbSSean Bruno 	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
362595246abbSSean Bruno 	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
36264c7070dbSScott Long 	avail = IDXDIFF(pidx, cidx, r->size);
36274c7070dbSScott Long 	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
36284c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_flushing);
36294c7070dbSScott Long 		for (i = 0; i < avail; i++) {
3630bc0e855bSStephen Hurd 			if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq))
363123ac9029SStephen Hurd 				m_free(r->items[(cidx + i) & (r->size-1)]);
36324c7070dbSScott Long 			r->items[(cidx + i) & (r->size-1)] = NULL;
36334c7070dbSScott Long 		}
36344c7070dbSScott Long 		return (avail);
36354c7070dbSScott Long 	}
363695246abbSSean Bruno 
36374c7070dbSScott Long 	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
36384c7070dbSScott Long 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
36394c7070dbSScott Long 		CALLOUT_LOCK(txq);
36404c7070dbSScott Long 		callout_stop(&txq->ift_timer);
36414c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
36424c7070dbSScott Long 		DBG_COUNTER_INC(txq_drain_oactive);
36434c7070dbSScott Long 		return (0);
36444c7070dbSScott Long 	}
364595246abbSSean Bruno 	if (reclaimed)
364695246abbSSean Bruno 		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
36474c7070dbSScott Long 	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
36484c7070dbSScott Long 	count = MIN(avail, TX_BATCH_SIZE);
3649da69b8f9SSean Bruno #ifdef INVARIANTS
3650da69b8f9SSean Bruno 	if (iflib_verbose_debug)
3651da69b8f9SSean Bruno 		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3652da69b8f9SSean Bruno 		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3653da69b8f9SSean Bruno #endif
365495246abbSSean Bruno 	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3655c2c5d1e7SMarius Strobl 	txq_avail = TXQ_AVAIL(txq);
36561ae4848cSMatt Macy 	err = 0;
3657c2c5d1e7SMarius Strobl 	for (i = 0; i < count && txq_avail > MAX_TX_DESC(ctx) + 2; i++) {
36581ae4848cSMatt Macy 		int rem = do_prefetch ? count - i : 0;
36594c7070dbSScott Long 
366095246abbSSean Bruno 		mp = _ring_peek_one(r, cidx, i, rem);
3661da69b8f9SSean Bruno 		MPASS(mp != NULL && *mp != NULL);
366295246abbSSean Bruno 		if (__predict_false(*mp == (struct mbuf *)txq)) {
366395246abbSSean Bruno 			consumed++;
366495246abbSSean Bruno 			continue;
366595246abbSSean Bruno 		}
36664c7070dbSScott Long 		in_use_prev = txq->ift_in_use;
366795246abbSSean Bruno 		err = iflib_encap(txq, mp);
366895246abbSSean Bruno 		if (__predict_false(err)) {
3669da69b8f9SSean Bruno 			/* no room - bail out */
367095246abbSSean Bruno 			if (err == ENOBUFS)
36714c7070dbSScott Long 				break;
36724c7070dbSScott Long 			consumed++;
3673da69b8f9SSean Bruno 			/* we can't send this packet - skip it */
36744c7070dbSScott Long 			continue;
3675da69b8f9SSean Bruno 		}
367695246abbSSean Bruno 		consumed++;
36774c7070dbSScott Long 		pkt_sent++;
36784c7070dbSScott Long 		m = *mp;
36794c7070dbSScott Long 		DBG_COUNTER_INC(tx_sent);
36804c7070dbSScott Long 		bytes_sent += m->m_pkthdr.len;
368195246abbSSean Bruno 		mcast_sent += !!(m->m_flags & M_MCAST);
3682c2c5d1e7SMarius Strobl 		txq_avail = TXQ_AVAIL(txq);
36834c7070dbSScott Long 
36844c7070dbSScott Long 		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
36854c7070dbSScott Long 		ETHER_BPF_MTAP(ifp, m);
368695246abbSSean Bruno 		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
36874c7070dbSScott Long 			break;
368895246abbSSean Bruno 		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
36894c7070dbSScott Long 	}
36904c7070dbSScott Long 
369195246abbSSean Bruno 	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
369295246abbSSean Bruno 	ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
369395246abbSSean Bruno 	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
36944c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
36954c7070dbSScott Long 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
36964c7070dbSScott Long 	if (mcast_sent)
36974c7070dbSScott Long 		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3698da69b8f9SSean Bruno #ifdef INVARIANTS
3699da69b8f9SSean Bruno 	if (iflib_verbose_debug)
3700da69b8f9SSean Bruno 		printf("consumed=%d\n", consumed);
3701da69b8f9SSean Bruno #endif
37024c7070dbSScott Long 	return (consumed);
37034c7070dbSScott Long }
37044c7070dbSScott Long 
3705da69b8f9SSean Bruno static uint32_t
3706da69b8f9SSean Bruno iflib_txq_drain_always(struct ifmp_ring *r)
3707da69b8f9SSean Bruno {
3708da69b8f9SSean Bruno 	return (1);
3709da69b8f9SSean Bruno }
3710da69b8f9SSean Bruno 
3711da69b8f9SSean Bruno static uint32_t
3712da69b8f9SSean Bruno iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3713da69b8f9SSean Bruno {
3714da69b8f9SSean Bruno 	int i, avail;
3715da69b8f9SSean Bruno 	struct mbuf **mp;
3716da69b8f9SSean Bruno 	iflib_txq_t txq;
3717da69b8f9SSean Bruno 
3718da69b8f9SSean Bruno 	txq = r->cookie;
3719da69b8f9SSean Bruno 
3720da69b8f9SSean Bruno 	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3721da69b8f9SSean Bruno 	CALLOUT_LOCK(txq);
3722da69b8f9SSean Bruno 	callout_stop(&txq->ift_timer);
3723da69b8f9SSean Bruno 	CALLOUT_UNLOCK(txq);
3724da69b8f9SSean Bruno 
3725da69b8f9SSean Bruno 	avail = IDXDIFF(pidx, cidx, r->size);
3726da69b8f9SSean Bruno 	for (i = 0; i < avail; i++) {
372795246abbSSean Bruno 		mp = _ring_peek_one(r, cidx, i, avail - i);
372895246abbSSean Bruno 		if (__predict_false(*mp == (struct mbuf *)txq))
372995246abbSSean Bruno 			continue;
3730da69b8f9SSean Bruno 		m_freem(*mp);
373164e6fc13SStephen Hurd 		DBG_COUNTER_INC(tx_frees);
3732da69b8f9SSean Bruno 	}
3733da69b8f9SSean Bruno 	MPASS(ifmp_ring_is_stalled(r) == 0);
3734da69b8f9SSean Bruno 	return (avail);
3735da69b8f9SSean Bruno }
3736da69b8f9SSean Bruno 
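/*
 * Empty the TX mp_ring by temporarily installing drain callbacks that simply
 * free every queued mbuf, then restore the normal drain routines.
 */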
3737da69b8f9SSean Bruno static void
3738da69b8f9SSean Bruno iflib_ifmp_purge(iflib_txq_t txq)
3739da69b8f9SSean Bruno {
3740da69b8f9SSean Bruno 	struct ifmp_ring *r;
3741da69b8f9SSean Bruno 
374295246abbSSean Bruno 	r = txq->ift_br;
3743da69b8f9SSean Bruno 	r->drain = iflib_txq_drain_free;
3744da69b8f9SSean Bruno 	r->can_drain = iflib_txq_drain_always;
3745da69b8f9SSean Bruno 
3746da69b8f9SSean Bruno 	ifmp_ring_check_drainage(r, r->size);
3747da69b8f9SSean Bruno 
3748da69b8f9SSean Bruno 	r->drain = iflib_txq_drain;
3749da69b8f9SSean Bruno 	r->can_drain = iflib_txq_can_drain;
3750da69b8f9SSean Bruno }
3751da69b8f9SSean Bruno 
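/*
 * TX queue task: drive the queue's mp_ring (or hand off to netmap/ALTQ when
 * enabled) and re-enable the queue's TX interrupt.
 */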
37524c7070dbSScott Long static void
375323ac9029SStephen Hurd _task_fn_tx(void *context)
37544c7070dbSScott Long {
37554c7070dbSScott Long 	iflib_txq_t txq = context;
37564c7070dbSScott Long 	if_ctx_t ctx = txq->ift_ctx;
3757a6611c93SMarius Strobl #if defined(ALTQ) || defined(DEV_NETMAP)
3758a6611c93SMarius Strobl 	if_t ifp = ctx->ifc_ifp;
3759a6611c93SMarius Strobl #endif
3760fe51d4cdSStephen Hurd 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
37614c7070dbSScott Long 
37621248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
37631248952aSSean Bruno 	txq->ift_cpu_exec_count[curcpu]++;
37641248952aSSean Bruno #endif
37654c7070dbSScott Long 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
37664c7070dbSScott Long 		return;
376795dcf343SMarius Strobl #ifdef DEV_NETMAP
3768a6611c93SMarius Strobl 	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
37698a04b53dSKonstantin Belousov 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
37708a04b53dSKonstantin Belousov 		    BUS_DMASYNC_POSTREAD);
377195246abbSSean Bruno 		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
3772a6611c93SMarius Strobl 			netmap_tx_irq(ifp, txq->ift_id);
37733d10e9edSMarius Strobl 		if (ctx->ifc_flags & IFC_LEGACY)
37743d10e9edSMarius Strobl 			IFDI_INTR_ENABLE(ctx);
37753d10e9edSMarius Strobl 		else
377695246abbSSean Bruno 			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
377795246abbSSean Bruno 		return;
377895246abbSSean Bruno 	}
377995dcf343SMarius Strobl #endif
3780b8ca4756SPatrick Kelsey #ifdef ALTQ
3781b8ca4756SPatrick Kelsey 	if (ALTQ_IS_ENABLED(&ifp->if_snd))
3782b8ca4756SPatrick Kelsey 		iflib_altq_if_start(ifp);
3783b8ca4756SPatrick Kelsey #endif
378495246abbSSean Bruno 	if (txq->ift_db_pending)
3785fe51d4cdSStephen Hurd 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
3786fe51d4cdSStephen Hurd 	else if (!abdicate)
3787fe51d4cdSStephen Hurd 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3788fe51d4cdSStephen Hurd 	/*
3789fe51d4cdSStephen Hurd 	 * When abdicating, we always need to check drainage, not just when we don't enqueue
3790fe51d4cdSStephen Hurd 	 */
3791fe51d4cdSStephen Hurd 	if (abdicate)
3792fe51d4cdSStephen Hurd 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
379395246abbSSean Bruno 	if (ctx->ifc_flags & IFC_LEGACY)
379495246abbSSean Bruno 		IFDI_INTR_ENABLE(ctx);
37953d10e9edSMarius Strobl 	else
37961ae4848cSMatt Macy 		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
37974c7070dbSScott Long }
37984c7070dbSScott Long 
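/*
 * RX queue task: process up to the rx_budget sysctl's worth of packets via
 * iflib_rxeof(); re-enable the interrupt when done, re-enqueue the task if
 * more work remains, or arm the watchdog callout when IFLIB_RXEOF_EMPTY is
 * reported.
 */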
37994c7070dbSScott Long static void
380023ac9029SStephen Hurd _task_fn_rx(void *context)
38014c7070dbSScott Long {
38024c7070dbSScott Long 	iflib_rxq_t rxq = context;
38034c7070dbSScott Long 	if_ctx_t ctx = rxq->ifr_ctx;
3804fb1a29b4SHans Petter Selasky 	uint8_t more;
3805f4d2154eSStephen Hurd 	uint16_t budget;
38064c7070dbSScott Long 
38071248952aSSean Bruno #ifdef IFLIB_DIAGNOSTICS
38081248952aSSean Bruno 	rxq->ifr_cpu_exec_count[curcpu]++;
38091248952aSSean Bruno #endif
38104c7070dbSScott Long 	DBG_COUNTER_INC(task_fn_rxs);
38114c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
38124c7070dbSScott Long 		return;
3813d0d0ad0aSStephen Hurd #ifdef DEV_NETMAP
3814d0d0ad0aSStephen Hurd 	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
3815d0d0ad0aSStephen Hurd 		u_int work = 0;
3816d0d0ad0aSStephen Hurd 		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
3817fb1a29b4SHans Petter Selasky 			more = 0;
3818fb1a29b4SHans Petter Selasky 			goto skip_rxeof;
3819d0d0ad0aSStephen Hurd 		}
3820d0d0ad0aSStephen Hurd 	}
3821d0d0ad0aSStephen Hurd #endif
3822f4d2154eSStephen Hurd 	budget = ctx->ifc_sysctl_rx_budget;
3823f4d2154eSStephen Hurd 	if (budget == 0)
3824f4d2154eSStephen Hurd 		budget = 16;	/* XXX */
3825fb1a29b4SHans Petter Selasky 	more = iflib_rxeof(rxq, budget);
3826fb1a29b4SHans Petter Selasky #ifdef DEV_NETMAP
3827fb1a29b4SHans Petter Selasky skip_rxeof:
3828fb1a29b4SHans Petter Selasky #endif
3829fb1a29b4SHans Petter Selasky 	if ((more & IFLIB_RXEOF_MORE) == 0) {
38304c7070dbSScott Long 		if (ctx->ifc_flags & IFC_LEGACY)
38314c7070dbSScott Long 			IFDI_INTR_ENABLE(ctx);
38323d10e9edSMarius Strobl 		else
38331ae4848cSMatt Macy 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
38341ae4848cSMatt Macy 		DBG_COUNTER_INC(rx_intr_enables);
38354c7070dbSScott Long 	}
38364c7070dbSScott Long 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
38374c7070dbSScott Long 		return;
3838fb1a29b4SHans Petter Selasky 
3839fb1a29b4SHans Petter Selasky 	if (more & IFLIB_RXEOF_MORE)
38404c7070dbSScott Long 		GROUPTASK_ENQUEUE(&rxq->ifr_task);
3841fb1a29b4SHans Petter Selasky 	else if (more & IFLIB_RXEOF_EMPTY)
3842fb1a29b4SHans Petter Selasky 		callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq);
38434c7070dbSScott Long }
38444c7070dbSScott Long 
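/*
 * Admin task: perform deferred watchdog resets and driver status updates,
 * restart the per-queue TX timers, and re-init the interface when
 * IFC_DO_RESET was requested.
 */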
38454c7070dbSScott Long static void
384623ac9029SStephen Hurd _task_fn_admin(void *context)
38474c7070dbSScott Long {
38484c7070dbSScott Long 	if_ctx_t ctx = context;
38494c7070dbSScott Long 	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
38504c7070dbSScott Long 	iflib_txq_t txq;
3851ab2e3f79SStephen Hurd 	int i;
385277c1fcecSEric Joyner 	bool oactive, running, do_reset, do_watchdog, in_detach;
3853dd7fbcf1SStephen Hurd 	uint32_t reset_on = hz / 2;
3854ab2e3f79SStephen Hurd 
38557b610b60SSean Bruno 	STATE_LOCK(ctx);
38567b610b60SSean Bruno 	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
38577b610b60SSean Bruno 	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
38587b610b60SSean Bruno 	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
38597b610b60SSean Bruno 	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
386077c1fcecSEric Joyner 	in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
38617b610b60SSean Bruno 	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
38627b610b60SSean Bruno 	STATE_UNLOCK(ctx);
38637b610b60SSean Bruno 
386477c1fcecSEric Joyner 	if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
386577c1fcecSEric Joyner 		return;
386677c1fcecSEric Joyner 	if (in_detach)
3867ab2e3f79SStephen Hurd 		return;
38684c7070dbSScott Long 
38694c7070dbSScott Long 	CTX_LOCK(ctx);
38704c7070dbSScott Long 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
38714c7070dbSScott Long 		CALLOUT_LOCK(txq);
38724c7070dbSScott Long 		callout_stop(&txq->ift_timer);
38734c7070dbSScott Long 		CALLOUT_UNLOCK(txq);
38744c7070dbSScott Long 	}
38757b610b60SSean Bruno 	if (do_watchdog) {
38767b610b60SSean Bruno 		ctx->ifc_watchdog_events++;
38777b610b60SSean Bruno 		IFDI_WATCHDOG_RESET(ctx);
38787b610b60SSean Bruno 	}
3879d300df01SStephen Hurd 	IFDI_UPDATE_ADMIN_STATUS(ctx);
3880dd7fbcf1SStephen Hurd 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3881dd7fbcf1SStephen Hurd #ifdef DEV_NETMAP
3882dd7fbcf1SStephen Hurd 		reset_on = hz / 2;
3883dd7fbcf1SStephen Hurd 		if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
388495dcf343SMarius Strobl 			iflib_netmap_timer_adjust(ctx, txq, &reset_on);
3885dd7fbcf1SStephen Hurd #endif
3886dd7fbcf1SStephen Hurd 		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
3887dd7fbcf1SStephen Hurd 	}
3888ab2e3f79SStephen Hurd 	IFDI_LINK_INTR_ENABLE(ctx);
38897b610b60SSean Bruno 	if (do_reset)
3890ab2e3f79SStephen Hurd 		iflib_if_init_locked(ctx);
38914c7070dbSScott Long 	CTX_UNLOCK(ctx);
38924c7070dbSScott Long 
3893ab2e3f79SStephen Hurd 	if (LINK_ACTIVE(ctx) == 0)
38944c7070dbSScott Long 		return;
38954c7070dbSScott Long 	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
38964c7070dbSScott Long 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
38974c7070dbSScott Long }
38984c7070dbSScott Long 
38994c7070dbSScott Long 
39004c7070dbSScott Long static void
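/*
 * IOV task: pass VF FLR (function-level reset) events through to the driver
 * via IFDI_VFLR_HANDLE().
 */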
390123ac9029SStephen Hurd _task_fn_iov(void *context)
39024c7070dbSScott Long {
39034c7070dbSScott Long 	if_ctx_t ctx = context;
39044c7070dbSScott Long 
390577c1fcecSEric Joyner 	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
390677c1fcecSEric Joyner 	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
39074c7070dbSScott Long 		return;
39084c7070dbSScott Long 
39094c7070dbSScott Long 	CTX_LOCK(ctx);
39104c7070dbSScott Long 	IFDI_VFLR_HANDLE(ctx);
39114c7070dbSScott Long 	CTX_UNLOCK(ctx);
39124c7070dbSScott Long }
39134c7070dbSScott Long 
39144c7070dbSScott Long static int
39154c7070dbSScott Long iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
39164c7070dbSScott Long {
39174c7070dbSScott Long 	int err;
39184c7070dbSScott Long 	if_int_delay_info_t info;
39194c7070dbSScott Long 	if_ctx_t ctx;
39204c7070dbSScott Long 
39214c7070dbSScott Long 	info = (if_int_delay_info_t)arg1;
39224c7070dbSScott Long 	ctx = info->iidi_ctx;
39234c7070dbSScott Long 	info->iidi_req = req;
39244c7070dbSScott Long 	info->iidi_oidp = oidp;
39254c7070dbSScott Long 	CTX_LOCK(ctx);
39264c7070dbSScott Long 	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
39274c7070dbSScott Long 	CTX_UNLOCK(ctx);
39284c7070dbSScott Long 	return (err);
39294c7070dbSScott Long }
39304c7070dbSScott Long 
39314c7070dbSScott Long /*********************************************************************
39324c7070dbSScott Long  *
39334c7070dbSScott Long  *  IFNET FUNCTIONS
39344c7070dbSScott Long  *
39354c7070dbSScott Long  **********************************************************************/
39364c7070dbSScott Long 
39374c7070dbSScott Long static void
39384c7070dbSScott Long iflib_if_init_locked(if_ctx_t ctx)
39394c7070dbSScott Long {
39404c7070dbSScott Long 	iflib_stop(ctx);
39414c7070dbSScott Long 	iflib_init_locked(ctx);
39424c7070dbSScott Long }
39434c7070dbSScott Long 
39444c7070dbSScott Long 
39454c7070dbSScott Long static void
39464c7070dbSScott Long iflib_if_init(void *arg)
39474c7070dbSScott Long {
39484c7070dbSScott Long 	if_ctx_t ctx = arg;
39494c7070dbSScott Long 
39504c7070dbSScott Long 	CTX_LOCK(ctx);
39514c7070dbSScott Long 	iflib_if_init_locked(ctx);
39524c7070dbSScott Long 	CTX_UNLOCK(ctx);
39534c7070dbSScott Long }
39544c7070dbSScott Long 
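/*
 * if_transmit method: select a TX queue from the mbuf's flow id (queue 0
 * when ALTQ is enabled or only one queue set exists) and enqueue the packet
 * on that queue's mp_ring, kicking or draining the TX task according to the
 * tx_abdicate sysctl.
 */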
39554c7070dbSScott Long static int
39564c7070dbSScott Long iflib_if_transmit(if_t ifp, struct mbuf *m)
39574c7070dbSScott Long {
39584c7070dbSScott Long 	if_ctx_t	ctx = if_getsoftc(ifp);
39594c7070dbSScott Long 
39604c7070dbSScott Long 	iflib_txq_t txq;
396123ac9029SStephen Hurd 	int err, qidx;
3962fe51d4cdSStephen Hurd 	int abdicate = ctx->ifc_sysctl_tx_abdicate;
39634c7070dbSScott Long 
39644c7070dbSScott Long 	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
39654c7070dbSScott Long 		DBG_COUNTER_INC(tx_frees);
39664c7070dbSScott Long 		m_freem(m);
3967225eae1bSEric Joyner 		return (ENETDOWN);
39684c7070dbSScott Long 	}
39694c7070dbSScott Long 
397023ac9029SStephen Hurd 	MPASS(m->m_nextpkt == NULL);
3971b8ca4756SPatrick Kelsey 	/* ALTQ-enabled interfaces always use queue 0. */
39724c7070dbSScott Long 	qidx = 0;
3973b8ca4756SPatrick Kelsey 	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd))
39744c7070dbSScott Long 		qidx = QIDX(ctx, m);
39754c7070dbSScott Long 	/*
39764c7070dbSScott Long 	 * XXX calculate buf_ring based on flowid (divvy up bits?)
39774c7070dbSScott Long 	 */
39784c7070dbSScott Long 	txq = &ctx->ifc_txqs[qidx];
39794c7070dbSScott Long 
39804c7070dbSScott Long #ifdef DRIVER_BACKPRESSURE
39814c7070dbSScott Long 	if (txq->ift_closed) {
39824c7070dbSScott Long 		while (m != NULL) {
39834c7070dbSScott Long 			next = m->m_nextpkt;
39844c7070dbSScott Long 			m->m_nextpkt = NULL;
39854c7070dbSScott Long 			m_freem(m);
398664e6fc13SStephen Hurd 			DBG_COUNTER_INC(tx_frees);
39874c7070dbSScott Long 			m = next;
39884c7070dbSScott Long 		}
39894c7070dbSScott Long 		return (ENOBUFS);
39904c7070dbSScott Long 	}
39914c7070dbSScott Long #endif
399223ac9029SStephen Hurd #ifdef notyet
39934c7070dbSScott Long 	qidx = count = 0;
39944c7070dbSScott Long 	mp = marr;
39954c7070dbSScott Long 	next = m;
39964c7070dbSScott Long 	do {
39974c7070dbSScott Long 		count++;
39984c7070dbSScott Long 		next = next->m_nextpkt;
39994c7070dbSScott Long 	} while (next != NULL);
40004c7070dbSScott Long 
400116fb86abSConrad Meyer 	if (count > nitems(marr))
40024c7070dbSScott Long 		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
40034c7070dbSScott Long 			/* XXX check nextpkt */
40044c7070dbSScott Long 			m_freem(m);
40054c7070dbSScott Long 			/* XXX simplify for now */
40064c7070dbSScott Long 			DBG_COUNTER_INC(tx_frees);
40074c7070dbSScott Long 			return (ENOBUFS);
40084c7070dbSScott Long 		}
40094c7070dbSScott Long 	for (next = m, i = 0; next != NULL; i++) {
40104c7070dbSScott Long 		mp[i] = next;
40114c7070dbSScott Long 		next = next->m_nextpkt;
40124c7070dbSScott Long 		mp[i]->m_nextpkt = NULL;
40134c7070dbSScott Long 	}
401423ac9029SStephen Hurd #endif
40154c7070dbSScott Long 	DBG_COUNTER_INC(tx_seen);
4016fe51d4cdSStephen Hurd 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
40174c7070dbSScott Long 
4018fe51d4cdSStephen Hurd 	if (abdicate)
4019ab2e3f79SStephen Hurd 		GROUPTASK_ENQUEUE(&txq->ift_task);
40201225d9daSStephen Hurd  	if (err) {
4021fe51d4cdSStephen Hurd 		if (!abdicate)
4022fe51d4cdSStephen Hurd 			GROUPTASK_ENQUEUE(&txq->ift_task);
40234c7070dbSScott Long 		/* support forthcoming later */
40244c7070dbSScott Long #ifdef DRIVER_BACKPRESSURE
40254c7070dbSScott Long 		txq->ift_closed = TRUE;
40264c7070dbSScott Long #endif
402795246abbSSean Bruno 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
402823ac9029SStephen Hurd 		m_freem(m);
402964e6fc13SStephen Hurd 		DBG_COUNTER_INC(tx_frees);
40304c7070dbSScott Long 	}
40314c7070dbSScott Long 
40324c7070dbSScott Long 	return (err);
40334c7070dbSScott Long }
40344c7070dbSScott Long 
4035b8ca4756SPatrick Kelsey #ifdef ALTQ
4036b8ca4756SPatrick Kelsey /*
4037b8ca4756SPatrick Kelsey  * The overall approach to integrating iflib with ALTQ is to continue to use
4038b8ca4756SPatrick Kelsey  * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
4039b8ca4756SPatrick Kelsey  * ring.  Technically, when using ALTQ, queueing to an intermediate mp_ring
4040b8ca4756SPatrick Kelsey  * is redundant/unnecessary, but doing so minimizes the amount of
4041b8ca4756SPatrick Kelsey  * ALTQ-specific code required in iflib.  It is assumed that the overhead of
4042b8ca4756SPatrick Kelsey  * redundantly queueing to an intermediate mp_ring is swamped by the
4043b8ca4756SPatrick Kelsey  * performance limitations inherent in using ALTQ.
4044b8ca4756SPatrick Kelsey  *
4045b8ca4756SPatrick Kelsey  * When ALTQ support is compiled in, all iflib drivers will use a transmit
4046b8ca4756SPatrick Kelsey  * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
4047b8ca4756SPatrick Kelsey  * given interface.  If ALTQ is enabled for an interface, then all
4048b8ca4756SPatrick Kelsey  * transmitted packets for that interface will be submitted to the ALTQ
4049b8ca4756SPatrick Kelsey  * subsystem via IFQ_ENQUEUE().  We don't use the legacy if_transmit()
4050b8ca4756SPatrick Kelsey  * implementation because it uses IFQ_HANDOFF(), which will duplicatively
4051b8ca4756SPatrick Kelsey  * update stats that the iflib machinery handles, and which is sensitve to
4052b8ca4756SPatrick Kelsey  * update stats that the iflib machinery handles, and which is sensitive to
4053b8ca4756SPatrick Kelsey  * will be installed as the start routine for use by ALTQ facilities that
4054b8ca4756SPatrick Kelsey  * need to trigger queue drains on a scheduled basis.
4055b8ca4756SPatrick Kelsey  *
4056b8ca4756SPatrick Kelsey  */
4057b8ca4756SPatrick Kelsey static void
4058b8ca4756SPatrick Kelsey iflib_altq_if_start(if_t ifp)
4059b8ca4756SPatrick Kelsey {
4060b8ca4756SPatrick Kelsey 	struct ifaltq *ifq = &ifp->if_snd;
4061b8ca4756SPatrick Kelsey 	struct mbuf *m;
4062b8ca4756SPatrick Kelsey 
4063b8ca4756SPatrick Kelsey 	IFQ_LOCK(ifq);
4064b8ca4756SPatrick Kelsey 	IFQ_DEQUEUE_NOLOCK(ifq, m);
4065b8ca4756SPatrick Kelsey 	while (m != NULL) {
4066b8ca4756SPatrick Kelsey 		iflib_if_transmit(ifp, m);
4067b8ca4756SPatrick Kelsey 		IFQ_DEQUEUE_NOLOCK(ifq, m);
4068b8ca4756SPatrick Kelsey 	}
4069b8ca4756SPatrick Kelsey 	IFQ_UNLOCK(ifq);
4070b8ca4756SPatrick Kelsey }
4071b8ca4756SPatrick Kelsey 
4072b8ca4756SPatrick Kelsey static int
4073b8ca4756SPatrick Kelsey iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
4074b8ca4756SPatrick Kelsey {
4075b8ca4756SPatrick Kelsey 	int err;
4076b8ca4756SPatrick Kelsey 
4077b8ca4756SPatrick Kelsey 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
4078b8ca4756SPatrick Kelsey 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
4079b8ca4756SPatrick Kelsey 		if (err == 0)
4080b8ca4756SPatrick Kelsey 			iflib_altq_if_start(ifp);
4081b8ca4756SPatrick Kelsey 	} else
4082b8ca4756SPatrick Kelsey 		err = iflib_if_transmit(ifp, m);
4083b8ca4756SPatrick Kelsey 
4084b8ca4756SPatrick Kelsey 	return (err);
4085b8ca4756SPatrick Kelsey }
4086b8ca4756SPatrick Kelsey #endif /* ALTQ */
4087b8ca4756SPatrick Kelsey 
40884c7070dbSScott Long static void
40894c7070dbSScott Long iflib_if_qflush(if_t ifp)
40904c7070dbSScott Long {
40914c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
40924c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
40934c7070dbSScott Long 	int i;
40944c7070dbSScott Long 
40957b610b60SSean Bruno 	STATE_LOCK(ctx);
40964c7070dbSScott Long 	ctx->ifc_flags |= IFC_QFLUSH;
40977b610b60SSean Bruno 	STATE_UNLOCK(ctx);
40984c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
409995246abbSSean Bruno 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
41004c7070dbSScott Long 			iflib_txq_check_drain(txq, 0);
41017b610b60SSean Bruno 	STATE_LOCK(ctx);
41024c7070dbSScott Long 	ctx->ifc_flags &= ~IFC_QFLUSH;
41037b610b60SSean Bruno 	STATE_UNLOCK(ctx);
41044c7070dbSScott Long 
4105b8ca4756SPatrick Kelsey 	/*
4106b8ca4756SPatrick Kelsey 	 * When ALTQ is enabled, this will also take care of purging the
4107b8ca4756SPatrick Kelsey 	 * ALTQ queue(s).
4108b8ca4756SPatrick Kelsey 	 */
41094c7070dbSScott Long 	if_qflush(ifp);
41104c7070dbSScott Long }
41114c7070dbSScott Long 
41124c7070dbSScott Long 
41130c919c23SStephen Hurd #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
41140c919c23SStephen Hurd 		     IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
41150c919c23SStephen Hurd 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
41166554362cSAndrew Gallatin 		     IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM | IFCAP_NOMAP)
41174c7070dbSScott Long 
41184c7070dbSScott Long static int
41194c7070dbSScott Long iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
41204c7070dbSScott Long {
41214c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
41224c7070dbSScott Long 	struct ifreq	*ifr = (struct ifreq *)data;
41234c7070dbSScott Long #if defined(INET) || defined(INET6)
41244c7070dbSScott Long 	struct ifaddr	*ifa = (struct ifaddr *)data;
41254c7070dbSScott Long #endif
41261722eeacSMarius Strobl 	bool		avoid_reset = false;
41274c7070dbSScott Long 	int		err = 0, reinit = 0, bits;
41284c7070dbSScott Long 
41294c7070dbSScott Long 	switch (command) {
41304c7070dbSScott Long 	case SIOCSIFADDR:
41314c7070dbSScott Long #ifdef INET
41324c7070dbSScott Long 		if (ifa->ifa_addr->sa_family == AF_INET)
41331722eeacSMarius Strobl 			avoid_reset = true;
41344c7070dbSScott Long #endif
41354c7070dbSScott Long #ifdef INET6
41364c7070dbSScott Long 		if (ifa->ifa_addr->sa_family == AF_INET6)
41371722eeacSMarius Strobl 			avoid_reset = true;
41384c7070dbSScott Long #endif
41394c7070dbSScott Long 		/*
41404c7070dbSScott Long 		** Calling init results in link renegotiation,
41414c7070dbSScott Long 		** so we avoid doing it when possible.
41424c7070dbSScott Long 		*/
41434c7070dbSScott Long 		if (avoid_reset) {
41444c7070dbSScott Long 			if_setflagbits(ifp, IFF_UP,0);
41454c7070dbSScott Long 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
41464c7070dbSScott Long 				reinit = 1;
41474c7070dbSScott Long #ifdef INET
41484c7070dbSScott Long 			if (!(if_getflags(ifp) & IFF_NOARP))
41494c7070dbSScott Long 				arp_ifinit(ifp, ifa);
41504c7070dbSScott Long #endif
41514c7070dbSScott Long 		} else
41524c7070dbSScott Long 			err = ether_ioctl(ifp, command, data);
41534c7070dbSScott Long 		break;
41544c7070dbSScott Long 	case SIOCSIFMTU:
41554c7070dbSScott Long 		CTX_LOCK(ctx);
41564c7070dbSScott Long 		if (ifr->ifr_mtu == if_getmtu(ifp)) {
41574c7070dbSScott Long 			CTX_UNLOCK(ctx);
41584c7070dbSScott Long 			break;
41594c7070dbSScott Long 		}
41604c7070dbSScott Long 		bits = if_getdrvflags(ifp);
41614c7070dbSScott Long 		/* stop the driver and free any clusters before proceeding */
41624c7070dbSScott Long 		iflib_stop(ctx);
41634c7070dbSScott Long 
41644c7070dbSScott Long 		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
41657b610b60SSean Bruno 			STATE_LOCK(ctx);
41664c7070dbSScott Long 			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
41674c7070dbSScott Long 				ctx->ifc_flags |= IFC_MULTISEG;
41684c7070dbSScott Long 			else
41694c7070dbSScott Long 				ctx->ifc_flags &= ~IFC_MULTISEG;
41707b610b60SSean Bruno 			STATE_UNLOCK(ctx);
41714c7070dbSScott Long 			err = if_setmtu(ifp, ifr->ifr_mtu);
41724c7070dbSScott Long 		}
41734c7070dbSScott Long 		iflib_init_locked(ctx);
41747b610b60SSean Bruno 		STATE_LOCK(ctx);
41754c7070dbSScott Long 		if_setdrvflags(ifp, bits);
41767b610b60SSean Bruno 		STATE_UNLOCK(ctx);
41774c7070dbSScott Long 		CTX_UNLOCK(ctx);
41784c7070dbSScott Long 		break;
41794c7070dbSScott Long 	case SIOCSIFFLAGS:
4180ab2e3f79SStephen Hurd 		CTX_LOCK(ctx);
4181ab2e3f79SStephen Hurd 		if (if_getflags(ifp) & IFF_UP) {
4182ab2e3f79SStephen Hurd 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4183ab2e3f79SStephen Hurd 				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
4184ab2e3f79SStephen Hurd 				    (IFF_PROMISC | IFF_ALLMULTI)) {
4185ab2e3f79SStephen Hurd 					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
4186ab2e3f79SStephen Hurd 				}
4187ab2e3f79SStephen Hurd 			} else
4188ab2e3f79SStephen Hurd 				reinit = 1;
4189ab2e3f79SStephen Hurd 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4190ab2e3f79SStephen Hurd 			iflib_stop(ctx);
4191ab2e3f79SStephen Hurd 		}
4192ab2e3f79SStephen Hurd 		ctx->ifc_if_flags = if_getflags(ifp);
4193ab2e3f79SStephen Hurd 		CTX_UNLOCK(ctx);
41944c7070dbSScott Long 		break;
41954c7070dbSScott Long 	case SIOCADDMULTI:
41964c7070dbSScott Long 	case SIOCDELMULTI:
41974c7070dbSScott Long 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4198ab2e3f79SStephen Hurd 			CTX_LOCK(ctx);
4199ab2e3f79SStephen Hurd 			IFDI_INTR_DISABLE(ctx);
4200ab2e3f79SStephen Hurd 			IFDI_MULTI_SET(ctx);
4201ab2e3f79SStephen Hurd 			IFDI_INTR_ENABLE(ctx);
4202ab2e3f79SStephen Hurd 			CTX_UNLOCK(ctx);
42034c7070dbSScott Long 		}
42044c7070dbSScott Long 		break;
42054c7070dbSScott Long 	case SIOCSIFMEDIA:
42064c7070dbSScott Long 		CTX_LOCK(ctx);
42074c7070dbSScott Long 		IFDI_MEDIA_SET(ctx);
42084c7070dbSScott Long 		CTX_UNLOCK(ctx);
42091722eeacSMarius Strobl 		/* FALLTHROUGH */
42104c7070dbSScott Long 	case SIOCGIFMEDIA:
4211a027c8e9SStephen Hurd 	case SIOCGIFXMEDIA:
4212e2621d96SMatt Macy 		err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command);
42134c7070dbSScott Long 		break;
42144c7070dbSScott Long 	case SIOCGI2C:
42154c7070dbSScott Long 	{
42164c7070dbSScott Long 		struct ifi2creq i2c;
42174c7070dbSScott Long 
4218541d96aaSBrooks Davis 		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
42194c7070dbSScott Long 		if (err != 0)
42204c7070dbSScott Long 			break;
42214c7070dbSScott Long 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
42224c7070dbSScott Long 			err = EINVAL;
42234c7070dbSScott Long 			break;
42244c7070dbSScott Long 		}
42254c7070dbSScott Long 		if (i2c.len > sizeof(i2c.data)) {
42264c7070dbSScott Long 			err = EINVAL;
42274c7070dbSScott Long 			break;
42284c7070dbSScott Long 		}
42294c7070dbSScott Long 
42304c7070dbSScott Long 		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
4231541d96aaSBrooks Davis 			err = copyout(&i2c, ifr_data_get_ptr(ifr),
4232541d96aaSBrooks Davis 			    sizeof(i2c));
42334c7070dbSScott Long 		break;
42344c7070dbSScott Long 	}
42354c7070dbSScott Long 	case SIOCSIFCAP:
42364c7070dbSScott Long 	{
42370c919c23SStephen Hurd 		int mask, setmask, oldmask;
42384c7070dbSScott Long 
42390c919c23SStephen Hurd 		oldmask = if_getcapenable(ifp);
42400c919c23SStephen Hurd 		mask = ifr->ifr_reqcap ^ oldmask;
42416554362cSAndrew Gallatin 		mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_NOMAP;
42424c7070dbSScott Long 		setmask = 0;
42434c7070dbSScott Long #ifdef TCP_OFFLOAD
42444c7070dbSScott Long 		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
42454c7070dbSScott Long #endif
42464c7070dbSScott Long 		setmask |= (mask & IFCAP_FLAGS);
42470c919c23SStephen Hurd 		setmask |= (mask & IFCAP_WOL);
42484c7070dbSScott Long 
42490c919c23SStephen Hurd 		/*
4250a42546dfSStephen Hurd 		 * If any RX csum has changed, change all the ones that
4251a42546dfSStephen Hurd 		 * are supported by the driver.
42520c919c23SStephen Hurd 		 */
4253a42546dfSStephen Hurd 		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4254a42546dfSStephen Hurd 			setmask |= ctx->ifc_softc_ctx.isc_capabilities &
4255a42546dfSStephen Hurd 			    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
4256a42546dfSStephen Hurd 		}
42570c919c23SStephen Hurd 
42584c7070dbSScott Long 		/*
42594c7070dbSScott Long 		 * We want to ensure that traffic has stopped before we change any of the flags.
42604c7070dbSScott Long 		 */
42614c7070dbSScott Long 		if (setmask) {
42624c7070dbSScott Long 			CTX_LOCK(ctx);
42634c7070dbSScott Long 			bits = if_getdrvflags(ifp);
42640c919c23SStephen Hurd 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
42654c7070dbSScott Long 				iflib_stop(ctx);
42667b610b60SSean Bruno 			STATE_LOCK(ctx);
42674c7070dbSScott Long 			if_togglecapenable(ifp, setmask);
42687b610b60SSean Bruno 			STATE_UNLOCK(ctx);
42690c919c23SStephen Hurd 			if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
42704c7070dbSScott Long 				iflib_init_locked(ctx);
42717b610b60SSean Bruno 			STATE_LOCK(ctx);
42724c7070dbSScott Long 			if_setdrvflags(ifp, bits);
42737b610b60SSean Bruno 			STATE_UNLOCK(ctx);
42744c7070dbSScott Long 			CTX_UNLOCK(ctx);
42754c7070dbSScott Long 		}
42760c919c23SStephen Hurd 		if_vlancap(ifp);
42774c7070dbSScott Long 		break;
42784c7070dbSScott Long 	}
42794c7070dbSScott Long 	case SIOCGPRIVATE_0:
42804c7070dbSScott Long 	case SIOCSDRVSPEC:
42814c7070dbSScott Long 	case SIOCGDRVSPEC:
42824c7070dbSScott Long 		CTX_LOCK(ctx);
42834c7070dbSScott Long 		err = IFDI_PRIV_IOCTL(ctx, command, data);
42844c7070dbSScott Long 		CTX_UNLOCK(ctx);
42854c7070dbSScott Long 		break;
42864c7070dbSScott Long 	default:
42874c7070dbSScott Long 		err = ether_ioctl(ifp, command, data);
42884c7070dbSScott Long 		break;
42894c7070dbSScott Long 	}
42904c7070dbSScott Long 	if (reinit)
42914c7070dbSScott Long 		iflib_if_init(ctx);
42924c7070dbSScott Long 	return (err);
42934c7070dbSScott Long }
42944c7070dbSScott Long 
42954c7070dbSScott Long static uint64_t
42964c7070dbSScott Long iflib_if_get_counter(if_t ifp, ift_counter cnt)
42974c7070dbSScott Long {
42984c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
42994c7070dbSScott Long 
43004c7070dbSScott Long 	return (IFDI_GET_COUNTER(ctx, cnt));
43014c7070dbSScott Long }
43024c7070dbSScott Long 
43034c7070dbSScott Long /*********************************************************************
43044c7070dbSScott Long  *
43054c7070dbSScott Long  *  OTHER FUNCTIONS EXPORTED TO THE STACK
43064c7070dbSScott Long  *
43074c7070dbSScott Long  **********************************************************************/
43084c7070dbSScott Long 
43094c7070dbSScott Long static void
43104c7070dbSScott Long iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
43114c7070dbSScott Long {
43124c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
43134c7070dbSScott Long 
43144c7070dbSScott Long 	if ((void *)ctx != arg)
43154c7070dbSScott Long 		return;
43164c7070dbSScott Long 
43174c7070dbSScott Long 	if ((vtag == 0) || (vtag > 4095))
43184c7070dbSScott Long 		return;
43194c7070dbSScott Long 
432053b5b9b0SEric Joyner 	if (iflib_in_detach(ctx))
432153b5b9b0SEric Joyner 		return;
432253b5b9b0SEric Joyner 
43234c7070dbSScott Long 	CTX_LOCK(ctx);
43244c7070dbSScott Long 	IFDI_VLAN_REGISTER(ctx, vtag);
43254c7070dbSScott Long 	/* Re-init to load the changes */
43264c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
432721e10b16SSean Bruno 		iflib_if_init_locked(ctx);
43284c7070dbSScott Long 	CTX_UNLOCK(ctx);
43294c7070dbSScott Long }
43304c7070dbSScott Long 
43314c7070dbSScott Long static void
43324c7070dbSScott Long iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
43334c7070dbSScott Long {
43344c7070dbSScott Long 	if_ctx_t ctx = if_getsoftc(ifp);
43354c7070dbSScott Long 
43364c7070dbSScott Long 	if ((void *)ctx != arg)
43374c7070dbSScott Long 		return;
43384c7070dbSScott Long 
43394c7070dbSScott Long 	if ((vtag == 0) || (vtag > 4095))
43404c7070dbSScott Long 		return;
43414c7070dbSScott Long 
43424c7070dbSScott Long 	CTX_LOCK(ctx);
43434c7070dbSScott Long 	IFDI_VLAN_UNREGISTER(ctx, vtag);
43444c7070dbSScott Long 	/* Re-init to load the changes */
43454c7070dbSScott Long 	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
434621e10b16SSean Bruno 		iflib_if_init_locked(ctx);
43474c7070dbSScott Long 	CTX_UNLOCK(ctx);
43484c7070dbSScott Long }
43494c7070dbSScott Long 
43504c7070dbSScott Long static void
43514c7070dbSScott Long iflib_led_func(void *arg, int onoff)
43524c7070dbSScott Long {
43534c7070dbSScott Long 	if_ctx_t ctx = arg;
43544c7070dbSScott Long 
43554c7070dbSScott Long 	CTX_LOCK(ctx);
43564c7070dbSScott Long 	IFDI_LED_FUNC(ctx, onoff);
43574c7070dbSScott Long 	CTX_UNLOCK(ctx);
43584c7070dbSScott Long }
43594c7070dbSScott Long 
43604c7070dbSScott Long /*********************************************************************
43614c7070dbSScott Long  *
43624c7070dbSScott Long  *  BUS FUNCTION DEFINITIONS
43634c7070dbSScott Long  *
43644c7070dbSScott Long  **********************************************************************/
43654c7070dbSScott Long 
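/*
 * Match the device against the driver's PCI vendor info table and set the
 * device description on a hit.
 */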
43664c7070dbSScott Long int
43674c7070dbSScott Long iflib_device_probe(device_t dev)
43684c7070dbSScott Long {
4369d49e83eaSMarius Strobl 	const pci_vendor_info_t *ent;
43704c7070dbSScott Long 	if_shared_ctx_t sctx;
4371d49e83eaSMarius Strobl 	uint16_t pci_device_id, pci_rev_id, pci_subdevice_id, pci_subvendor_id;
4372d49e83eaSMarius Strobl 	uint16_t pci_vendor_id;
43734c7070dbSScott Long 
43744c7070dbSScott Long 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
43754c7070dbSScott Long 		return (ENOTSUP);
43764c7070dbSScott Long 
43774c7070dbSScott Long 	pci_vendor_id = pci_get_vendor(dev);
43784c7070dbSScott Long 	pci_device_id = pci_get_device(dev);
43794c7070dbSScott Long 	pci_subvendor_id = pci_get_subvendor(dev);
43804c7070dbSScott Long 	pci_subdevice_id = pci_get_subdevice(dev);
43814c7070dbSScott Long 	pci_rev_id = pci_get_revid(dev);
43824c7070dbSScott Long 	if (sctx->isc_parse_devinfo != NULL)
43834c7070dbSScott Long 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
43844c7070dbSScott Long 
43854c7070dbSScott Long 	ent = sctx->isc_vendor_info;
43864c7070dbSScott Long 	while (ent->pvi_vendor_id != 0) {
43874c7070dbSScott Long 		if (pci_vendor_id != ent->pvi_vendor_id) {
43884c7070dbSScott Long 			ent++;
43894c7070dbSScott Long 			continue;
43904c7070dbSScott Long 		}
43914c7070dbSScott Long 		if ((pci_device_id == ent->pvi_device_id) &&
43924c7070dbSScott Long 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
43934c7070dbSScott Long 		     (ent->pvi_subvendor_id == 0)) &&
43944c7070dbSScott Long 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
43954c7070dbSScott Long 		     (ent->pvi_subdevice_id == 0)) &&
43964c7070dbSScott Long 		    ((pci_rev_id == ent->pvi_rev_id) ||
43974c7070dbSScott Long 		     (ent->pvi_rev_id == 0))) {
43984c7070dbSScott Long 
43994c7070dbSScott Long 			device_set_desc_copy(dev, ent->pvi_name);
44004c7070dbSScott Long 			/* this needs to be changed to zero if the bus probing code
44014c7070dbSScott Long 			 * ever stops re-probing on best match because the sctx
44024c7070dbSScott Long 			 * may have its values overwritten by register calls
44034c7070dbSScott Long 			 * in subsequent probes
44044c7070dbSScott Long 			 */
44054c7070dbSScott Long 			return (BUS_PROBE_DEFAULT);
44064c7070dbSScott Long 		}
44074c7070dbSScott Long 		ent++;
44084c7070dbSScott Long 	}
44094c7070dbSScott Long 	return (ENXIO);
44104c7070dbSScott Long }
44114c7070dbSScott Long 
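/*
 * Illustrative sketch only (the vendor/device IDs and driver name here
 * are hypothetical): the pci_vendor_info_t table walked above is usually
 * built by the driver with the PVID*() helpers from net/iflib.h and
 * terminated by PVID_END, with DEVICE_REGISTER() returning an
 * if_shared_ctx_t whose isc_vendor_info points at it:
 *
 *	static const pci_vendor_info_t exm_vendor_info_array[] = {
 *		PVID(0x8086, 0x10d3, "Example(R) Gigabit Network Connection"),
 *		PVID_END
 *	};
 */
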
4412668d6dbbSEric Joyner int
4413668d6dbbSEric Joyner iflib_device_probe_vendor(device_t dev)
4414668d6dbbSEric Joyner {
4415668d6dbbSEric Joyner 	int probe;
4416668d6dbbSEric Joyner 
4417668d6dbbSEric Joyner 	probe = iflib_device_probe(dev);
4418668d6dbbSEric Joyner 	if (probe == BUS_PROBE_DEFAULT)
4419668d6dbbSEric Joyner 		return (BUS_PROBE_VENDOR);
4420668d6dbbSEric Joyner 	else
4421668d6dbbSEric Joyner 		return (probe);
4422668d6dbbSEric Joyner }
4423668d6dbbSEric Joyner 
442409f6ff4fSMatt Macy static void
442509f6ff4fSMatt Macy iflib_reset_qvalues(if_ctx_t ctx)
44264c7070dbSScott Long {
442709f6ff4fSMatt Macy 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
442809f6ff4fSMatt Macy 	if_shared_ctx_t sctx = ctx->ifc_sctx;
442909f6ff4fSMatt Macy 	device_t dev = ctx->ifc_dev;
443046d0f824SMatt Macy 	int i;
44314c7070dbSScott Long 
443223ac9029SStephen Hurd 	if (ctx->ifc_sysctl_ntxqs != 0)
443323ac9029SStephen Hurd 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
443423ac9029SStephen Hurd 	if (ctx->ifc_sysctl_nrxqs != 0)
443523ac9029SStephen Hurd 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
443623ac9029SStephen Hurd 
443723ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
443823ac9029SStephen Hurd 		if (ctx->ifc_sysctl_ntxds[i] != 0)
443923ac9029SStephen Hurd 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
444023ac9029SStephen Hurd 		else
444123ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
444223ac9029SStephen Hurd 	}
444323ac9029SStephen Hurd 
444423ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
444523ac9029SStephen Hurd 		if (ctx->ifc_sysctl_nrxds[i] != 0)
444623ac9029SStephen Hurd 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
444723ac9029SStephen Hurd 		else
444823ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
444923ac9029SStephen Hurd 	}
445023ac9029SStephen Hurd 
445123ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_nrxqs; i++) {
445223ac9029SStephen Hurd 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
445323ac9029SStephen Hurd 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
445423ac9029SStephen Hurd 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
445523ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
445623ac9029SStephen Hurd 		}
445723ac9029SStephen Hurd 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
445823ac9029SStephen Hurd 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
445923ac9029SStephen Hurd 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
446023ac9029SStephen Hurd 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
446123ac9029SStephen Hurd 		}
4462afb77372SEric Joyner 		if (!powerof2(scctx->isc_nrxd[i])) {
4463afb77372SEric Joyner 			device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n",
4464afb77372SEric Joyner 				      i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]);
4465afb77372SEric Joyner 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4466afb77372SEric Joyner 		}
446723ac9029SStephen Hurd 	}
446823ac9029SStephen Hurd 
446923ac9029SStephen Hurd 	for (i = 0; i < sctx->isc_ntxqs; i++) {
447023ac9029SStephen Hurd 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
447123ac9029SStephen Hurd 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
447223ac9029SStephen Hurd 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
447323ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
447423ac9029SStephen Hurd 		}
447523ac9029SStephen Hurd 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
447623ac9029SStephen Hurd 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
447723ac9029SStephen Hurd 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
447823ac9029SStephen Hurd 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
447923ac9029SStephen Hurd 		}
4480afb77372SEric Joyner 		if (!powerof2(scctx->isc_ntxd[i])) {
4481afb77372SEric Joyner 			device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n",
4482afb77372SEric Joyner 				      i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]);
4483afb77372SEric Joyner 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4484afb77372SEric Joyner 		}
448523ac9029SStephen Hurd 	}
448609f6ff4fSMatt Macy }
4487ab2e3f79SStephen Hurd 
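/*
 * For reference (a sketch, not an exhaustive list): the ifc_sysctl_*
 * overrides consumed above are filled in from the per-device tunables
 * created by iflib_add_device_sysctl_pre(), e.g. in loader.conf:
 *
 *	dev.<driver>.<unit>.iflib.override_ntxqs="4"
 *	dev.<driver>.<unit>.iflib.override_nrxds="2048,2048"
 *
 * Values outside the driver's min/max bounds, or not powers of two, are
 * corrected here before the queues are allocated.
 */
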
44886d49b41eSAndrew Gallatin static void
44896d49b41eSAndrew Gallatin iflib_add_pfil(if_ctx_t ctx)
44906d49b41eSAndrew Gallatin {
44916d49b41eSAndrew Gallatin 	struct pfil_head *pfil;
44926d49b41eSAndrew Gallatin 	struct pfil_head_args pa;
44936d49b41eSAndrew Gallatin 	iflib_rxq_t rxq;
44946d49b41eSAndrew Gallatin 	int i;
44956d49b41eSAndrew Gallatin 
44966d49b41eSAndrew Gallatin 	pa.pa_version = PFIL_VERSION;
44976d49b41eSAndrew Gallatin 	pa.pa_flags = PFIL_IN;
44986d49b41eSAndrew Gallatin 	pa.pa_type = PFIL_TYPE_ETHERNET;
44996d49b41eSAndrew Gallatin 	pa.pa_headname = ctx->ifc_ifp->if_xname;
45006d49b41eSAndrew Gallatin 	pfil = pfil_head_register(&pa);
45016d49b41eSAndrew Gallatin 
45026d49b41eSAndrew Gallatin 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
45036d49b41eSAndrew Gallatin 		rxq->pfil = pfil;
45046d49b41eSAndrew Gallatin 	}
45056d49b41eSAndrew Gallatin }
45066d49b41eSAndrew Gallatin 
45076d49b41eSAndrew Gallatin static void
45086d49b41eSAndrew Gallatin iflib_rem_pfil(if_ctx_t ctx)
45096d49b41eSAndrew Gallatin {
45106d49b41eSAndrew Gallatin 	struct pfil_head *pfil;
45116d49b41eSAndrew Gallatin 	iflib_rxq_t rxq;
45126d49b41eSAndrew Gallatin 	int i;
45136d49b41eSAndrew Gallatin 
45146d49b41eSAndrew Gallatin 	rxq = ctx->ifc_rxqs;
45156d49b41eSAndrew Gallatin 	pfil = rxq->pfil;
45166d49b41eSAndrew Gallatin 	for (i = 0; i < NRXQSETS(ctx); i++, rxq++) {
45176d49b41eSAndrew Gallatin 		rxq->pfil = NULL;
45186d49b41eSAndrew Gallatin 	}
45196d49b41eSAndrew Gallatin 	pfil_head_unregister(pfil);
45206d49b41eSAndrew Gallatin }
45216d49b41eSAndrew Gallatin 
4522f154ece0SStephen Hurd static uint16_t
4523f154ece0SStephen Hurd get_ctx_core_offset(if_ctx_t ctx)
4524f154ece0SStephen Hurd {
4525f154ece0SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4526f154ece0SStephen Hurd 	struct cpu_offset *op;
4527f154ece0SStephen Hurd 	uint16_t qc;
4528f154ece0SStephen Hurd 	uint16_t ret = ctx->ifc_sysctl_core_offset;
4529f154ece0SStephen Hurd 
4530f154ece0SStephen Hurd 	if (ret != CORE_OFFSET_UNSPECIFIED)
4531f154ece0SStephen Hurd 		return (ret);
4532f154ece0SStephen Hurd 
4533f154ece0SStephen Hurd 	if (ctx->ifc_sysctl_separate_txrx)
4534f154ece0SStephen Hurd 		qc = scctx->isc_ntxqsets + scctx->isc_nrxqsets;
4535f154ece0SStephen Hurd 	else
4536f154ece0SStephen Hurd 		qc = max(scctx->isc_ntxqsets, scctx->isc_nrxqsets);
4537f154ece0SStephen Hurd 
4538f154ece0SStephen Hurd 	mtx_lock(&cpu_offset_mtx);
4539f154ece0SStephen Hurd 	SLIST_FOREACH(op, &cpu_offsets, entries) {
4540f154ece0SStephen Hurd 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
4541f154ece0SStephen Hurd 			ret = op->offset;
4542f154ece0SStephen Hurd 			op->offset += qc;
4543f154ece0SStephen Hurd 			MPASS(op->refcount < UINT_MAX);
4544f154ece0SStephen Hurd 			op->refcount++;
4545f154ece0SStephen Hurd 			break;
4546f154ece0SStephen Hurd 		}
4547f154ece0SStephen Hurd 	}
4548f154ece0SStephen Hurd 	if (ret == CORE_OFFSET_UNSPECIFIED) {
4549f154ece0SStephen Hurd 		ret = 0;
4550f154ece0SStephen Hurd 		op = malloc(sizeof(struct cpu_offset), M_IFLIB,
4551f154ece0SStephen Hurd 		    M_NOWAIT | M_ZERO);
4552f154ece0SStephen Hurd 		if (op == NULL) {
4553f154ece0SStephen Hurd 			device_printf(ctx->ifc_dev,
4554f154ece0SStephen Hurd 			    "allocation for cpu offset failed.\n");
4555f154ece0SStephen Hurd 		} else {
4556f154ece0SStephen Hurd 			op->offset = qc;
4557f154ece0SStephen Hurd 			op->refcount = 1;
4558f154ece0SStephen Hurd 			CPU_COPY(&ctx->ifc_cpus, &op->set);
4559f154ece0SStephen Hurd 			SLIST_INSERT_HEAD(&cpu_offsets, op, entries);
4560f154ece0SStephen Hurd 		}
4561f154ece0SStephen Hurd 	}
4562f154ece0SStephen Hurd 	mtx_unlock(&cpu_offset_mtx);
4563f154ece0SStephen Hurd 
4564f154ece0SStephen Hurd 	return (ret);
4565f154ece0SStephen Hurd }
4566f154ece0SStephen Hurd 
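/*
 * A usage note (sketch): ifc_sysctl_core_offset and
 * ifc_sysctl_separate_txrx checked above correspond to the per-device
 * tunables created by iflib_add_device_sysctl_pre(), e.g.:
 *
 *	dev.<driver>.<unit>.iflib.core_offset="2"
 *	dev.<driver>.<unit>.iflib.separate_txrx="1"
 *
 * When no offset is specified, contexts sharing the same CPU set are
 * spread out by advancing the shared offset by each context's queue
 * count.
 */
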
4567f154ece0SStephen Hurd static void
4568f154ece0SStephen Hurd unref_ctx_core_offset(if_ctx_t ctx)
4569f154ece0SStephen Hurd {
4570f154ece0SStephen Hurd 	struct cpu_offset *op, *top;
4571f154ece0SStephen Hurd 
4572f154ece0SStephen Hurd 	mtx_lock(&cpu_offset_mtx);
4573f154ece0SStephen Hurd 	SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) {
4574f154ece0SStephen Hurd 		if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) {
4575f154ece0SStephen Hurd 			MPASS(op->refcount > 0);
4576f154ece0SStephen Hurd 			op->refcount--;
4577f154ece0SStephen Hurd 			if (op->refcount == 0) {
4578f154ece0SStephen Hurd 				SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries);
4579f154ece0SStephen Hurd 				free(op, M_IFLIB);
4580f154ece0SStephen Hurd 			}
4581f154ece0SStephen Hurd 			break;
4582f154ece0SStephen Hurd 		}
4583f154ece0SStephen Hurd 	}
4584f154ece0SStephen Hurd 	mtx_unlock(&cpu_offset_mtx);
4585f154ece0SStephen Hurd }
4586f154ece0SStephen Hurd 
458709f6ff4fSMatt Macy int
458809f6ff4fSMatt Macy iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
458909f6ff4fSMatt Macy {
459009f6ff4fSMatt Macy 	if_ctx_t ctx;
459109f6ff4fSMatt Macy 	if_t ifp;
459209f6ff4fSMatt Macy 	if_softc_ctx_t scctx;
45933d10e9edSMarius Strobl 	kobjop_desc_t kobj_desc;
45943d10e9edSMarius Strobl 	kobj_method_t *kobj_method;
4595afb77372SEric Joyner 	int err, msix, rid;
45963d10e9edSMarius Strobl 	uint16_t main_rxq, main_txq;
459709f6ff4fSMatt Macy 
459809f6ff4fSMatt Macy 	ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
459809f6ff4fSMatt Macy 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
460009f6ff4fSMatt Macy 	if (sc == NULL) {
460109f6ff4fSMatt Macy 		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
460209f6ff4fSMatt Macy 		device_set_softc(dev, ctx);
460309f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_SC_ALLOCATED;
460409f6ff4fSMatt Macy 	}
460509f6ff4fSMatt Macy 
460609f6ff4fSMatt Macy 	ctx->ifc_sctx = sctx;
460709f6ff4fSMatt Macy 	ctx->ifc_dev = dev;
460809f6ff4fSMatt Macy 	ctx->ifc_softc = sc;
460909f6ff4fSMatt Macy 
461009f6ff4fSMatt Macy 	if ((err = iflib_register(ctx)) != 0) {
461109f6ff4fSMatt Macy 		device_printf(dev, "iflib_register failed %d\n", err);
46127f3eb9daSPatrick Kelsey 		goto fail_ctx_free;
461309f6ff4fSMatt Macy 	}
461409f6ff4fSMatt Macy 	iflib_add_device_sysctl_pre(ctx);
461509f6ff4fSMatt Macy 
461609f6ff4fSMatt Macy 	scctx = &ctx->ifc_softc_ctx;
461709f6ff4fSMatt Macy 	ifp = ctx->ifc_ifp;
461809f6ff4fSMatt Macy 
461909f6ff4fSMatt Macy 	iflib_reset_qvalues(ctx);
4620aa8a24d3SStephen Hurd 	CTX_LOCK(ctx);
4621ab2e3f79SStephen Hurd 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
46224c7070dbSScott Long 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
46237f3eb9daSPatrick Kelsey 		goto fail_unlock;
46244c7070dbSScott Long 	}
46251248952aSSean Bruno 	_iflib_pre_assert(scctx);
46261248952aSSean Bruno 	ctx->ifc_txrx = *scctx->isc_txrx;
46271248952aSSean Bruno 
4628e2621d96SMatt Macy 	if (sctx->isc_flags & IFLIB_DRIVER_MEDIA)
4629e2621d96SMatt Macy 		ctx->ifc_mediap = scctx->isc_media;
4630e2621d96SMatt Macy 
46311248952aSSean Bruno #ifdef INVARIANTS
46327f87c040SMarius Strobl 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
46331248952aSSean Bruno 		MPASS(scctx->isc_tx_csum_flags);
46341248952aSSean Bruno #endif
46351248952aSSean Bruno 
46366554362cSAndrew Gallatin 	if_setcapabilities(ifp,
46376554362cSAndrew Gallatin 	    scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_NOMAP);
46386554362cSAndrew Gallatin 	if_setcapenable(ifp,
46396554362cSAndrew Gallatin 	    scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_NOMAP);
46401248952aSSean Bruno 
46411248952aSSean Bruno 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
46421248952aSSean Bruno 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
46431248952aSSean Bruno 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
46441248952aSSean Bruno 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
464523ac9029SStephen Hurd 
464695246abbSSean Bruno 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
464795246abbSSean Bruno 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
464823ac9029SStephen Hurd 
464923ac9029SStephen Hurd 	/* XXX change for per-queue sizes */
46501722eeacSMarius Strobl 	device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
465123ac9029SStephen Hurd 	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
465223ac9029SStephen Hurd 
465323ac9029SStephen Hurd 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
465423ac9029SStephen Hurd 	    MAX_SINGLE_PACKET_FRACTION)
465523ac9029SStephen Hurd 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
465623ac9029SStephen Hurd 		    MAX_SINGLE_PACKET_FRACTION);
465723ac9029SStephen Hurd 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
465823ac9029SStephen Hurd 	    MAX_SINGLE_PACKET_FRACTION)
465923ac9029SStephen Hurd 		scctx->isc_tx_tso_segments_max = max(1,
466023ac9029SStephen Hurd 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
46614c7070dbSScott Long 
46624c7070dbSScott Long 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
46637f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
46647f87c040SMarius Strobl 		/*
46657f87c040SMarius Strobl 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
46667f87c040SMarius Strobl 		 * but some MACs do.
46677f87c040SMarius Strobl 		 */
46687f87c040SMarius Strobl 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
46697f87c040SMarius Strobl 		    IP_MAXPACKET));
46707f87c040SMarius Strobl 		/*
46717f87c040SMarius Strobl 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
46727f87c040SMarius Strobl 		 * into account.  In the worst case, each of these calls will
46737f87c040SMarius Strobl 		 * add another mbuf and, thus, the requirement for another DMA
46747f87c040SMarius Strobl 		 * segment.  So for best performance, it doesn't make sense to
46757f87c040SMarius Strobl 		 * advertise a maximum of TSO segments that typically will
46767f87c040SMarius Strobl 		 * require defragmentation in iflib_encap().
46777f87c040SMarius Strobl 		 */
46787f87c040SMarius Strobl 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
46797f87c040SMarius Strobl 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
46807f87c040SMarius Strobl 	}
46814c7070dbSScott Long 	if (scctx->isc_rss_table_size == 0)
46824c7070dbSScott Long 		scctx->isc_rss_table_size = 64;
468323ac9029SStephen Hurd 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4684da69b8f9SSean Bruno 
4685da69b8f9SSean Bruno 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4686da69b8f9SSean Bruno 	/* XXX format name */
4687f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
4688f855ec81SMarius Strobl 	    NULL, NULL, "admin");
4689e516b535SStephen Hurd 
4690772593dbSStephen Hurd 	/* Set up cpu set.  If it fails, use the set of all CPUs. */
4691e516b535SStephen Hurd 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4692e516b535SStephen Hurd 		device_printf(dev, "Unable to fetch CPU list\n");
4693e516b535SStephen Hurd 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4694e516b535SStephen Hurd 	}
4695e516b535SStephen Hurd 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4696e516b535SStephen Hurd 
46974c7070dbSScott Long 	/*
4698b97de13aSMarius Strobl 	** Now set up MSI or MSI-X, should return us the number of supported
4699b97de13aSMarius Strobl 	** vectors (will be 1 for a legacy interrupt and MSI).
47004c7070dbSScott Long 	*/
47014c7070dbSScott Long 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
47024c7070dbSScott Long 		msix = scctx->isc_vectors;
47034c7070dbSScott Long 	} else if (scctx->isc_msix_bar != 0)
4704f7ae9a84SSean Bruno 	       /*
4705f7ae9a84SSean Bruno 		* The simple fact that isc_msix_bar is not 0 does not mean we
4706f7ae9a84SSean Bruno 		* have a good value there that is known to work.
4707f7ae9a84SSean Bruno 		*/
47084c7070dbSScott Long 		msix = iflib_msix_init(ctx);
47094c7070dbSScott Long 	else {
47104c7070dbSScott Long 		scctx->isc_vectors = 1;
47114c7070dbSScott Long 		scctx->isc_ntxqsets = 1;
47124c7070dbSScott Long 		scctx->isc_nrxqsets = 1;
47134c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_LEGACY;
47144c7070dbSScott Long 		msix = 0;
47154c7070dbSScott Long 	}
47164c7070dbSScott Long 	/* Get memory for the station queues */
47174c7070dbSScott Long 	if ((err = iflib_queues_alloc(ctx))) {
47184c7070dbSScott Long 		device_printf(dev, "Unable to allocate queue memory\n");
47197f3eb9daSPatrick Kelsey 		goto fail_intr_free;
47204c7070dbSScott Long 	}
47214c7070dbSScott Long 
4722ac88e6daSStephen Hurd 	if ((err = iflib_qset_structures_setup(ctx)))
47234c7070dbSScott Long 		goto fail_queues;
472469b7fc3eSSean Bruno 
4725bd84f700SSean Bruno 	/*
4726f154ece0SStephen Hurd 	 * Now that we know how many queues there are, get the core offset.
4727f154ece0SStephen Hurd 	 */
4728f154ece0SStephen Hurd 	ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx);
4729f154ece0SStephen Hurd 
4730f154ece0SStephen Hurd 	/*
4731bd84f700SSean Bruno 	 * Group taskqueues aren't properly set up until SMP is started,
4732bd84f700SSean Bruno 	 * so we disable interrupts until we can handle them post
4733bd84f700SSean Bruno 	 * SI_SUB_SMP.
4734bd84f700SSean Bruno 	 *
4735bd84f700SSean Bruno 	 * XXX: disabling interrupts doesn't actually work, at least for
4736bd84f700SSean Bruno 	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
4737bd84f700SSean Bruno 	 * we do null handling and depend on this not causing too large an
4738bd84f700SSean Bruno 	 * interrupt storm.
4739bd84f700SSean Bruno 	 */
47401248952aSSean Bruno 	IFDI_INTR_DISABLE(ctx);
47413d10e9edSMarius Strobl 
47423d10e9edSMarius Strobl 	if (msix > 1) {
47433d10e9edSMarius Strobl 		/*
47443d10e9edSMarius Strobl 		 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable
47453d10e9edSMarius Strobl 		 * aren't the default NULL implementation.
47463d10e9edSMarius Strobl 		 * aren't the default NULL implementations.
47473d10e9edSMarius Strobl 		kobj_desc = &ifdi_rx_queue_intr_enable_desc;
47483d10e9edSMarius Strobl 		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
47493d10e9edSMarius Strobl 		    kobj_desc);
47503d10e9edSMarius Strobl 		if (kobj_method == &kobj_desc->deflt) {
47513d10e9edSMarius Strobl 			device_printf(dev,
47523d10e9edSMarius Strobl 			    "MSI-X requires ifdi_rx_queue_intr_enable method\n");
47533d10e9edSMarius Strobl 			err = EOPNOTSUPP;
47547f3eb9daSPatrick Kelsey 			goto fail_queues;
47554c7070dbSScott Long 		}
47563d10e9edSMarius Strobl 		kobj_desc = &ifdi_tx_queue_intr_enable_desc;
47573d10e9edSMarius Strobl 		kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL,
47583d10e9edSMarius Strobl 		    kobj_desc);
47593d10e9edSMarius Strobl 		if (kobj_method == &kobj_desc->deflt) {
47603d10e9edSMarius Strobl 			device_printf(dev,
47613d10e9edSMarius Strobl 			    "MSI-X requires ifdi_tx_queue_intr_enable method\n");
47623d10e9edSMarius Strobl 			err = EOPNOTSUPP;
47633d10e9edSMarius Strobl 			goto fail_queues;
47643d10e9edSMarius Strobl 		}
47653d10e9edSMarius Strobl 
47663d10e9edSMarius Strobl 		/*
47673d10e9edSMarius Strobl 		 * Assign the MSI-X vectors.
47683d10e9edSMarius Strobl 		 * Note that the default NULL ifdi_msix_intr_assign method will
47693d10e9edSMarius Strobl 		 * fail here, too.
47703d10e9edSMarius Strobl 		 */
47713d10e9edSMarius Strobl 		err = IFDI_MSIX_INTR_ASSIGN(ctx, msix);
47723d10e9edSMarius Strobl 		if (err != 0) {
47733d10e9edSMarius Strobl 			device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n",
47743d10e9edSMarius Strobl 			    err);
47753d10e9edSMarius Strobl 			goto fail_queues;
47763d10e9edSMarius Strobl 		}
4777197c6798SEric Joyner 	} else if (scctx->isc_intr != IFLIB_INTR_MSIX) {
47784c7070dbSScott Long 		rid = 0;
47794c7070dbSScott Long 		if (scctx->isc_intr == IFLIB_INTR_MSI) {
47804c7070dbSScott Long 			MPASS(msix == 1);
47814c7070dbSScott Long 			rid = 1;
47824c7070dbSScott Long 		}
478323ac9029SStephen Hurd 		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
47844c7070dbSScott Long 			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
47857f3eb9daSPatrick Kelsey 			goto fail_queues;
47864c7070dbSScott Long 		}
4787197c6798SEric Joyner 	} else {
4788197c6798SEric Joyner 		device_printf(dev,
4789197c6798SEric Joyner 		    "Cannot use iflib with only 1 MSI-X interrupt!\n");
4790197c6798SEric Joyner 		err = ENODEV;
4791197c6798SEric Joyner 		goto fail_intr_free;
47924c7070dbSScott Long 	}
47937f87c040SMarius Strobl 
47941fd8c72cSKyle Evans 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
47957f87c040SMarius Strobl 
4796ab2e3f79SStephen Hurd 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
47974c7070dbSScott Long 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
47984c7070dbSScott Long 		goto fail_detach;
47994c7070dbSScott Long 	}
48007f87c040SMarius Strobl 
48017f87c040SMarius Strobl 	/*
48027f87c040SMarius Strobl 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
48037f87c040SMarius Strobl 	 * This must appear after the call to ether_ifattach() because
48047f87c040SMarius Strobl 	 * ether_ifattach() sets if_hdrlen to the default value.
48057f87c040SMarius Strobl 	 */
48067f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
48077f87c040SMarius Strobl 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
48087f87c040SMarius Strobl 
48094c7070dbSScott Long 	if ((err = iflib_netmap_attach(ctx))) {
48104c7070dbSScott Long 		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
48114c7070dbSScott Long 		goto fail_detach;
48124c7070dbSScott Long 	}
48134c7070dbSScott Long 	*ctxp = ctx;
48144c7070dbSScott Long 
48157790c8c1SConrad Meyer 	DEBUGNET_SET(ctx->ifc_ifp, iflib);
481694618825SMark Johnston 
481723ac9029SStephen Hurd 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
48184c7070dbSScott Long 	iflib_add_device_sysctl_post(ctx);
48196d49b41eSAndrew Gallatin 	iflib_add_pfil(ctx);
48204ecb427aSSean Bruno 	ctx->ifc_flags |= IFC_INIT_DONE;
4821aa8a24d3SStephen Hurd 	CTX_UNLOCK(ctx);
48223d10e9edSMarius Strobl 
48234c7070dbSScott Long 	return (0);
482477c1fcecSEric Joyner 
48254c7070dbSScott Long fail_detach:
48264c7070dbSScott Long 	ether_ifdetach(ctx->ifc_ifp);
48274c7070dbSScott Long fail_intr_free:
48287f3eb9daSPatrick Kelsey 	iflib_free_intr_mem(ctx);
48294c7070dbSScott Long fail_queues:
48306108c013SStephen Hurd 	iflib_tx_structures_free(ctx);
48316108c013SStephen Hurd 	iflib_rx_structures_free(ctx);
4832197c6798SEric Joyner 	taskqgroup_detach(qgroup_if_config_tqg, &ctx->ifc_admin_task);
48334c7070dbSScott Long 	IFDI_DETACH(ctx);
48347f3eb9daSPatrick Kelsey fail_unlock:
4835aa8a24d3SStephen Hurd 	CTX_UNLOCK(ctx);
483656614414SEric Joyner 	iflib_deregister(ctx);
48377f3eb9daSPatrick Kelsey fail_ctx_free:
48387f3f6aadSEric Joyner 	device_set_softc(ctx->ifc_dev, NULL);
48397f3eb9daSPatrick Kelsey 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
48407f3eb9daSPatrick Kelsey 		free(ctx->ifc_softc, M_IFLIB);
48417f3eb9daSPatrick Kelsey 	free(ctx, M_IFLIB);
48424c7070dbSScott Long 	return (err);
48434c7070dbSScott Long }
48444c7070dbSScott Long 
48454c7070dbSScott Long int
484609f6ff4fSMatt Macy iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
484709f6ff4fSMatt Macy 					  struct iflib_cloneattach_ctx *clctx)
484809f6ff4fSMatt Macy {
484909f6ff4fSMatt Macy 	int err;
485009f6ff4fSMatt Macy 	if_ctx_t ctx;
485109f6ff4fSMatt Macy 	if_t ifp;
485209f6ff4fSMatt Macy 	if_softc_ctx_t scctx;
485309f6ff4fSMatt Macy 	int i;
485409f6ff4fSMatt Macy 	void *sc;
485509f6ff4fSMatt Macy 	uint16_t main_txq;
485609f6ff4fSMatt Macy 	uint16_t main_rxq;
485709f6ff4fSMatt Macy 
485809f6ff4fSMatt Macy 	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
485909f6ff4fSMatt Macy 	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
486009f6ff4fSMatt Macy 	ctx->ifc_flags |= IFC_SC_ALLOCATED;
486109f6ff4fSMatt Macy 	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
486209f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_PSEUDO;
486309f6ff4fSMatt Macy 
486409f6ff4fSMatt Macy 	ctx->ifc_sctx = sctx;
486509f6ff4fSMatt Macy 	ctx->ifc_softc = sc;
486609f6ff4fSMatt Macy 	ctx->ifc_dev = dev;
486709f6ff4fSMatt Macy 
486809f6ff4fSMatt Macy 	if ((err = iflib_register(ctx)) != 0) {
486909f6ff4fSMatt Macy 		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
48707f3eb9daSPatrick Kelsey 		goto fail_ctx_free;
487109f6ff4fSMatt Macy 	}
487209f6ff4fSMatt Macy 	iflib_add_device_sysctl_pre(ctx);
487309f6ff4fSMatt Macy 
487409f6ff4fSMatt Macy 	scctx = &ctx->ifc_softc_ctx;
487509f6ff4fSMatt Macy 	ifp = ctx->ifc_ifp;
487609f6ff4fSMatt Macy 
487709f6ff4fSMatt Macy 	iflib_reset_qvalues(ctx);
4878aac9c817SEric Joyner 	CTX_LOCK(ctx);
487909f6ff4fSMatt Macy 	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
488009f6ff4fSMatt Macy 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4881aac9c817SEric Joyner 		goto fail_unlock;
488209f6ff4fSMatt Macy 	}
488309f6ff4fSMatt Macy 	if (sctx->isc_flags & IFLIB_GEN_MAC)
48841fd8c72cSKyle Evans 		ether_gen_addr(ifp, &ctx->ifc_mac);
488509f6ff4fSMatt Macy 	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
488609f6ff4fSMatt Macy 								clctx->cc_params)) != 0) {
488709f6ff4fSMatt Macy 		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
48887f3eb9daSPatrick Kelsey 		goto fail_ctx_free;
488909f6ff4fSMatt Macy 	}
4890e2621d96SMatt Macy 	ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
4891e2621d96SMatt Macy 	ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO, 0, NULL);
4892e2621d96SMatt Macy 	ifmedia_set(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO);
489309f6ff4fSMatt Macy 
489409f6ff4fSMatt Macy #ifdef INVARIANTS
48957f87c040SMarius Strobl 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
489609f6ff4fSMatt Macy 		MPASS(scctx->isc_tx_csum_flags);
489709f6ff4fSMatt Macy #endif
489809f6ff4fSMatt Macy 
48997f87c040SMarius Strobl 	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
490009f6ff4fSMatt Macy 	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
490109f6ff4fSMatt Macy 
490209f6ff4fSMatt Macy 	ifp->if_flags |= IFF_NOGROUP;
490309f6ff4fSMatt Macy 	if (sctx->isc_flags & IFLIB_PSEUDO) {
49041fd8c72cSKyle Evans 		ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
490509f6ff4fSMatt Macy 
490609f6ff4fSMatt Macy 		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
490709f6ff4fSMatt Macy 			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
490809f6ff4fSMatt Macy 			goto fail_detach;
490909f6ff4fSMatt Macy 		}
491009f6ff4fSMatt Macy 		*ctxp = ctx;
491109f6ff4fSMatt Macy 
49127f87c040SMarius Strobl 		/*
49137f87c040SMarius Strobl 		 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
49147f87c040SMarius Strobl 		 * This must appear after the call to ether_ifattach() because
49157f87c040SMarius Strobl 		 * ether_ifattach() sets if_hdrlen to the default value.
49167f87c040SMarius Strobl 		 */
49177f87c040SMarius Strobl 		if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
49187f87c040SMarius Strobl 			if_setifheaderlen(ifp,
49197f87c040SMarius Strobl 			    sizeof(struct ether_vlan_header));
49207f87c040SMarius Strobl 
492109f6ff4fSMatt Macy 		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
492209f6ff4fSMatt Macy 		iflib_add_device_sysctl_post(ctx);
492309f6ff4fSMatt Macy 		ctx->ifc_flags |= IFC_INIT_DONE;
492409f6ff4fSMatt Macy 		return (0);
492509f6ff4fSMatt Macy 	}
492609f6ff4fSMatt Macy 	_iflib_pre_assert(scctx);
492709f6ff4fSMatt Macy 	ctx->ifc_txrx = *scctx->isc_txrx;
492809f6ff4fSMatt Macy 
492909f6ff4fSMatt Macy 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
493009f6ff4fSMatt Macy 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
493109f6ff4fSMatt Macy 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
493209f6ff4fSMatt Macy 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
493309f6ff4fSMatt Macy 
493409f6ff4fSMatt Macy 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
493509f6ff4fSMatt Macy 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
493609f6ff4fSMatt Macy 
493709f6ff4fSMatt Macy 	/* XXX change for per-queue sizes */
49381722eeacSMarius Strobl 	device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
493909f6ff4fSMatt Macy 	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
494009f6ff4fSMatt Macy 
494109f6ff4fSMatt Macy 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
494209f6ff4fSMatt Macy 	    MAX_SINGLE_PACKET_FRACTION)
494309f6ff4fSMatt Macy 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
494409f6ff4fSMatt Macy 		    MAX_SINGLE_PACKET_FRACTION);
494509f6ff4fSMatt Macy 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
494609f6ff4fSMatt Macy 	    MAX_SINGLE_PACKET_FRACTION)
494709f6ff4fSMatt Macy 		scctx->isc_tx_tso_segments_max = max(1,
494809f6ff4fSMatt Macy 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
494909f6ff4fSMatt Macy 
495009f6ff4fSMatt Macy 	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
49517f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
49527f87c040SMarius Strobl 		/*
49537f87c040SMarius Strobl 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
49547f87c040SMarius Strobl 		 * but some MACs do.
49557f87c040SMarius Strobl 		 */
49567f87c040SMarius Strobl 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
49577f87c040SMarius Strobl 		    IP_MAXPACKET));
49587f87c040SMarius Strobl 		/*
49597f87c040SMarius Strobl 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
49607f87c040SMarius Strobl 		 * into account.  In the worst case, each of these calls will
49617f87c040SMarius Strobl 		 * add another mbuf and, thus, the requirement for another DMA
49627f87c040SMarius Strobl 		 * segment.  So for best performance, it doesn't make sense to
49637f87c040SMarius Strobl 		 * advertize a maximum of TSO segments that typically will
49647f87c040SMarius Strobl 		 * advertise a maximum of TSO segments that typically will
49657f87c040SMarius Strobl 		 */
49667f87c040SMarius Strobl 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
49677f87c040SMarius Strobl 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
49687f87c040SMarius Strobl 	}
496909f6ff4fSMatt Macy 	if (scctx->isc_rss_table_size == 0)
497009f6ff4fSMatt Macy 		scctx->isc_rss_table_size = 64;
497109f6ff4fSMatt Macy 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
497209f6ff4fSMatt Macy 
497309f6ff4fSMatt Macy 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
497409f6ff4fSMatt Macy 	/* XXX format name */
4975f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
4976f855ec81SMarius Strobl 	    NULL, NULL, "admin");
497709f6ff4fSMatt Macy 
497809f6ff4fSMatt Macy 	/* XXX --- can support > 1 -- but keep it simple for now */
497909f6ff4fSMatt Macy 	scctx->isc_intr = IFLIB_INTR_LEGACY;
498009f6ff4fSMatt Macy 
498109f6ff4fSMatt Macy 	/* Get memory for the station queues */
498209f6ff4fSMatt Macy 	if ((err = iflib_queues_alloc(ctx))) {
498309f6ff4fSMatt Macy 		device_printf(dev, "Unable to allocate queue memory\n");
49847f3eb9daSPatrick Kelsey 		goto fail_iflib_detach;
498509f6ff4fSMatt Macy 	}
498609f6ff4fSMatt Macy 
498709f6ff4fSMatt Macy 	if ((err = iflib_qset_structures_setup(ctx))) {
498809f6ff4fSMatt Macy 		device_printf(dev, "qset structure setup failed %d\n", err);
498909f6ff4fSMatt Macy 		goto fail_queues;
499009f6ff4fSMatt Macy 	}
49917f87c040SMarius Strobl 
499209f6ff4fSMatt Macy 	/*
499309f6ff4fSMatt Macy 	 * XXX What if anything do we want to do about interrupts?
499409f6ff4fSMatt Macy 	 */
49951fd8c72cSKyle Evans 	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet);
499609f6ff4fSMatt Macy 	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
499709f6ff4fSMatt Macy 		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
499809f6ff4fSMatt Macy 		goto fail_detach;
499909f6ff4fSMatt Macy 	}
50007f87c040SMarius Strobl 
50017f87c040SMarius Strobl 	/*
50027f87c040SMarius Strobl 	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
50037f87c040SMarius Strobl 	 * This must appear after the call to ether_ifattach() because
50047f87c040SMarius Strobl 	 * ether_ifattach() sets if_hdrlen to the default value.
50057f87c040SMarius Strobl 	 */
50067f87c040SMarius Strobl 	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
50077f87c040SMarius Strobl 		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
50087f87c040SMarius Strobl 
500909f6ff4fSMatt Macy 	/* XXX handle more than one queue */
501009f6ff4fSMatt Macy 	for (i = 0; i < scctx->isc_nrxqsets; i++)
501109f6ff4fSMatt Macy 		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
501209f6ff4fSMatt Macy 
501309f6ff4fSMatt Macy 	*ctxp = ctx;
501409f6ff4fSMatt Macy 
501509f6ff4fSMatt Macy 	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
501609f6ff4fSMatt Macy 	iflib_add_device_sysctl_post(ctx);
501709f6ff4fSMatt Macy 	ctx->ifc_flags |= IFC_INIT_DONE;
5018aac9c817SEric Joyner 	CTX_UNLOCK(ctx);
50193d10e9edSMarius Strobl 
502009f6ff4fSMatt Macy 	return (0);
502109f6ff4fSMatt Macy fail_detach:
502209f6ff4fSMatt Macy 	ether_ifdetach(ctx->ifc_ifp);
502309f6ff4fSMatt Macy fail_queues:
502409f6ff4fSMatt Macy 	iflib_tx_structures_free(ctx);
502509f6ff4fSMatt Macy 	iflib_rx_structures_free(ctx);
50267f3eb9daSPatrick Kelsey fail_iflib_detach:
502709f6ff4fSMatt Macy 	IFDI_DETACH(ctx);
5028aac9c817SEric Joyner fail_unlock:
5029aac9c817SEric Joyner 	CTX_UNLOCK(ctx);
503056614414SEric Joyner 	iflib_deregister(ctx);
50317f3eb9daSPatrick Kelsey fail_ctx_free:
50327f3eb9daSPatrick Kelsey 	free(ctx->ifc_softc, M_IFLIB);
50337f3eb9daSPatrick Kelsey 	free(ctx, M_IFLIB);
503409f6ff4fSMatt Macy 	return (err);
503509f6ff4fSMatt Macy }
503609f6ff4fSMatt Macy 
503709f6ff4fSMatt Macy int
503809f6ff4fSMatt Macy iflib_pseudo_deregister(if_ctx_t ctx)
503909f6ff4fSMatt Macy {
504009f6ff4fSMatt Macy 	if_t ifp = ctx->ifc_ifp;
504109f6ff4fSMatt Macy 	iflib_txq_t txq;
504209f6ff4fSMatt Macy 	iflib_rxq_t rxq;
504309f6ff4fSMatt Macy 	int i, j;
504409f6ff4fSMatt Macy 	struct taskqgroup *tqg;
504509f6ff4fSMatt Macy 	iflib_fl_t fl;
504609f6ff4fSMatt Macy 
50471558015eSEric Joyner 	/* Unregister VLAN event handlers early */
50481558015eSEric Joyner 	iflib_unregister_vlan_handlers(ctx);
50491558015eSEric Joyner 
505009f6ff4fSMatt Macy 	ether_ifdetach(ifp);
505109f6ff4fSMatt Macy 	/* XXX drain any dependent tasks */
505209f6ff4fSMatt Macy 	tqg = qgroup_if_io_tqg;
505309f6ff4fSMatt Macy 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
505409f6ff4fSMatt Macy 		callout_drain(&txq->ift_timer);
505509f6ff4fSMatt Macy 		if (txq->ift_task.gt_uniq != NULL)
505609f6ff4fSMatt Macy 			taskqgroup_detach(tqg, &txq->ift_task);
505709f6ff4fSMatt Macy 	}
505809f6ff4fSMatt Macy 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
5059fb1a29b4SHans Petter Selasky 		callout_drain(&rxq->ifr_watchdog);
506009f6ff4fSMatt Macy 		if (rxq->ifr_task.gt_uniq != NULL)
506109f6ff4fSMatt Macy 			taskqgroup_detach(tqg, &rxq->ifr_task);
506209f6ff4fSMatt Macy 
506309f6ff4fSMatt Macy 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
506409f6ff4fSMatt Macy 			free(fl->ifl_rx_bitmap, M_IFLIB);
506509f6ff4fSMatt Macy 	}
506609f6ff4fSMatt Macy 	tqg = qgroup_if_config_tqg;
506709f6ff4fSMatt Macy 	if (ctx->ifc_admin_task.gt_uniq != NULL)
506809f6ff4fSMatt Macy 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
506909f6ff4fSMatt Macy 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
507009f6ff4fSMatt Macy 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
507109f6ff4fSMatt Macy 
507209f6ff4fSMatt Macy 	iflib_tx_structures_free(ctx);
507309f6ff4fSMatt Macy 	iflib_rx_structures_free(ctx);
507456614414SEric Joyner 
507556614414SEric Joyner 	iflib_deregister(ctx);
507656614414SEric Joyner 
507709f6ff4fSMatt Macy 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
507809f6ff4fSMatt Macy 		free(ctx->ifc_softc, M_IFLIB);
507909f6ff4fSMatt Macy 	free(ctx, M_IFLIB);
508009f6ff4fSMatt Macy 	return (0);
508109f6ff4fSMatt Macy }
508209f6ff4fSMatt Macy 
508309f6ff4fSMatt Macy int
50844c7070dbSScott Long iflib_device_attach(device_t dev)
50854c7070dbSScott Long {
50864c7070dbSScott Long 	if_ctx_t ctx;
50874c7070dbSScott Long 	if_shared_ctx_t sctx;
50884c7070dbSScott Long 
50894c7070dbSScott Long 	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
50904c7070dbSScott Long 		return (ENOTSUP);
50914c7070dbSScott Long 
50924c7070dbSScott Long 	pci_enable_busmaster(dev);
50934c7070dbSScott Long 
50944c7070dbSScott Long 	return (iflib_device_register(dev, NULL, sctx, &ctx));
50954c7070dbSScott Long }
50964c7070dbSScott Long 
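/*
 * Sketch of how the iflib_device_* entry points above and below are
 * normally reached (the "exm" driver and its method names are
 * hypothetical): a driver wires them into its newbus method table:
 *
 *	static device_method_t exm_methods[] = {
 *		DEVMETHOD(device_register, exm_register),
 *		DEVMETHOD(device_probe, iflib_device_probe),
 *		DEVMETHOD(device_attach, iflib_device_attach),
 *		DEVMETHOD(device_detach, iflib_device_detach),
 *		DEVMETHOD(device_suspend, iflib_device_suspend),
 *		DEVMETHOD(device_resume, iflib_device_resume),
 *		DEVMETHOD_END
 *	};
 */
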
50974c7070dbSScott Long int
50984c7070dbSScott Long iflib_device_deregister(if_ctx_t ctx)
50994c7070dbSScott Long {
51004c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
51014c7070dbSScott Long 	iflib_txq_t txq;
51024c7070dbSScott Long 	iflib_rxq_t rxq;
51034c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
510487890dbaSSean Bruno 	int i, j;
51054c7070dbSScott Long 	struct taskqgroup *tqg;
510687890dbaSSean Bruno 	iflib_fl_t fl;
51074c7070dbSScott Long 
51084c7070dbSScott Long 	/* Make sure VLANS are not using driver */
51094c7070dbSScott Long 	/* Make sure VLANs are not using the driver */
51104c7070dbSScott Long 		device_printf(dev, "Vlan in use, detach first\n");
51114c7070dbSScott Long 		return (EBUSY);
51124c7070dbSScott Long 	}
511377c1fcecSEric Joyner #ifdef PCI_IOV
511477c1fcecSEric Joyner 	if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
511577c1fcecSEric Joyner 		device_printf(dev, "SR-IOV in use; detach first.\n");
511677c1fcecSEric Joyner 		return (EBUSY);
511777c1fcecSEric Joyner 	}
511877c1fcecSEric Joyner #endif
511977c1fcecSEric Joyner 
512077c1fcecSEric Joyner 	STATE_LOCK(ctx);
512177c1fcecSEric Joyner 	ctx->ifc_flags |= IFC_IN_DETACH;
512277c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
51234c7070dbSScott Long 
51241558015eSEric Joyner 	/* Unregister VLAN handlers before calling iflib_stop() */
51251558015eSEric Joyner 	iflib_unregister_vlan_handlers(ctx);
51261558015eSEric Joyner 
51271558015eSEric Joyner 	iflib_netmap_detach(ifp);
51281558015eSEric Joyner 	ether_ifdetach(ifp);
51291558015eSEric Joyner 
51304c7070dbSScott Long 	CTX_LOCK(ctx);
51314c7070dbSScott Long 	iflib_stop(ctx);
51324c7070dbSScott Long 	CTX_UNLOCK(ctx);
51334c7070dbSScott Long 
51346d49b41eSAndrew Gallatin 	iflib_rem_pfil(ctx);
51354c7070dbSScott Long 	if (ctx->ifc_led_dev != NULL)
51364c7070dbSScott Long 		led_destroy(ctx->ifc_led_dev);
51374c7070dbSScott Long 	/* XXX drain any dependent tasks */
5138ab2e3f79SStephen Hurd 	tqg = qgroup_if_io_tqg;
513923ac9029SStephen Hurd 	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
51404c7070dbSScott Long 		callout_drain(&txq->ift_timer);
51414c7070dbSScott Long 		if (txq->ift_task.gt_uniq != NULL)
51424c7070dbSScott Long 			taskqgroup_detach(tqg, &txq->ift_task);
51434c7070dbSScott Long 	}
51444c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
51454c7070dbSScott Long 		if (rxq->ifr_task.gt_uniq != NULL)
51464c7070dbSScott Long 			taskqgroup_detach(tqg, &rxq->ifr_task);
514787890dbaSSean Bruno 
514887890dbaSSean Bruno 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
514987890dbaSSean Bruno 			free(fl->ifl_rx_bitmap, M_IFLIB);
51504c7070dbSScott Long 	}
5151ab2e3f79SStephen Hurd 	tqg = qgroup_if_config_tqg;
51524c7070dbSScott Long 	if (ctx->ifc_admin_task.gt_uniq != NULL)
51534c7070dbSScott Long 		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
51544c7070dbSScott Long 	if (ctx->ifc_vflr_task.gt_uniq != NULL)
51554c7070dbSScott Long 		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
51566c3c3194SMatt Macy 	CTX_LOCK(ctx);
51574c7070dbSScott Long 	IFDI_DETACH(ctx);
51586c3c3194SMatt Macy 	CTX_UNLOCK(ctx);
51596c3c3194SMatt Macy 
51606c3c3194SMatt Macy 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
516177c1fcecSEric Joyner 	iflib_free_intr_mem(ctx);
516277c1fcecSEric Joyner 
516377c1fcecSEric Joyner 	bus_generic_detach(dev);
516477c1fcecSEric Joyner 
516577c1fcecSEric Joyner 	iflib_tx_structures_free(ctx);
516677c1fcecSEric Joyner 	iflib_rx_structures_free(ctx);
516756614414SEric Joyner 
516856614414SEric Joyner 	iflib_deregister(ctx);
516956614414SEric Joyner 
517056614414SEric Joyner 	device_set_softc(ctx->ifc_dev, NULL);
517177c1fcecSEric Joyner 	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
517277c1fcecSEric Joyner 		free(ctx->ifc_softc, M_IFLIB);
5173f154ece0SStephen Hurd 	unref_ctx_core_offset(ctx);
517477c1fcecSEric Joyner 	free(ctx, M_IFLIB);
517577c1fcecSEric Joyner 	return (0);
517677c1fcecSEric Joyner }
517777c1fcecSEric Joyner 
517877c1fcecSEric Joyner static void
517977c1fcecSEric Joyner iflib_free_intr_mem(if_ctx_t ctx)
518077c1fcecSEric Joyner {
518177c1fcecSEric Joyner 
51824c7070dbSScott Long 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
51834c7070dbSScott Long 		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
51844c7070dbSScott Long 	}
5185b97de13aSMarius Strobl 	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
5186b97de13aSMarius Strobl 		pci_release_msi(ctx->ifc_dev);
5187b97de13aSMarius Strobl 	}
51884c7070dbSScott Long 	if (ctx->ifc_msix_mem != NULL) {
51894c7070dbSScott Long 		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
5190b97de13aSMarius Strobl 		    rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
51914c7070dbSScott Long 		ctx->ifc_msix_mem = NULL;
51924c7070dbSScott Long 	}
51934c7070dbSScott Long }
51944c7070dbSScott Long 
51954c7070dbSScott Long int
51964c7070dbSScott Long iflib_device_detach(device_t dev)
51974c7070dbSScott Long {
51984c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
51994c7070dbSScott Long 
52004c7070dbSScott Long 	return (iflib_device_deregister(ctx));
52014c7070dbSScott Long }
52024c7070dbSScott Long 
52034c7070dbSScott Long int
52044c7070dbSScott Long iflib_device_suspend(device_t dev)
52054c7070dbSScott Long {
52064c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52074c7070dbSScott Long 
52084c7070dbSScott Long 	CTX_LOCK(ctx);
52094c7070dbSScott Long 	IFDI_SUSPEND(ctx);
52104c7070dbSScott Long 	CTX_UNLOCK(ctx);
52114c7070dbSScott Long 
52124c7070dbSScott Long 	return (bus_generic_suspend(dev));
52134c7070dbSScott Long }
52144c7070dbSScott Long int
52154c7070dbSScott Long iflib_device_shutdown(device_t dev)
52164c7070dbSScott Long {
52174c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52184c7070dbSScott Long 
52194c7070dbSScott Long 	CTX_LOCK(ctx);
52204c7070dbSScott Long 	IFDI_SHUTDOWN(ctx);
52214c7070dbSScott Long 	CTX_UNLOCK(ctx);
52224c7070dbSScott Long 
52234c7070dbSScott Long 	return (bus_generic_suspend(dev));
52244c7070dbSScott Long }
52254c7070dbSScott Long 
52274c7070dbSScott Long int
52284c7070dbSScott Long iflib_device_resume(device_t dev)
52294c7070dbSScott Long {
52304c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52314c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
52324c7070dbSScott Long 
52334c7070dbSScott Long 	CTX_LOCK(ctx);
52344c7070dbSScott Long 	IFDI_RESUME(ctx);
5235cd28ea92SStephen Hurd 	iflib_if_init_locked(ctx);
52364c7070dbSScott Long 	CTX_UNLOCK(ctx);
52374c7070dbSScott Long 	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
52384c7070dbSScott Long 		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
52394c7070dbSScott Long 
52404c7070dbSScott Long 	return (bus_generic_resume(dev));
52414c7070dbSScott Long }
52424c7070dbSScott Long 
52434c7070dbSScott Long int
52444c7070dbSScott Long iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
52454c7070dbSScott Long {
52464c7070dbSScott Long 	int error;
52474c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52484c7070dbSScott Long 
52494c7070dbSScott Long 	CTX_LOCK(ctx);
52504c7070dbSScott Long 	error = IFDI_IOV_INIT(ctx, num_vfs, params);
52514c7070dbSScott Long 	CTX_UNLOCK(ctx);
52524c7070dbSScott Long 
52534c7070dbSScott Long 	return (error);
52544c7070dbSScott Long }
52554c7070dbSScott Long 
52564c7070dbSScott Long void
52574c7070dbSScott Long iflib_device_iov_uninit(device_t dev)
52584c7070dbSScott Long {
52594c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52604c7070dbSScott Long 
52614c7070dbSScott Long 	CTX_LOCK(ctx);
52624c7070dbSScott Long 	IFDI_IOV_UNINIT(ctx);
52634c7070dbSScott Long 	CTX_UNLOCK(ctx);
52644c7070dbSScott Long }
52654c7070dbSScott Long 
52664c7070dbSScott Long int
52674c7070dbSScott Long iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
52684c7070dbSScott Long {
52694c7070dbSScott Long 	int error;
52704c7070dbSScott Long 	if_ctx_t ctx = device_get_softc(dev);
52714c7070dbSScott Long 
52724c7070dbSScott Long 	CTX_LOCK(ctx);
52734c7070dbSScott Long 	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
52744c7070dbSScott Long 	CTX_UNLOCK(ctx);
52754c7070dbSScott Long 
52764c7070dbSScott Long 	return (error);
52774c7070dbSScott Long }
52784c7070dbSScott Long 
52794c7070dbSScott Long /*********************************************************************
52804c7070dbSScott Long  *
52814c7070dbSScott Long  *  MODULE FUNCTION DEFINITIONS
52824c7070dbSScott Long  *
52834c7070dbSScott Long  **********************************************************************/
52844c7070dbSScott Long 
5285ab2e3f79SStephen Hurd /*
5286ab2e3f79SStephen Hurd  * - Start a fast taskqueue thread for each core
5287ab2e3f79SStephen Hurd  * - Start a taskqueue for control operations
5288ab2e3f79SStephen Hurd  */
52894c7070dbSScott Long static int
52904c7070dbSScott Long iflib_module_init(void)
52914c7070dbSScott Long {
52924c7070dbSScott Long 	return (0);
52934c7070dbSScott Long }
52944c7070dbSScott Long 
52954c7070dbSScott Long static int
52964c7070dbSScott Long iflib_module_event_handler(module_t mod, int what, void *arg)
52974c7070dbSScott Long {
52984c7070dbSScott Long 	int err;
52994c7070dbSScott Long 
53004c7070dbSScott Long 	switch (what) {
53014c7070dbSScott Long 	case MOD_LOAD:
53024c7070dbSScott Long 		if ((err = iflib_module_init()) != 0)
53034c7070dbSScott Long 			return (err);
53044c7070dbSScott Long 		break;
53054c7070dbSScott Long 	case MOD_UNLOAD:
53064c7070dbSScott Long 		return (EBUSY);
53074c7070dbSScott Long 	default:
53084c7070dbSScott Long 		return (EOPNOTSUPP);
53094c7070dbSScott Long 	}
53104c7070dbSScott Long 
53114c7070dbSScott Long 	return (0);
53124c7070dbSScott Long }
53134c7070dbSScott Long 
53144c7070dbSScott Long /*********************************************************************
53154c7070dbSScott Long  *
53164c7070dbSScott Long  *  PUBLIC FUNCTION DEFINITIONS
53174c7070dbSScott Long  *     ordered as in iflib.h
53184c7070dbSScott Long  *
53194c7070dbSScott Long  **********************************************************************/
53204c7070dbSScott Long 
53214c7070dbSScott Long 
53224c7070dbSScott Long static void
53234c7070dbSScott Long _iflib_assert(if_shared_ctx_t sctx)
53244c7070dbSScott Long {
5325afb77372SEric Joyner 	int i;
5326afb77372SEric Joyner 
53274c7070dbSScott Long 	MPASS(sctx->isc_tx_maxsize);
53284c7070dbSScott Long 	MPASS(sctx->isc_tx_maxsegsize);
53294c7070dbSScott Long 
53304c7070dbSScott Long 	MPASS(sctx->isc_rx_maxsize);
53314c7070dbSScott Long 	MPASS(sctx->isc_rx_nsegments);
53324c7070dbSScott Long 	MPASS(sctx->isc_rx_maxsegsize);
53334c7070dbSScott Long 
5334afb77372SEric Joyner 	MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8);
5335afb77372SEric Joyner 	for (i = 0; i < sctx->isc_nrxqs; i++) {
5336afb77372SEric Joyner 		MPASS(sctx->isc_nrxd_min[i]);
5337afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_nrxd_min[i]));
5338afb77372SEric Joyner 		MPASS(sctx->isc_nrxd_max[i]);
5339afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_nrxd_max[i]));
5340afb77372SEric Joyner 		MPASS(sctx->isc_nrxd_default[i]);
5341afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_nrxd_default[i]));
5342afb77372SEric Joyner 	}
5343afb77372SEric Joyner 
5344afb77372SEric Joyner 	MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8);
5345afb77372SEric Joyner 	for (i = 0; i < sctx->isc_ntxqs; i++) {
5346afb77372SEric Joyner 		MPASS(sctx->isc_ntxd_min[i]);
5347afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_ntxd_min[i]));
5348afb77372SEric Joyner 		MPASS(sctx->isc_ntxd_max[i]);
5349afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_ntxd_max[i]));
5350afb77372SEric Joyner 		MPASS(sctx->isc_ntxd_default[i]);
5351afb77372SEric Joyner 		MPASS(powerof2(sctx->isc_ntxd_default[i]));
5352afb77372SEric Joyner 	}
53534c7070dbSScott Long }
53544c7070dbSScott Long 
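/*
 * Example (values are illustrative only) of the descriptor bounds the
 * assertions above expect a driver's if_shared_ctx_t to provide; each
 * min/max/default entry must be a non-zero power of two:
 *
 *	.isc_nrxd_min = {32},
 *	.isc_nrxd_max = {4096},
 *	.isc_nrxd_default = {1024},
 *	.isc_ntxd_min = {32},
 *	.isc_ntxd_max = {4096},
 *	.isc_ntxd_default = {1024},
 */
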
53551248952aSSean Bruno static void
53561248952aSSean Bruno _iflib_pre_assert(if_softc_ctx_t scctx)
53571248952aSSean Bruno {
53581248952aSSean Bruno 
53591248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_encap);
53601248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_flush);
53611248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_txd_credits_update);
53621248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_available);
53631248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
53641248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_refill);
53651248952aSSean Bruno 	MPASS(scctx->isc_txrx->ift_rxd_flush);
53661248952aSSean Bruno }
53672fe66646SSean Bruno 
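/*
 * The ops asserted above come from the driver's struct if_txrx, which it
 * points isc_txrx at during IFDI_ATTACH_PRE().  A sketch (the "exm_"
 * function names are hypothetical):
 *
 *	static struct if_txrx exm_txrx = {
 *		.ift_txd_encap = exm_isc_txd_encap,
 *		.ift_txd_flush = exm_isc_txd_flush,
 *		.ift_txd_credits_update = exm_isc_txd_credits_update,
 *		.ift_rxd_available = exm_isc_rxd_available,
 *		.ift_rxd_pkt_get = exm_isc_rxd_pkt_get,
 *		.ift_rxd_refill = exm_isc_rxd_refill,
 *		.ift_rxd_flush = exm_isc_rxd_flush,
 *		.ift_legacy_intr = exm_intr
 *	};
 *
 *	scctx->isc_txrx = &exm_txrx;
 */
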
53684c7070dbSScott Long static int
53694c7070dbSScott Long iflib_register(if_ctx_t ctx)
53704c7070dbSScott Long {
53714c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
53724c7070dbSScott Long 	driver_t *driver = sctx->isc_driver;
53734c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
53744c7070dbSScott Long 	if_t ifp;
53754c7070dbSScott Long 
53764c7070dbSScott Long 	_iflib_assert(sctx);
53774c7070dbSScott Long 
5378aa8a24d3SStephen Hurd 	CTX_LOCK_INIT(ctx);
53797b610b60SSean Bruno 	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
538077c1fcecSEric Joyner 	ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER);
53814c7070dbSScott Long 	if (ifp == NULL) {
53824c7070dbSScott Long 		device_printf(dev, "cannot allocate ifnet structure\n");
53834c7070dbSScott Long 		return (ENOMEM);
53844c7070dbSScott Long 	}
53854c7070dbSScott Long 
53864c7070dbSScott Long 	/*
53874c7070dbSScott Long 	 * Initialize our context's device specific methods
53884c7070dbSScott Long 	 */
53894c7070dbSScott Long 	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
53904c7070dbSScott Long 	kobj_class_compile((kobj_class_t) driver);
53914c7070dbSScott Long 
53924c7070dbSScott Long 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
53934c7070dbSScott Long 	if_setsoftc(ifp, ctx);
53944c7070dbSScott Long 	if_setdev(ifp, dev);
53954c7070dbSScott Long 	if_setinitfn(ifp, iflib_if_init);
53964c7070dbSScott Long 	if_setioctlfn(ifp, iflib_if_ioctl);
5397b8ca4756SPatrick Kelsey #ifdef ALTQ
5398b8ca4756SPatrick Kelsey 	if_setstartfn(ifp, iflib_altq_if_start);
5399b8ca4756SPatrick Kelsey 	if_settransmitfn(ifp, iflib_altq_if_transmit);
54008f410865SPatrick Kelsey 	if_setsendqready(ifp);
5401b8ca4756SPatrick Kelsey #else
54024c7070dbSScott Long 	if_settransmitfn(ifp, iflib_if_transmit);
5403b8ca4756SPatrick Kelsey #endif
54044c7070dbSScott Long 	if_setqflushfn(ifp, iflib_if_qflush);
5405e87c4940SGleb Smirnoff 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
5406e87c4940SGleb Smirnoff 	    IFF_KNOWSEPOCH);
54074c7070dbSScott Long 
54084c7070dbSScott Long 	ctx->ifc_vlan_attach_event =
54094c7070dbSScott Long 		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
54104c7070dbSScott Long 							  EVENTHANDLER_PRI_FIRST);
54114c7070dbSScott Long 	ctx->ifc_vlan_detach_event =
54124c7070dbSScott Long 		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
54134c7070dbSScott Long 							  EVENTHANDLER_PRI_FIRST);
54144c7070dbSScott Long 
5415e2621d96SMatt Macy 	if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) {
5416e2621d96SMatt Macy 		ctx->ifc_mediap = &ctx->ifc_media;
5417e2621d96SMatt Macy 		ifmedia_init(ctx->ifc_mediap, IFM_IMASK,
54184c7070dbSScott Long 		    iflib_media_change, iflib_media_status);
5419e2621d96SMatt Macy 	}
54204c7070dbSScott Long 	return (0);
54214c7070dbSScott Long }
54224c7070dbSScott Long 
542356614414SEric Joyner static void
54241558015eSEric Joyner iflib_unregister_vlan_handlers(if_ctx_t ctx)
542556614414SEric Joyner {
542656614414SEric Joyner 	/* Unregister VLAN events */
542756614414SEric Joyner 	if (ctx->ifc_vlan_attach_event != NULL) {
542856614414SEric Joyner 		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
542956614414SEric Joyner 		ctx->ifc_vlan_attach_event = NULL;
543056614414SEric Joyner 	}
543156614414SEric Joyner 	if (ctx->ifc_vlan_detach_event != NULL) {
543256614414SEric Joyner 		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
543356614414SEric Joyner 		ctx->ifc_vlan_detach_event = NULL;
543456614414SEric Joyner 	}
543556614414SEric Joyner 
54361558015eSEric Joyner }
54371558015eSEric Joyner 
54381558015eSEric Joyner static void
54391558015eSEric Joyner iflib_deregister(if_ctx_t ctx)
54401558015eSEric Joyner {
54411558015eSEric Joyner 	if_t ifp = ctx->ifc_ifp;
54421558015eSEric Joyner 
54431558015eSEric Joyner 	/* Remove all media */
54441558015eSEric Joyner 	ifmedia_removeall(&ctx->ifc_media);
54451558015eSEric Joyner 
54461558015eSEric Joyner 	/* Ensure that VLAN event handlers are unregistered */
54471558015eSEric Joyner 	iflib_unregister_vlan_handlers(ctx);
54481558015eSEric Joyner 
544956614414SEric Joyner 	/* Release kobject reference */
545056614414SEric Joyner 	kobj_delete((kobj_t) ctx, NULL);
545156614414SEric Joyner 
545256614414SEric Joyner 	/* Free the ifnet structure */
545356614414SEric Joyner 	if_free(ifp);
545456614414SEric Joyner 
545556614414SEric Joyner 	STATE_LOCK_DESTROY(ctx);
545656614414SEric Joyner 
545756614414SEric Joyner 	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
545856614414SEric Joyner 	CTX_LOCK_DESTROY(ctx);
545956614414SEric Joyner }
546056614414SEric Joyner 
54614c7070dbSScott Long static int
54624c7070dbSScott Long iflib_queues_alloc(if_ctx_t ctx)
54634c7070dbSScott Long {
54644c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
546523ac9029SStephen Hurd 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
54664c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
546723ac9029SStephen Hurd 	int nrxqsets = scctx->isc_nrxqsets;
546823ac9029SStephen Hurd 	int ntxqsets = scctx->isc_ntxqsets;
54694c7070dbSScott Long 	iflib_txq_t txq;
54704c7070dbSScott Long 	iflib_rxq_t rxq;
54714c7070dbSScott Long 	iflib_fl_t fl = NULL;
547223ac9029SStephen Hurd 	int i, j, cpu, err, txconf, rxconf;
54734c7070dbSScott Long 	iflib_dma_info_t ifdip;
547423ac9029SStephen Hurd 	uint32_t *rxqsizes = scctx->isc_rxqsizes;
547523ac9029SStephen Hurd 	uint32_t *txqsizes = scctx->isc_txqsizes;
54764c7070dbSScott Long 	uint8_t nrxqs = sctx->isc_nrxqs;
54774c7070dbSScott Long 	uint8_t ntxqs = sctx->isc_ntxqs;
54784c7070dbSScott Long 	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
54794c7070dbSScott Long 	caddr_t *vaddrs;
54804c7070dbSScott Long 	uint64_t *paddrs;
54814c7070dbSScott Long 
548223ac9029SStephen Hurd 	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
548323ac9029SStephen Hurd 	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
54844c7070dbSScott Long 
54854c7070dbSScott Long 	/* Allocate the TX ring struct memory */
5486b89827a0SStephen Hurd 	if (!(ctx->ifc_txqs =
5487ac2fffa4SPedro F. Giffuni 	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
5488ac2fffa4SPedro F. Giffuni 	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
54894c7070dbSScott Long 		device_printf(dev, "Unable to allocate TX ring memory\n");
54904c7070dbSScott Long 		err = ENOMEM;
54914c7070dbSScott Long 		goto fail;
54924c7070dbSScott Long 	}
54934c7070dbSScott Long 
54944c7070dbSScott Long 	/* Now allocate the RX */
5495b89827a0SStephen Hurd 	if (!(ctx->ifc_rxqs =
5496ac2fffa4SPedro F. Giffuni 	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
5497ac2fffa4SPedro F. Giffuni 	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
54984c7070dbSScott Long 		device_printf(dev, "Unable to allocate RX ring memory\n");
54994c7070dbSScott Long 		err = ENOMEM;
55004c7070dbSScott Long 		goto rx_fail;
55014c7070dbSScott Long 	}
55024c7070dbSScott Long 
5503b89827a0SStephen Hurd 	txq = ctx->ifc_txqs;
5504b89827a0SStephen Hurd 	rxq = ctx->ifc_rxqs;
55054c7070dbSScott Long 
55064c7070dbSScott Long 	/*
55074c7070dbSScott Long 	 * XXX handle allocation failure
55084c7070dbSScott Long 	 */
550996c85efbSNathan Whitehorn 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
55104c7070dbSScott Long 		/* Set up some basics */
55114c7070dbSScott Long 
5512bfce461eSMarius Strobl 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
5513bfce461eSMarius Strobl 		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5514bfce461eSMarius Strobl 			device_printf(dev,
5515bfce461eSMarius Strobl 			    "Unable to allocate TX DMA info memory\n");
55164c7070dbSScott Long 			err = ENOMEM;
55170d0338afSConrad Meyer 			goto err_tx_desc;
55184c7070dbSScott Long 		}
55194c7070dbSScott Long 		txq->ift_ifdi = ifdip;
55204c7070dbSScott Long 		for (j = 0; j < ntxqs; j++, ifdip++) {
5521bfce461eSMarius Strobl 			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
5522bfce461eSMarius Strobl 				device_printf(dev,
5523bfce461eSMarius Strobl 				    "Unable to allocate TX descriptors\n");
55244c7070dbSScott Long 				err = ENOMEM;
55254c7070dbSScott Long 				goto err_tx_desc;
55264c7070dbSScott Long 			}
552795246abbSSean Bruno 			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
55284c7070dbSScott Long 			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
55294c7070dbSScott Long 		}
55304c7070dbSScott Long 		txq->ift_ctx = ctx;
55314c7070dbSScott Long 		txq->ift_id = i;
553223ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
553323ac9029SStephen Hurd 			txq->ift_br_offset = 1;
553423ac9029SStephen Hurd 		} else {
553523ac9029SStephen Hurd 			txq->ift_br_offset = 0;
553623ac9029SStephen Hurd 		}
55374c7070dbSScott Long 		/* XXX fix this */
553896c85efbSNathan Whitehorn 		txq->ift_timer.c_cpu = cpu;
55394c7070dbSScott Long 
55404c7070dbSScott Long 		if (iflib_txsd_alloc(txq)) {
55414c7070dbSScott Long 			device_printf(dev, "Critical Failure setting up TX buffers\n");
55424c7070dbSScott Long 			err = ENOMEM;
55434c7070dbSScott Long 			goto err_tx_desc;
55444c7070dbSScott Long 		}
55454c7070dbSScott Long 
55464c7070dbSScott Long 		/* Initialize the TX lock */
55471722eeacSMarius Strobl 		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout",
55484c7070dbSScott Long 		    device_get_nameunit(dev), txq->ift_id);
55494c7070dbSScott Long 		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
55504c7070dbSScott Long 		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
55514c7070dbSScott Long 
555295246abbSSean Bruno 		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
55534c7070dbSScott Long 				      iflib_txq_can_drain, M_IFLIB, M_WAITOK);
55544c7070dbSScott Long 		if (err) {
55554c7070dbSScott Long 			/* XXX free any allocated rings */
55564c7070dbSScott Long 			device_printf(dev, "Unable to allocate buf_ring\n");
55570d0338afSConrad Meyer 			goto err_tx_desc;
55584c7070dbSScott Long 		}
55594c7070dbSScott Long 	}
55604c7070dbSScott Long 
55614c7070dbSScott Long 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
55624c7070dbSScott Long 		/* Set up some basics */
5563fb1a29b4SHans Petter Selasky 		callout_init(&rxq->ifr_watchdog, 1);
55644c7070dbSScott Long 
5565bfce461eSMarius Strobl 		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
5566bfce461eSMarius Strobl 		   M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
5567bfce461eSMarius Strobl 			device_printf(dev,
5568bfce461eSMarius Strobl 			    "Unable to allocate RX DMA info memory\n");
55694c7070dbSScott Long 			err = ENOMEM;
55700d0338afSConrad Meyer 			goto err_tx_desc;
55714c7070dbSScott Long 		}
55724c7070dbSScott Long 
55734c7070dbSScott Long 		rxq->ifr_ifdi = ifdip;
557495246abbSSean Bruno 		/* XXX this needs to be changed if #rx queues != #tx queues */
557595246abbSSean Bruno 		rxq->ifr_ntxqirq = 1;
557695246abbSSean Bruno 		rxq->ifr_txqid[0] = i;
55774c7070dbSScott Long 		for (j = 0; j < nrxqs; j++, ifdip++) {
5578bfce461eSMarius Strobl 			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
5579bfce461eSMarius Strobl 				device_printf(dev,
5580bfce461eSMarius Strobl 				    "Unable to allocate RX descriptors\n");
55814c7070dbSScott Long 				err = ENOMEM;
55824c7070dbSScott Long 				goto err_tx_desc;
55834c7070dbSScott Long 			}
55844c7070dbSScott Long 			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
55854c7070dbSScott Long 		}
55864c7070dbSScott Long 		rxq->ifr_ctx = ctx;
55874c7070dbSScott Long 		rxq->ifr_id = i;
558823ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
558923ac9029SStephen Hurd 			rxq->ifr_fl_offset = 1;
55904c7070dbSScott Long 		} else {
559123ac9029SStephen Hurd 			rxq->ifr_fl_offset = 0;
55924c7070dbSScott Long 		}
55934c7070dbSScott Long 		rxq->ifr_nfl = nfree_lists;
55944c7070dbSScott Long 		if (!(fl =
5595ac2fffa4SPedro F. Giffuni 			  (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
55964c7070dbSScott Long 			device_printf(dev, "Unable to allocate free list memory\n");
55974c7070dbSScott Long 			err = ENOMEM;
55980d0338afSConrad Meyer 			goto err_tx_desc;
55994c7070dbSScott Long 		}
56004c7070dbSScott Long 		rxq->ifr_fl = fl;
56014c7070dbSScott Long 		for (j = 0; j < nfree_lists; j++) {
560295246abbSSean Bruno 			fl[j].ifl_rxq = rxq;
560395246abbSSean Bruno 			fl[j].ifl_id = j;
560495246abbSSean Bruno 			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
560595246abbSSean Bruno 			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
56064c7070dbSScott Long 		}
56074c7070dbSScott Long 		/* Allocate receive buffers for the ring */
56084c7070dbSScott Long 		if (iflib_rxsd_alloc(rxq)) {
56094c7070dbSScott Long 			device_printf(dev,
56104c7070dbSScott Long 			    "Critical Failure setting up receive buffers\n");
56114c7070dbSScott Long 			err = ENOMEM;
56124c7070dbSScott Long 			goto err_rx_desc;
56134c7070dbSScott Long 		}
561487890dbaSSean Bruno 
561587890dbaSSean Bruno 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
56163db348b5SMarius Strobl 			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
56173db348b5SMarius Strobl 			    M_WAITOK);
56184c7070dbSScott Long 	}
56194c7070dbSScott Long 
56204c7070dbSScott Long 	/* TXQs */
56214c7070dbSScott Long 	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
56224c7070dbSScott Long 	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
56234c7070dbSScott Long 	for (i = 0; i < ntxqsets; i++) {
56244c7070dbSScott Long 		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
56254c7070dbSScott Long 
56264c7070dbSScott Long 		for (j = 0; j < ntxqs; j++, di++) {
56274c7070dbSScott Long 			vaddrs[i*ntxqs + j] = di->idi_vaddr;
56284c7070dbSScott Long 			paddrs[i*ntxqs + j] = di->idi_paddr;
56294c7070dbSScott Long 		}
56304c7070dbSScott Long 	}
56314c7070dbSScott Long 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
5632bfce461eSMarius Strobl 		device_printf(ctx->ifc_dev,
5633bfce461eSMarius Strobl 		    "Unable to allocate device TX queue\n");
56344c7070dbSScott Long 		iflib_tx_structures_free(ctx);
56354c7070dbSScott Long 		free(vaddrs, M_IFLIB);
56364c7070dbSScott Long 		free(paddrs, M_IFLIB);
56374c7070dbSScott Long 		goto err_rx_desc;
56384c7070dbSScott Long 	}
56394c7070dbSScott Long 	free(vaddrs, M_IFLIB);
56404c7070dbSScott Long 	free(paddrs, M_IFLIB);
56414c7070dbSScott Long 
56424c7070dbSScott Long 	/* RXQs */
56434c7070dbSScott Long 	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
56444c7070dbSScott Long 	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
56454c7070dbSScott Long 	for (i = 0; i < nrxqsets; i++) {
56464c7070dbSScott Long 		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
56474c7070dbSScott Long 
56484c7070dbSScott Long 		for (j = 0; j < nrxqs; j++, di++) {
56494c7070dbSScott Long 			vaddrs[i*nrxqs + j] = di->idi_vaddr;
56504c7070dbSScott Long 			paddrs[i*nrxqs + j] = di->idi_paddr;
56514c7070dbSScott Long 		}
56524c7070dbSScott Long 	}
56534c7070dbSScott Long 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
5654bfce461eSMarius Strobl 		device_printf(ctx->ifc_dev,
5655bfce461eSMarius Strobl 		    "Unable to allocate device RX queue\n");
56564c7070dbSScott Long 		iflib_tx_structures_free(ctx);
56574c7070dbSScott Long 		free(vaddrs, M_IFLIB);
56584c7070dbSScott Long 		free(paddrs, M_IFLIB);
56594c7070dbSScott Long 		goto err_rx_desc;
56604c7070dbSScott Long 	}
56614c7070dbSScott Long 	free(vaddrs, M_IFLIB);
56624c7070dbSScott Long 	free(paddrs, M_IFLIB);
56634c7070dbSScott Long 
56644c7070dbSScott Long 	return (0);
56654c7070dbSScott Long 
56664c7070dbSScott Long /* XXX handle allocation failure changes */
56674c7070dbSScott Long err_rx_desc:
56684c7070dbSScott Long err_tx_desc:
5669b89827a0SStephen Hurd rx_fail:
56704c7070dbSScott Long 	if (ctx->ifc_rxqs != NULL)
56714c7070dbSScott Long 		free(ctx->ifc_rxqs, M_IFLIB);
56724c7070dbSScott Long 	ctx->ifc_rxqs = NULL;
56734c7070dbSScott Long 	if (ctx->ifc_txqs != NULL)
56744c7070dbSScott Long 		free(ctx->ifc_txqs, M_IFLIB);
56754c7070dbSScott Long 	ctx->ifc_txqs = NULL;
56764c7070dbSScott Long fail:
56774c7070dbSScott Long 	return (err);
56784c7070dbSScott Long }
56794c7070dbSScott Long 
56804c7070dbSScott Long static int
56814c7070dbSScott Long iflib_tx_structures_setup(if_ctx_t ctx)
56824c7070dbSScott Long {
56834c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
56844c7070dbSScott Long 	int i;
56854c7070dbSScott Long 
56864c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
56874c7070dbSScott Long 		iflib_txq_setup(txq);
56884c7070dbSScott Long 
56894c7070dbSScott Long 	return (0);
56904c7070dbSScott Long }
56914c7070dbSScott Long 
56924c7070dbSScott Long static void
56934c7070dbSScott Long iflib_tx_structures_free(if_ctx_t ctx)
56944c7070dbSScott Long {
56954c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
56964d261ce2SStephen Hurd 	if_shared_ctx_t sctx = ctx->ifc_sctx;
56974c7070dbSScott Long 	int i, j;
56984c7070dbSScott Long 
56994c7070dbSScott Long 	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
57004d261ce2SStephen Hurd 		for (j = 0; j < sctx->isc_ntxqs; j++)
57014c7070dbSScott Long 			iflib_dma_free(&txq->ift_ifdi[j]);
5702244e7cffSEric Joyner 		iflib_txq_destroy(txq);
57034c7070dbSScott Long 	}
57044c7070dbSScott Long 	free(ctx->ifc_txqs, M_IFLIB);
57054c7070dbSScott Long 	ctx->ifc_txqs = NULL;
57064c7070dbSScott Long 	IFDI_QUEUES_FREE(ctx);
57074c7070dbSScott Long }
57084c7070dbSScott Long 
57094c7070dbSScott Long /*********************************************************************
57104c7070dbSScott Long  *
57114c7070dbSScott Long  *  Initialize all receive rings.
57124c7070dbSScott Long  *
57134c7070dbSScott Long  **********************************************************************/
57144c7070dbSScott Long static int
57154c7070dbSScott Long iflib_rx_structures_setup(if_ctx_t ctx)
57164c7070dbSScott Long {
57174c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5718aaeb188aSBjoern A. Zeeb 	int q;
5719aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
57203d10e9edSMarius Strobl 	int err, i;
5721aaeb188aSBjoern A. Zeeb #endif
57224c7070dbSScott Long 
57234c7070dbSScott Long 	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5724aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
57253d10e9edSMarius Strobl 		if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) {
57263d10e9edSMarius Strobl 			err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
572723ac9029SStephen Hurd 			    TCP_LRO_ENTRIES, min(1024,
57283d10e9edSMarius Strobl 			    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]));
57293d10e9edSMarius Strobl 			if (err != 0) {
57303d10e9edSMarius Strobl 				device_printf(ctx->ifc_dev,
57313d10e9edSMarius Strobl 				    "LRO Initialization failed!\n");
57324c7070dbSScott Long 				goto fail;
57334c7070dbSScott Long 			}
57343d10e9edSMarius Strobl 		}
5735aaeb188aSBjoern A. Zeeb #endif
57364c7070dbSScott Long 		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
57374c7070dbSScott Long 	}
57384c7070dbSScott Long 	return (0);
5739aaeb188aSBjoern A. Zeeb #if defined(INET6) || defined(INET)
57404c7070dbSScott Long fail:
57414c7070dbSScott Long 	/*
57423d10e9edSMarius Strobl 	 * Free LRO resources allocated so far; we will only handle
57434c7070dbSScott Long 	 * the rings that completed, as the failing case will have
57444c7070dbSScott Long 	 * cleaned up for itself.  'q' failed, so it's the terminus.
57454c7070dbSScott Long 	 */
57464c7070dbSScott Long 	rxq = ctx->ifc_rxqs;
57474c7070dbSScott Long 	for (i = 0; i < q; ++i, rxq++) {
57483d10e9edSMarius Strobl 		if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO)
57493d10e9edSMarius Strobl 			tcp_lro_free(&rxq->ifr_lc);
57504c7070dbSScott Long 	}
57514c7070dbSScott Long 	return (err);
5752aaeb188aSBjoern A. Zeeb #endif
57534c7070dbSScott Long }
57544c7070dbSScott Long 
57554c7070dbSScott Long /*********************************************************************
57564c7070dbSScott Long  *
57574c7070dbSScott Long  *  Free all receive rings.
57584c7070dbSScott Long  *
57594c7070dbSScott Long  **********************************************************************/
57604c7070dbSScott Long static void
57614c7070dbSScott Long iflib_rx_structures_free(if_ctx_t ctx)
57624c7070dbSScott Long {
57634c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
5764db8e8f1eSEric Joyner 	if_shared_ctx_t sctx = ctx->ifc_sctx;
5765db8e8f1eSEric Joyner 	int i, j;
57664c7070dbSScott Long 
57673d10e9edSMarius Strobl 	for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
5768db8e8f1eSEric Joyner 		for (j = 0; j < sctx->isc_nrxqs; j++)
5769db8e8f1eSEric Joyner 			iflib_dma_free(&rxq->ifr_ifdi[j]);
57704c7070dbSScott Long 		iflib_rx_sds_free(rxq);
5771007b804fSMarius Strobl #if defined(INET6) || defined(INET)
57723d10e9edSMarius Strobl 		if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO)
57733d10e9edSMarius Strobl 			tcp_lro_free(&rxq->ifr_lc);
5774007b804fSMarius Strobl #endif
57754c7070dbSScott Long 	}
577677c1fcecSEric Joyner 	free(ctx->ifc_rxqs, M_IFLIB);
577777c1fcecSEric Joyner 	ctx->ifc_rxqs = NULL;
57784c7070dbSScott Long }
57794c7070dbSScott Long 
57804c7070dbSScott Long static int
57814c7070dbSScott Long iflib_qset_structures_setup(if_ctx_t ctx)
57824c7070dbSScott Long {
57834c7070dbSScott Long 	int err;
57844c7070dbSScott Long 
57856108c013SStephen Hurd 	/*
57866108c013SStephen Hurd 	 * It is expected that the caller takes care of freeing queues if this
57876108c013SStephen Hurd 	 * fails.
57886108c013SStephen Hurd 	 */
5789ac88e6daSStephen Hurd 	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
5790ac88e6daSStephen Hurd 		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
57914c7070dbSScott Long 		return (err);
5792ac88e6daSStephen Hurd 	}
57934c7070dbSScott Long 
57946108c013SStephen Hurd 	if ((err = iflib_rx_structures_setup(ctx)) != 0)
57954c7070dbSScott Long 		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
57966108c013SStephen Hurd 
57974c7070dbSScott Long 	return (err);
57984c7070dbSScott Long }
57994c7070dbSScott Long 
58004c7070dbSScott Long int
58014c7070dbSScott Long iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
58023e0e6330SStephen Hurd 		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
58034c7070dbSScott Long {
58044c7070dbSScott Long 
58054c7070dbSScott Long 	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
58064c7070dbSScott Long }
58074c7070dbSScott Long 
5808b103855eSStephen Hurd #ifdef SMP
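/*
 * Return the CPU id of the (qid % CPU_COUNT)'th set bit in the context's
 * CPU set, so that queue ids wrap around the available CPUs.
 */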
5809aa3c5dd8SSean Bruno static int
5810b103855eSStephen Hurd find_nth(if_ctx_t ctx, int qid)
58114c7070dbSScott Long {
5812b103855eSStephen Hurd 	cpuset_t cpus;
5813aa3c5dd8SSean Bruno 	int i, cpuid, eqid, count;
58144c7070dbSScott Long 
5815b103855eSStephen Hurd 	CPU_COPY(&ctx->ifc_cpus, &cpus);
5816b103855eSStephen Hurd 	count = CPU_COUNT(&cpus);
5817aa3c5dd8SSean Bruno 	eqid = qid % count;
58184c7070dbSScott Long 	/* clear up to the qid'th bit */
5819aa3c5dd8SSean Bruno 	for (i = 0; i < eqid; i++) {
5820b103855eSStephen Hurd 		cpuid = CPU_FFS(&cpus);
5821aa3c5dd8SSean Bruno 		MPASS(cpuid != 0);
5822b103855eSStephen Hurd 		CPU_CLR(cpuid-1, &cpus);
58234c7070dbSScott Long 	}
5824b103855eSStephen Hurd 	cpuid = CPU_FFS(&cpus);
5825aa3c5dd8SSean Bruno 	MPASS(cpuid != 0);
5826aa3c5dd8SSean Bruno 	return (cpuid-1);
58274c7070dbSScott Long }
58284c7070dbSScott Long 
5829b103855eSStephen Hurd #ifdef SCHED_ULE
5830b103855eSStephen Hurd extern struct cpu_group *cpu_top;              /* CPU topology */
5831b103855eSStephen Hurd 
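/*
 * Return the index of the child group of 'grp' whose CPU mask contains
 * 'cpu', or -1 if 'grp' has no children or no child contains 'cpu'.
 */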
5832b103855eSStephen Hurd static int
5833b103855eSStephen Hurd find_child_with_core(int cpu, struct cpu_group *grp)
5834b103855eSStephen Hurd {
5835b103855eSStephen Hurd 	int i;
5836b103855eSStephen Hurd 
5837b103855eSStephen Hurd 	if (grp->cg_children == 0)
5838b103855eSStephen Hurd 		return -1;
5839b103855eSStephen Hurd 
5840b103855eSStephen Hurd 	MPASS(grp->cg_child);
5841b103855eSStephen Hurd 	for (i = 0; i < grp->cg_children; i++) {
5842b103855eSStephen Hurd 		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
5843b103855eSStephen Hurd 			return i;
5844b103855eSStephen Hurd 	}
5845b103855eSStephen Hurd 
5846b103855eSStephen Hurd 	return -1;
5847b103855eSStephen Hurd }
5848b103855eSStephen Hurd 
5849b103855eSStephen Hurd /*
58500b75ac77SStephen Hurd  * Find the nth "close" core to the specified core.
58510b75ac77SStephen Hurd  * "close" is defined as the deepest level that shares
58520b75ac77SStephen Hurd  * at least an L2 cache.  With threads, this will be
5853f154ece0SStephen Hurd  * threads on the same core.  If the shared cache is L3
58540b75ac77SStephen Hurd  * or higher, simply returns the same core.
5855b103855eSStephen Hurd  */
5856b103855eSStephen Hurd static int
58570b75ac77SStephen Hurd find_close_core(int cpu, int core_offset)
5858b103855eSStephen Hurd {
5859b103855eSStephen Hurd 	struct cpu_group *grp;
5860b103855eSStephen Hurd 	int i;
58610b75ac77SStephen Hurd 	int fcpu;
5862b103855eSStephen Hurd 	cpuset_t cs;
5863b103855eSStephen Hurd 
5864b103855eSStephen Hurd 	grp = cpu_top;
5865b103855eSStephen Hurd 	if (grp == NULL)
5866b103855eSStephen Hurd 		return cpu;
5867b103855eSStephen Hurd 	i = 0;
5868b103855eSStephen Hurd 	while ((i = find_child_with_core(cpu, grp)) != -1) {
5869b103855eSStephen Hurd 		/* If the child only has one cpu, don't descend */
5870b103855eSStephen Hurd 		if (grp->cg_child[i].cg_count <= 1)
5871b103855eSStephen Hurd 			break;
5872b103855eSStephen Hurd 		grp = &grp->cg_child[i];
5873b103855eSStephen Hurd 	}
5874b103855eSStephen Hurd 
5875b103855eSStephen Hurd 	/* If they don't share at least an L2 cache, use the same CPU */
5876b103855eSStephen Hurd 	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
5877b103855eSStephen Hurd 		return cpu;
5878b103855eSStephen Hurd 
5879b103855eSStephen Hurd 	/* Now pick one */
5880b103855eSStephen Hurd 	CPU_COPY(&grp->cg_mask, &cs);
58810b75ac77SStephen Hurd 
58820b75ac77SStephen Hurd 	/* Add the selected CPU offset to core offset. */
58830b75ac77SStephen Hurd 	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
58840b75ac77SStephen Hurd 		if (fcpu - 1 == cpu)
58850b75ac77SStephen Hurd 			break;
58860b75ac77SStephen Hurd 		CPU_CLR(fcpu - 1, &cs);
58870b75ac77SStephen Hurd 	}
58880b75ac77SStephen Hurd 	MPASS(fcpu);
58890b75ac77SStephen Hurd 
58900b75ac77SStephen Hurd 	core_offset += i;
58910b75ac77SStephen Hurd 
58920b75ac77SStephen Hurd 	CPU_COPY(&grp->cg_mask, &cs);
58930b75ac77SStephen Hurd 	for (i = core_offset % grp->cg_count; i > 0; i--) {
5894b103855eSStephen Hurd 		MPASS(CPU_FFS(&cs));
5895b103855eSStephen Hurd 		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5896b103855eSStephen Hurd 	}
5897b103855eSStephen Hurd 	MPASS(CPU_FFS(&cs));
5898b103855eSStephen Hurd 	return CPU_FFS(&cs) - 1;
5899b103855eSStephen Hurd }
5900b103855eSStephen Hurd #else
5901b103855eSStephen Hurd static int
59020b75ac77SStephen Hurd find_close_core(int cpu, int core_offset __unused)
5903b103855eSStephen Hurd {
590497755e83SKonstantin Belousov 	return cpu;
5905b103855eSStephen Hurd }
5906b103855eSStephen Hurd #endif
5907b103855eSStephen Hurd 
5908b103855eSStephen Hurd static int
59090b75ac77SStephen Hurd get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5910b103855eSStephen Hurd {
5911b103855eSStephen Hurd 	switch (type) {
5912b103855eSStephen Hurd 	case IFLIB_INTR_TX:
59130b75ac77SStephen Hurd 		/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
59140b75ac77SStephen Hurd 		/* XXX handle multiple RX threads per core and more than two core per L2 group */
5915b103855eSStephen Hurd 		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5916b103855eSStephen Hurd 	case IFLIB_INTR_RX:
5917b103855eSStephen Hurd 	case IFLIB_INTR_RXTX:
59180b75ac77SStephen Hurd 		/* RX queues get the specified core */
5919b103855eSStephen Hurd 		return qid / CPU_COUNT(&ctx->ifc_cpus);
5920b103855eSStephen Hurd 	default:
5921b103855eSStephen Hurd 		return -1;
5922b103855eSStephen Hurd 	}
5923b103855eSStephen Hurd }
5924b103855eSStephen Hurd #else
59250b75ac77SStephen Hurd #define get_core_offset(ctx, type, qid)	CPU_FIRST()
59260b75ac77SStephen Hurd #define find_close_core(cpuid, tid)	CPU_FIRST()
5927b103855eSStephen Hurd #define find_nth(ctx, gid)		CPU_FIRST()
5928b103855eSStephen Hurd #endif
5929b103855eSStephen Hurd 
5930b103855eSStephen Hurd /* Just to avoid copy/paste */
5931b103855eSStephen Hurd static inline int
5932f855ec81SMarius Strobl iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
5933f855ec81SMarius Strobl     int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
5934f855ec81SMarius Strobl     const char *name)
5935b103855eSStephen Hurd {
5936f855ec81SMarius Strobl 	device_t dev;
5937f154ece0SStephen Hurd 	int co, cpuid, err, tid;
5938b103855eSStephen Hurd 
5939f855ec81SMarius Strobl 	dev = ctx->ifc_dev;
5940f154ece0SStephen Hurd 	co = ctx->ifc_sysctl_core_offset;
5941f154ece0SStephen Hurd 	if (ctx->ifc_sysctl_separate_txrx && type == IFLIB_INTR_TX)
5942f154ece0SStephen Hurd 		co += ctx->ifc_softc_ctx.isc_nrxqsets;
5943f154ece0SStephen Hurd 	cpuid = find_nth(ctx, qid + co);
59440b75ac77SStephen Hurd 	tid = get_core_offset(ctx, type, qid);
59453d10e9edSMarius Strobl 	if (tid < 0) {
59463d10e9edSMarius Strobl 		device_printf(dev, "get_core_offset failed\n");
59473d10e9edSMarius Strobl 		return (EOPNOTSUPP);
59483d10e9edSMarius Strobl 	}
59490b75ac77SStephen Hurd 	cpuid = find_close_core(cpuid, tid);
5950f855ec81SMarius Strobl 	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev, irq->ii_res,
5951f855ec81SMarius Strobl 	    name);
5952b103855eSStephen Hurd 	if (err) {
5953f855ec81SMarius Strobl 		device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
5954b103855eSStephen Hurd 		return (err);
5955b103855eSStephen Hurd 	}
5956b103855eSStephen Hurd #ifdef notyet
5957b103855eSStephen Hurd 	if (cpuid > ctx->ifc_cpuid_highest)
5958b103855eSStephen Hurd 		ctx->ifc_cpuid_highest = cpuid;
5959b103855eSStephen Hurd #endif
59603d10e9edSMarius Strobl 	return (0);
5961b103855eSStephen Hurd }
5962b103855eSStephen Hurd 
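/*
 * Allocate and set up an interrupt of the given type for a queue (or for
 * admin work): install the matching fast interrupt handler, initialize the
 * deferred grouptask, and, for queue interrupts, bind that task to a CPU
 * via iflib_irq_set_affinity().
 */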
59634c7070dbSScott Long int
59644c7070dbSScott Long iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
59654c7070dbSScott Long 			iflib_intr_type_t type, driver_filter_t *filter,
59663e0e6330SStephen Hurd 			void *filter_arg, int qid, const char *name)
59674c7070dbSScott Long {
5968f855ec81SMarius Strobl 	device_t dev;
59694c7070dbSScott Long 	struct grouptask *gtask;
59704c7070dbSScott Long 	struct taskqgroup *tqg;
59714c7070dbSScott Long 	iflib_filter_info_t info;
597223ac9029SStephen Hurd 	gtask_fn_t *fn;
5973b103855eSStephen Hurd 	int tqrid, err;
597495246abbSSean Bruno 	driver_filter_t *intr_fast;
59754c7070dbSScott Long 	void *q;
59764c7070dbSScott Long 
59774c7070dbSScott Long 	info = &ctx->ifc_filter_info;
5978add6f7d0SSean Bruno 	tqrid = rid;
59794c7070dbSScott Long 
59804c7070dbSScott Long 	switch (type) {
59814c7070dbSScott Long 	/* XXX merge tx/rx for netmap? */
59824c7070dbSScott Long 	case IFLIB_INTR_TX:
59834c7070dbSScott Long 		q = &ctx->ifc_txqs[qid];
59844c7070dbSScott Long 		info = &ctx->ifc_txqs[qid].ift_filter_info;
59854c7070dbSScott Long 		gtask = &ctx->ifc_txqs[qid].ift_task;
5986ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
59874c7070dbSScott Long 		fn = _task_fn_tx;
598895246abbSSean Bruno 		intr_fast = iflib_fast_intr;
5989da69b8f9SSean Bruno 		GROUPTASK_INIT(gtask, 0, fn, q);
59905ee36c68SStephen Hurd 		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
59914c7070dbSScott Long 		break;
59924c7070dbSScott Long 	case IFLIB_INTR_RX:
59934c7070dbSScott Long 		q = &ctx->ifc_rxqs[qid];
59944c7070dbSScott Long 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
59954c7070dbSScott Long 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
5996ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
59974c7070dbSScott Long 		fn = _task_fn_rx;
5998ab2e3f79SStephen Hurd 		intr_fast = iflib_fast_intr;
59996c3e93cbSGleb Smirnoff 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
600095246abbSSean Bruno 		break;
600195246abbSSean Bruno 	case IFLIB_INTR_RXTX:
600295246abbSSean Bruno 		q = &ctx->ifc_rxqs[qid];
600395246abbSSean Bruno 		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
600495246abbSSean Bruno 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6005ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
600695246abbSSean Bruno 		fn = _task_fn_rx;
600795246abbSSean Bruno 		intr_fast = iflib_fast_intr_rxtx;
60086c3e93cbSGleb Smirnoff 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
60094c7070dbSScott Long 		break;
60104c7070dbSScott Long 	case IFLIB_INTR_ADMIN:
60114c7070dbSScott Long 		q = ctx;
6012da69b8f9SSean Bruno 		tqrid = -1;
60134c7070dbSScott Long 		info = &ctx->ifc_filter_info;
60144c7070dbSScott Long 		gtask = &ctx->ifc_admin_task;
6015ab2e3f79SStephen Hurd 		tqg = qgroup_if_config_tqg;
60164c7070dbSScott Long 		fn = _task_fn_admin;
601795246abbSSean Bruno 		intr_fast = iflib_fast_intr_ctx;
60184c7070dbSScott Long 		break;
60194c7070dbSScott Long 	default:
60203d10e9edSMarius Strobl 		device_printf(ctx->ifc_dev, "%s: unknown net intr type\n",
60213d10e9edSMarius Strobl 		    __func__);
60223d10e9edSMarius Strobl 		return (EINVAL);
60234c7070dbSScott Long 	}
60244c7070dbSScott Long 
60254c7070dbSScott Long 	info->ifi_filter = filter;
60264c7070dbSScott Long 	info->ifi_filter_arg = filter_arg;
60274c7070dbSScott Long 	info->ifi_task = gtask;
602895246abbSSean Bruno 	info->ifi_ctx = q;
60294c7070dbSScott Long 
6030f855ec81SMarius Strobl 	dev = ctx->ifc_dev;
603195246abbSSean Bruno 	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info,  name);
6032da69b8f9SSean Bruno 	if (err != 0) {
6033f855ec81SMarius Strobl 		device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
60344c7070dbSScott Long 		return (err);
6035da69b8f9SSean Bruno 	}
6036da69b8f9SSean Bruno 	if (type == IFLIB_INTR_ADMIN)
6037da69b8f9SSean Bruno 		return (0);
6038da69b8f9SSean Bruno 
60394c7070dbSScott Long 	if (tqrid != -1) {
6040f855ec81SMarius Strobl 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
6041f855ec81SMarius Strobl 		    q, name);
6042b103855eSStephen Hurd 		if (err)
6043b103855eSStephen Hurd 			return (err);
6044aa3c5dd8SSean Bruno 	} else {
6045f855ec81SMarius Strobl 		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
6046aa3c5dd8SSean Bruno 	}
60474c7070dbSScott Long 
60484c7070dbSScott Long 	return (0);
60494c7070dbSScott Long }
60504c7070dbSScott Long 
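/*
 * Set up a deferred grouptask for a queue that shares an existing interrupt
 * rather than owning its own vector; if an irq is supplied, the task is
 * given CPU affinity via iflib_irq_set_affinity(), otherwise it is attached
 * to the taskqgroup without affinity.
 */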
60514c7070dbSScott Long void
60523e0e6330SStephen Hurd iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
60534c7070dbSScott Long {
60544c7070dbSScott Long 	struct grouptask *gtask;
60554c7070dbSScott Long 	struct taskqgroup *tqg;
605623ac9029SStephen Hurd 	gtask_fn_t *fn;
60574c7070dbSScott Long 	void *q;
6058b103855eSStephen Hurd 	int err;
60594c7070dbSScott Long 
60604c7070dbSScott Long 	switch (type) {
60614c7070dbSScott Long 	case IFLIB_INTR_TX:
60624c7070dbSScott Long 		q = &ctx->ifc_txqs[qid];
60634c7070dbSScott Long 		gtask = &ctx->ifc_txqs[qid].ift_task;
6064ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
60654c7070dbSScott Long 		fn = _task_fn_tx;
6066f98977b5SHans Petter Selasky 		GROUPTASK_INIT(gtask, 0, fn, q);
60674c7070dbSScott Long 		break;
60684c7070dbSScott Long 	case IFLIB_INTR_RX:
60694c7070dbSScott Long 		q = &ctx->ifc_rxqs[qid];
60704c7070dbSScott Long 		gtask = &ctx->ifc_rxqs[qid].ifr_task;
6071ab2e3f79SStephen Hurd 		tqg = qgroup_if_io_tqg;
60724c7070dbSScott Long 		fn = _task_fn_rx;
6073f98977b5SHans Petter Selasky 		NET_GROUPTASK_INIT(gtask, 0, fn, q);
60744c7070dbSScott Long 		break;
60754c7070dbSScott Long 	case IFLIB_INTR_IOV:
60764c7070dbSScott Long 		q = ctx;
60774c7070dbSScott Long 		gtask = &ctx->ifc_vflr_task;
6078ab2e3f79SStephen Hurd 		tqg = qgroup_if_config_tqg;
60794c7070dbSScott Long 		fn = _task_fn_iov;
6080f98977b5SHans Petter Selasky 		GROUPTASK_INIT(gtask, 0, fn, q);
60814c7070dbSScott Long 		break;
60824c7070dbSScott Long 	default:
60834c7070dbSScott Long 		panic("unknown net intr type");
60844c7070dbSScott Long 	}
6085f855ec81SMarius Strobl 	if (irq != NULL) {
6086f855ec81SMarius Strobl 		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
6087f855ec81SMarius Strobl 		    q, name);
6088b103855eSStephen Hurd 		if (err)
6089f855ec81SMarius Strobl 			taskqgroup_attach(tqg, gtask, q, ctx->ifc_dev,
6090f855ec81SMarius Strobl 			    irq->ii_res, name);
6091f855ec81SMarius Strobl 	} else {
6092f855ec81SMarius Strobl 		taskqgroup_attach(tqg, gtask, q, NULL, NULL, name);
6093b103855eSStephen Hurd 	}
6094b103855eSStephen Hurd }
60954c7070dbSScott Long 
60964c7070dbSScott Long void
60974c7070dbSScott Long iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
60984c7070dbSScott Long {
6099b97de13aSMarius Strobl 
61004c7070dbSScott Long 	if (irq->ii_tag)
61014c7070dbSScott Long 		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
61024c7070dbSScott Long 
61034c7070dbSScott Long 	if (irq->ii_res)
6104b97de13aSMarius Strobl 		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
6105b97de13aSMarius Strobl 		    rman_get_rid(irq->ii_res), irq->ii_res);
61064c7070dbSScott Long }
61074c7070dbSScott Long 
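/*
 * Set up a single shared (INTx or MSI) interrupt for the whole context and
 * attach the RX and TX deferred tasks to it.
 */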
61084c7070dbSScott Long static int
61093e0e6330SStephen Hurd iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
61104c7070dbSScott Long {
61114c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
61124c7070dbSScott Long 	iflib_rxq_t rxq = ctx->ifc_rxqs;
61134c7070dbSScott Long 	if_irq_t irq = &ctx->ifc_legacy_irq;
61144c7070dbSScott Long 	iflib_filter_info_t info;
6115f855ec81SMarius Strobl 	device_t dev;
61164c7070dbSScott Long 	struct grouptask *gtask;
6117f855ec81SMarius Strobl 	struct resource *res;
61184c7070dbSScott Long 	struct taskqgroup *tqg;
61194c7070dbSScott Long 	void *q;
6120d49e83eaSMarius Strobl 	int err, tqrid;
612141669133SMark Johnston 	bool rx_only;
61224c7070dbSScott Long 
61234c7070dbSScott Long 	q = &ctx->ifc_rxqs[0];
61244c7070dbSScott Long 	info = &rxq[0].ifr_filter_info;
61254c7070dbSScott Long 	gtask = &rxq[0].ifr_task;
6126ab2e3f79SStephen Hurd 	tqg = qgroup_if_io_tqg;
6127d49e83eaSMarius Strobl 	tqrid = *rid;
612841669133SMark Johnston 	rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0;
61294c7070dbSScott Long 
61304c7070dbSScott Long 	ctx->ifc_flags |= IFC_LEGACY;
61314c7070dbSScott Long 	info->ifi_filter = filter;
61324c7070dbSScott Long 	info->ifi_filter_arg = filter_arg;
61334c7070dbSScott Long 	info->ifi_task = gtask;
613441669133SMark Johnston 	info->ifi_ctx = rx_only ? ctx : q;
61354c7070dbSScott Long 
6136f855ec81SMarius Strobl 	dev = ctx->ifc_dev;
61374c7070dbSScott Long 	/* We allocate a single interrupt resource */
613841669133SMark Johnston 	err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? iflib_fast_intr_ctx :
613941669133SMark Johnston 	    iflib_fast_intr_rxtx, NULL, info, name);
614041669133SMark Johnston 	if (err != 0)
61414c7070dbSScott Long 		return (err);
6142f98977b5SHans Petter Selasky 	NET_GROUPTASK_INIT(gtask, 0, _task_fn_rx, q);
6143f855ec81SMarius Strobl 	res = irq->ii_res;
6144f855ec81SMarius Strobl 	taskqgroup_attach(tqg, gtask, q, dev, res, name);
61454c7070dbSScott Long 
61464c7070dbSScott Long 	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
6147f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
6148f855ec81SMarius Strobl 	    "tx");
61494c7070dbSScott Long 	return (0);
61504c7070dbSScott Long }
61514c7070dbSScott Long 
61524c7070dbSScott Long void
61534c7070dbSScott Long iflib_led_create(if_ctx_t ctx)
61544c7070dbSScott Long {
61554c7070dbSScott Long 
61564c7070dbSScott Long 	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
61574c7070dbSScott Long 	    device_get_nameunit(ctx->ifc_dev));
61584c7070dbSScott Long }
61594c7070dbSScott Long 
61604c7070dbSScott Long void
61614c7070dbSScott Long iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
61624c7070dbSScott Long {
61634c7070dbSScott Long 
61644c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
61654c7070dbSScott Long }
61664c7070dbSScott Long 
61674c7070dbSScott Long void
61684c7070dbSScott Long iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
61694c7070dbSScott Long {
61704c7070dbSScott Long 
61714c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
61724c7070dbSScott Long }
61734c7070dbSScott Long 
61744c7070dbSScott Long void
61754c7070dbSScott Long iflib_admin_intr_deferred(if_ctx_t ctx)
61764c7070dbSScott Long {
61771248952aSSean Bruno #ifdef INVARIANTS
61781248952aSSean Bruno 	struct grouptask *gtask;
617946fa0c25SEric Joyner 
61801248952aSSean Bruno 	gtask = &ctx->ifc_admin_task;
6181d0d0ad0aSStephen Hurd 	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
61821248952aSSean Bruno #endif
61834c7070dbSScott Long 
61844c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
61854c7070dbSScott Long }
61864c7070dbSScott Long 
61874c7070dbSScott Long void
61884c7070dbSScott Long iflib_iov_intr_deferred(if_ctx_t ctx)
61894c7070dbSScott Long {
61904c7070dbSScott Long 
61914c7070dbSScott Long 	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
61924c7070dbSScott Long }
61934c7070dbSScott Long 
61944c7070dbSScott Long void
6195d49e83eaSMarius Strobl iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, const char *name)
61964c7070dbSScott Long {
61974c7070dbSScott Long 
6198f855ec81SMarius Strobl 	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
6199f855ec81SMarius Strobl 	    name);
62004c7070dbSScott Long }
62014c7070dbSScott Long 
62024c7070dbSScott Long void
6203aa8a24d3SStephen Hurd iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
6204aa8a24d3SStephen Hurd 	const char *name)
62054c7070dbSScott Long {
62064c7070dbSScott Long 
62074c7070dbSScott Long 	GROUPTASK_INIT(gtask, 0, fn, ctx);
6208f855ec81SMarius Strobl 	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL,
6209f855ec81SMarius Strobl 	    name);
62104c7070dbSScott Long }
62114c7070dbSScott Long 
62124c7070dbSScott Long void
621323ac9029SStephen Hurd iflib_config_gtask_deinit(struct grouptask *gtask)
621423ac9029SStephen Hurd {
621523ac9029SStephen Hurd 
6216ab2e3f79SStephen Hurd 	taskqgroup_detach(qgroup_if_config_tqg, gtask);
621723ac9029SStephen Hurd }
621823ac9029SStephen Hurd 
621923ac9029SStephen Hurd void
622023ac9029SStephen Hurd iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
62214c7070dbSScott Long {
62224c7070dbSScott Long 	if_t ifp = ctx->ifc_ifp;
62234c7070dbSScott Long 	iflib_txq_t txq = ctx->ifc_txqs;
62244c7070dbSScott Long 
62254c7070dbSScott Long 	if_setbaudrate(ifp, baudrate);
62267b610b60SSean Bruno 	if (baudrate >= IF_Gbps(10)) {
62277b610b60SSean Bruno 		STATE_LOCK(ctx);
622895246abbSSean Bruno 		ctx->ifc_flags |= IFC_PREFETCH;
62297b610b60SSean Bruno 		STATE_UNLOCK(ctx);
62307b610b60SSean Bruno 	}
62314c7070dbSScott Long 	/* If link down, disable watchdog */
62324c7070dbSScott Long 	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
62334c7070dbSScott Long 		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
62344c7070dbSScott Long 			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
62354c7070dbSScott Long 	}
62364c7070dbSScott Long 	ctx->ifc_link_state = link_state;
62374c7070dbSScott Long 	if_link_state_change(ifp, link_state);
62384c7070dbSScott Long }
62394c7070dbSScott Long 
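/*
 * Ask the driver (via isc_txd_credits_update) how many TX descriptors have
 * completed since the last call and advance the queue's processed counters,
 * wrapping at the ring size.
 */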
62404c7070dbSScott Long static int
62414c7070dbSScott Long iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
62424c7070dbSScott Long {
62434c7070dbSScott Long 	int credits;
62441248952aSSean Bruno #ifdef INVARIANTS
62451248952aSSean Bruno 	int credits_pre = txq->ift_cidx_processed;
62461248952aSSean Bruno #endif
62474c7070dbSScott Long 
62488a04b53dSKonstantin Belousov 	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
62498a04b53dSKonstantin Belousov 	    BUS_DMASYNC_POSTREAD);
625095246abbSSean Bruno 	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
62514c7070dbSScott Long 		return (0);
62524c7070dbSScott Long 
62534c7070dbSScott Long 	txq->ift_processed += credits;
62544c7070dbSScott Long 	txq->ift_cidx_processed += credits;
62554c7070dbSScott Long 
62561248952aSSean Bruno 	MPASS(credits_pre + credits == txq->ift_cidx_processed);
62574c7070dbSScott Long 	if (txq->ift_cidx_processed >= txq->ift_size)
62584c7070dbSScott Long 		txq->ift_cidx_processed -= txq->ift_size;
62594c7070dbSScott Long 	return (credits);
62604c7070dbSScott Long }
62614c7070dbSScott Long 
62624c7070dbSScott Long static int
626395246abbSSean Bruno iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
62644c7070dbSScott Long {
626595dcf343SMarius Strobl 	iflib_fl_t fl;
626695dcf343SMarius Strobl 	u_int i;
62674c7070dbSScott Long 
626895dcf343SMarius Strobl 	for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
626995dcf343SMarius Strobl 		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
627095dcf343SMarius Strobl 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
627123ac9029SStephen Hurd 	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
627223ac9029SStephen Hurd 	    budget));
62734c7070dbSScott Long }
62744c7070dbSScott Long 
62754c7070dbSScott Long void
62764c7070dbSScott Long iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
62774c7070dbSScott Long 	const char *description, if_int_delay_info_t info,
62784c7070dbSScott Long 	int offset, int value)
62794c7070dbSScott Long {
62804c7070dbSScott Long 	info->iidi_ctx = ctx;
62814c7070dbSScott Long 	info->iidi_offset = offset;
62824c7070dbSScott Long 	info->iidi_value = value;
62834c7070dbSScott Long 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
62844c7070dbSScott Long 	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
62857029da5cSPawel Biernacki 	    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
62864c7070dbSScott Long 	    info, 0, iflib_sysctl_int_delay, "I", description);
62874c7070dbSScott Long }
62884c7070dbSScott Long 
6289aa8a24d3SStephen Hurd struct sx *
62904c7070dbSScott Long iflib_ctx_lock_get(if_ctx_t ctx)
62914c7070dbSScott Long {
62924c7070dbSScott Long 
6293aa8a24d3SStephen Hurd 	return (&ctx->ifc_ctx_sx);
62944c7070dbSScott Long }
62954c7070dbSScott Long 
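/*
 * Work out the interrupt layout for the device: try MSI-X first, sizing the
 * number of queue vectors from the available messages, CPU count, RSS
 * buckets, and tunables, then fall back to MSI and finally to a legacy
 * interrupt if MSI-X cannot be used.
 */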
62964c7070dbSScott Long static int
62974c7070dbSScott Long iflib_msix_init(if_ctx_t ctx)
62984c7070dbSScott Long {
62994c7070dbSScott Long 	device_t dev = ctx->ifc_dev;
63004c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
63014c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
63023d10e9edSMarius Strobl 	int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues;
63033d10e9edSMarius Strobl 	int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors;
63044c7070dbSScott Long 
6305d2735264SStephen Hurd 	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
6306d2735264SStephen Hurd 	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
630723ac9029SStephen Hurd 
6308b97de13aSMarius Strobl 	if (bootverbose)
6309b97de13aSMarius Strobl 		device_printf(dev, "msix_init qsets capped at %d\n",
6310b97de13aSMarius Strobl 		    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));
63111248952aSSean Bruno 
63124c7070dbSScott Long 	/* Override by tuneable */
6313ea351d3fSSean Bruno 	if (scctx->isc_disable_msix)
63144c7070dbSScott Long 		goto msi;
63154c7070dbSScott Long 
6316b97de13aSMarius Strobl 	/* First try MSI-X */
6317b97de13aSMarius Strobl 	if ((msgs = pci_msix_count(dev)) == 0) {
6318b97de13aSMarius Strobl 		if (bootverbose)
6319b97de13aSMarius Strobl 			device_printf(dev, "MSI-X not supported or disabled\n");
6320b97de13aSMarius Strobl 		goto msi;
6321b97de13aSMarius Strobl 	}
63223d10e9edSMarius Strobl 
63233d10e9edSMarius Strobl 	bar = ctx->ifc_softc_ctx.isc_msix_bar;
63244c7070dbSScott Long 	/*
63254c7070dbSScott Long 	 * bar == -1 => "trust me I know what I'm doing"
63264c7070dbSScott Long 	 * Some drivers are for hardware that is so shoddily
63274c7070dbSScott Long 	 * documented that no one knows which bars are which,
63284c7070dbSScott Long 	 * so the developer has to map all bars. This hack
6329b97de13aSMarius Strobl 	 * allows shoddy garbage to use MSI-X in this framework.
63304c7070dbSScott Long 	 */
63314c7070dbSScott Long 	if (bar != -1) {
63324c7070dbSScott Long 		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
63334c7070dbSScott Long 	            SYS_RES_MEMORY, &bar, RF_ACTIVE);
63344c7070dbSScott Long 		if (ctx->ifc_msix_mem == NULL) {
6335b97de13aSMarius Strobl 			device_printf(dev, "Unable to map MSI-X table\n");
63364c7070dbSScott Long 			goto msi;
63374c7070dbSScott Long 		}
63384c7070dbSScott Long 	}
63393d10e9edSMarius Strobl 
63403d10e9edSMarius Strobl 	admincnt = sctx->isc_admin_intrcnt;
63414c7070dbSScott Long #if IFLIB_DEBUG
63424c7070dbSScott Long 	/* use only 1 qset in debug mode */
63434c7070dbSScott Long 	queuemsgs = min(msgs - admincnt, 1);
63444c7070dbSScott Long #else
63454c7070dbSScott Long 	queuemsgs = msgs - admincnt;
63464c7070dbSScott Long #endif
63474c7070dbSScott Long #ifdef RSS
63484c7070dbSScott Long 	queues = imin(queuemsgs, rss_getnumbuckets());
63494c7070dbSScott Long #else
63504c7070dbSScott Long 	queues = queuemsgs;
63514c7070dbSScott Long #endif
63524c7070dbSScott Long 	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
6353b97de13aSMarius Strobl 	if (bootverbose)
6354b97de13aSMarius Strobl 		device_printf(dev,
6355b97de13aSMarius Strobl 		    "intr CPUs: %d queue msgs: %d admincnt: %d\n",
63564c7070dbSScott Long 		    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
63574c7070dbSScott Long #ifdef  RSS
63584c7070dbSScott Long 	/* If we're doing RSS, clamp at the number of RSS buckets */
63594c7070dbSScott Long 	if (queues > rss_getnumbuckets())
63604c7070dbSScott Long 		queues = rss_getnumbuckets();
63614c7070dbSScott Long #endif
636223ac9029SStephen Hurd 	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
636323ac9029SStephen Hurd 		rx_queues = iflib_num_rx_queues;
63644c7070dbSScott Long 	else
63654c7070dbSScott Long 		rx_queues = queues;
6366d2735264SStephen Hurd 
6367d2735264SStephen Hurd 	if (rx_queues > scctx->isc_nrxqsets)
6368d2735264SStephen Hurd 		rx_queues = scctx->isc_nrxqsets;
6369d2735264SStephen Hurd 
637023ac9029SStephen Hurd 	/*
637123ac9029SStephen Hurd 	 * We want this to be all logical CPUs by default
637223ac9029SStephen Hurd 	 */
63734c7070dbSScott Long 	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
63744c7070dbSScott Long 		tx_queues = iflib_num_tx_queues;
63754c7070dbSScott Long 	else
637623ac9029SStephen Hurd 		tx_queues = mp_ncpus;
637723ac9029SStephen Hurd 
6378d2735264SStephen Hurd 	if (tx_queues > scctx->isc_ntxqsets)
6379d2735264SStephen Hurd 		tx_queues = scctx->isc_ntxqsets;
6380d2735264SStephen Hurd 
638123ac9029SStephen Hurd 	if (ctx->ifc_sysctl_qs_eq_override == 0) {
638223ac9029SStephen Hurd #ifdef INVARIANTS
638323ac9029SStephen Hurd 		if (tx_queues != rx_queues)
638477c1fcecSEric Joyner 			device_printf(dev,
638577c1fcecSEric Joyner 			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
638623ac9029SStephen Hurd 			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
638723ac9029SStephen Hurd #endif
638823ac9029SStephen Hurd 		tx_queues = min(rx_queues, tx_queues);
638923ac9029SStephen Hurd 		rx_queues = min(rx_queues, tx_queues);
639023ac9029SStephen Hurd 	}
63914c7070dbSScott Long 
63923d10e9edSMarius Strobl 	vectors = rx_queues + admincnt;
63933d10e9edSMarius Strobl 	if (msgs < vectors) {
63943d10e9edSMarius Strobl 		device_printf(dev,
63953d10e9edSMarius Strobl 		    "insufficient number of MSI-X vectors "
63963d10e9edSMarius Strobl 		    "(supported %d, need %d)\n", msgs, vectors);
63973d10e9edSMarius Strobl 		goto msi;
63983d10e9edSMarius Strobl 	}
63993d10e9edSMarius Strobl 
64001722eeacSMarius Strobl 	device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues,
64011722eeacSMarius Strobl 	    tx_queues);
64023d10e9edSMarius Strobl 	msgs = vectors;
64034c7070dbSScott Long 	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
64043d10e9edSMarius Strobl 		if (vectors != msgs) {
64053d10e9edSMarius Strobl 			device_printf(dev,
64063d10e9edSMarius Strobl 			    "Unable to allocate sufficient MSI-X vectors "
64073d10e9edSMarius Strobl 			    "(got %d, need %d)\n", vectors, msgs);
64083d10e9edSMarius Strobl 			pci_release_msi(dev);
64093d10e9edSMarius Strobl 			if (bar != -1) {
64103d10e9edSMarius Strobl 				bus_release_resource(dev, SYS_RES_MEMORY, bar,
64113d10e9edSMarius Strobl 				    ctx->ifc_msix_mem);
64123d10e9edSMarius Strobl 				ctx->ifc_msix_mem = NULL;
64133d10e9edSMarius Strobl 			}
64143d10e9edSMarius Strobl 			goto msi;
64153d10e9edSMarius Strobl 		}
6416b97de13aSMarius Strobl 		device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
6417b97de13aSMarius Strobl 		    vectors);
64184c7070dbSScott Long 		scctx->isc_vectors = vectors;
64194c7070dbSScott Long 		scctx->isc_nrxqsets = rx_queues;
64204c7070dbSScott Long 		scctx->isc_ntxqsets = tx_queues;
64214c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_MSIX;
642223ac9029SStephen Hurd 
64234c7070dbSScott Long 		return (vectors);
64244c7070dbSScott Long 	} else {
642577c1fcecSEric Joyner 		device_printf(dev,
64263d10e9edSMarius Strobl 		    "failed to allocate %d MSI-X vectors, err: %d\n", vectors,
64273d10e9edSMarius Strobl 		    err);
64283d10e9edSMarius Strobl 		if (bar != -1) {
6429e4defe55SMarius Strobl 			bus_release_resource(dev, SYS_RES_MEMORY, bar,
6430e4defe55SMarius Strobl 			    ctx->ifc_msix_mem);
6431e4defe55SMarius Strobl 			ctx->ifc_msix_mem = NULL;
64324c7070dbSScott Long 		}
64333d10e9edSMarius Strobl 	}
64343d10e9edSMarius Strobl 
64354c7070dbSScott Long msi:
64364c7070dbSScott Long 	vectors = pci_msi_count(dev);
64374c7070dbSScott Long 	scctx->isc_nrxqsets = 1;
64384c7070dbSScott Long 	scctx->isc_ntxqsets = 1;
64394c7070dbSScott Long 	scctx->isc_vectors = vectors;
64404c7070dbSScott Long 	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
64414c7070dbSScott Long 		device_printf(dev, "Using an MSI interrupt\n");
64424c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_MSI;
64434c7070dbSScott Long 	} else {
6444e4defe55SMarius Strobl 		scctx->isc_vectors = 1;
64454c7070dbSScott Long 		device_printf(dev, "Using a Legacy interrupt\n");
64464c7070dbSScott Long 		scctx->isc_intr = IFLIB_INTR_LEGACY;
64474c7070dbSScott Long 	}
64484c7070dbSScott Long 
64494c7070dbSScott Long 	return (vectors);
64504c7070dbSScott Long }
64514c7070dbSScott Long 
6452e4defe55SMarius Strobl static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
64534c7070dbSScott Long 
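/*
 * Sysctl handler that formats an mp_ring's producer/consumer indices
 * (pidx_head, pidx_tail, cidx) and run state as a human-readable string.
 */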
64544c7070dbSScott Long static int
64554c7070dbSScott Long mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
64564c7070dbSScott Long {
64574c7070dbSScott Long 	int rc;
64584c7070dbSScott Long 	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
64594c7070dbSScott Long 	struct sbuf *sb;
6460e4defe55SMarius Strobl 	const char *ring_state = "UNKNOWN";
64614c7070dbSScott Long 
64624c7070dbSScott Long 	/* XXX needed ? */
64634c7070dbSScott Long 	rc = sysctl_wire_old_buffer(req, 0);
64644c7070dbSScott Long 	MPASS(rc == 0);
64654c7070dbSScott Long 	if (rc != 0)
64664c7070dbSScott Long 		return (rc);
64674c7070dbSScott Long 	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
64684c7070dbSScott Long 	MPASS(sb != NULL);
64694c7070dbSScott Long 	if (sb == NULL)
64704c7070dbSScott Long 		return (ENOMEM);
64714c7070dbSScott Long 	if (state[3] <= 3)
64724c7070dbSScott Long 		ring_state = ring_states[state[3]];
64734c7070dbSScott Long 
64744c7070dbSScott Long 	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
64754c7070dbSScott Long 		    state[0], state[1], state[2], ring_state);
64764c7070dbSScott Long 	rc = sbuf_finish(sb);
64774c7070dbSScott Long 	sbuf_delete(sb);
64784c7070dbSScott Long 	return (rc);
64794c7070dbSScott Long }
64804c7070dbSScott Long 
648123ac9029SStephen Hurd enum iflib_ndesc_handler {
648223ac9029SStephen Hurd 	IFLIB_NTXD_HANDLER,
648323ac9029SStephen Hurd 	IFLIB_NRXD_HANDLER,
648423ac9029SStephen Hurd };
64854c7070dbSScott Long 
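/*
 * Sysctl handler for the override_ntxds/override_nrxds tunables: report the
 * per-queue descriptor counts as a comma-separated list and parse a
 * user-supplied list back into the context's sysctl state.
 */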
648623ac9029SStephen Hurd static int
648723ac9029SStephen Hurd mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
648823ac9029SStephen Hurd {
648923ac9029SStephen Hurd 	if_ctx_t ctx = (void *)arg1;
649023ac9029SStephen Hurd 	enum iflib_ndesc_handler type = arg2;
649123ac9029SStephen Hurd 	char buf[256] = {0};
649295246abbSSean Bruno 	qidx_t *ndesc;
649323ac9029SStephen Hurd 	char *p, *next;
649423ac9029SStephen Hurd 	int nqs, rc, i;
649523ac9029SStephen Hurd 
649623ac9029SStephen Hurd 	nqs = 8;
649723ac9029SStephen Hurd 	switch(type) {
649823ac9029SStephen Hurd 	case IFLIB_NTXD_HANDLER:
649923ac9029SStephen Hurd 		ndesc = ctx->ifc_sysctl_ntxds;
650023ac9029SStephen Hurd 		if (ctx->ifc_sctx)
650123ac9029SStephen Hurd 			nqs = ctx->ifc_sctx->isc_ntxqs;
650223ac9029SStephen Hurd 		break;
650323ac9029SStephen Hurd 	case IFLIB_NRXD_HANDLER:
650423ac9029SStephen Hurd 		ndesc = ctx->ifc_sysctl_nrxds;
650523ac9029SStephen Hurd 		if (ctx->ifc_sctx)
650623ac9029SStephen Hurd 			nqs = ctx->ifc_sctx->isc_nrxqs;
650723ac9029SStephen Hurd 		break;
65081ae4848cSMatt Macy 	default:
65093d10e9edSMarius Strobl 		printf("%s: unhandled type\n", __func__);
65103d10e9edSMarius Strobl 		return (EINVAL);
651123ac9029SStephen Hurd 	}
651223ac9029SStephen Hurd 	if (nqs == 0)
651323ac9029SStephen Hurd 		nqs = 8;
651423ac9029SStephen Hurd 
651523ac9029SStephen Hurd 	for (i = 0; i < 8; i++) {
651623ac9029SStephen Hurd 		if (i >= nqs)
651723ac9029SStephen Hurd 			break;
651823ac9029SStephen Hurd 		if (i)
651923ac9029SStephen Hurd 			strcat(buf, ",");
652023ac9029SStephen Hurd 		sprintf(strchr(buf, 0), "%d", ndesc[i]);
652123ac9029SStephen Hurd 	}
652223ac9029SStephen Hurd 
652323ac9029SStephen Hurd 	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
652423ac9029SStephen Hurd 	if (rc || req->newptr == NULL)
652523ac9029SStephen Hurd 		return rc;
652623ac9029SStephen Hurd 
652723ac9029SStephen Hurd 	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
652823ac9029SStephen Hurd 	    i++, p = strsep(&next, " ,")) {
652923ac9029SStephen Hurd 		ndesc[i] = strtoul(p, NULL, 10);
653023ac9029SStephen Hurd 	}
653123ac9029SStephen Hurd 
653223ac9029SStephen Hurd 	return (rc);
653323ac9029SStephen Hurd }
65344c7070dbSScott Long 
65354c7070dbSScott Long #define NAME_BUFLEN 32
65364c7070dbSScott Long static void
65374c7070dbSScott Long iflib_add_device_sysctl_pre(if_ctx_t ctx)
65384c7070dbSScott Long {
65394c7070dbSScott Long         device_t dev = iflib_get_dev(ctx);
65404c7070dbSScott Long 	struct sysctl_oid_list *child, *oid_list;
65414c7070dbSScott Long 	struct sysctl_ctx_list *ctx_list;
65424c7070dbSScott Long 	struct sysctl_oid *node;
65434c7070dbSScott Long 
65444c7070dbSScott Long 	ctx_list = device_get_sysctl_ctx(dev);
65454c7070dbSScott Long 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
65464c7070dbSScott Long 	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
65477029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IFLIB fields");
65484c7070dbSScott Long 	oid_list = SYSCTL_CHILDREN(node);
65494c7070dbSScott Long 
655010a1e981SEric Joyner 	SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
655110a1e981SEric Joyner 		       CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version,
655223ac9029SStephen Hurd 		       "driver version");
655323ac9029SStephen Hurd 
65544c7070dbSScott Long 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
65554c7070dbSScott Long 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
65564c7070dbSScott Long 			"# of txqs to use, 0 => use default #");
65574c7070dbSScott Long 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
655823ac9029SStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
655923ac9029SStephen Hurd 			"# of rxqs to use, 0 => use default #");
656023ac9029SStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
656123ac9029SStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
656223ac9029SStephen Hurd                        "permit #txq != #rxq");
6563ea351d3fSSean Bruno 	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6564ea351d3fSSean Bruno                       CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6565b97de13aSMarius Strobl                       "disable MSI-X (default 0)");
6566f4d2154eSStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6567f4d2154eSStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
65681722eeacSMarius Strobl 		       "set the RX budget");
6569fe51d4cdSStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
6570fe51d4cdSStephen Hurd 		       CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
65711722eeacSMarius Strobl 		       "cause TX to abdicate instead of running to completion");
6572f154ece0SStephen Hurd 	ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
6573f154ece0SStephen Hurd 	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset",
6574f154ece0SStephen Hurd 		       CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
6575f154ece0SStephen Hurd 		       "offset to start using cores at");
6576f154ece0SStephen Hurd 	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx",
6577f154ece0SStephen Hurd 		       CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
6578f154ece0SStephen Hurd 		       "use separate cores for TX and RX");
65794c7070dbSScott Long 
658023ac9029SStephen Hurd 	/* XXX change for per-queue sizes */
658123ac9029SStephen Hurd 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
65827029da5cSPawel Biernacki 	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
65837029da5cSPawel Biernacki 	    IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A",
65841722eeacSMarius Strobl 	    "list of # of TX descriptors to use, 0 = use default #");
658523ac9029SStephen Hurd 	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
65867029da5cSPawel Biernacki 	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
65877029da5cSPawel Biernacki 	    IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A",
65881722eeacSMarius Strobl 	    "list of # of RX descriptors to use, 0 = use default #");
65894c7070dbSScott Long }
65904c7070dbSScott Long 
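/*
 * Second-stage sysctl setup: once the TX/RX queues exist, hang a child node
 * per queue (txqNN/rxqNN, plus one per free list) off the "iflib" node
 * created above, exporting producer/consumer indices, mp_ring counters and,
 * when MEMORY_LOGGING is enabled, mbuf/cluster accounting.
 */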
65914c7070dbSScott Long static void
65924c7070dbSScott Long iflib_add_device_sysctl_post(if_ctx_t ctx)
65934c7070dbSScott Long {
65944c7070dbSScott Long 	if_shared_ctx_t sctx = ctx->ifc_sctx;
65954c7070dbSScott Long 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
65964c7070dbSScott Long 	device_t dev = iflib_get_dev(ctx);
65974c7070dbSScott Long 	struct sysctl_oid_list *child;
65984c7070dbSScott Long 	struct sysctl_ctx_list *ctx_list;
65994c7070dbSScott Long 	iflib_fl_t fl;
66004c7070dbSScott Long 	iflib_txq_t txq;
66014c7070dbSScott Long 	iflib_rxq_t rxq;
66024c7070dbSScott Long 	int i, j;
66034c7070dbSScott Long 	char namebuf[NAME_BUFLEN];
66044c7070dbSScott Long 	char *qfmt;
66054c7070dbSScott Long 	struct sysctl_oid *queue_node, *fl_node, *node;
66064c7070dbSScott Long 	struct sysctl_oid_list *queue_list, *fl_list;
66074c7070dbSScott Long 	ctx_list = device_get_sysctl_ctx(dev);
66084c7070dbSScott Long 
66094c7070dbSScott Long 	node = ctx->ifc_sysctl_node;
66104c7070dbSScott Long 	child = SYSCTL_CHILDREN(node);
66114c7070dbSScott Long 
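	/* Widen the queue-name format so the names stay fixed-width and sort numerically. */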
66124c7070dbSScott Long 	if (scctx->isc_ntxqsets > 100)
66134c7070dbSScott Long 		qfmt = "txq%03d";
66144c7070dbSScott Long 	else if (scctx->isc_ntxqsets > 10)
66154c7070dbSScott Long 		qfmt = "txq%02d";
66164c7070dbSScott Long 	else
66174c7070dbSScott Long 		qfmt = "txq%d";
66184c7070dbSScott Long 	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
66194c7070dbSScott Long 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
66204c7070dbSScott Long 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
66217029da5cSPawel Biernacki 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
66224c7070dbSScott Long 		queue_list = SYSCTL_CHILDREN(queue_node);
66234c7070dbSScott Long #if MEMORY_LOGGING
66244c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
66254c7070dbSScott Long 				CTLFLAG_RD,
66264c7070dbSScott Long 				&txq->ift_dequeued, "total mbufs freed");
66274c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
66284c7070dbSScott Long 				CTLFLAG_RD,
66294c7070dbSScott Long 				&txq->ift_enqueued, "total mbufs enqueued");
66304c7070dbSScott Long #endif
66314c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
66324c7070dbSScott Long 				   CTLFLAG_RD,
66334c7070dbSScott Long 				   &txq->ift_mbuf_defrag, "# of times m_defrag was called");
66344c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
66354c7070dbSScott Long 				   CTLFLAG_RD,
66364c7070dbSScott Long 				   &txq->ift_pullups, "# of times m_pullup was called");
66374c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
66384c7070dbSScott Long 				   CTLFLAG_RD,
66394c7070dbSScott Long 				   &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
66404c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
66414c7070dbSScott Long 				   CTLFLAG_RD,
664223ac9029SStephen Hurd 				   &txq->ift_no_desc_avail, "# of times no descriptors were available");
66434c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
66444c7070dbSScott Long 				   CTLFLAG_RD,
66451722eeacSMarius Strobl 				   &txq->ift_map_failed, "# of times DMA map failed");
66464c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
66474c7070dbSScott Long 				   CTLFLAG_RD,
66484c7070dbSScott Long 				   &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
66494c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
66504c7070dbSScott Long 				   CTLFLAG_RD,
66514c7070dbSScott Long 				   &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
66524c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
66534c7070dbSScott Long 				   CTLFLAG_RD,
66544c7070dbSScott Long 				   &txq->ift_pidx, 1, "Producer Index");
66554c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
66564c7070dbSScott Long 				   CTLFLAG_RD,
66574c7070dbSScott Long 				   &txq->ift_cidx, 1, "Consumer Index");
66584c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
66594c7070dbSScott Long 				   CTLFLAG_RD,
66604c7070dbSScott Long 				   &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
66614c7070dbSScott Long 		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
66624c7070dbSScott Long 				   CTLFLAG_RD,
66634c7070dbSScott Long 				   &txq->ift_in_use, 1, "descriptors in use");
66644c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
66654c7070dbSScott Long 				   CTLFLAG_RD,
66664c7070dbSScott Long 				   &txq->ift_processed, "descriptors processed for clean");
66674c7070dbSScott Long 		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
66684c7070dbSScott Long 				   CTLFLAG_RD,
66694c7070dbSScott Long 				   &txq->ift_cleaned, "total cleaned");
66704c7070dbSScott Long 		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
66717029da5cSPawel Biernacki 		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
66727029da5cSPawel Biernacki 		    __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
66737029da5cSPawel Biernacki 		    mp_ring_state_handler, "A", "soft ring state");
66744c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
667595246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->enqueues,
66764c7070dbSScott Long 				       "# of enqueues to the mp_ring for this queue");
66774c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
667895246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->drops,
66794c7070dbSScott Long 				       "# of drops in the mp_ring for this queue");
66804c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
668195246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->starts,
66824c7070dbSScott Long 				       "# of normal consumer starts in the mp_ring for this queue");
66834c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
668495246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->stalls,
66854c7070dbSScott Long 					       "# of consumer stalls in the mp_ring for this queue");
66864c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
668795246abbSSean Bruno 			       CTLFLAG_RD, &txq->ift_br->restarts,
66884c7070dbSScott Long 				       "# of consumer restarts in the mp_ring for this queue");
66894c7070dbSScott Long 		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
669095246abbSSean Bruno 				       CTLFLAG_RD, &txq->ift_br->abdications,
66914c7070dbSScott Long 				       "# of consumer abdications in the mp_ring for this queue");
66924c7070dbSScott Long 	}
66934c7070dbSScott Long 
66944c7070dbSScott Long 	if (scctx->isc_nrxqsets > 100)
66954c7070dbSScott Long 		qfmt = "rxq%03d";
66964c7070dbSScott Long 	else if (scctx->isc_nrxqsets > 10)
66974c7070dbSScott Long 		qfmt = "rxq%02d";
66984c7070dbSScott Long 	else
66994c7070dbSScott Long 		qfmt = "rxq%d";
67004c7070dbSScott Long 	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
67014c7070dbSScott Long 		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
67024c7070dbSScott Long 		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
67037029da5cSPawel Biernacki 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
67044c7070dbSScott Long 		queue_list = SYSCTL_CHILDREN(queue_node);
670523ac9029SStephen Hurd 		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
67064c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
67074c7070dbSScott Long 				       CTLFLAG_RD,
67084c7070dbSScott Long 				       &rxq->ifr_cq_cidx, 1, "Consumer Index");
67094c7070dbSScott Long 		}
6710da69b8f9SSean Bruno 
67114c7070dbSScott Long 		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
67124c7070dbSScott Long 			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
67134c7070dbSScott Long 			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
67147029da5cSPawel Biernacki 			    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist Name");
67154c7070dbSScott Long 			fl_list = SYSCTL_CHILDREN(fl_node);
67164c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
67174c7070dbSScott Long 				       CTLFLAG_RD,
67184c7070dbSScott Long 				       &fl->ifl_pidx, 1, "Producer Index");
67194c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
67204c7070dbSScott Long 				       CTLFLAG_RD,
67214c7070dbSScott Long 				       &fl->ifl_cidx, 1, "Consumer Index");
67224c7070dbSScott Long 			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
67234c7070dbSScott Long 				       CTLFLAG_RD,
67244c7070dbSScott Long 				       &fl->ifl_credits, 1, "credits available");
67254c7070dbSScott Long #if MEMORY_LOGGING
67264c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
67274c7070dbSScott Long 					CTLFLAG_RD,
67284c7070dbSScott Long 					&fl->ifl_m_enqueued, "mbufs allocated");
67294c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
67304c7070dbSScott Long 					CTLFLAG_RD,
67314c7070dbSScott Long 					&fl->ifl_m_dequeued, "mbufs freed");
67324c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
67334c7070dbSScott Long 					CTLFLAG_RD,
67344c7070dbSScott Long 					&fl->ifl_cl_enqueued, "clusters allocated");
67354c7070dbSScott Long 			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
67364c7070dbSScott Long 					CTLFLAG_RD,
67374c7070dbSScott Long 					&fl->ifl_cl_dequeued, "clusters freed");
67384c7070dbSScott Long #endif
67394c7070dbSScott Long 
67404c7070dbSScott Long 		}
67414c7070dbSScott Long 	}
67424c7070dbSScott Long 
67434c7070dbSScott Long }
674495246abbSSean Bruno 
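/*
 * Request that the interface be reinitialized.  This only marks the context
 * with IFC_DO_RESET under the state lock; the reset itself is carried out
 * later by iflib's admin task (not shown in this excerpt).
 */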
674577c1fcecSEric Joyner void
674677c1fcecSEric Joyner iflib_request_reset(if_ctx_t ctx)
674777c1fcecSEric Joyner {
674877c1fcecSEric Joyner 
674977c1fcecSEric Joyner 	STATE_LOCK(ctx);
675077c1fcecSEric Joyner 	ctx->ifc_flags |= IFC_DO_RESET;
675177c1fcecSEric Joyner 	STATE_UNLOCK(ctx);
675277c1fcecSEric Joyner }
675377c1fcecSEric Joyner 
675495246abbSSean Bruno #ifndef __NO_STRICT_ALIGNMENT
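/*
 * On strict-alignment architectures the 14-byte Ethernet header leaves the
 * payload (e.g. the IP header) misaligned.  Realign a received frame either
 * by copying it forward by ETHER_HDR_LEN within its own buffer, or, when it
 * is too large for that, by moving the Ethernet header into a newly
 * allocated mbuf that is prepended to the chain.
 */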
675595246abbSSean Bruno static struct mbuf *
675695246abbSSean Bruno iflib_fixup_rx(struct mbuf *m)
675795246abbSSean Bruno {
675895246abbSSean Bruno 	struct mbuf *n;
675995246abbSSean Bruno 
676095246abbSSean Bruno 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
676195246abbSSean Bruno 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
676295246abbSSean Bruno 		m->m_data += ETHER_HDR_LEN;
676395246abbSSean Bruno 		n = m;
676495246abbSSean Bruno 	} else {
676595246abbSSean Bruno 		MGETHDR(n, M_NOWAIT, MT_DATA);
676695246abbSSean Bruno 		if (n == NULL) {
676795246abbSSean Bruno 			m_freem(m);
676895246abbSSean Bruno 			return (NULL);
676995246abbSSean Bruno 		}
677095246abbSSean Bruno 		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
677195246abbSSean Bruno 		m->m_data += ETHER_HDR_LEN;
677295246abbSSean Bruno 		m->m_len -= ETHER_HDR_LEN;
677395246abbSSean Bruno 		n->m_len = ETHER_HDR_LEN;
677495246abbSSean Bruno 		M_MOVE_PKTHDR(n, m);
677595246abbSSean Bruno 		n->m_next = m;
677695246abbSSean Bruno 	}
677795246abbSSean Bruno 	return (n);
677895246abbSSean Bruno }
677995246abbSSean Bruno #endif
678094618825SMark Johnston 
67817790c8c1SConrad Meyer #ifdef DEBUGNET
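/*
 * debugnet(4) interface glue.
 *
 * Report the receive geometry (number of RX queues, and the free-list size
 * and cluster size of the first queue) so that debugnet can size its
 * pre-allocated buffer pool; the values are read under the context lock.
 */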
678294618825SMark Johnston static void
67837790c8c1SConrad Meyer iflib_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
678494618825SMark Johnston {
678594618825SMark Johnston 	if_ctx_t ctx;
678694618825SMark Johnston 
678794618825SMark Johnston 	ctx = if_getsoftc(ifp);
678894618825SMark Johnston 	CTX_LOCK(ctx);
678994618825SMark Johnston 	*nrxr = NRXQSETS(ctx);
679094618825SMark Johnston 	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
679194618825SMark Johnston 	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
679294618825SMark Johnston 	CTX_UNLOCK(ctx);
679394618825SMark Johnston }
679494618825SMark Johnston 
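/*
 * On DEBUGNET_START, cache each free list's mbuf cluster zone and disable
 * TX batching so every packet rings the doorbell immediately, since debugnet
 * operates in a polled, single-threaded context.  Other events are ignored.
 */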
679594618825SMark Johnston static void
67967790c8c1SConrad Meyer iflib_debugnet_event(if_t ifp, enum debugnet_ev event)
679794618825SMark Johnston {
679894618825SMark Johnston 	if_ctx_t ctx;
679994618825SMark Johnston 	if_softc_ctx_t scctx;
680094618825SMark Johnston 	iflib_fl_t fl;
680194618825SMark Johnston 	iflib_rxq_t rxq;
680294618825SMark Johnston 	int i, j;
680394618825SMark Johnston 
680494618825SMark Johnston 	ctx = if_getsoftc(ifp);
680594618825SMark Johnston 	scctx = &ctx->ifc_softc_ctx;
680694618825SMark Johnston 
680794618825SMark Johnston 	switch (event) {
68087790c8c1SConrad Meyer 	case DEBUGNET_START:
680994618825SMark Johnston 		for (i = 0; i < scctx->isc_nrxqsets; i++) {
681094618825SMark Johnston 			rxq = &ctx->ifc_rxqs[i];
681194618825SMark Johnston 			for (j = 0; j < rxq->ifr_nfl; j++) {
681294618825SMark Johnston 				fl = rxq->ifr_fl;
681394618825SMark Johnston 				fl = &rxq->ifr_fl[j];
681494618825SMark Johnston 			}
681594618825SMark Johnston 		}
681694618825SMark Johnston 		iflib_no_tx_batch = 1;
681794618825SMark Johnston 		break;
681894618825SMark Johnston 	default:
681994618825SMark Johnston 		break;
682094618825SMark Johnston 	}
682194618825SMark Johnston }
682294618825SMark Johnston 
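/*
 * Transmit one packet for debugnet, bypassing the mp_ring and encapsulating
 * it directly onto TX queue 0; the doorbell is rung as soon as the encap
 * succeeds.  Fails with EBUSY unless the interface is running and not
 * marked OACTIVE.
 */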
682394618825SMark Johnston static int
68247790c8c1SConrad Meyer iflib_debugnet_transmit(if_t ifp, struct mbuf *m)
682594618825SMark Johnston {
682694618825SMark Johnston 	if_ctx_t ctx;
682794618825SMark Johnston 	iflib_txq_t txq;
682894618825SMark Johnston 	int error;
682994618825SMark Johnston 
683094618825SMark Johnston 	ctx = if_getsoftc(ifp);
683194618825SMark Johnston 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
683294618825SMark Johnston 	    IFF_DRV_RUNNING)
683394618825SMark Johnston 		return (EBUSY);
683494618825SMark Johnston 
683594618825SMark Johnston 	txq = &ctx->ifc_txqs[0];
683694618825SMark Johnston 	error = iflib_encap(txq, &m);
683794618825SMark Johnston 	if (error == 0)
683894618825SMark Johnston 		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
683994618825SMark Johnston 	return (error);
684094618825SMark Johnston }
684194618825SMark Johnston 
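/*
 * Poll the hardware for debugnet: reclaim completed descriptors on TX
 * queue 0, then drain every RX queue (within a network epoch) in place of
 * the normal interrupt and taskqueue processing.
 */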
684294618825SMark Johnston static int
68437790c8c1SConrad Meyer iflib_debugnet_poll(if_t ifp, int count)
684494618825SMark Johnston {
68450b8df657SGleb Smirnoff 	struct epoch_tracker et;
684694618825SMark Johnston 	if_ctx_t ctx;
684794618825SMark Johnston 	if_softc_ctx_t scctx;
684894618825SMark Johnston 	iflib_txq_t txq;
684994618825SMark Johnston 	int i;
685094618825SMark Johnston 
685194618825SMark Johnston 	ctx = if_getsoftc(ifp);
685294618825SMark Johnston 	scctx = &ctx->ifc_softc_ctx;
685394618825SMark Johnston 
685494618825SMark Johnston 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
685594618825SMark Johnston 	    IFF_DRV_RUNNING)
685694618825SMark Johnston 		return (EBUSY);
685794618825SMark Johnston 
685894618825SMark Johnston 	txq = &ctx->ifc_txqs[0];
685994618825SMark Johnston 	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
686094618825SMark Johnston 
68610b8df657SGleb Smirnoff 	NET_EPOCH_ENTER(et);
686294618825SMark Johnston 	for (i = 0; i < scctx->isc_nrxqsets; i++)
686394618825SMark Johnston 		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
68640b8df657SGleb Smirnoff 	NET_EPOCH_EXIT(et);
686594618825SMark Johnston 	return (0);
686694618825SMark Johnston }
68677790c8c1SConrad Meyer #endif /* DEBUGNET */
6868