xref: /freebsd/sys/dev/cxgbe/t4_sge.c (revision c3322cb91ca99bcc662641b5b08e9501de6780da)
154e4ee71SNavdeep Parhar /*-
254e4ee71SNavdeep Parhar  * Copyright (c) 2011 Chelsio Communications, Inc.
354e4ee71SNavdeep Parhar  * All rights reserved.
454e4ee71SNavdeep Parhar  * Written by: Navdeep Parhar <np@FreeBSD.org>
554e4ee71SNavdeep Parhar  *
654e4ee71SNavdeep Parhar  * Redistribution and use in source and binary forms, with or without
754e4ee71SNavdeep Parhar  * modification, are permitted provided that the following conditions
854e4ee71SNavdeep Parhar  * are met:
954e4ee71SNavdeep Parhar  * 1. Redistributions of source code must retain the above copyright
1054e4ee71SNavdeep Parhar  *    notice, this list of conditions and the following disclaimer.
1154e4ee71SNavdeep Parhar  * 2. Redistributions in binary form must reproduce the above copyright
1254e4ee71SNavdeep Parhar  *    notice, this list of conditions and the following disclaimer in the
1354e4ee71SNavdeep Parhar  *    documentation and/or other materials provided with the distribution.
1454e4ee71SNavdeep Parhar  *
1554e4ee71SNavdeep Parhar  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1654e4ee71SNavdeep Parhar  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1754e4ee71SNavdeep Parhar  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1854e4ee71SNavdeep Parhar  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
1954e4ee71SNavdeep Parhar  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2054e4ee71SNavdeep Parhar  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2154e4ee71SNavdeep Parhar  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2254e4ee71SNavdeep Parhar  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2354e4ee71SNavdeep Parhar  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2454e4ee71SNavdeep Parhar  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2554e4ee71SNavdeep Parhar  * SUCH DAMAGE.
2654e4ee71SNavdeep Parhar  */
2754e4ee71SNavdeep Parhar 
2854e4ee71SNavdeep Parhar #include <sys/cdefs.h>
2954e4ee71SNavdeep Parhar __FBSDID("$FreeBSD$");
3054e4ee71SNavdeep Parhar 
3154e4ee71SNavdeep Parhar #include "opt_inet.h"
32a1ea9a82SNavdeep Parhar #include "opt_inet6.h"
3354e4ee71SNavdeep Parhar 
3454e4ee71SNavdeep Parhar #include <sys/types.h>
35*c3322cb9SGleb Smirnoff #include <sys/eventhandler.h>
3654e4ee71SNavdeep Parhar #include <sys/mbuf.h>
3754e4ee71SNavdeep Parhar #include <sys/socket.h>
3854e4ee71SNavdeep Parhar #include <sys/kernel.h>
3909fe6320SNavdeep Parhar #include <sys/kdb.h>
40ecb79ca4SNavdeep Parhar #include <sys/malloc.h>
41ecb79ca4SNavdeep Parhar #include <sys/queue.h>
42ecb79ca4SNavdeep Parhar #include <sys/taskqueue.h>
43480e603cSNavdeep Parhar #include <sys/time.h>
4454e4ee71SNavdeep Parhar #include <sys/sysctl.h>
45733b9277SNavdeep Parhar #include <sys/smp.h>
4654e4ee71SNavdeep Parhar #include <net/bpf.h>
4754e4ee71SNavdeep Parhar #include <net/ethernet.h>
4854e4ee71SNavdeep Parhar #include <net/if.h>
4954e4ee71SNavdeep Parhar #include <net/if_vlan_var.h>
5054e4ee71SNavdeep Parhar #include <netinet/in.h>
5154e4ee71SNavdeep Parhar #include <netinet/ip.h>
52a1ea9a82SNavdeep Parhar #include <netinet/ip6.h>
5354e4ee71SNavdeep Parhar #include <netinet/tcp.h>
5464db8966SDimitry Andric #include <machine/md_var.h>
5554e4ee71SNavdeep Parhar 
5654e4ee71SNavdeep Parhar #include "common/common.h"
5754e4ee71SNavdeep Parhar #include "common/t4_regs.h"
5854e4ee71SNavdeep Parhar #include "common/t4_regs_values.h"
5954e4ee71SNavdeep Parhar #include "common/t4_msg.h"
6054e4ee71SNavdeep Parhar 
61d14b0ac1SNavdeep Parhar #ifdef T4_PKT_TIMESTAMP
62d14b0ac1SNavdeep Parhar #define RX_COPY_THRESHOLD (MINCLSIZE - 8)
63d14b0ac1SNavdeep Parhar #else
64d14b0ac1SNavdeep Parhar #define RX_COPY_THRESHOLD MINCLSIZE
65d14b0ac1SNavdeep Parhar #endif
66d14b0ac1SNavdeep Parhar 
679fb8886bSNavdeep Parhar /*
689fb8886bSNavdeep Parhar  * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
699fb8886bSNavdeep Parhar  * 0-7 are valid values.
709fb8886bSNavdeep Parhar  */
719fb8886bSNavdeep Parhar static int fl_pktshift = 2;
729fb8886bSNavdeep Parhar TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift);
7354e4ee71SNavdeep Parhar 
749fb8886bSNavdeep Parhar /*
759fb8886bSNavdeep Parhar  * Pad ethernet payload up to this boundary.
769fb8886bSNavdeep Parhar  * -1: driver should figure out a good value.
771458bff9SNavdeep Parhar  *  0: disable padding.
781458bff9SNavdeep Parhar  *  Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
799fb8886bSNavdeep Parhar  */
809fb8886bSNavdeep Parhar static int fl_pad = -1;
819fb8886bSNavdeep Parhar TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad);
829fb8886bSNavdeep Parhar 
839fb8886bSNavdeep Parhar /*
849fb8886bSNavdeep Parhar  * Status page length.
859fb8886bSNavdeep Parhar  * -1: driver should figure out a good value.
869fb8886bSNavdeep Parhar  *  64 or 128 are the only other valid values.
879fb8886bSNavdeep Parhar  */
889fb8886bSNavdeep Parhar static int spg_len = -1;
899fb8886bSNavdeep Parhar TUNABLE_INT("hw.cxgbe.spg_len", &spg_len);
909fb8886bSNavdeep Parhar 
919fb8886bSNavdeep Parhar /*
929fb8886bSNavdeep Parhar  * Congestion drops.
939fb8886bSNavdeep Parhar  * -1: no congestion feedback (not recommended).
949fb8886bSNavdeep Parhar  *  0: backpressure the channel instead of dropping packets right away.
959fb8886bSNavdeep Parhar  *  1: no backpressure, drop packets for the congested queue immediately.
969fb8886bSNavdeep Parhar  */
979fb8886bSNavdeep Parhar static int cong_drop = 0;
989fb8886bSNavdeep Parhar TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop);
9954e4ee71SNavdeep Parhar 
1001458bff9SNavdeep Parhar /*
1011458bff9SNavdeep Parhar  * Deliver multiple frames in the same free list buffer if they fit.
1021458bff9SNavdeep Parhar  * -1: let the driver decide whether to enable buffer packing or not.
1031458bff9SNavdeep Parhar  *  0: disable buffer packing.
1041458bff9SNavdeep Parhar  *  1: enable buffer packing.
1051458bff9SNavdeep Parhar  */
1061458bff9SNavdeep Parhar static int buffer_packing = -1;
1071458bff9SNavdeep Parhar TUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing);
1081458bff9SNavdeep Parhar 
1091458bff9SNavdeep Parhar /*
1101458bff9SNavdeep Parhar  * Start next frame in a packed buffer at this boundary.
1111458bff9SNavdeep Parhar  * -1: driver should figure out a good value.
1121458bff9SNavdeep Parhar  * T4:
1131458bff9SNavdeep Parhar  * ---
1141458bff9SNavdeep Parhar  * if fl_pad != 0
1151458bff9SNavdeep Parhar  * 	value specified here will be overridden by fl_pad.
1161458bff9SNavdeep Parhar  * else
1171458bff9SNavdeep Parhar  * 	power of 2 from 32 to 4096 (both inclusive) is a valid value here.
1181458bff9SNavdeep Parhar  * T5:
1191458bff9SNavdeep Parhar  * ---
1201458bff9SNavdeep Parhar  * 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
1211458bff9SNavdeep Parhar  */
1221458bff9SNavdeep Parhar static int fl_pack = -1;
1231458bff9SNavdeep Parhar static int t4_fl_pack;
1241458bff9SNavdeep Parhar static int t5_fl_pack;
1251458bff9SNavdeep Parhar TUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack);
1261458bff9SNavdeep Parhar 
12754e4ee71SNavdeep Parhar /* Used to track coalesced tx work request */
12854e4ee71SNavdeep Parhar struct txpkts {
12954e4ee71SNavdeep Parhar 	uint64_t *flitp;	/* ptr to flit where next pkt should start */
13054e4ee71SNavdeep Parhar 	uint8_t npkt;		/* # of packets in this work request */
13154e4ee71SNavdeep Parhar 	uint8_t nflits;		/* # of flits used by this work request */
13254e4ee71SNavdeep Parhar 	uint16_t plen;		/* total payload (sum of all packets) */
13354e4ee71SNavdeep Parhar };
13454e4ee71SNavdeep Parhar 
13554e4ee71SNavdeep Parhar /* A packet's SGL.  This + m_pkthdr has all info needed for tx */
13654e4ee71SNavdeep Parhar struct sgl {
13754e4ee71SNavdeep Parhar 	int nsegs;		/* # of segments in the SGL, 0 means imm. tx */
13854e4ee71SNavdeep Parhar 	int nflits;		/* # of flits needed for the SGL */
13954e4ee71SNavdeep Parhar 	bus_dma_segment_t seg[TX_SGL_SEGS];
14054e4ee71SNavdeep Parhar };
14154e4ee71SNavdeep Parhar 
142733b9277SNavdeep Parhar static int service_iq(struct sge_iq *, int);
1431458bff9SNavdeep Parhar static struct mbuf *get_fl_payload1(struct adapter *, struct sge_fl *, uint32_t,
1441458bff9SNavdeep Parhar     int *);
1451458bff9SNavdeep Parhar static struct mbuf *get_fl_payload2(struct adapter *, struct sge_fl *, uint32_t,
146733b9277SNavdeep Parhar     int *);
147733b9277SNavdeep Parhar static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
14854e4ee71SNavdeep Parhar static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
1495323ca8fSNavdeep Parhar     int);
1501458bff9SNavdeep Parhar static inline void init_fl(struct adapter *, struct sge_fl *, int, int, int,
1511458bff9SNavdeep Parhar     char *);
152733b9277SNavdeep Parhar static inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t,
153733b9277SNavdeep Parhar     char *);
15454e4ee71SNavdeep Parhar static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
15554e4ee71SNavdeep Parhar     bus_addr_t *, void **);
15654e4ee71SNavdeep Parhar static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
15754e4ee71SNavdeep Parhar     void *);
15854e4ee71SNavdeep Parhar static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
159bc14b14dSNavdeep Parhar     int, int);
16054e4ee71SNavdeep Parhar static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
161733b9277SNavdeep Parhar static int alloc_fwq(struct adapter *);
162733b9277SNavdeep Parhar static int free_fwq(struct adapter *);
163733b9277SNavdeep Parhar static int alloc_mgmtq(struct adapter *);
164733b9277SNavdeep Parhar static int free_mgmtq(struct adapter *);
165733b9277SNavdeep Parhar static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int,
166733b9277SNavdeep Parhar     struct sysctl_oid *);
16754e4ee71SNavdeep Parhar static int free_rxq(struct port_info *, struct sge_rxq *);
16809fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
169733b9277SNavdeep Parhar static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int,
170733b9277SNavdeep Parhar     struct sysctl_oid *);
171733b9277SNavdeep Parhar static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *);
172733b9277SNavdeep Parhar #endif
173733b9277SNavdeep Parhar static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
174733b9277SNavdeep Parhar static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
17509fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
176733b9277SNavdeep Parhar static int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
177733b9277SNavdeep Parhar #endif
178733b9277SNavdeep Parhar static int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *);
179733b9277SNavdeep Parhar static int free_eq(struct adapter *, struct sge_eq *);
180733b9277SNavdeep Parhar static int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *,
181733b9277SNavdeep Parhar     struct sysctl_oid *);
182733b9277SNavdeep Parhar static int free_wrq(struct adapter *, struct sge_wrq *);
183733b9277SNavdeep Parhar static int alloc_txq(struct port_info *, struct sge_txq *, int,
184733b9277SNavdeep Parhar     struct sysctl_oid *);
18554e4ee71SNavdeep Parhar static int free_txq(struct port_info *, struct sge_txq *);
18654e4ee71SNavdeep Parhar static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
18754e4ee71SNavdeep Parhar static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
18854e4ee71SNavdeep Parhar static inline void iq_next(struct sge_iq *);
18954e4ee71SNavdeep Parhar static inline void ring_fl_db(struct adapter *, struct sge_fl *);
190733b9277SNavdeep Parhar static int refill_fl(struct adapter *, struct sge_fl *, int);
191733b9277SNavdeep Parhar static void refill_sfl(void *);
19254e4ee71SNavdeep Parhar static int alloc_fl_sdesc(struct sge_fl *);
1931458bff9SNavdeep Parhar static void free_fl_sdesc(struct adapter *, struct sge_fl *);
1941458bff9SNavdeep Parhar static void set_fl_tag_idx(struct adapter *, struct sge_fl *, int);
195733b9277SNavdeep Parhar static void add_fl_to_sfl(struct adapter *, struct sge_fl *);
19654e4ee71SNavdeep Parhar 
19754e4ee71SNavdeep Parhar static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
19854e4ee71SNavdeep Parhar static int free_pkt_sgl(struct sge_txq *, struct sgl *);
19954e4ee71SNavdeep Parhar static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
20054e4ee71SNavdeep Parhar     struct sgl *);
20154e4ee71SNavdeep Parhar static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
20254e4ee71SNavdeep Parhar     struct mbuf *, struct sgl *);
20354e4ee71SNavdeep Parhar static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
20454e4ee71SNavdeep Parhar static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
20554e4ee71SNavdeep Parhar     struct txpkts *, struct mbuf *, struct sgl *);
20654e4ee71SNavdeep Parhar static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
20754e4ee71SNavdeep Parhar static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
208f7dfe243SNavdeep Parhar static inline void ring_eq_db(struct adapter *, struct sge_eq *);
209e874ff7aSNavdeep Parhar static inline int reclaimable(struct sge_eq *);
210f7dfe243SNavdeep Parhar static int reclaim_tx_descs(struct sge_txq *, int, int);
21154e4ee71SNavdeep Parhar static void write_eqflush_wr(struct sge_eq *);
21254e4ee71SNavdeep Parhar static __be64 get_flit(bus_dma_segment_t *, int, int);
213733b9277SNavdeep Parhar static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
214733b9277SNavdeep Parhar     struct mbuf *);
2151b4cc91fSNavdeep Parhar static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
216733b9277SNavdeep Parhar     struct mbuf *);
21754e4ee71SNavdeep Parhar 
21856599263SNavdeep Parhar static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
219f7dfe243SNavdeep Parhar 
22094586193SNavdeep Parhar /*
2211458bff9SNavdeep Parhar  * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
22294586193SNavdeep Parhar  */
22394586193SNavdeep Parhar void
22494586193SNavdeep Parhar t4_sge_modload(void)
22594586193SNavdeep Parhar {
2261458bff9SNavdeep Parhar 	int pad;
22794586193SNavdeep Parhar 
2281458bff9SNavdeep Parhar 	/* set pad to a reasonable powerof2 between 16 and 4096 (inclusive) */
2291458bff9SNavdeep Parhar #if defined(__i386__) || defined(__amd64__)
2301458bff9SNavdeep Parhar 	pad = max(cpu_clflush_line_size, 16);
2311458bff9SNavdeep Parhar #else
2321458bff9SNavdeep Parhar 	pad = max(CACHE_LINE_SIZE, 16);
2331458bff9SNavdeep Parhar #endif
2341458bff9SNavdeep Parhar 	pad = min(pad, 4096);
2354defc81bSNavdeep Parhar 
2369fb8886bSNavdeep Parhar 	if (fl_pktshift < 0 || fl_pktshift > 7) {
2379fb8886bSNavdeep Parhar 		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
2389fb8886bSNavdeep Parhar 		    " using 2 instead.\n", fl_pktshift);
2399fb8886bSNavdeep Parhar 		fl_pktshift = 2;
2409fb8886bSNavdeep Parhar 	}
2419fb8886bSNavdeep Parhar 
2421458bff9SNavdeep Parhar 	if (fl_pad != 0 &&
2431458bff9SNavdeep Parhar 	    (fl_pad < 32 || fl_pad > 4096 || !powerof2(fl_pad))) {
2449fb8886bSNavdeep Parhar 
2459fb8886bSNavdeep Parhar 		if (fl_pad != -1) {
2469fb8886bSNavdeep Parhar 			printf("Invalid hw.cxgbe.fl_pad value (%d),"
2471458bff9SNavdeep Parhar 			    " using %d instead.\n", fl_pad, max(pad, 32));
2489fb8886bSNavdeep Parhar 		}
2491458bff9SNavdeep Parhar 		fl_pad = max(pad, 32);
2509fb8886bSNavdeep Parhar 	}
2519fb8886bSNavdeep Parhar 
2521458bff9SNavdeep Parhar 	/*
2531458bff9SNavdeep Parhar 	 * T4 has the same pad and pack boundary.  If a pad boundary is set,
2541458bff9SNavdeep Parhar 	 * pack boundary must be set to the same value.  Otherwise take the
2551458bff9SNavdeep Parhar 	 * specified value or auto-calculate something reasonable.
2561458bff9SNavdeep Parhar 	 */
2571458bff9SNavdeep Parhar 	if (fl_pad)
2581458bff9SNavdeep Parhar 		t4_fl_pack = fl_pad;
2591458bff9SNavdeep Parhar 	else if (fl_pack < 32 || fl_pack > 4096 || !powerof2(fl_pack))
2601458bff9SNavdeep Parhar 		t4_fl_pack = max(pad, 32);
2611458bff9SNavdeep Parhar 	else
2621458bff9SNavdeep Parhar 		t4_fl_pack = fl_pack;
2631458bff9SNavdeep Parhar 
2641458bff9SNavdeep Parhar 	/* T5's pack boundary is independent of the pad boundary. */
2651458bff9SNavdeep Parhar 	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
2661458bff9SNavdeep Parhar 	    !powerof2(fl_pack))
2671458bff9SNavdeep Parhar 	       t5_fl_pack = max(pad, 64);
2681458bff9SNavdeep Parhar 	else
2691458bff9SNavdeep Parhar 	       t5_fl_pack = fl_pack;
2701458bff9SNavdeep Parhar 
2719fb8886bSNavdeep Parhar 	if (spg_len != 64 && spg_len != 128) {
2729fb8886bSNavdeep Parhar 		int len;
2739fb8886bSNavdeep Parhar 
2749fb8886bSNavdeep Parhar #if defined(__i386__) || defined(__amd64__)
2759fb8886bSNavdeep Parhar 		len = cpu_clflush_line_size > 64 ? 128 : 64;
2769fb8886bSNavdeep Parhar #else
2779fb8886bSNavdeep Parhar 		len = 64;
2789fb8886bSNavdeep Parhar #endif
2799fb8886bSNavdeep Parhar 		if (spg_len != -1) {
2809fb8886bSNavdeep Parhar 			printf("Invalid hw.cxgbe.spg_len value (%d),"
2819fb8886bSNavdeep Parhar 			    " using %d instead.\n", spg_len, len);
2829fb8886bSNavdeep Parhar 		}
2839fb8886bSNavdeep Parhar 		spg_len = len;
2849fb8886bSNavdeep Parhar 	}
2859fb8886bSNavdeep Parhar 
2869fb8886bSNavdeep Parhar 	if (cong_drop < -1 || cong_drop > 1) {
2879fb8886bSNavdeep Parhar 		printf("Invalid hw.cxgbe.cong_drop value (%d),"
2889fb8886bSNavdeep Parhar 		    " using 0 instead.\n", cong_drop);
2899fb8886bSNavdeep Parhar 		cong_drop = 0;
2909fb8886bSNavdeep Parhar 	}
29194586193SNavdeep Parhar }
29294586193SNavdeep Parhar 
293d14b0ac1SNavdeep Parhar void
294d14b0ac1SNavdeep Parhar t4_init_sge_cpl_handlers(struct adapter *sc)
29554e4ee71SNavdeep Parhar {
29654e4ee71SNavdeep Parhar 
297d14b0ac1SNavdeep Parhar 	t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg);
298d14b0ac1SNavdeep Parhar 	t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg);
299d14b0ac1SNavdeep Parhar 	t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
300d14b0ac1SNavdeep Parhar 	t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx);
301d14b0ac1SNavdeep Parhar 	t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
302d14b0ac1SNavdeep Parhar }
303d14b0ac1SNavdeep Parhar 
304cf738022SNavdeep Parhar /*
305cf738022SNavdeep Parhar  * adap->params.vpd.cclk must be set up before this is called.
306cf738022SNavdeep Parhar  */
307d14b0ac1SNavdeep Parhar void
308d14b0ac1SNavdeep Parhar t4_tweak_chip_settings(struct adapter *sc)
309d14b0ac1SNavdeep Parhar {
310d14b0ac1SNavdeep Parhar 	int i;
311d14b0ac1SNavdeep Parhar 	uint32_t v, m;
312d14b0ac1SNavdeep Parhar 	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
313cf738022SNavdeep Parhar 	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
314d14b0ac1SNavdeep Parhar 	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
315d14b0ac1SNavdeep Parhar 	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
3161458bff9SNavdeep Parhar 	int sw_flbuf_sizes[] = {
3171458bff9SNavdeep Parhar 		MCLBYTES,
3181458bff9SNavdeep Parhar #if MJUMPAGESIZE != MCLBYTES
3191458bff9SNavdeep Parhar 		MJUMPAGESIZE,
3201458bff9SNavdeep Parhar #endif
3211458bff9SNavdeep Parhar 		MJUM9BYTES,
3221458bff9SNavdeep Parhar 		MJUM16BYTES,
3231458bff9SNavdeep Parhar 		MJUMPAGESIZE - MSIZE
3241458bff9SNavdeep Parhar 	};
325d14b0ac1SNavdeep Parhar 
326d14b0ac1SNavdeep Parhar 	KASSERT(sc->flags & MASTER_PF,
327d14b0ac1SNavdeep Parhar 	    ("%s: trying to change chip settings when not master.", __func__));
328d14b0ac1SNavdeep Parhar 
3291458bff9SNavdeep Parhar 	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
330d14b0ac1SNavdeep Parhar 	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
3314defc81bSNavdeep Parhar 	    V_EGRSTATUSPAGESIZE(spg_len == 128);
3321458bff9SNavdeep Parhar 	if (is_t4(sc) && (fl_pad || buffer_packing)) {
3331458bff9SNavdeep Parhar 		/* t4_fl_pack has the correct value even when fl_pad = 0 */
3341458bff9SNavdeep Parhar 		m |= V_INGPADBOUNDARY(M_INGPADBOUNDARY);
3351458bff9SNavdeep Parhar 		v |= V_INGPADBOUNDARY(ilog2(t4_fl_pack) - 5);
3361458bff9SNavdeep Parhar 	} else if (is_t5(sc) && fl_pad) {
3371458bff9SNavdeep Parhar 		m |= V_INGPADBOUNDARY(M_INGPADBOUNDARY);
3381458bff9SNavdeep Parhar 		v |= V_INGPADBOUNDARY(ilog2(fl_pad) - 5);
3391458bff9SNavdeep Parhar 	}
340d14b0ac1SNavdeep Parhar 	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
34154e4ee71SNavdeep Parhar 
3421458bff9SNavdeep Parhar 	if (is_t5(sc) && buffer_packing) {
3431458bff9SNavdeep Parhar 		m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
3441458bff9SNavdeep Parhar 		if (t5_fl_pack == 16)
3451458bff9SNavdeep Parhar 			v = V_INGPACKBOUNDARY(0);
3461458bff9SNavdeep Parhar 		else
3471458bff9SNavdeep Parhar 			v = V_INGPACKBOUNDARY(ilog2(t5_fl_pack) - 5);
3481458bff9SNavdeep Parhar 		t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
3491458bff9SNavdeep Parhar 	}
3501458bff9SNavdeep Parhar 
351d14b0ac1SNavdeep Parhar 	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
352733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
353733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
354733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
355733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
356733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
357733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
358733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
359d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);
360733b9277SNavdeep Parhar 
3611458bff9SNavdeep Parhar 	for (i = 0; i < min(nitems(sw_flbuf_sizes), 16); i++) {
36254e4ee71SNavdeep Parhar 		t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
3631458bff9SNavdeep Parhar 		    sw_flbuf_sizes[i]);
36454e4ee71SNavdeep Parhar 	}
36554e4ee71SNavdeep Parhar 
366d14b0ac1SNavdeep Parhar 	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
367d14b0ac1SNavdeep Parhar 	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
368d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);
36954e4ee71SNavdeep Parhar 
370cf738022SNavdeep Parhar 	KASSERT(intr_timer[0] <= timer_max,
371cf738022SNavdeep Parhar 	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
372cf738022SNavdeep Parhar 	    timer_max));
373cf738022SNavdeep Parhar 	for (i = 1; i < nitems(intr_timer); i++) {
374cf738022SNavdeep Parhar 		KASSERT(intr_timer[i] >= intr_timer[i - 1],
375cf738022SNavdeep Parhar 		    ("%s: timers not listed in increasing order (%d)",
376cf738022SNavdeep Parhar 		    __func__, i));
377cf738022SNavdeep Parhar 
378cf738022SNavdeep Parhar 		while (intr_timer[i] > timer_max) {
379cf738022SNavdeep Parhar 			if (i == nitems(intr_timer) - 1) {
380cf738022SNavdeep Parhar 				intr_timer[i] = timer_max;
381cf738022SNavdeep Parhar 				break;
382cf738022SNavdeep Parhar 			}
383cf738022SNavdeep Parhar 			intr_timer[i] += intr_timer[i - 1];
384cf738022SNavdeep Parhar 			intr_timer[i] /= 2;
385cf738022SNavdeep Parhar 		}
386cf738022SNavdeep Parhar 	}
387cf738022SNavdeep Parhar 
388d14b0ac1SNavdeep Parhar 	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
389d14b0ac1SNavdeep Parhar 	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
390d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
391d14b0ac1SNavdeep Parhar 	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
392d14b0ac1SNavdeep Parhar 	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
393d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
394d14b0ac1SNavdeep Parhar 	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
395d14b0ac1SNavdeep Parhar 	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
396d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);
39786e02bf2SNavdeep Parhar 
39886e02bf2SNavdeep Parhar 	if (cong_drop == 0) {
399d14b0ac1SNavdeep Parhar 		m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 |
400d14b0ac1SNavdeep Parhar 		    F_TUNNELCNGDROP3;
401d14b0ac1SNavdeep Parhar 		t4_set_reg_field(sc, A_TP_PARA_REG3, m, 0);
402733b9277SNavdeep Parhar 	}
403733b9277SNavdeep Parhar 
404d14b0ac1SNavdeep Parhar 	/* 4K, 16K, 64K, 256K DDP "page sizes" */
405d14b0ac1SNavdeep Parhar 	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
406d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);
407d14b0ac1SNavdeep Parhar 
408d14b0ac1SNavdeep Parhar 	m = v = F_TDDPTAGTCB;
409d14b0ac1SNavdeep Parhar 	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);
410d14b0ac1SNavdeep Parhar 
411d14b0ac1SNavdeep Parhar 	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
412d14b0ac1SNavdeep Parhar 	    F_RESETDDPOFFSET;
413d14b0ac1SNavdeep Parhar 	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
414d14b0ac1SNavdeep Parhar 	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
415d14b0ac1SNavdeep Parhar }
416d14b0ac1SNavdeep Parhar 
417d14b0ac1SNavdeep Parhar /*
418d14b0ac1SNavdeep Parhar  * XXX: driver really should be able to deal with unexpected settings.
419d14b0ac1SNavdeep Parhar  */
420d14b0ac1SNavdeep Parhar int
421d14b0ac1SNavdeep Parhar t4_read_chip_settings(struct adapter *sc)
422d14b0ac1SNavdeep Parhar {
423d14b0ac1SNavdeep Parhar 	struct sge *s = &sc->sge;
4241458bff9SNavdeep Parhar 	int i, j, n, rc = 0;
425d14b0ac1SNavdeep Parhar 	uint32_t m, v, r;
426d14b0ac1SNavdeep Parhar 	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
4271458bff9SNavdeep Parhar 	uint32_t sge_flbuf_sizes[16], sw_flbuf_sizes[] = {
4281458bff9SNavdeep Parhar 		MCLBYTES,
4291458bff9SNavdeep Parhar #if MJUMPAGESIZE != MCLBYTES
4301458bff9SNavdeep Parhar 		MJUMPAGESIZE,
4311458bff9SNavdeep Parhar #endif
4321458bff9SNavdeep Parhar 		MJUM9BYTES,
4331458bff9SNavdeep Parhar 		MJUM16BYTES
4341458bff9SNavdeep Parhar 	};
435d14b0ac1SNavdeep Parhar 
4361458bff9SNavdeep Parhar 	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
437d14b0ac1SNavdeep Parhar 	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
438d14b0ac1SNavdeep Parhar 	    V_EGRSTATUSPAGESIZE(spg_len == 128);
4391458bff9SNavdeep Parhar 	if (is_t4(sc) && (fl_pad || buffer_packing)) {
4401458bff9SNavdeep Parhar 		m |= V_INGPADBOUNDARY(M_INGPADBOUNDARY);
4411458bff9SNavdeep Parhar 		v |= V_INGPADBOUNDARY(ilog2(t4_fl_pack) - 5);
4421458bff9SNavdeep Parhar 	} else if (is_t5(sc) && fl_pad) {
4431458bff9SNavdeep Parhar 		m |= V_INGPADBOUNDARY(M_INGPADBOUNDARY);
4441458bff9SNavdeep Parhar 		v |= V_INGPADBOUNDARY(ilog2(fl_pad) - 5);
4451458bff9SNavdeep Parhar 	}
446d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_SGE_CONTROL);
447d14b0ac1SNavdeep Parhar 	if ((r & m) != v) {
448d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
449733b9277SNavdeep Parhar 		rc = EINVAL;
450733b9277SNavdeep Parhar 	}
451733b9277SNavdeep Parhar 
4521458bff9SNavdeep Parhar 	if (is_t5(sc) && buffer_packing) {
4531458bff9SNavdeep Parhar 		m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
4541458bff9SNavdeep Parhar 		if (t5_fl_pack == 16)
4551458bff9SNavdeep Parhar 			v = V_INGPACKBOUNDARY(0);
4561458bff9SNavdeep Parhar 		else
4571458bff9SNavdeep Parhar 			v = V_INGPACKBOUNDARY(ilog2(t5_fl_pack) - 5);
4581458bff9SNavdeep Parhar 		r = t4_read_reg(sc, A_SGE_CONTROL2);
4591458bff9SNavdeep Parhar 		if ((r & m) != v) {
4601458bff9SNavdeep Parhar 			device_printf(sc->dev,
4611458bff9SNavdeep Parhar 			    "invalid SGE_CONTROL2(0x%x)\n", r);
4621458bff9SNavdeep Parhar 			rc = EINVAL;
4631458bff9SNavdeep Parhar 		}
4641458bff9SNavdeep Parhar 	}
4651458bff9SNavdeep Parhar 
466d14b0ac1SNavdeep Parhar 	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
467d14b0ac1SNavdeep Parhar 	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
468d14b0ac1SNavdeep Parhar 	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
469d14b0ac1SNavdeep Parhar 	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
470d14b0ac1SNavdeep Parhar 	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
471d14b0ac1SNavdeep Parhar 	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
472d14b0ac1SNavdeep Parhar 	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
473d14b0ac1SNavdeep Parhar 	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
474d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE);
475d14b0ac1SNavdeep Parhar 	if (r != v) {
476d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
477733b9277SNavdeep Parhar 		rc = EINVAL;
478733b9277SNavdeep Parhar 	}
479733b9277SNavdeep Parhar 
4801458bff9SNavdeep Parhar 	/*
4811458bff9SNavdeep Parhar 	 * Make a list of SGE FL buffer sizes programmed in the chip and tally
4821458bff9SNavdeep Parhar 	 * it with the FL buffer sizes that we'd like to use.
4831458bff9SNavdeep Parhar 	 */
4841458bff9SNavdeep Parhar 	n = 0;
4851458bff9SNavdeep Parhar 	for (i = 0; i < nitems(sge_flbuf_sizes); i++) {
4861458bff9SNavdeep Parhar 		r = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i));
4871458bff9SNavdeep Parhar 		sge_flbuf_sizes[i] = r;
4881458bff9SNavdeep Parhar 		if (r == MJUMPAGESIZE - MSIZE &&
4891458bff9SNavdeep Parhar 		    (sc->flags & BUF_PACKING_OK) == 0) {
4901458bff9SNavdeep Parhar 			sc->flags |= BUF_PACKING_OK;
4911458bff9SNavdeep Parhar 			FL_BUF_HWTAG(sc, n) = i;
4921458bff9SNavdeep Parhar 			FL_BUF_SIZE(sc, n) = MJUMPAGESIZE - MSIZE;
4931458bff9SNavdeep Parhar 			FL_BUF_TYPE(sc, n) = m_gettype(MJUMPAGESIZE);
4941458bff9SNavdeep Parhar 			FL_BUF_ZONE(sc, n) = m_getzone(MJUMPAGESIZE);
4951458bff9SNavdeep Parhar 			n++;
4961458bff9SNavdeep Parhar 		}
4971458bff9SNavdeep Parhar 	}
4981458bff9SNavdeep Parhar 	for (i = 0; i < nitems(sw_flbuf_sizes); i++) {
4991458bff9SNavdeep Parhar 		for (j = 0; j < nitems(sge_flbuf_sizes); j++) {
5001458bff9SNavdeep Parhar 			if (sw_flbuf_sizes[i] != sge_flbuf_sizes[j])
5011458bff9SNavdeep Parhar 				continue;
5021458bff9SNavdeep Parhar 			FL_BUF_HWTAG(sc, n) = j;
5031458bff9SNavdeep Parhar 			FL_BUF_SIZE(sc, n) = sw_flbuf_sizes[i];
5041458bff9SNavdeep Parhar 			FL_BUF_TYPE(sc, n) = m_gettype(sw_flbuf_sizes[i]);
5051458bff9SNavdeep Parhar 			FL_BUF_ZONE(sc, n) = m_getzone(sw_flbuf_sizes[i]);
5061458bff9SNavdeep Parhar 			n++;
5071458bff9SNavdeep Parhar 			break;
5081458bff9SNavdeep Parhar 		}
5091458bff9SNavdeep Parhar 	}
5101458bff9SNavdeep Parhar 	if (n == 0) {
5111458bff9SNavdeep Parhar 		device_printf(sc->dev, "no usable SGE FL buffer size.\n");
5121458bff9SNavdeep Parhar 		rc = EINVAL;
5131458bff9SNavdeep Parhar 	} else if (n == 1 && (sc->flags & BUF_PACKING_OK)) {
514733b9277SNavdeep Parhar 		device_printf(sc->dev,
5151458bff9SNavdeep Parhar 		    "no usable SGE FL buffer size when not packing buffers.\n");
516733b9277SNavdeep Parhar 		rc = EINVAL;
517733b9277SNavdeep Parhar 	}
5181458bff9SNavdeep Parhar 	FL_BUF_SIZES(sc) = n;
519733b9277SNavdeep Parhar 
520d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD);
521d14b0ac1SNavdeep Parhar 	s->counter_val[0] = G_THRESHOLD_0(r);
522d14b0ac1SNavdeep Parhar 	s->counter_val[1] = G_THRESHOLD_1(r);
523d14b0ac1SNavdeep Parhar 	s->counter_val[2] = G_THRESHOLD_2(r);
524d14b0ac1SNavdeep Parhar 	s->counter_val[3] = G_THRESHOLD_3(r);
525733b9277SNavdeep Parhar 
526d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1);
527d14b0ac1SNavdeep Parhar 	s->timer_val[0] = G_TIMERVALUE0(r) / core_ticks_per_usec(sc);
528d14b0ac1SNavdeep Parhar 	s->timer_val[1] = G_TIMERVALUE1(r) / core_ticks_per_usec(sc);
529d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3);
530d14b0ac1SNavdeep Parhar 	s->timer_val[2] = G_TIMERVALUE2(r) / core_ticks_per_usec(sc);
531d14b0ac1SNavdeep Parhar 	s->timer_val[3] = G_TIMERVALUE3(r) / core_ticks_per_usec(sc);
532d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5);
533d14b0ac1SNavdeep Parhar 	s->timer_val[4] = G_TIMERVALUE4(r) / core_ticks_per_usec(sc);
534d14b0ac1SNavdeep Parhar 	s->timer_val[5] = G_TIMERVALUE5(r) / core_ticks_per_usec(sc);
535733b9277SNavdeep Parhar 
536d14b0ac1SNavdeep Parhar 	if (cong_drop == 0) {
537d14b0ac1SNavdeep Parhar 		m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 |
538d14b0ac1SNavdeep Parhar 		    F_TUNNELCNGDROP3;
539d14b0ac1SNavdeep Parhar 		r = t4_read_reg(sc, A_TP_PARA_REG3);
540d14b0ac1SNavdeep Parhar 		if (r & m) {
541d14b0ac1SNavdeep Parhar 			device_printf(sc->dev,
542d14b0ac1SNavdeep Parhar 			    "invalid TP_PARA_REG3(0x%x)\n", r);
543d14b0ac1SNavdeep Parhar 			rc = EINVAL;
544d14b0ac1SNavdeep Parhar 		}
545d14b0ac1SNavdeep Parhar 	}
546733b9277SNavdeep Parhar 
547d14b0ac1SNavdeep Parhar 	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
548d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
549d14b0ac1SNavdeep Parhar 	if (r != v) {
550d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
551d14b0ac1SNavdeep Parhar 		rc = EINVAL;
552d14b0ac1SNavdeep Parhar 	}
553733b9277SNavdeep Parhar 
554d14b0ac1SNavdeep Parhar 	m = v = F_TDDPTAGTCB;
555d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_ULP_RX_CTL);
556d14b0ac1SNavdeep Parhar 	if ((r & m) != v) {
557d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
558d14b0ac1SNavdeep Parhar 		rc = EINVAL;
559d14b0ac1SNavdeep Parhar 	}
560d14b0ac1SNavdeep Parhar 
561d14b0ac1SNavdeep Parhar 	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
562d14b0ac1SNavdeep Parhar 	    F_RESETDDPOFFSET;
563d14b0ac1SNavdeep Parhar 	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
564d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_TP_PARA_REG5);
565d14b0ac1SNavdeep Parhar 	if ((r & m) != v) {
566d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
567d14b0ac1SNavdeep Parhar 		rc = EINVAL;
568d14b0ac1SNavdeep Parhar 	}
569d14b0ac1SNavdeep Parhar 
570d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_SGE_CONM_CTRL);
571d14b0ac1SNavdeep Parhar 	s->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
572d14b0ac1SNavdeep Parhar 
573b3eda787SNavdeep Parhar 	/* egress queues: log2 of # of doorbells per BAR2 page */
574d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
575d14b0ac1SNavdeep Parhar 	r >>= S_QUEUESPERPAGEPF0 +
576d14b0ac1SNavdeep Parhar 	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
577b3eda787SNavdeep Parhar 	s->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
578b3eda787SNavdeep Parhar 
579b3eda787SNavdeep Parhar 	/* ingress queues: log2 of # of doorbells per BAR2 page */
580b3eda787SNavdeep Parhar 	r = t4_read_reg(sc, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
581b3eda787SNavdeep Parhar 	r >>= S_QUEUESPERPAGEPF0 +
582b3eda787SNavdeep Parhar 	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
583b3eda787SNavdeep Parhar 	s->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
584d14b0ac1SNavdeep Parhar 
585c337fa30SNavdeep Parhar 	t4_init_tp_params(sc);
586d14b0ac1SNavdeep Parhar 
587d14b0ac1SNavdeep Parhar 	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
588d14b0ac1SNavdeep Parhar 	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);
589d14b0ac1SNavdeep Parhar 
590733b9277SNavdeep Parhar 	return (rc);
59154e4ee71SNavdeep Parhar }
59254e4ee71SNavdeep Parhar 
59354e4ee71SNavdeep Parhar int
59454e4ee71SNavdeep Parhar t4_create_dma_tag(struct adapter *sc)
59554e4ee71SNavdeep Parhar {
59654e4ee71SNavdeep Parhar 	int rc;
59754e4ee71SNavdeep Parhar 
59854e4ee71SNavdeep Parhar 	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
59954e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
60054e4ee71SNavdeep Parhar 	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
60154e4ee71SNavdeep Parhar 	    NULL, &sc->dmat);
60254e4ee71SNavdeep Parhar 	if (rc != 0) {
60354e4ee71SNavdeep Parhar 		device_printf(sc->dev,
60454e4ee71SNavdeep Parhar 		    "failed to create main DMA tag: %d\n", rc);
60554e4ee71SNavdeep Parhar 	}
60654e4ee71SNavdeep Parhar 
60754e4ee71SNavdeep Parhar 	return (rc);
60854e4ee71SNavdeep Parhar }
60954e4ee71SNavdeep Parhar 
61032e92190SNavdeep Parhar static inline int
61132e92190SNavdeep Parhar enable_buffer_packing(struct adapter *sc)
61232e92190SNavdeep Parhar {
61332e92190SNavdeep Parhar 
61432e92190SNavdeep Parhar 	if (sc->flags & BUF_PACKING_OK &&
61532e92190SNavdeep Parhar 	    ((is_t5(sc) && buffer_packing) ||	/* 1 or -1 both ok for T5 */
61632e92190SNavdeep Parhar 	    (is_t4(sc) && buffer_packing == 1)))
61732e92190SNavdeep Parhar 		return (1);
61832e92190SNavdeep Parhar 	return (0);
61932e92190SNavdeep Parhar }
62032e92190SNavdeep Parhar 
6216e22f9f3SNavdeep Parhar void
6226e22f9f3SNavdeep Parhar t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
6236e22f9f3SNavdeep Parhar     struct sysctl_oid_list *children)
6246e22f9f3SNavdeep Parhar {
6256e22f9f3SNavdeep Parhar 
6266e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
6276e22f9f3SNavdeep Parhar 	    NULL, fl_pktshift, "payload DMA offset in rx buffer (bytes)");
6286e22f9f3SNavdeep Parhar 
6296e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
6306e22f9f3SNavdeep Parhar 	    NULL, fl_pad, "payload pad boundary (bytes)");
6316e22f9f3SNavdeep Parhar 
6326e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
6336e22f9f3SNavdeep Parhar 	    NULL, spg_len, "status page size (bytes)");
6346e22f9f3SNavdeep Parhar 
6356e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
6366e22f9f3SNavdeep Parhar 	    NULL, cong_drop, "congestion drop setting");
6371458bff9SNavdeep Parhar 
6381458bff9SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "buffer_packing", CTLFLAG_RD,
63932e92190SNavdeep Parhar 	    NULL, enable_buffer_packing(sc),
6401458bff9SNavdeep Parhar 	    "pack multiple frames in one fl buffer");
6411458bff9SNavdeep Parhar 
6421458bff9SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
6431458bff9SNavdeep Parhar 	    NULL, is_t5(sc) ? t5_fl_pack : t4_fl_pack,
6441458bff9SNavdeep Parhar 	    "payload pack boundary (bytes)");
6456e22f9f3SNavdeep Parhar }
6466e22f9f3SNavdeep Parhar 
64754e4ee71SNavdeep Parhar int
64854e4ee71SNavdeep Parhar t4_destroy_dma_tag(struct adapter *sc)
64954e4ee71SNavdeep Parhar {
65054e4ee71SNavdeep Parhar 	if (sc->dmat)
65154e4ee71SNavdeep Parhar 		bus_dma_tag_destroy(sc->dmat);
65254e4ee71SNavdeep Parhar 
65354e4ee71SNavdeep Parhar 	return (0);
65454e4ee71SNavdeep Parhar }
65554e4ee71SNavdeep Parhar 
65654e4ee71SNavdeep Parhar /*
657733b9277SNavdeep Parhar  * Allocate and initialize the firmware event queue and the management queue.
65854e4ee71SNavdeep Parhar  *
65954e4ee71SNavdeep Parhar  * Returns errno on failure.  Resources allocated up to that point may still be
66054e4ee71SNavdeep Parhar  * allocated.  Caller is responsible for cleanup in case this function fails.
66154e4ee71SNavdeep Parhar  */
66254e4ee71SNavdeep Parhar int
663f7dfe243SNavdeep Parhar t4_setup_adapter_queues(struct adapter *sc)
66454e4ee71SNavdeep Parhar {
665733b9277SNavdeep Parhar 	int rc;
66654e4ee71SNavdeep Parhar 
66754e4ee71SNavdeep Parhar 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
66854e4ee71SNavdeep Parhar 
669733b9277SNavdeep Parhar 	sysctl_ctx_init(&sc->ctx);
670733b9277SNavdeep Parhar 	sc->flags |= ADAP_SYSCTL_CTX;
67154e4ee71SNavdeep Parhar 
67256599263SNavdeep Parhar 	/*
67356599263SNavdeep Parhar 	 * Firmware event queue
67456599263SNavdeep Parhar 	 */
675733b9277SNavdeep Parhar 	rc = alloc_fwq(sc);
676aa95b653SNavdeep Parhar 	if (rc != 0)
677f7dfe243SNavdeep Parhar 		return (rc);
678f7dfe243SNavdeep Parhar 
679f7dfe243SNavdeep Parhar 	/*
680733b9277SNavdeep Parhar 	 * Management queue.  This is just a control queue that uses the fwq as
681733b9277SNavdeep Parhar 	 * its associated iq.
682f7dfe243SNavdeep Parhar 	 */
683733b9277SNavdeep Parhar 	rc = alloc_mgmtq(sc);
68454e4ee71SNavdeep Parhar 
68554e4ee71SNavdeep Parhar 	return (rc);
68654e4ee71SNavdeep Parhar }
68754e4ee71SNavdeep Parhar 
68854e4ee71SNavdeep Parhar /*
68954e4ee71SNavdeep Parhar  * Idempotent
69054e4ee71SNavdeep Parhar  */
69154e4ee71SNavdeep Parhar int
692f7dfe243SNavdeep Parhar t4_teardown_adapter_queues(struct adapter *sc)
69354e4ee71SNavdeep Parhar {
69454e4ee71SNavdeep Parhar 
69554e4ee71SNavdeep Parhar 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
69654e4ee71SNavdeep Parhar 
697733b9277SNavdeep Parhar 	/* Do this before freeing the queue */
698733b9277SNavdeep Parhar 	if (sc->flags & ADAP_SYSCTL_CTX) {
699f7dfe243SNavdeep Parhar 		sysctl_ctx_free(&sc->ctx);
700733b9277SNavdeep Parhar 		sc->flags &= ~ADAP_SYSCTL_CTX;
701f7dfe243SNavdeep Parhar 	}
702f7dfe243SNavdeep Parhar 
703733b9277SNavdeep Parhar 	free_mgmtq(sc);
704733b9277SNavdeep Parhar 	free_fwq(sc);
70554e4ee71SNavdeep Parhar 
70654e4ee71SNavdeep Parhar 	return (0);
70754e4ee71SNavdeep Parhar }
70854e4ee71SNavdeep Parhar 
709733b9277SNavdeep Parhar static inline int
710733b9277SNavdeep Parhar first_vector(struct port_info *pi)
71154e4ee71SNavdeep Parhar {
71254e4ee71SNavdeep Parhar 	struct adapter *sc = pi->adapter;
713733b9277SNavdeep Parhar 	int rc = T4_EXTRA_INTR, i;
71454e4ee71SNavdeep Parhar 
715733b9277SNavdeep Parhar 	if (sc->intr_count == 1)
716733b9277SNavdeep Parhar 		return (0);
71754e4ee71SNavdeep Parhar 
718733b9277SNavdeep Parhar 	for_each_port(sc, i) {
719c8d954abSNavdeep Parhar 		struct port_info *p = sc->port[i];
720c8d954abSNavdeep Parhar 
721733b9277SNavdeep Parhar 		if (i == pi->port_id)
722733b9277SNavdeep Parhar 			break;
723733b9277SNavdeep Parhar 
72409fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
725733b9277SNavdeep Parhar 		if (sc->flags & INTR_DIRECT)
726c8d954abSNavdeep Parhar 			rc += p->nrxq + p->nofldrxq;
727733b9277SNavdeep Parhar 		else
728c8d954abSNavdeep Parhar 			rc += max(p->nrxq, p->nofldrxq);
729733b9277SNavdeep Parhar #else
730733b9277SNavdeep Parhar 		/*
731733b9277SNavdeep Parhar 		 * Not compiled with offload support and intr_count > 1.  Only
732733b9277SNavdeep Parhar 		 * NIC queues exist and they'd better be taking direct
733733b9277SNavdeep Parhar 		 * interrupts.
734733b9277SNavdeep Parhar 		 */
735733b9277SNavdeep Parhar 		KASSERT(sc->flags & INTR_DIRECT,
736733b9277SNavdeep Parhar 		    ("%s: intr_count %d, !INTR_DIRECT", __func__,
737733b9277SNavdeep Parhar 		    sc->intr_count));
738733b9277SNavdeep Parhar 
739c8d954abSNavdeep Parhar 		rc += p->nrxq;
740733b9277SNavdeep Parhar #endif
74154e4ee71SNavdeep Parhar 	}
74254e4ee71SNavdeep Parhar 
743733b9277SNavdeep Parhar 	return (rc);
744733b9277SNavdeep Parhar }
745733b9277SNavdeep Parhar 
746733b9277SNavdeep Parhar /*
747733b9277SNavdeep Parhar  * Given an arbitrary "index," come up with an iq that can be used by other
748733b9277SNavdeep Parhar  * queues (of this port) for interrupt forwarding, SGE egress updates, etc.
749733b9277SNavdeep Parhar  * The iq returned is guaranteed to be something that takes direct interrupts.
750733b9277SNavdeep Parhar  */
751733b9277SNavdeep Parhar static struct sge_iq *
752733b9277SNavdeep Parhar port_intr_iq(struct port_info *pi, int idx)
753733b9277SNavdeep Parhar {
754733b9277SNavdeep Parhar 	struct adapter *sc = pi->adapter;
755733b9277SNavdeep Parhar 	struct sge *s = &sc->sge;
756733b9277SNavdeep Parhar 	struct sge_iq *iq = NULL;
757733b9277SNavdeep Parhar 
758733b9277SNavdeep Parhar 	if (sc->intr_count == 1)
759733b9277SNavdeep Parhar 		return (&sc->sge.fwq);
760733b9277SNavdeep Parhar 
76109fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
762733b9277SNavdeep Parhar 	if (sc->flags & INTR_DIRECT) {
763733b9277SNavdeep Parhar 		idx %= pi->nrxq + pi->nofldrxq;
764733b9277SNavdeep Parhar 
765733b9277SNavdeep Parhar 		if (idx >= pi->nrxq) {
766733b9277SNavdeep Parhar 			idx -= pi->nrxq;
767733b9277SNavdeep Parhar 			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
768733b9277SNavdeep Parhar 		} else
769733b9277SNavdeep Parhar 			iq = &s->rxq[pi->first_rxq + idx].iq;
770733b9277SNavdeep Parhar 
771733b9277SNavdeep Parhar 	} else {
772733b9277SNavdeep Parhar 		idx %= max(pi->nrxq, pi->nofldrxq);
773733b9277SNavdeep Parhar 
774733b9277SNavdeep Parhar 		if (pi->nrxq >= pi->nofldrxq)
775733b9277SNavdeep Parhar 			iq = &s->rxq[pi->first_rxq + idx].iq;
776733b9277SNavdeep Parhar 		else
777733b9277SNavdeep Parhar 			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
778733b9277SNavdeep Parhar 	}
779733b9277SNavdeep Parhar #else
780733b9277SNavdeep Parhar 	/*
781733b9277SNavdeep Parhar 	 * Not compiled with offload support and intr_count > 1.  Only NIC
782733b9277SNavdeep Parhar 	 * queues exist and they'd better be taking direct interrupts.
783733b9277SNavdeep Parhar 	 */
784733b9277SNavdeep Parhar 	KASSERT(sc->flags & INTR_DIRECT,
785733b9277SNavdeep Parhar 	    ("%s: intr_count %d, !INTR_DIRECT", __func__, sc->intr_count));
786733b9277SNavdeep Parhar 
787733b9277SNavdeep Parhar 	idx %= pi->nrxq;
788733b9277SNavdeep Parhar 	iq = &s->rxq[pi->first_rxq + idx].iq;
789733b9277SNavdeep Parhar #endif
790733b9277SNavdeep Parhar 
791733b9277SNavdeep Parhar 	KASSERT(iq->flags & IQ_INTR, ("%s: EDOOFUS", __func__));
792733b9277SNavdeep Parhar 	return (iq);
793733b9277SNavdeep Parhar }
794733b9277SNavdeep Parhar 
7958340ece5SNavdeep Parhar static inline int
7968340ece5SNavdeep Parhar mtu_to_bufsize(int mtu)
7978340ece5SNavdeep Parhar {
7988340ece5SNavdeep Parhar 	int bufsize;
7998340ece5SNavdeep Parhar 
8008340ece5SNavdeep Parhar 	/* large enough for a frame even when VLAN extraction is disabled */
8018340ece5SNavdeep Parhar 	bufsize = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + mtu;
802d14b0ac1SNavdeep Parhar 	bufsize = roundup2(bufsize + fl_pktshift, fl_pad);
8038340ece5SNavdeep Parhar 
8048340ece5SNavdeep Parhar 	return (bufsize);
8058340ece5SNavdeep Parhar }
8068340ece5SNavdeep Parhar 
8076eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD
8086eb3180fSNavdeep Parhar static inline int
8096eb3180fSNavdeep Parhar mtu_to_bufsize_toe(struct adapter *sc, int mtu)
8106eb3180fSNavdeep Parhar {
8116eb3180fSNavdeep Parhar 
8126eb3180fSNavdeep Parhar 	if (sc->tt.rx_coalesce)
8136eb3180fSNavdeep Parhar 		return (G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)));
8146eb3180fSNavdeep Parhar 
8156eb3180fSNavdeep Parhar 	return (mtu);
8166eb3180fSNavdeep Parhar }
8176eb3180fSNavdeep Parhar #endif
8186eb3180fSNavdeep Parhar 
819733b9277SNavdeep Parhar int
820733b9277SNavdeep Parhar t4_setup_port_queues(struct port_info *pi)
821733b9277SNavdeep Parhar {
822733b9277SNavdeep Parhar 	int rc = 0, i, j, intr_idx, iqid;
823733b9277SNavdeep Parhar 	struct sge_rxq *rxq;
824733b9277SNavdeep Parhar 	struct sge_txq *txq;
825733b9277SNavdeep Parhar 	struct sge_wrq *ctrlq;
82609fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
827733b9277SNavdeep Parhar 	struct sge_ofld_rxq *ofld_rxq;
828733b9277SNavdeep Parhar 	struct sge_wrq *ofld_txq;
82909fe6320SNavdeep Parhar 	struct sysctl_oid *oid2 = NULL;
830733b9277SNavdeep Parhar #endif
831733b9277SNavdeep Parhar 	char name[16];
832733b9277SNavdeep Parhar 	struct adapter *sc = pi->adapter;
8336eb3180fSNavdeep Parhar 	struct ifnet *ifp = pi->ifp;
83409fe6320SNavdeep Parhar 	struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
835733b9277SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
8361458bff9SNavdeep Parhar 	int bufsize, pack;
837733b9277SNavdeep Parhar 
838733b9277SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", CTLFLAG_RD,
839733b9277SNavdeep Parhar 	    NULL, "rx queues");
840733b9277SNavdeep Parhar 
84109fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
842733b9277SNavdeep Parhar 	if (is_offload(sc)) {
843733b9277SNavdeep Parhar 		oid2 = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq",
844733b9277SNavdeep Parhar 		    CTLFLAG_RD, NULL,
845733b9277SNavdeep Parhar 		    "rx queues for offloaded TCP connections");
846733b9277SNavdeep Parhar 	}
847733b9277SNavdeep Parhar #endif
848733b9277SNavdeep Parhar 
849733b9277SNavdeep Parhar 	/* Interrupt vector to start from (when using multiple vectors) */
850733b9277SNavdeep Parhar 	intr_idx = first_vector(pi);
851733b9277SNavdeep Parhar 
852733b9277SNavdeep Parhar 	/*
853733b9277SNavdeep Parhar 	 * First pass over all rx queues (NIC and TOE):
854733b9277SNavdeep Parhar 	 * a) initialize iq and fl
855733b9277SNavdeep Parhar 	 * b) allocate queue iff it will take direct interrupts.
856733b9277SNavdeep Parhar 	 */
8576eb3180fSNavdeep Parhar 	bufsize = mtu_to_bufsize(ifp->if_mtu);
85832e92190SNavdeep Parhar 	pack = enable_buffer_packing(sc);
85954e4ee71SNavdeep Parhar 	for_each_rxq(pi, i, rxq) {
86054e4ee71SNavdeep Parhar 
861733b9277SNavdeep Parhar 		init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq,
8625323ca8fSNavdeep Parhar 		    RX_IQ_ESIZE);
86354e4ee71SNavdeep Parhar 
86454e4ee71SNavdeep Parhar 		snprintf(name, sizeof(name), "%s rxq%d-fl",
86554e4ee71SNavdeep Parhar 		    device_get_nameunit(pi->dev), i);
8661458bff9SNavdeep Parhar 		init_fl(sc, &rxq->fl, pi->qsize_rxq / 8, bufsize, pack, name);
86754e4ee71SNavdeep Parhar 
868733b9277SNavdeep Parhar 		if (sc->flags & INTR_DIRECT
86909fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
870733b9277SNavdeep Parhar 		    || (sc->intr_count > 1 && pi->nrxq >= pi->nofldrxq)
871733b9277SNavdeep Parhar #endif
872733b9277SNavdeep Parhar 		   ) {
873733b9277SNavdeep Parhar 			rxq->iq.flags |= IQ_INTR;
874733b9277SNavdeep Parhar 			rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
87554e4ee71SNavdeep Parhar 			if (rc != 0)
87654e4ee71SNavdeep Parhar 				goto done;
877733b9277SNavdeep Parhar 			intr_idx++;
878733b9277SNavdeep Parhar 		}
87954e4ee71SNavdeep Parhar 	}
88054e4ee71SNavdeep Parhar 
88109fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
8826eb3180fSNavdeep Parhar 	bufsize = mtu_to_bufsize_toe(sc, ifp->if_mtu);
8831458bff9SNavdeep Parhar 	pack = 0;	/* XXX: think about this some more */
884733b9277SNavdeep Parhar 	for_each_ofld_rxq(pi, i, ofld_rxq) {
885733b9277SNavdeep Parhar 
886733b9277SNavdeep Parhar 		init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
8875323ca8fSNavdeep Parhar 		    pi->qsize_rxq, RX_IQ_ESIZE);
888733b9277SNavdeep Parhar 
889733b9277SNavdeep Parhar 		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
890733b9277SNavdeep Parhar 		    device_get_nameunit(pi->dev), i);
8911458bff9SNavdeep Parhar 		init_fl(sc, &ofld_rxq->fl, pi->qsize_rxq / 8, bufsize, pack,
8921458bff9SNavdeep Parhar 		    name);
893733b9277SNavdeep Parhar 
894733b9277SNavdeep Parhar 		if (sc->flags & INTR_DIRECT ||
895733b9277SNavdeep Parhar 		    (sc->intr_count > 1 && pi->nofldrxq > pi->nrxq)) {
896733b9277SNavdeep Parhar 			ofld_rxq->iq.flags |= IQ_INTR;
897733b9277SNavdeep Parhar 			rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
898733b9277SNavdeep Parhar 			if (rc != 0)
899733b9277SNavdeep Parhar 				goto done;
900733b9277SNavdeep Parhar 			intr_idx++;
901733b9277SNavdeep Parhar 		}
902733b9277SNavdeep Parhar 	}
903733b9277SNavdeep Parhar #endif
904733b9277SNavdeep Parhar 
905733b9277SNavdeep Parhar 	/*
906733b9277SNavdeep Parhar 	 * Second pass over all rx queues (NIC and TOE).  The queues forwarding
907733b9277SNavdeep Parhar 	 * their interrupts are allocated now.
908733b9277SNavdeep Parhar 	 */
909733b9277SNavdeep Parhar 	j = 0;
910733b9277SNavdeep Parhar 	for_each_rxq(pi, i, rxq) {
911733b9277SNavdeep Parhar 		if (rxq->iq.flags & IQ_INTR)
912733b9277SNavdeep Parhar 			continue;
913733b9277SNavdeep Parhar 
914733b9277SNavdeep Parhar 		intr_idx = port_intr_iq(pi, j)->abs_id;
915733b9277SNavdeep Parhar 
916733b9277SNavdeep Parhar 		rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
917733b9277SNavdeep Parhar 		if (rc != 0)
918733b9277SNavdeep Parhar 			goto done;
919733b9277SNavdeep Parhar 		j++;
920733b9277SNavdeep Parhar 	}
921733b9277SNavdeep Parhar 
92209fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
923733b9277SNavdeep Parhar 	for_each_ofld_rxq(pi, i, ofld_rxq) {
924733b9277SNavdeep Parhar 		if (ofld_rxq->iq.flags & IQ_INTR)
925733b9277SNavdeep Parhar 			continue;
926733b9277SNavdeep Parhar 
927733b9277SNavdeep Parhar 		intr_idx = port_intr_iq(pi, j)->abs_id;
928733b9277SNavdeep Parhar 
929733b9277SNavdeep Parhar 		rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
930733b9277SNavdeep Parhar 		if (rc != 0)
931733b9277SNavdeep Parhar 			goto done;
932733b9277SNavdeep Parhar 		j++;
933733b9277SNavdeep Parhar 	}
934733b9277SNavdeep Parhar #endif
935733b9277SNavdeep Parhar 
936733b9277SNavdeep Parhar 	/*
937733b9277SNavdeep Parhar 	 * Now the tx queues.  Only one pass needed.
938733b9277SNavdeep Parhar 	 */
939733b9277SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
940733b9277SNavdeep Parhar 	    NULL, "tx queues");
941733b9277SNavdeep Parhar 	j = 0;
94254e4ee71SNavdeep Parhar 	for_each_txq(pi, i, txq) {
943733b9277SNavdeep Parhar 		uint16_t iqid;
944733b9277SNavdeep Parhar 
945733b9277SNavdeep Parhar 		iqid = port_intr_iq(pi, j)->cntxt_id;
94654e4ee71SNavdeep Parhar 
94754e4ee71SNavdeep Parhar 		snprintf(name, sizeof(name), "%s txq%d",
94854e4ee71SNavdeep Parhar 		    device_get_nameunit(pi->dev), i);
949733b9277SNavdeep Parhar 		init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid,
950733b9277SNavdeep Parhar 		    name);
95154e4ee71SNavdeep Parhar 
952733b9277SNavdeep Parhar 		rc = alloc_txq(pi, txq, i, oid);
95354e4ee71SNavdeep Parhar 		if (rc != 0)
95454e4ee71SNavdeep Parhar 			goto done;
955733b9277SNavdeep Parhar 		j++;
95654e4ee71SNavdeep Parhar 	}
95754e4ee71SNavdeep Parhar 
95809fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
959733b9277SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq",
960733b9277SNavdeep Parhar 	    CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections");
961733b9277SNavdeep Parhar 	for_each_ofld_txq(pi, i, ofld_txq) {
962733b9277SNavdeep Parhar 		uint16_t iqid;
963733b9277SNavdeep Parhar 
964733b9277SNavdeep Parhar 		iqid = port_intr_iq(pi, j)->cntxt_id;
965733b9277SNavdeep Parhar 
966733b9277SNavdeep Parhar 		snprintf(name, sizeof(name), "%s ofld_txq%d",
967733b9277SNavdeep Parhar 		    device_get_nameunit(pi->dev), i);
968733b9277SNavdeep Parhar 		init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan,
969733b9277SNavdeep Parhar 		    iqid, name);
970733b9277SNavdeep Parhar 
971733b9277SNavdeep Parhar 		snprintf(name, sizeof(name), "%d", i);
972733b9277SNavdeep Parhar 		oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
973733b9277SNavdeep Parhar 		    name, CTLFLAG_RD, NULL, "offload tx queue");
974733b9277SNavdeep Parhar 
975733b9277SNavdeep Parhar 		rc = alloc_wrq(sc, pi, ofld_txq, oid2);
976733b9277SNavdeep Parhar 		if (rc != 0)
977733b9277SNavdeep Parhar 			goto done;
978733b9277SNavdeep Parhar 		j++;
979733b9277SNavdeep Parhar 	}
980733b9277SNavdeep Parhar #endif
981733b9277SNavdeep Parhar 
982733b9277SNavdeep Parhar 	/*
983733b9277SNavdeep Parhar 	 * Finally, the control queue.
984733b9277SNavdeep Parhar 	 */
985733b9277SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
986733b9277SNavdeep Parhar 	    NULL, "ctrl queue");
987733b9277SNavdeep Parhar 	ctrlq = &sc->sge.ctrlq[pi->port_id];
988733b9277SNavdeep Parhar 	iqid = port_intr_iq(pi, 0)->cntxt_id;
989733b9277SNavdeep Parhar 	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev));
990733b9277SNavdeep Parhar 	init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name);
991733b9277SNavdeep Parhar 	rc = alloc_wrq(sc, pi, ctrlq, oid);
992733b9277SNavdeep Parhar 
99354e4ee71SNavdeep Parhar done:
99454e4ee71SNavdeep Parhar 	if (rc)
995733b9277SNavdeep Parhar 		t4_teardown_port_queues(pi);
99654e4ee71SNavdeep Parhar 
99754e4ee71SNavdeep Parhar 	return (rc);
99854e4ee71SNavdeep Parhar }
99954e4ee71SNavdeep Parhar 
100054e4ee71SNavdeep Parhar /*
100154e4ee71SNavdeep Parhar  * Idempotent
100254e4ee71SNavdeep Parhar  */
100354e4ee71SNavdeep Parhar int
1004733b9277SNavdeep Parhar t4_teardown_port_queues(struct port_info *pi)
100554e4ee71SNavdeep Parhar {
100654e4ee71SNavdeep Parhar 	int i;
1007733b9277SNavdeep Parhar 	struct adapter *sc = pi->adapter;
100854e4ee71SNavdeep Parhar 	struct sge_rxq *rxq;
100954e4ee71SNavdeep Parhar 	struct sge_txq *txq;
101009fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1011733b9277SNavdeep Parhar 	struct sge_ofld_rxq *ofld_rxq;
1012733b9277SNavdeep Parhar 	struct sge_wrq *ofld_txq;
1013733b9277SNavdeep Parhar #endif
101454e4ee71SNavdeep Parhar 
101554e4ee71SNavdeep Parhar 	/* Do this before freeing the queues */
1016733b9277SNavdeep Parhar 	if (pi->flags & PORT_SYSCTL_CTX) {
101754e4ee71SNavdeep Parhar 		sysctl_ctx_free(&pi->ctx);
1018733b9277SNavdeep Parhar 		pi->flags &= ~PORT_SYSCTL_CTX;
101954e4ee71SNavdeep Parhar 	}
102054e4ee71SNavdeep Parhar 
1021733b9277SNavdeep Parhar 	/*
1022733b9277SNavdeep Parhar 	 * Take down all the tx queues first, as they reference the rx queues
1023733b9277SNavdeep Parhar 	 * (for egress updates, etc.).
1024733b9277SNavdeep Parhar 	 */
1025733b9277SNavdeep Parhar 
1026733b9277SNavdeep Parhar 	free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
1027733b9277SNavdeep Parhar 
102854e4ee71SNavdeep Parhar 	for_each_txq(pi, i, txq) {
102954e4ee71SNavdeep Parhar 		free_txq(pi, txq);
103054e4ee71SNavdeep Parhar 	}
103154e4ee71SNavdeep Parhar 
103209fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1033733b9277SNavdeep Parhar 	for_each_ofld_txq(pi, i, ofld_txq) {
1034733b9277SNavdeep Parhar 		free_wrq(sc, ofld_txq);
1035733b9277SNavdeep Parhar 	}
1036733b9277SNavdeep Parhar #endif
1037733b9277SNavdeep Parhar 
1038733b9277SNavdeep Parhar 	/*
1039733b9277SNavdeep Parhar 	 * Then take down the rx queues that forward their interrupts, as they
1040733b9277SNavdeep Parhar 	 * reference other rx queues.
1041733b9277SNavdeep Parhar 	 */
1042733b9277SNavdeep Parhar 
104354e4ee71SNavdeep Parhar 	for_each_rxq(pi, i, rxq) {
1044733b9277SNavdeep Parhar 		if ((rxq->iq.flags & IQ_INTR) == 0)
104554e4ee71SNavdeep Parhar 			free_rxq(pi, rxq);
104654e4ee71SNavdeep Parhar 	}
104754e4ee71SNavdeep Parhar 
104809fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1049733b9277SNavdeep Parhar 	for_each_ofld_rxq(pi, i, ofld_rxq) {
1050733b9277SNavdeep Parhar 		if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
1051733b9277SNavdeep Parhar 			free_ofld_rxq(pi, ofld_rxq);
1052733b9277SNavdeep Parhar 	}
1053733b9277SNavdeep Parhar #endif
1054733b9277SNavdeep Parhar 
1055733b9277SNavdeep Parhar 	/*
1056733b9277SNavdeep Parhar 	 * Then take down the rx queues that take direct interrupts.
1057733b9277SNavdeep Parhar 	 */
1058733b9277SNavdeep Parhar 
1059733b9277SNavdeep Parhar 	for_each_rxq(pi, i, rxq) {
1060733b9277SNavdeep Parhar 		if (rxq->iq.flags & IQ_INTR)
1061733b9277SNavdeep Parhar 			free_rxq(pi, rxq);
1062733b9277SNavdeep Parhar 	}
1063733b9277SNavdeep Parhar 
106409fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1065733b9277SNavdeep Parhar 	for_each_ofld_rxq(pi, i, ofld_rxq) {
1066733b9277SNavdeep Parhar 		if (ofld_rxq->iq.flags & IQ_INTR)
1067733b9277SNavdeep Parhar 			free_ofld_rxq(pi, ofld_rxq);
1068733b9277SNavdeep Parhar 	}
1069733b9277SNavdeep Parhar #endif
1070733b9277SNavdeep Parhar 
107154e4ee71SNavdeep Parhar 	return (0);
107254e4ee71SNavdeep Parhar }
107354e4ee71SNavdeep Parhar 
1074733b9277SNavdeep Parhar /*
1075733b9277SNavdeep Parhar  * Deals with errors and the firmware event queue.  All data rx queues forward
1076733b9277SNavdeep Parhar  * their interrupt to the firmware event queue.
1077733b9277SNavdeep Parhar  */
107854e4ee71SNavdeep Parhar void
107954e4ee71SNavdeep Parhar t4_intr_all(void *arg)
108054e4ee71SNavdeep Parhar {
108154e4ee71SNavdeep Parhar 	struct adapter *sc = arg;
1082733b9277SNavdeep Parhar 	struct sge_iq *fwq = &sc->sge.fwq;
108354e4ee71SNavdeep Parhar 
108454e4ee71SNavdeep Parhar 	t4_intr_err(arg);
1085733b9277SNavdeep Parhar 	if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) {
1086733b9277SNavdeep Parhar 		service_iq(fwq, 0);
1087733b9277SNavdeep Parhar 		atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE);
108854e4ee71SNavdeep Parhar 	}
108954e4ee71SNavdeep Parhar }
109054e4ee71SNavdeep Parhar 
109154e4ee71SNavdeep Parhar /* Deals with error interrupts */
109254e4ee71SNavdeep Parhar void
109354e4ee71SNavdeep Parhar t4_intr_err(void *arg)
109454e4ee71SNavdeep Parhar {
109554e4ee71SNavdeep Parhar 	struct adapter *sc = arg;
109654e4ee71SNavdeep Parhar 
109754e4ee71SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
109854e4ee71SNavdeep Parhar 	t4_slow_intr_handler(sc);
109954e4ee71SNavdeep Parhar }
110054e4ee71SNavdeep Parhar 
110154e4ee71SNavdeep Parhar void
110254e4ee71SNavdeep Parhar t4_intr_evt(void *arg)
110354e4ee71SNavdeep Parhar {
110454e4ee71SNavdeep Parhar 	struct sge_iq *iq = arg;
11052be67d29SNavdeep Parhar 
1106733b9277SNavdeep Parhar 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
1107733b9277SNavdeep Parhar 		service_iq(iq, 0);
1108733b9277SNavdeep Parhar 		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
11092be67d29SNavdeep Parhar 	}
11102be67d29SNavdeep Parhar }
11112be67d29SNavdeep Parhar 
1112733b9277SNavdeep Parhar void
1113733b9277SNavdeep Parhar t4_intr(void *arg)
11142be67d29SNavdeep Parhar {
11152be67d29SNavdeep Parhar 	struct sge_iq *iq = arg;
1116733b9277SNavdeep Parhar 
1117733b9277SNavdeep Parhar 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
1118733b9277SNavdeep Parhar 		service_iq(iq, 0);
1119733b9277SNavdeep Parhar 		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
1120733b9277SNavdeep Parhar 	}
1121733b9277SNavdeep Parhar }
1122733b9277SNavdeep Parhar 
1123733b9277SNavdeep Parhar /*
1124733b9277SNavdeep Parhar  * Deals with anything and everything on the given ingress queue.
1125733b9277SNavdeep Parhar  */
1126733b9277SNavdeep Parhar static int
1127733b9277SNavdeep Parhar service_iq(struct sge_iq *iq, int budget)
1128733b9277SNavdeep Parhar {
1129733b9277SNavdeep Parhar 	struct sge_iq *q;
113009fe6320SNavdeep Parhar 	struct sge_rxq *rxq = iq_to_rxq(iq);	/* Use iff iq is part of rxq */
1131733b9277SNavdeep Parhar 	struct sge_fl *fl = &rxq->fl;		/* Use iff IQ_HAS_FL */
113254e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
113354e4ee71SNavdeep Parhar 	struct rsp_ctrl *ctrl;
1134733b9277SNavdeep Parhar 	const struct rss_header *rss;
1135733b9277SNavdeep Parhar 	int ndescs = 0, limit, fl_bufs_used = 0;
113656599263SNavdeep Parhar 	int rsp_type;
1137733b9277SNavdeep Parhar 	uint32_t lq;
1138733b9277SNavdeep Parhar 	struct mbuf *m0;
1139733b9277SNavdeep Parhar 	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
1140480e603cSNavdeep Parhar #if defined(INET) || defined(INET6)
1141480e603cSNavdeep Parhar 	const struct timeval lro_timeout = {0, sc->lro_timeout};
1142480e603cSNavdeep Parhar #endif
1143733b9277SNavdeep Parhar 
1144733b9277SNavdeep Parhar 	limit = budget ? budget : iq->qsize / 8;
1145733b9277SNavdeep Parhar 
1146733b9277SNavdeep Parhar 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
1147733b9277SNavdeep Parhar 
1148733b9277SNavdeep Parhar 	/*
1149733b9277SNavdeep Parhar 	 * We always come back and check the descriptor ring for new indirect
1150733b9277SNavdeep Parhar 	 * interrupts and other responses after running a single handler.
1151733b9277SNavdeep Parhar 	 */
1152733b9277SNavdeep Parhar 	for (;;) {
1153733b9277SNavdeep Parhar 		while (is_new_response(iq, &ctrl)) {
115454e4ee71SNavdeep Parhar 
115554e4ee71SNavdeep Parhar 			rmb();
115654e4ee71SNavdeep Parhar 
1157733b9277SNavdeep Parhar 			m0 = NULL;
115856599263SNavdeep Parhar 			rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
1159733b9277SNavdeep Parhar 			lq = be32toh(ctrl->pldbuflen_qid);
1160733b9277SNavdeep Parhar 			rss = (const void *)iq->cdesc;
116154e4ee71SNavdeep Parhar 
1162733b9277SNavdeep Parhar 			switch (rsp_type) {
1163733b9277SNavdeep Parhar 			case X_RSPD_TYPE_FLBUF:
116454e4ee71SNavdeep Parhar 
1165733b9277SNavdeep Parhar 				KASSERT(iq->flags & IQ_HAS_FL,
1166733b9277SNavdeep Parhar 				    ("%s: data for an iq (%p) with no freelist",
1167733b9277SNavdeep Parhar 				    __func__, iq));
1168733b9277SNavdeep Parhar 
11691458bff9SNavdeep Parhar 				m0 = fl->flags & FL_BUF_PACKING ?
11701458bff9SNavdeep Parhar 				    get_fl_payload1(sc, fl, lq, &fl_bufs_used) :
11711458bff9SNavdeep Parhar 				    get_fl_payload2(sc, fl, lq, &fl_bufs_used);
11721458bff9SNavdeep Parhar 
11731458bff9SNavdeep Parhar 				if (__predict_false(m0 == NULL))
11741458bff9SNavdeep Parhar 					goto process_iql;
1175733b9277SNavdeep Parhar #ifdef T4_PKT_TIMESTAMP
1176733b9277SNavdeep Parhar 				/*
1177733b9277SNavdeep Parhar 				 * 60 bit timestamp for the payload is
1178733b9277SNavdeep Parhar 				 * *(uint64_t *)m0->m_pktdat.  Note that it is
1179733b9277SNavdeep Parhar 				 * in the leading free-space in the mbuf.  The
1180733b9277SNavdeep Parhar 				 * kernel can clobber it during a pullup,
1181733b9277SNavdeep Parhar 				 * m_copymdata, etc.  You need to make sure that
1182733b9277SNavdeep Parhar 				 * the mbuf reaches you unmolested if you care
1183733b9277SNavdeep Parhar 				 * about the timestamp.
1184733b9277SNavdeep Parhar 				 */
1185733b9277SNavdeep Parhar 				*(uint64_t *)m0->m_pktdat =
1186733b9277SNavdeep Parhar 				    be64toh(ctrl->u.last_flit) &
1187733b9277SNavdeep Parhar 				    0xfffffffffffffff;
1188733b9277SNavdeep Parhar #endif
1189733b9277SNavdeep Parhar 
1190733b9277SNavdeep Parhar 				/* fall through */
1191733b9277SNavdeep Parhar 
1192733b9277SNavdeep Parhar 			case X_RSPD_TYPE_CPL:
1193733b9277SNavdeep Parhar 				KASSERT(rss->opcode < NUM_CPL_CMDS,
1194733b9277SNavdeep Parhar 				    ("%s: bad opcode %02x.", __func__,
1195733b9277SNavdeep Parhar 				    rss->opcode));
1196733b9277SNavdeep Parhar 				sc->cpl_handler[rss->opcode](iq, rss, m0);
1197733b9277SNavdeep Parhar 				break;
1198733b9277SNavdeep Parhar 
1199733b9277SNavdeep Parhar 			case X_RSPD_TYPE_INTR:
1200733b9277SNavdeep Parhar 
1201733b9277SNavdeep Parhar 				/*
1202733b9277SNavdeep Parhar 				 * Interrupts should be forwarded only to queues
1203733b9277SNavdeep Parhar 				 * that are not forwarding their interrupts.
1204733b9277SNavdeep Parhar 				 * This means service_iq can recurse but only 1
1205733b9277SNavdeep Parhar 				 * level deep.
1206733b9277SNavdeep Parhar 				 */
1207733b9277SNavdeep Parhar 				KASSERT(budget == 0,
1208733b9277SNavdeep Parhar 				    ("%s: budget %u, rsp_type %u", __func__,
1209733b9277SNavdeep Parhar 				    budget, rsp_type));
1210733b9277SNavdeep Parhar 
121198005176SNavdeep Parhar 				/*
121298005176SNavdeep Parhar 				 * There are 1K interrupt-capable queues (qids 0
121398005176SNavdeep Parhar 				 * through 1023).  A response type indicating a
121498005176SNavdeep Parhar 				 * forwarded interrupt with a qid >= 1K is an
121598005176SNavdeep Parhar 				 * iWARP async notification.
121698005176SNavdeep Parhar 				 */
121798005176SNavdeep Parhar 				if (lq >= 1024) {
121898005176SNavdeep Parhar                                         sc->an_handler(iq, ctrl);
121998005176SNavdeep Parhar                                         break;
122098005176SNavdeep Parhar                                 }
122198005176SNavdeep Parhar 
1222733b9277SNavdeep Parhar 				q = sc->sge.iqmap[lq - sc->sge.iq_start];
1223733b9277SNavdeep Parhar 				if (atomic_cmpset_int(&q->state, IQS_IDLE,
1224733b9277SNavdeep Parhar 				    IQS_BUSY)) {
1225733b9277SNavdeep Parhar 					if (service_iq(q, q->qsize / 8) == 0) {
1226733b9277SNavdeep Parhar 						atomic_cmpset_int(&q->state,
1227733b9277SNavdeep Parhar 						    IQS_BUSY, IQS_IDLE);
1228733b9277SNavdeep Parhar 					} else {
1229733b9277SNavdeep Parhar 						STAILQ_INSERT_TAIL(&iql, q,
1230733b9277SNavdeep Parhar 						    link);
1231733b9277SNavdeep Parhar 					}
1232733b9277SNavdeep Parhar 				}
1233733b9277SNavdeep Parhar 				break;
1234733b9277SNavdeep Parhar 
1235733b9277SNavdeep Parhar 			default:
123698005176SNavdeep Parhar 				KASSERT(0,
123798005176SNavdeep Parhar 				    ("%s: illegal response type %d on iq %p",
123898005176SNavdeep Parhar 				    __func__, rsp_type, iq));
123998005176SNavdeep Parhar 				log(LOG_ERR,
124098005176SNavdeep Parhar 				    "%s: illegal response type %d on iq %p",
124198005176SNavdeep Parhar 				    device_get_nameunit(sc->dev), rsp_type, iq);
124209fe6320SNavdeep Parhar 				break;
124354e4ee71SNavdeep Parhar 			}
124456599263SNavdeep Parhar 
124554e4ee71SNavdeep Parhar 			iq_next(iq);
1246733b9277SNavdeep Parhar 			if (++ndescs == limit) {
1247733b9277SNavdeep Parhar 				t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
1248733b9277SNavdeep Parhar 				    V_CIDXINC(ndescs) |
1249733b9277SNavdeep Parhar 				    V_INGRESSQID(iq->cntxt_id) |
1250733b9277SNavdeep Parhar 				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
1251733b9277SNavdeep Parhar 				ndescs = 0;
1252733b9277SNavdeep Parhar 
1253480e603cSNavdeep Parhar #if defined(INET) || defined(INET6)
1254480e603cSNavdeep Parhar 				if (iq->flags & IQ_LRO_ENABLED &&
1255480e603cSNavdeep Parhar 				    sc->lro_timeout != 0) {
1256480e603cSNavdeep Parhar 					tcp_lro_flush_inactive(&rxq->lro,
1257480e603cSNavdeep Parhar 					    &lro_timeout);
1258480e603cSNavdeep Parhar 				}
1259480e603cSNavdeep Parhar #endif
1260480e603cSNavdeep Parhar 
1261733b9277SNavdeep Parhar 				if (fl_bufs_used > 0) {
1262733b9277SNavdeep Parhar 					FL_LOCK(fl);
1263733b9277SNavdeep Parhar 					fl->needed += fl_bufs_used;
1264733b9277SNavdeep Parhar 					refill_fl(sc, fl, fl->cap / 8);
1265733b9277SNavdeep Parhar 					FL_UNLOCK(fl);
1266733b9277SNavdeep Parhar 					fl_bufs_used = 0;
126754e4ee71SNavdeep Parhar 				}
126854e4ee71SNavdeep Parhar 
1269733b9277SNavdeep Parhar 				if (budget)
1270733b9277SNavdeep Parhar 					return (EINPROGRESS);
127154e4ee71SNavdeep Parhar 			}
1272733b9277SNavdeep Parhar 		}
1273733b9277SNavdeep Parhar 
12741458bff9SNavdeep Parhar process_iql:
1275733b9277SNavdeep Parhar 		if (STAILQ_EMPTY(&iql))
1276733b9277SNavdeep Parhar 			break;
1277733b9277SNavdeep Parhar 
1278733b9277SNavdeep Parhar 		/*
1279733b9277SNavdeep Parhar 		 * Process the head only, and send it to the back of the list if
1280733b9277SNavdeep Parhar 		 * it's still not done.
1281733b9277SNavdeep Parhar 		 */
1282733b9277SNavdeep Parhar 		q = STAILQ_FIRST(&iql);
1283733b9277SNavdeep Parhar 		STAILQ_REMOVE_HEAD(&iql, link);
1284733b9277SNavdeep Parhar 		if (service_iq(q, q->qsize / 8) == 0)
1285733b9277SNavdeep Parhar 			atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
1286733b9277SNavdeep Parhar 		else
1287733b9277SNavdeep Parhar 			STAILQ_INSERT_TAIL(&iql, q, link);
1288733b9277SNavdeep Parhar 	}
1289733b9277SNavdeep Parhar 
1290a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
1291733b9277SNavdeep Parhar 	if (iq->flags & IQ_LRO_ENABLED) {
1292733b9277SNavdeep Parhar 		struct lro_ctrl *lro = &rxq->lro;
1293733b9277SNavdeep Parhar 		struct lro_entry *l;
1294733b9277SNavdeep Parhar 
1295733b9277SNavdeep Parhar 		while (!SLIST_EMPTY(&lro->lro_active)) {
1296733b9277SNavdeep Parhar 			l = SLIST_FIRST(&lro->lro_active);
1297733b9277SNavdeep Parhar 			SLIST_REMOVE_HEAD(&lro->lro_active, next);
1298733b9277SNavdeep Parhar 			tcp_lro_flush(lro, l);
1299733b9277SNavdeep Parhar 		}
1300733b9277SNavdeep Parhar 	}
1301733b9277SNavdeep Parhar #endif
1302733b9277SNavdeep Parhar 
1303733b9277SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
1304733b9277SNavdeep Parhar 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
1305733b9277SNavdeep Parhar 
1306733b9277SNavdeep Parhar 	if (iq->flags & IQ_HAS_FL) {
1307733b9277SNavdeep Parhar 		int starved;
1308733b9277SNavdeep Parhar 
1309733b9277SNavdeep Parhar 		FL_LOCK(fl);
1310733b9277SNavdeep Parhar 		fl->needed += fl_bufs_used;
1311733b9277SNavdeep Parhar 		starved = refill_fl(sc, fl, fl->cap / 4);
1312733b9277SNavdeep Parhar 		FL_UNLOCK(fl);
1313733b9277SNavdeep Parhar 		if (__predict_false(starved != 0))
1314733b9277SNavdeep Parhar 			add_fl_to_sfl(sc, fl);
1315733b9277SNavdeep Parhar 	}
1316733b9277SNavdeep Parhar 
1317733b9277SNavdeep Parhar 	return (0);
1318733b9277SNavdeep Parhar }
1319733b9277SNavdeep Parhar 
13201458bff9SNavdeep Parhar static int
13211458bff9SNavdeep Parhar fill_mbuf_stash(struct sge_fl *fl)
13221458bff9SNavdeep Parhar {
13231458bff9SNavdeep Parhar 	int i;
13241458bff9SNavdeep Parhar 
13251458bff9SNavdeep Parhar 	for (i = 0; i < nitems(fl->mstash); i++) {
13261458bff9SNavdeep Parhar 		if (fl->mstash[i] == NULL) {
13271458bff9SNavdeep Parhar 			struct mbuf *m;
13281458bff9SNavdeep Parhar 			if ((m = m_get(M_NOWAIT, MT_NOINIT)) == NULL)
13291458bff9SNavdeep Parhar 				return (ENOBUFS);
13301458bff9SNavdeep Parhar 			fl->mstash[i] = m;
13311458bff9SNavdeep Parhar 		}
13321458bff9SNavdeep Parhar 	}
13331458bff9SNavdeep Parhar 	return (0);
13341458bff9SNavdeep Parhar }
13351458bff9SNavdeep Parhar 
1336733b9277SNavdeep Parhar static struct mbuf *
13371458bff9SNavdeep Parhar get_mbuf_from_stash(struct sge_fl *fl)
13381458bff9SNavdeep Parhar {
13391458bff9SNavdeep Parhar 	int i;
13401458bff9SNavdeep Parhar 
13411458bff9SNavdeep Parhar 	for (i = 0; i < nitems(fl->mstash); i++) {
13421458bff9SNavdeep Parhar 		if (fl->mstash[i] != NULL) {
13431458bff9SNavdeep Parhar 			struct mbuf *m;
13441458bff9SNavdeep Parhar 
13451458bff9SNavdeep Parhar 			m = fl->mstash[i];
13461458bff9SNavdeep Parhar 			fl->mstash[i] = NULL;
13471458bff9SNavdeep Parhar 			return (m);
13481458bff9SNavdeep Parhar 		} else
13491458bff9SNavdeep Parhar 			fl->mstash[i] = m_get(M_NOWAIT, MT_NOINIT);
13501458bff9SNavdeep Parhar 	}
13511458bff9SNavdeep Parhar 
13521458bff9SNavdeep Parhar 	return (m_get(M_NOWAIT, MT_NOINIT));
13531458bff9SNavdeep Parhar }
13541458bff9SNavdeep Parhar 
13551458bff9SNavdeep Parhar static void
13561458bff9SNavdeep Parhar return_mbuf_to_stash(struct sge_fl *fl, struct mbuf *m)
13571458bff9SNavdeep Parhar {
13581458bff9SNavdeep Parhar 	int i;
13591458bff9SNavdeep Parhar 
13601458bff9SNavdeep Parhar 	if (m == NULL)
13611458bff9SNavdeep Parhar 		return;
13621458bff9SNavdeep Parhar 
13631458bff9SNavdeep Parhar 	for (i = 0; i < nitems(fl->mstash); i++) {
13641458bff9SNavdeep Parhar 		if (fl->mstash[i] == NULL) {
13651458bff9SNavdeep Parhar 			fl->mstash[i] = m;
13661458bff9SNavdeep Parhar 			return;
13671458bff9SNavdeep Parhar 		}
13681458bff9SNavdeep Parhar 	}
13691458bff9SNavdeep Parhar 	m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
13701458bff9SNavdeep Parhar 	m_free(m);
13711458bff9SNavdeep Parhar }
13721458bff9SNavdeep Parhar 
13731458bff9SNavdeep Parhar /* buf can be any address within the buffer */
13741458bff9SNavdeep Parhar static inline u_int *
13751458bff9SNavdeep Parhar find_buf_refcnt(caddr_t buf)
13761458bff9SNavdeep Parhar {
13771458bff9SNavdeep Parhar 	uintptr_t ptr = (uintptr_t)buf;
13781458bff9SNavdeep Parhar 
13791458bff9SNavdeep Parhar 	return ((u_int *)((ptr & ~(MJUMPAGESIZE - 1)) + MSIZE - sizeof(u_int)));
13801458bff9SNavdeep Parhar }
13811458bff9SNavdeep Parhar 
13821458bff9SNavdeep Parhar static inline struct mbuf *
13831458bff9SNavdeep Parhar find_buf_mbuf(caddr_t buf)
13841458bff9SNavdeep Parhar {
13851458bff9SNavdeep Parhar 	uintptr_t ptr = (uintptr_t)buf;
13861458bff9SNavdeep Parhar 
13871458bff9SNavdeep Parhar 	return ((struct mbuf *)(ptr & ~(MJUMPAGESIZE - 1)));
13881458bff9SNavdeep Parhar }
13891458bff9SNavdeep Parhar 
13901458bff9SNavdeep Parhar static int
13911458bff9SNavdeep Parhar rxb_free(struct mbuf *m, void *arg1, void *arg2)
13921458bff9SNavdeep Parhar {
13931458bff9SNavdeep Parhar 	uma_zone_t zone = arg1;
13941458bff9SNavdeep Parhar 	caddr_t cl = arg2;
13951458bff9SNavdeep Parhar #ifdef INVARIANTS
13961458bff9SNavdeep Parhar 	u_int refcount;
13971458bff9SNavdeep Parhar 
13981458bff9SNavdeep Parhar 	refcount = *find_buf_refcnt(cl);
13991458bff9SNavdeep Parhar 	KASSERT(refcount == 0, ("%s: cl %p refcount is %u", __func__,
14001458bff9SNavdeep Parhar 	    cl - MSIZE, refcount));
14011458bff9SNavdeep Parhar #endif
14021458bff9SNavdeep Parhar 	cl -= MSIZE;
14031458bff9SNavdeep Parhar 	uma_zfree(zone, cl);
14041458bff9SNavdeep Parhar 
14051458bff9SNavdeep Parhar 	return (EXT_FREE_OK);
14061458bff9SNavdeep Parhar }
14071458bff9SNavdeep Parhar 
/*
 * Assemble the payload of one rx frame from a freelist operating in buffer
 * packing mode (FL_BUF_PACKING).  len_newbuf is the response's
 * pldbuflen_qid word: if F_RSPD_NEWBUF is clear the frame is packed after
 * the previous one at fl->rx_offset within the current buffer, otherwise it
 * starts at the beginning of a fresh buffer (possibly spanning several).
 * The number of fl buffers fully consumed is added to *fl_bufs_used.
 *
 * Returns the assembled mbuf (chain) or NULL on mbuf allocation failure.
 */
static struct mbuf *
get_fl_payload1(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf,
    int *fl_bufs_used)
{
	struct mbuf *m0, *m;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	unsigned int nbuf, len;
	/* T4 and T5 chips use different packing boundaries. */
	int pack_boundary = is_t4(sc) ? t4_fl_pack : t5_fl_pack;

	/*
	 * No assertion for the fl lock because we don't need it.  This routine
	 * is called only from the rx interrupt handler and it only updates
	 * fl->cidx.  (Contrast that with fl->pidx/fl->needed which could be
	 * updated in the rx interrupt handler or the starvation helper routine.
	 * That's why code that manipulates fl->pidx/fl->needed needs the fl
	 * lock but this routine does not).
	 */

	KASSERT(fl->flags & FL_BUF_PACKING,
	    ("%s: buffer packing disabled for fl %p", __func__, fl));

	len = G_RSPD_LEN(len_newbuf);

	/* Packed frame: continues inside the buffer we're already in. */
	if ((len_newbuf & F_RSPD_NEWBUF) == 0) {
		KASSERT(fl->rx_offset > 0,
		    ("%s: packed frame but driver at offset=0", __func__));

		/* A packed frame is guaranteed to fit entirely in this buf. */
		KASSERT(FL_BUF_SIZE(sc, sd->tag_idx) - fl->rx_offset >= len,
		    ("%s: packing error.  bufsz=%u, offset=%u, len=%u",
		    __func__, FL_BUF_SIZE(sc, sd->tag_idx), fl->rx_offset,
		    len));

		m0 = get_mbuf_from_stash(fl);
		if (m0 == NULL ||
		    m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR) != 0) {
			/* Put back whatever we got; frame stays on the fl. */
			return_mbuf_to_stash(fl, m0);
			return (NULL);
		}

		bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
		    BUS_DMASYNC_POSTREAD);
		if (len < RX_COPY_THRESHOLD) {
#ifdef T4_PKT_TIMESTAMP
			/* Leave room for a timestamp */
			m0->m_data += 8;
#endif
			/* Small frame: copy out, cluster stays with the fl. */
			bcopy(sd->cl + fl->rx_offset, mtod(m0, caddr_t), len);
			m0->m_pkthdr.len = len;
			m0->m_len = len;
		} else {
			/* Large frame: reference the cluster in place. */
			m0->m_pkthdr.len = len;
			m0->m_len = len;
			m_extaddref(m0, sd->cl + fl->rx_offset,
			    roundup2(m0->m_len, fl_pad),
			    find_buf_refcnt(sd->cl), rxb_free,
			    FL_BUF_ZONE(sc, sd->tag_idx), sd->cl);
		}
		/* Advance past this frame, honoring pad + pack alignment. */
		fl->rx_offset += len;
		fl->rx_offset = roundup2(fl->rx_offset, fl_pad);
		fl->rx_offset = roundup2(fl->rx_offset, pack_boundary);
		if (fl->rx_offset >= FL_BUF_SIZE(sc, sd->tag_idx)) {
			/* Buffer exhausted; move to the next fl descriptor. */
			fl->rx_offset = 0;
			(*fl_bufs_used) += 1;
			if (__predict_false(++fl->cidx == fl->cap))
				fl->cidx = 0;
		}

		return (m0);
	}

	KASSERT(len_newbuf & F_RSPD_NEWBUF,
	    ("%s: only new buffer handled here", __func__));

	nbuf = 0;

	/*
	 * Move to the start of the next buffer if we are still in the middle of
	 * some buffer.  This is the case where there was some room left in the
	 * previous buffer but not enough to fit this frame in its entirety.
	 */
	if (fl->rx_offset > 0) {
		KASSERT(roundup2(len, fl_pad) > FL_BUF_SIZE(sc, sd->tag_idx) -
		    fl->rx_offset, ("%s: frame (%u bytes) should have fit at "
		    "cidx %u offset %u bufsize %u", __func__, len, fl->cidx,
		    fl->rx_offset, FL_BUF_SIZE(sc, sd->tag_idx)));
		nbuf++;
		fl->rx_offset = 0;
		sd++;
		if (__predict_false(++fl->cidx == fl->cap)) {
			sd = fl->sdesc;
			fl->cidx = 0;
		}
	}

	/* Head of the chain uses the mbuf embedded at the cluster start. */
	m0 = find_buf_mbuf(sd->cl);
	if (m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR | M_NOFREE))
		goto done;
	/*
	 * NOTE(review): if m_init above fails we jump to "done" and return a
	 * non-NULL m0 that has no ext buffer attached and whose fl buffer was
	 * not consumed -- confirm callers tolerate this path.
	 */
	bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, BUS_DMASYNC_POSTREAD);
	m0->m_len = min(len, FL_BUF_SIZE(sc, sd->tag_idx));
	m_extaddref(m0, sd->cl, roundup2(m0->m_len, fl_pad),
	    find_buf_refcnt(sd->cl), rxb_free, FL_BUF_ZONE(sc, sd->tag_idx),
	    sd->cl);
	m0->m_pkthdr.len = len;

	/* Leave rx_offset positioned for the next (packed) frame. */
	fl->rx_offset = roundup2(m0->m_len, fl_pad);
	fl->rx_offset = roundup2(fl->rx_offset, pack_boundary);
	if (fl->rx_offset >= FL_BUF_SIZE(sc, sd->tag_idx)) {
		fl->rx_offset = 0;
		nbuf++;
		sd++;
		if (__predict_false(++fl->cidx == fl->cap)) {
			sd = fl->sdesc;
			fl->cidx = 0;
		}
	}

	m = m0;
	len -= m->m_len;

	/* Chain one mbuf per additional fl buffer until the frame is done. */
	while (len > 0) {
		m->m_next = find_buf_mbuf(sd->cl);
		m = m->m_next;

		bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
		    BUS_DMASYNC_POSTREAD);

		/* m_init for !M_PKTHDR can't fail so don't bother */
		m_init(m, NULL, 0, M_NOWAIT, MT_DATA, M_NOFREE);
		m->m_len = min(len, FL_BUF_SIZE(sc, sd->tag_idx));
		m_extaddref(m, sd->cl, roundup2(m->m_len, fl_pad),
		    find_buf_refcnt(sd->cl), rxb_free,
		    FL_BUF_ZONE(sc, sd->tag_idx), sd->cl);

		fl->rx_offset = roundup2(m->m_len, fl_pad);
		fl->rx_offset = roundup2(fl->rx_offset, pack_boundary);
		if (fl->rx_offset >= FL_BUF_SIZE(sc, sd->tag_idx)) {
			fl->rx_offset = 0;
			nbuf++;
			sd++;
			if (__predict_false(++fl->cidx == fl->cap)) {
				sd = fl->sdesc;
				fl->cidx = 0;
			}
		}

		len -= m->m_len;
	}
done:
	(*fl_bufs_used) += nbuf;
	return (m0);
}
15601458bff9SNavdeep Parhar 
/*
 * Assemble the payload of one rx frame from a freelist that is NOT in
 * buffer packing mode: every frame starts at the beginning of a fresh fl
 * buffer (F_RSPD_NEWBUF must be set) and may span several buffers.  The
 * number of fl buffers consumed is added to *fl_bufs_used.
 *
 * Returns the assembled mbuf (chain) or NULL on mbuf allocation failure
 * (in which case no fl buffer is consumed).
 */
static struct mbuf *
get_fl_payload2(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf,
    int *fl_bufs_used)
{
	struct mbuf *m0, *m;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	unsigned int nbuf, len;

	/*
	 * No assertion for the fl lock because we don't need it.  This routine
	 * is called only from the rx interrupt handler and it only updates
	 * fl->cidx.  (Contrast that with fl->pidx/fl->needed which could be
	 * updated in the rx interrupt handler or the starvation helper routine.
	 * That's why code that manipulates fl->pidx/fl->needed needs the fl
	 * lock but this routine does not).
	 */

	KASSERT((fl->flags & FL_BUF_PACKING) == 0,
	    ("%s: buffer packing enabled for fl %p", __func__, fl));
	if (__predict_false((len_newbuf & F_RSPD_NEWBUF) == 0))
		panic("%s: cannot handle packed frames", __func__);
	len = G_RSPD_LEN(len_newbuf);

	/*
	 * We never want to run out of mbufs in between a frame when a frame
	 * spans multiple fl buffers.  If the fl's mbuf stash isn't full and
	 * can't be filled up to the brim then fail early.
	 */
	if (len > FL_BUF_SIZE(sc, sd->tag_idx) && fill_mbuf_stash(fl) != 0)
		return (NULL);

	m0 = get_mbuf_from_stash(fl);
	if (m0 == NULL ||
	    m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR) != 0) {
		/* Put back whatever we got; frame stays on the fl. */
		return_mbuf_to_stash(fl, m0);
		return (NULL);
	}

	bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, BUS_DMASYNC_POSTREAD);

	if (len < RX_COPY_THRESHOLD) {
#ifdef T4_PKT_TIMESTAMP
		/* Leave room for a timestamp */
		m0->m_data += 8;
#endif
		/* copy data to mbuf, buffer will be recycled */
		bcopy(sd->cl, mtod(m0, caddr_t), len);
		m0->m_len = len;
	} else {
		/* Large frame: hand the whole cluster over to the mbuf. */
		bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
		m_cljset(m0, sd->cl, FL_BUF_TYPE(sc, sd->tag_idx));
		sd->cl = NULL;	/* consumed */
		m0->m_len = min(len, FL_BUF_SIZE(sc, sd->tag_idx));
	}
	m0->m_pkthdr.len = len;

	/* Advance the freelist consumer index (wraps at fl->cap). */
	sd++;
	if (__predict_false(++fl->cidx == fl->cap)) {
		sd = fl->sdesc;
		fl->cidx = 0;
	}

	m = m0;
	len -= m->m_len;
	nbuf = 1;	/* # of fl buffers used */

	/* Chain further mbufs while the frame spans more fl buffers. */
	while (len > 0) {
		/* Can't fail, we checked earlier that the stash was full. */
		m->m_next = get_mbuf_from_stash(fl);
		m = m->m_next;

		bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
		    BUS_DMASYNC_POSTREAD);

		/* m_init for !M_PKTHDR can't fail so don't bother */
		m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
		if (len <= MLEN) {
			/* Tail fits in the mbuf itself: copy and recycle. */
			bcopy(sd->cl, mtod(m, caddr_t), len);
			m->m_len = len;
		} else {
			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
			m_cljset(m, sd->cl, FL_BUF_TYPE(sc, sd->tag_idx));
			sd->cl = NULL;	/* consumed */
			m->m_len = min(len, FL_BUF_SIZE(sc, sd->tag_idx));
		}

		sd++;
		if (__predict_false(++fl->cidx == fl->cap)) {
			sd = fl->sdesc;
			fl->cidx = 0;
		}

		len -= m->m_len;
		nbuf++;
	}

	(*fl_bufs_used) += nbuf;

	return (m0);
}
1661733b9277SNavdeep Parhar 
/*
 * CPL_RX_PKT handler: fix up a received Ethernet frame's mbuf (checksum
 * offload results, VLAN tag, flow id) and hand it to the stack, or queue it
 * for LRO when eligible.  m0 is the payload assembled from the freelist;
 * cpl (immediately after the rss header) carries the chip's parse results.
 *
 * Always returns 0.
 */
static int
t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
{
	struct sge_rxq *rxq = iq_to_rxq(iq);
	struct ifnet *ifp = rxq->ifp;
	const struct cpl_rx_pkt *cpl = (const void *)(rss + 1);
#if defined(INET) || defined(INET6)
	struct lro_ctrl *lro = &rxq->lro;
#endif

	KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	/* Trim the fl_pktshift bytes preceding the Ethernet frame. */
	m0->m_pkthdr.len -= fl_pktshift;
	m0->m_len -= fl_pktshift;
	m0->m_data += fl_pktshift;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_flags |= M_FLOWID;
	m0->m_pkthdr.flowid = rss->hash_val;	/* RSS hash as the flow id */

	/* Propagate hardware checksum verification to the mbuf, if enabled. */
	if (cpl->csum_calc && !cpl->err_vec) {
		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    cpl->l2info & htobe32(F_RXF_IP)) {
			m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			rxq->rxcsum++;
		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
		    cpl->l2info & htobe32(F_RXF_IP6)) {
			m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
			    CSUM_PSEUDO_HDR);
			rxq->rxcsum++;
		}

		/*
		 * For IP fragments pass up the partial checksum as computed;
		 * otherwise 0xffff signals a fully verified checksum.
		 */
		if (__predict_false(cpl->ip_frag))
			m0->m_pkthdr.csum_data = be16toh(cpl->csum);
		else
			m0->m_pkthdr.csum_data = 0xffff;
	}

	/* Hardware stripped the VLAN tag; record it in the mbuf header. */
	if (cpl->vlan_ex) {
		m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
		m0->m_flags |= M_VLANTAG;
		rxq->vlan_extraction++;
	}

#if defined(INET) || defined(INET6)
	if (cpl->l2info & htobe32(F_RXF_LRO) &&
	    iq->flags & IQ_LRO_ENABLED &&
	    tcp_lro_rx(lro, m0, 0) == 0) {
		/* queued for LRO */
	} else
#endif
	ifp->if_input(ifp, m0);

	return (0);
}
171954e4ee71SNavdeep Parhar 
1720733b9277SNavdeep Parhar /*
1721733b9277SNavdeep Parhar  * Doesn't fail.  Holds on to work requests it can't send right away.
1722733b9277SNavdeep Parhar  */
172309fe6320SNavdeep Parhar void
172409fe6320SNavdeep Parhar t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
1725733b9277SNavdeep Parhar {
1726733b9277SNavdeep Parhar 	struct sge_eq *eq = &wrq->eq;
1727733b9277SNavdeep Parhar 	int can_reclaim;
1728733b9277SNavdeep Parhar 	caddr_t dst;
1729733b9277SNavdeep Parhar 
1730733b9277SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(wrq);
173109fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1732733b9277SNavdeep Parhar 	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_OFLD ||
1733733b9277SNavdeep Parhar 	    (eq->flags & EQ_TYPEMASK) == EQ_CTRL,
1734733b9277SNavdeep Parhar 	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
173509fe6320SNavdeep Parhar #else
173609fe6320SNavdeep Parhar 	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_CTRL,
173709fe6320SNavdeep Parhar 	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
173809fe6320SNavdeep Parhar #endif
1739733b9277SNavdeep Parhar 
174009fe6320SNavdeep Parhar 	if (__predict_true(wr != NULL))
174109fe6320SNavdeep Parhar 		STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);
1742733b9277SNavdeep Parhar 
1743733b9277SNavdeep Parhar 	can_reclaim = reclaimable(eq);
1744733b9277SNavdeep Parhar 	if (__predict_false(eq->flags & EQ_STALLED)) {
1745733b9277SNavdeep Parhar 		if (can_reclaim < tx_resume_threshold(eq))
174609fe6320SNavdeep Parhar 			return;
1747733b9277SNavdeep Parhar 		eq->flags &= ~EQ_STALLED;
1748733b9277SNavdeep Parhar 		eq->unstalled++;
1749733b9277SNavdeep Parhar 	}
1750733b9277SNavdeep Parhar 	eq->cidx += can_reclaim;
1751733b9277SNavdeep Parhar 	eq->avail += can_reclaim;
1752733b9277SNavdeep Parhar 	if (__predict_false(eq->cidx >= eq->cap))
1753733b9277SNavdeep Parhar 		eq->cidx -= eq->cap;
1754733b9277SNavdeep Parhar 
175509fe6320SNavdeep Parhar 	while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) {
1756733b9277SNavdeep Parhar 		int ndesc;
1757733b9277SNavdeep Parhar 
175809fe6320SNavdeep Parhar 		if (__predict_false(wr->wr_len < 0 ||
175909fe6320SNavdeep Parhar 		    wr->wr_len > SGE_MAX_WR_LEN || (wr->wr_len & 0x7))) {
1760733b9277SNavdeep Parhar 
1761733b9277SNavdeep Parhar #ifdef INVARIANTS
176209fe6320SNavdeep Parhar 			panic("%s: work request with length %d", __func__,
176309fe6320SNavdeep Parhar 			    wr->wr_len);
1764733b9277SNavdeep Parhar #endif
176509fe6320SNavdeep Parhar #ifdef KDB
176609fe6320SNavdeep Parhar 			kdb_backtrace();
176709fe6320SNavdeep Parhar #endif
176809fe6320SNavdeep Parhar 			log(LOG_ERR, "%s: %s work request with length %d",
176909fe6320SNavdeep Parhar 			    device_get_nameunit(sc->dev), __func__, wr->wr_len);
177009fe6320SNavdeep Parhar 			STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
177109fe6320SNavdeep Parhar 			free_wrqe(wr);
177209fe6320SNavdeep Parhar 			continue;
1773733b9277SNavdeep Parhar 		}
1774733b9277SNavdeep Parhar 
177509fe6320SNavdeep Parhar 		ndesc = howmany(wr->wr_len, EQ_ESIZE);
1776733b9277SNavdeep Parhar 		if (eq->avail < ndesc) {
1777733b9277SNavdeep Parhar 			wrq->no_desc++;
1778733b9277SNavdeep Parhar 			break;
1779733b9277SNavdeep Parhar 		}
1780733b9277SNavdeep Parhar 
1781733b9277SNavdeep Parhar 		dst = (void *)&eq->desc[eq->pidx];
178209fe6320SNavdeep Parhar 		copy_to_txd(eq, wrtod(wr), &dst, wr->wr_len);
1783733b9277SNavdeep Parhar 
1784733b9277SNavdeep Parhar 		eq->pidx += ndesc;
1785733b9277SNavdeep Parhar 		eq->avail -= ndesc;
1786733b9277SNavdeep Parhar 		if (__predict_false(eq->pidx >= eq->cap))
1787733b9277SNavdeep Parhar 			eq->pidx -= eq->cap;
1788733b9277SNavdeep Parhar 
1789733b9277SNavdeep Parhar 		eq->pending += ndesc;
17907e2fb22fSNavdeep Parhar 		if (eq->pending >= 8)
1791733b9277SNavdeep Parhar 			ring_eq_db(sc, eq);
1792733b9277SNavdeep Parhar 
1793733b9277SNavdeep Parhar 		wrq->tx_wrs++;
179409fe6320SNavdeep Parhar 		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
179509fe6320SNavdeep Parhar 		free_wrqe(wr);
1796733b9277SNavdeep Parhar 
1797733b9277SNavdeep Parhar 		if (eq->avail < 8) {
1798733b9277SNavdeep Parhar 			can_reclaim = reclaimable(eq);
1799733b9277SNavdeep Parhar 			eq->cidx += can_reclaim;
1800733b9277SNavdeep Parhar 			eq->avail += can_reclaim;
1801733b9277SNavdeep Parhar 			if (__predict_false(eq->cidx >= eq->cap))
1802733b9277SNavdeep Parhar 				eq->cidx -= eq->cap;
1803733b9277SNavdeep Parhar 		}
1804733b9277SNavdeep Parhar 	}
1805733b9277SNavdeep Parhar 
1806733b9277SNavdeep Parhar 	if (eq->pending)
1807733b9277SNavdeep Parhar 		ring_eq_db(sc, eq);
1808733b9277SNavdeep Parhar 
180909fe6320SNavdeep Parhar 	if (wr != NULL) {
1810733b9277SNavdeep Parhar 		eq->flags |= EQ_STALLED;
1811733b9277SNavdeep Parhar 		if (callout_pending(&eq->tx_callout) == 0)
1812733b9277SNavdeep Parhar 			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1813733b9277SNavdeep Parhar 	}
1814f7dfe243SNavdeep Parhar }
1815f7dfe243SNavdeep Parhar 
181654e4ee71SNavdeep Parhar /* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
181754e4ee71SNavdeep Parhar #define TXPKTS_PKT_HDR ((\
181854e4ee71SNavdeep Parhar     sizeof(struct ulp_txpkt) + \
181954e4ee71SNavdeep Parhar     sizeof(struct ulptx_idata) + \
182054e4ee71SNavdeep Parhar     sizeof(struct cpl_tx_pkt_core) \
182154e4ee71SNavdeep Parhar     ) / 8)
182254e4ee71SNavdeep Parhar 
182354e4ee71SNavdeep Parhar /* Header of a coalesced tx WR, before SGL of first packet (in flits) */
182454e4ee71SNavdeep Parhar #define TXPKTS_WR_HDR (\
182554e4ee71SNavdeep Parhar     sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
182654e4ee71SNavdeep Parhar     TXPKTS_PKT_HDR)
182754e4ee71SNavdeep Parhar 
182854e4ee71SNavdeep Parhar /* Header of a tx WR, before SGL of first packet (in flits) */
182954e4ee71SNavdeep Parhar #define TXPKT_WR_HDR ((\
183054e4ee71SNavdeep Parhar     sizeof(struct fw_eth_tx_pkt_wr) + \
183154e4ee71SNavdeep Parhar     sizeof(struct cpl_tx_pkt_core) \
183254e4ee71SNavdeep Parhar     ) / 8 )
183354e4ee71SNavdeep Parhar 
183454e4ee71SNavdeep Parhar /* Header of a tx LSO WR, before SGL of first packet (in flits) */
183554e4ee71SNavdeep Parhar #define TXPKT_LSO_WR_HDR ((\
183654e4ee71SNavdeep Parhar     sizeof(struct fw_eth_tx_pkt_wr) + \
18372a5f6b0eSNavdeep Parhar     sizeof(struct cpl_tx_pkt_lso_core) + \
183854e4ee71SNavdeep Parhar     sizeof(struct cpl_tx_pkt_core) \
183954e4ee71SNavdeep Parhar     ) / 8 )
184054e4ee71SNavdeep Parhar 
184154e4ee71SNavdeep Parhar int
184254e4ee71SNavdeep Parhar t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
184354e4ee71SNavdeep Parhar {
184454e4ee71SNavdeep Parhar 	struct port_info *pi = (void *)ifp->if_softc;
184554e4ee71SNavdeep Parhar 	struct adapter *sc = pi->adapter;
184654e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
1847f7dfe243SNavdeep Parhar 	struct buf_ring *br = txq->br;
184854e4ee71SNavdeep Parhar 	struct mbuf *next;
1849e874ff7aSNavdeep Parhar 	int rc, coalescing, can_reclaim;
185054e4ee71SNavdeep Parhar 	struct txpkts txpkts;
185154e4ee71SNavdeep Parhar 	struct sgl sgl;
185254e4ee71SNavdeep Parhar 
185354e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
185454e4ee71SNavdeep Parhar 	KASSERT(m, ("%s: called with nothing to do.", __func__));
1855733b9277SNavdeep Parhar 	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_ETH,
1856733b9277SNavdeep Parhar 	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
185754e4ee71SNavdeep Parhar 
1858e874ff7aSNavdeep Parhar 	prefetch(&eq->desc[eq->pidx]);
1859f7dfe243SNavdeep Parhar 	prefetch(&txq->sdesc[eq->pidx]);
1860e874ff7aSNavdeep Parhar 
186154e4ee71SNavdeep Parhar 	txpkts.npkt = 0;/* indicates there's nothing in txpkts */
186254e4ee71SNavdeep Parhar 	coalescing = 0;
186354e4ee71SNavdeep Parhar 
1864733b9277SNavdeep Parhar 	can_reclaim = reclaimable(eq);
1865733b9277SNavdeep Parhar 	if (__predict_false(eq->flags & EQ_STALLED)) {
1866733b9277SNavdeep Parhar 		if (can_reclaim < tx_resume_threshold(eq)) {
1867733b9277SNavdeep Parhar 			txq->m = m;
1868733b9277SNavdeep Parhar 			return (0);
1869733b9277SNavdeep Parhar 		}
1870733b9277SNavdeep Parhar 		eq->flags &= ~EQ_STALLED;
1871733b9277SNavdeep Parhar 		eq->unstalled++;
1872733b9277SNavdeep Parhar 	}
1873733b9277SNavdeep Parhar 
1874733b9277SNavdeep Parhar 	if (__predict_false(eq->flags & EQ_DOOMED)) {
1875733b9277SNavdeep Parhar 		m_freem(m);
1876733b9277SNavdeep Parhar 		while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1877733b9277SNavdeep Parhar 			m_freem(m);
1878733b9277SNavdeep Parhar 		return (ENETDOWN);
1879733b9277SNavdeep Parhar 	}
1880733b9277SNavdeep Parhar 
1881733b9277SNavdeep Parhar 	if (eq->avail < 8 && can_reclaim)
1882733b9277SNavdeep Parhar 		reclaim_tx_descs(txq, can_reclaim, 32);
188354e4ee71SNavdeep Parhar 
188454e4ee71SNavdeep Parhar 	for (; m; m = next ? next : drbr_dequeue(ifp, br)) {
188554e4ee71SNavdeep Parhar 
188654e4ee71SNavdeep Parhar 		if (eq->avail < 8)
188754e4ee71SNavdeep Parhar 			break;
188854e4ee71SNavdeep Parhar 
188954e4ee71SNavdeep Parhar 		next = m->m_nextpkt;
189054e4ee71SNavdeep Parhar 		m->m_nextpkt = NULL;
189154e4ee71SNavdeep Parhar 
189254e4ee71SNavdeep Parhar 		if (next || buf_ring_peek(br))
189354e4ee71SNavdeep Parhar 			coalescing = 1;
189454e4ee71SNavdeep Parhar 
189554e4ee71SNavdeep Parhar 		rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
189654e4ee71SNavdeep Parhar 		if (rc != 0) {
189754e4ee71SNavdeep Parhar 			if (rc == ENOMEM) {
189854e4ee71SNavdeep Parhar 
189954e4ee71SNavdeep Parhar 				/* Short of resources, suspend tx */
190054e4ee71SNavdeep Parhar 
190154e4ee71SNavdeep Parhar 				m->m_nextpkt = next;
190254e4ee71SNavdeep Parhar 				break;
190354e4ee71SNavdeep Parhar 			}
190454e4ee71SNavdeep Parhar 
190554e4ee71SNavdeep Parhar 			/*
190654e4ee71SNavdeep Parhar 			 * Unrecoverable error for this packet, throw it away
190754e4ee71SNavdeep Parhar 			 * and move on to the next.  get_pkt_sgl may already
190854e4ee71SNavdeep Parhar 			 * have freed m (it will be NULL in that case and the
190954e4ee71SNavdeep Parhar 			 * m_freem here is still safe).
191054e4ee71SNavdeep Parhar 			 */
191154e4ee71SNavdeep Parhar 
191254e4ee71SNavdeep Parhar 			m_freem(m);
191354e4ee71SNavdeep Parhar 			continue;
191454e4ee71SNavdeep Parhar 		}
191554e4ee71SNavdeep Parhar 
191654e4ee71SNavdeep Parhar 		if (coalescing &&
191754e4ee71SNavdeep Parhar 		    add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {
191854e4ee71SNavdeep Parhar 
191954e4ee71SNavdeep Parhar 			/* Successfully absorbed into txpkts */
192054e4ee71SNavdeep Parhar 
192154e4ee71SNavdeep Parhar 			write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
192254e4ee71SNavdeep Parhar 			goto doorbell;
192354e4ee71SNavdeep Parhar 		}
192454e4ee71SNavdeep Parhar 
192554e4ee71SNavdeep Parhar 		/*
192654e4ee71SNavdeep Parhar 		 * We weren't coalescing to begin with, or current frame could
192754e4ee71SNavdeep Parhar 		 * not be coalesced (add_to_txpkts flushes txpkts if a frame
192854e4ee71SNavdeep Parhar 		 * given to it can't be coalesced).  Either way there should be
192954e4ee71SNavdeep Parhar 		 * nothing in txpkts.
193054e4ee71SNavdeep Parhar 		 */
193154e4ee71SNavdeep Parhar 		KASSERT(txpkts.npkt == 0,
193254e4ee71SNavdeep Parhar 		    ("%s: txpkts not empty: %d", __func__, txpkts.npkt));
193354e4ee71SNavdeep Parhar 
193454e4ee71SNavdeep Parhar 		/* We're sending out individual packets now */
193554e4ee71SNavdeep Parhar 		coalescing = 0;
193654e4ee71SNavdeep Parhar 
193754e4ee71SNavdeep Parhar 		if (eq->avail < 8)
1938f7dfe243SNavdeep Parhar 			reclaim_tx_descs(txq, 0, 8);
193954e4ee71SNavdeep Parhar 		rc = write_txpkt_wr(pi, txq, m, &sgl);
194054e4ee71SNavdeep Parhar 		if (rc != 0) {
194154e4ee71SNavdeep Parhar 
194254e4ee71SNavdeep Parhar 			/* Short of hardware descriptors, suspend tx */
194354e4ee71SNavdeep Parhar 
194454e4ee71SNavdeep Parhar 			/*
194554e4ee71SNavdeep Parhar 			 * This is an unlikely but expensive failure.  We've
194654e4ee71SNavdeep Parhar 			 * done all the hard work (DMA mappings etc.) and now we
194754e4ee71SNavdeep Parhar 			 * can't send out the packet.  What's worse, we have to
194854e4ee71SNavdeep Parhar 			 * spend even more time freeing up everything in sgl.
194954e4ee71SNavdeep Parhar 			 */
195054e4ee71SNavdeep Parhar 			txq->no_desc++;
195154e4ee71SNavdeep Parhar 			free_pkt_sgl(txq, &sgl);
195254e4ee71SNavdeep Parhar 
195354e4ee71SNavdeep Parhar 			m->m_nextpkt = next;
195454e4ee71SNavdeep Parhar 			break;
195554e4ee71SNavdeep Parhar 		}
195654e4ee71SNavdeep Parhar 
195754e4ee71SNavdeep Parhar 		ETHER_BPF_MTAP(ifp, m);
195854e4ee71SNavdeep Parhar 		if (sgl.nsegs == 0)
195954e4ee71SNavdeep Parhar 			m_freem(m);
196054e4ee71SNavdeep Parhar doorbell:
19617e2fb22fSNavdeep Parhar 		if (eq->pending >= 8)
1962f7dfe243SNavdeep Parhar 			ring_eq_db(sc, eq);
1963e874ff7aSNavdeep Parhar 
1964e874ff7aSNavdeep Parhar 		can_reclaim = reclaimable(eq);
1965e874ff7aSNavdeep Parhar 		if (can_reclaim >= 32)
1966733b9277SNavdeep Parhar 			reclaim_tx_descs(txq, can_reclaim, 64);
196754e4ee71SNavdeep Parhar 	}
196854e4ee71SNavdeep Parhar 
196954e4ee71SNavdeep Parhar 	if (txpkts.npkt > 0)
197054e4ee71SNavdeep Parhar 		write_txpkts_wr(txq, &txpkts);
197154e4ee71SNavdeep Parhar 
197254e4ee71SNavdeep Parhar 	/*
197354e4ee71SNavdeep Parhar 	 * m not NULL means there was an error but we haven't thrown it away.
197454e4ee71SNavdeep Parhar 	 * This can happen when we're short of tx descriptors (no_desc) or maybe
197554e4ee71SNavdeep Parhar 	 * even DMA maps (no_dmamap).  Either way, a credit flush and reclaim
197654e4ee71SNavdeep Parhar 	 * will get things going again.
197754e4ee71SNavdeep Parhar 	 */
1978733b9277SNavdeep Parhar 	if (m && !(eq->flags & EQ_CRFLUSHED)) {
1979f7dfe243SNavdeep Parhar 		struct tx_sdesc *txsd = &txq->sdesc[eq->pidx];
1980f7dfe243SNavdeep Parhar 
1981733b9277SNavdeep Parhar 		/*
1982733b9277SNavdeep Parhar 		 * If EQ_CRFLUSHED is not set then we know we have at least one
1983733b9277SNavdeep Parhar 		 * available descriptor because any WR that reduces eq->avail to
1984733b9277SNavdeep Parhar 		 * 0 also sets EQ_CRFLUSHED.
1985733b9277SNavdeep Parhar 		 */
1986733b9277SNavdeep Parhar 		KASSERT(eq->avail > 0, ("%s: no space for eqflush.", __func__));
1987733b9277SNavdeep Parhar 
1988f7dfe243SNavdeep Parhar 		txsd->desc_used = 1;
1989f7dfe243SNavdeep Parhar 		txsd->credits = 0;
199054e4ee71SNavdeep Parhar 		write_eqflush_wr(eq);
1991f7dfe243SNavdeep Parhar 	}
199254e4ee71SNavdeep Parhar 	txq->m = m;
199354e4ee71SNavdeep Parhar 
199454e4ee71SNavdeep Parhar 	if (eq->pending)
1995f7dfe243SNavdeep Parhar 		ring_eq_db(sc, eq);
199654e4ee71SNavdeep Parhar 
1997733b9277SNavdeep Parhar 	reclaim_tx_descs(txq, 0, 128);
1998733b9277SNavdeep Parhar 
1999733b9277SNavdeep Parhar 	if (eq->flags & EQ_STALLED && callout_pending(&eq->tx_callout) == 0)
2000733b9277SNavdeep Parhar 		callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
200154e4ee71SNavdeep Parhar 
200254e4ee71SNavdeep Parhar 	return (0);
200354e4ee71SNavdeep Parhar }
200454e4ee71SNavdeep Parhar 
200554e4ee71SNavdeep Parhar void
200654e4ee71SNavdeep Parhar t4_update_fl_bufsize(struct ifnet *ifp)
200754e4ee71SNavdeep Parhar {
200854e4ee71SNavdeep Parhar 	struct port_info *pi = ifp->if_softc;
20091458bff9SNavdeep Parhar 	struct adapter *sc = pi->adapter;
201054e4ee71SNavdeep Parhar 	struct sge_rxq *rxq;
20116eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD
20126eb3180fSNavdeep Parhar 	struct sge_ofld_rxq *ofld_rxq;
20136eb3180fSNavdeep Parhar #endif
201454e4ee71SNavdeep Parhar 	struct sge_fl *fl;
20156eb3180fSNavdeep Parhar 	int i, bufsize;
201654e4ee71SNavdeep Parhar 
20176eb3180fSNavdeep Parhar 	bufsize = mtu_to_bufsize(ifp->if_mtu);
201854e4ee71SNavdeep Parhar 	for_each_rxq(pi, i, rxq) {
201954e4ee71SNavdeep Parhar 		fl = &rxq->fl;
202054e4ee71SNavdeep Parhar 
202154e4ee71SNavdeep Parhar 		FL_LOCK(fl);
20221458bff9SNavdeep Parhar 		set_fl_tag_idx(sc, fl, bufsize);
202354e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
202454e4ee71SNavdeep Parhar 	}
20256eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD
20266eb3180fSNavdeep Parhar 	bufsize = mtu_to_bufsize_toe(pi->adapter, ifp->if_mtu);
20276eb3180fSNavdeep Parhar 	for_each_ofld_rxq(pi, i, ofld_rxq) {
20286eb3180fSNavdeep Parhar 		fl = &ofld_rxq->fl;
20296eb3180fSNavdeep Parhar 
20306eb3180fSNavdeep Parhar 		FL_LOCK(fl);
20311458bff9SNavdeep Parhar 		set_fl_tag_idx(sc, fl, bufsize);
20326eb3180fSNavdeep Parhar 		FL_UNLOCK(fl);
20336eb3180fSNavdeep Parhar 	}
20346eb3180fSNavdeep Parhar #endif
203554e4ee71SNavdeep Parhar }
203654e4ee71SNavdeep Parhar 
2037733b9277SNavdeep Parhar int
2038733b9277SNavdeep Parhar can_resume_tx(struct sge_eq *eq)
2039733b9277SNavdeep Parhar {
2040733b9277SNavdeep Parhar 	return (reclaimable(eq) >= tx_resume_threshold(eq));
2041733b9277SNavdeep Parhar }
2042733b9277SNavdeep Parhar 
204354e4ee71SNavdeep Parhar static inline void
204454e4ee71SNavdeep Parhar init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
20455323ca8fSNavdeep Parhar     int qsize, int esize)
204654e4ee71SNavdeep Parhar {
204754e4ee71SNavdeep Parhar 	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
204854e4ee71SNavdeep Parhar 	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
204954e4ee71SNavdeep Parhar 	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
205054e4ee71SNavdeep Parhar 	    ("%s: bad pktc_idx %d", __func__, pktc_idx));
205154e4ee71SNavdeep Parhar 
205254e4ee71SNavdeep Parhar 	iq->flags = 0;
205354e4ee71SNavdeep Parhar 	iq->adapter = sc;
20547a32954cSNavdeep Parhar 	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
20557a32954cSNavdeep Parhar 	iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
20567a32954cSNavdeep Parhar 	if (pktc_idx >= 0) {
20577a32954cSNavdeep Parhar 		iq->intr_params |= F_QINTR_CNT_EN;
205854e4ee71SNavdeep Parhar 		iq->intr_pktc_idx = pktc_idx;
20597a32954cSNavdeep Parhar 	}
2060d14b0ac1SNavdeep Parhar 	iq->qsize = roundup2(qsize, 16);	/* See FW_IQ_CMD/iqsize */
206154e4ee71SNavdeep Parhar 	iq->esize = max(esize, 16);		/* See FW_IQ_CMD/iqesize */
206254e4ee71SNavdeep Parhar }
206354e4ee71SNavdeep Parhar 
206454e4ee71SNavdeep Parhar static inline void
20651458bff9SNavdeep Parhar init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int bufsize, int pack,
20661458bff9SNavdeep Parhar     char *name)
206754e4ee71SNavdeep Parhar {
20681458bff9SNavdeep Parhar 
206954e4ee71SNavdeep Parhar 	fl->qsize = qsize;
207054e4ee71SNavdeep Parhar 	strlcpy(fl->lockname, name, sizeof(fl->lockname));
20711458bff9SNavdeep Parhar 	if (pack)
20721458bff9SNavdeep Parhar 		fl->flags |= FL_BUF_PACKING;
20731458bff9SNavdeep Parhar 	set_fl_tag_idx(sc, fl, bufsize);
207454e4ee71SNavdeep Parhar }
207554e4ee71SNavdeep Parhar 
207654e4ee71SNavdeep Parhar static inline void
2077733b9277SNavdeep Parhar init_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan,
2078733b9277SNavdeep Parhar     uint16_t iqid, char *name)
207954e4ee71SNavdeep Parhar {
2080733b9277SNavdeep Parhar 	KASSERT(tx_chan < NCHAN, ("%s: bad tx channel %d", __func__, tx_chan));
2081733b9277SNavdeep Parhar 	KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype));
2082733b9277SNavdeep Parhar 
2083733b9277SNavdeep Parhar 	eq->flags = eqtype & EQ_TYPEMASK;
2084733b9277SNavdeep Parhar 	eq->tx_chan = tx_chan;
2085733b9277SNavdeep Parhar 	eq->iqid = iqid;
2086f7dfe243SNavdeep Parhar 	eq->qsize = qsize;
2087f7dfe243SNavdeep Parhar 	strlcpy(eq->lockname, name, sizeof(eq->lockname));
2088733b9277SNavdeep Parhar 
2089733b9277SNavdeep Parhar 	TASK_INIT(&eq->tx_task, 0, t4_tx_task, eq);
2090733b9277SNavdeep Parhar 	callout_init(&eq->tx_callout, CALLOUT_MPSAFE);
209154e4ee71SNavdeep Parhar }
209254e4ee71SNavdeep Parhar 
209354e4ee71SNavdeep Parhar static int
209454e4ee71SNavdeep Parhar alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
209554e4ee71SNavdeep Parhar     bus_dmamap_t *map, bus_addr_t *pa, void **va)
209654e4ee71SNavdeep Parhar {
209754e4ee71SNavdeep Parhar 	int rc;
209854e4ee71SNavdeep Parhar 
209954e4ee71SNavdeep Parhar 	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
210054e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
210154e4ee71SNavdeep Parhar 	if (rc != 0) {
210254e4ee71SNavdeep Parhar 		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
210354e4ee71SNavdeep Parhar 		goto done;
210454e4ee71SNavdeep Parhar 	}
210554e4ee71SNavdeep Parhar 
210654e4ee71SNavdeep Parhar 	rc = bus_dmamem_alloc(*tag, va,
210754e4ee71SNavdeep Parhar 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
210854e4ee71SNavdeep Parhar 	if (rc != 0) {
210954e4ee71SNavdeep Parhar 		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
211054e4ee71SNavdeep Parhar 		goto done;
211154e4ee71SNavdeep Parhar 	}
211254e4ee71SNavdeep Parhar 
211354e4ee71SNavdeep Parhar 	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
211454e4ee71SNavdeep Parhar 	if (rc != 0) {
211554e4ee71SNavdeep Parhar 		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
211654e4ee71SNavdeep Parhar 		goto done;
211754e4ee71SNavdeep Parhar 	}
211854e4ee71SNavdeep Parhar done:
211954e4ee71SNavdeep Parhar 	if (rc)
212054e4ee71SNavdeep Parhar 		free_ring(sc, *tag, *map, *pa, *va);
212154e4ee71SNavdeep Parhar 
212254e4ee71SNavdeep Parhar 	return (rc);
212354e4ee71SNavdeep Parhar }
212454e4ee71SNavdeep Parhar 
212554e4ee71SNavdeep Parhar static int
212654e4ee71SNavdeep Parhar free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
212754e4ee71SNavdeep Parhar     bus_addr_t pa, void *va)
212854e4ee71SNavdeep Parhar {
212954e4ee71SNavdeep Parhar 	if (pa)
213054e4ee71SNavdeep Parhar 		bus_dmamap_unload(tag, map);
213154e4ee71SNavdeep Parhar 	if (va)
213254e4ee71SNavdeep Parhar 		bus_dmamem_free(tag, va, map);
213354e4ee71SNavdeep Parhar 	if (tag)
213454e4ee71SNavdeep Parhar 		bus_dma_tag_destroy(tag);
213554e4ee71SNavdeep Parhar 
213654e4ee71SNavdeep Parhar 	return (0);
213754e4ee71SNavdeep Parhar }
213854e4ee71SNavdeep Parhar 
213954e4ee71SNavdeep Parhar /*
214054e4ee71SNavdeep Parhar  * Allocates the ring for an ingress queue and an optional freelist.  If the
214154e4ee71SNavdeep Parhar  * freelist is specified it will be allocated and then associated with the
214254e4ee71SNavdeep Parhar  * ingress queue.
214354e4ee71SNavdeep Parhar  *
214454e4ee71SNavdeep Parhar  * Returns errno on failure.  Resources allocated up to that point may still be
214554e4ee71SNavdeep Parhar  * allocated.  Caller is responsible for cleanup in case this function fails.
214654e4ee71SNavdeep Parhar  *
2147733b9277SNavdeep Parhar  * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then
214854e4ee71SNavdeep Parhar  * the intr_idx specifies the vector, starting from 0.  Otherwise it specifies
2149733b9277SNavdeep Parhar  * the abs_id of the ingress queue to which its interrupts should be forwarded.
215054e4ee71SNavdeep Parhar  */
215154e4ee71SNavdeep Parhar static int
215254e4ee71SNavdeep Parhar alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
2153bc14b14dSNavdeep Parhar     int intr_idx, int cong)
215454e4ee71SNavdeep Parhar {
215554e4ee71SNavdeep Parhar 	int rc, i, cntxt_id;
215654e4ee71SNavdeep Parhar 	size_t len;
215754e4ee71SNavdeep Parhar 	struct fw_iq_cmd c;
215854e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
215954e4ee71SNavdeep Parhar 	__be32 v = 0;
216054e4ee71SNavdeep Parhar 
216154e4ee71SNavdeep Parhar 	len = iq->qsize * iq->esize;
216254e4ee71SNavdeep Parhar 	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
216354e4ee71SNavdeep Parhar 	    (void **)&iq->desc);
216454e4ee71SNavdeep Parhar 	if (rc != 0)
216554e4ee71SNavdeep Parhar 		return (rc);
216654e4ee71SNavdeep Parhar 
216754e4ee71SNavdeep Parhar 	bzero(&c, sizeof(c));
216854e4ee71SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
216954e4ee71SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
217054e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_VFN(0));
217154e4ee71SNavdeep Parhar 
217254e4ee71SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
217354e4ee71SNavdeep Parhar 	    FW_LEN16(c));
217454e4ee71SNavdeep Parhar 
217554e4ee71SNavdeep Parhar 	/* Special handling for firmware event queue */
217654e4ee71SNavdeep Parhar 	if (iq == &sc->sge.fwq)
217754e4ee71SNavdeep Parhar 		v |= F_FW_IQ_CMD_IQASYNCH;
217854e4ee71SNavdeep Parhar 
2179733b9277SNavdeep Parhar 	if (iq->flags & IQ_INTR) {
218054e4ee71SNavdeep Parhar 		KASSERT(intr_idx < sc->intr_count,
218154e4ee71SNavdeep Parhar 		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
2182733b9277SNavdeep Parhar 	} else
2183733b9277SNavdeep Parhar 		v |= F_FW_IQ_CMD_IQANDST;
218454e4ee71SNavdeep Parhar 	v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
218554e4ee71SNavdeep Parhar 
218654e4ee71SNavdeep Parhar 	c.type_to_iqandstindex = htobe32(v |
218754e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
218854e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_VIID(pi->viid) |
218954e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
219054e4ee71SNavdeep Parhar 	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
219154e4ee71SNavdeep Parhar 	    F_FW_IQ_CMD_IQGTSMODE |
219254e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
219354e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
219454e4ee71SNavdeep Parhar 	c.iqsize = htobe16(iq->qsize);
219554e4ee71SNavdeep Parhar 	c.iqaddr = htobe64(iq->ba);
2196bc14b14dSNavdeep Parhar 	if (cong >= 0)
2197bc14b14dSNavdeep Parhar 		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
219854e4ee71SNavdeep Parhar 
219954e4ee71SNavdeep Parhar 	if (fl) {
220054e4ee71SNavdeep Parhar 		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
220154e4ee71SNavdeep Parhar 
22021458bff9SNavdeep Parhar 		for (i = 0; i < FL_BUF_SIZES(sc); i++) {
220354e4ee71SNavdeep Parhar 
220454e4ee71SNavdeep Parhar 			/*
220554e4ee71SNavdeep Parhar 			 * A freelist buffer must be 16 byte aligned as the SGE
220654e4ee71SNavdeep Parhar 			 * uses the low 4 bits of the bus addr to figure out the
220754e4ee71SNavdeep Parhar 			 * buffer size.
220854e4ee71SNavdeep Parhar 			 */
220954e4ee71SNavdeep Parhar 			rc = bus_dma_tag_create(sc->dmat, 16, 0,
221054e4ee71SNavdeep Parhar 			    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
22111458bff9SNavdeep Parhar 			    FL_BUF_SIZE(sc, i), 1, FL_BUF_SIZE(sc, i),
22121458bff9SNavdeep Parhar 			    BUS_DMA_ALLOCNOW, NULL, NULL, &fl->tag[i]);
221354e4ee71SNavdeep Parhar 			if (rc != 0) {
221454e4ee71SNavdeep Parhar 				device_printf(sc->dev,
221554e4ee71SNavdeep Parhar 				    "failed to create fl DMA tag[%d]: %d\n",
221654e4ee71SNavdeep Parhar 				    i, rc);
221754e4ee71SNavdeep Parhar 				return (rc);
221854e4ee71SNavdeep Parhar 			}
221954e4ee71SNavdeep Parhar 		}
222054e4ee71SNavdeep Parhar 		len = fl->qsize * RX_FL_ESIZE;
222154e4ee71SNavdeep Parhar 		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
222254e4ee71SNavdeep Parhar 		    &fl->ba, (void **)&fl->desc);
222354e4ee71SNavdeep Parhar 		if (rc)
222454e4ee71SNavdeep Parhar 			return (rc);
222554e4ee71SNavdeep Parhar 
222654e4ee71SNavdeep Parhar 		/* Allocate space for one software descriptor per buffer. */
22274defc81bSNavdeep Parhar 		fl->cap = (fl->qsize - spg_len / RX_FL_ESIZE) * 8;
222854e4ee71SNavdeep Parhar 		rc = alloc_fl_sdesc(fl);
222954e4ee71SNavdeep Parhar 		if (rc != 0) {
223054e4ee71SNavdeep Parhar 			device_printf(sc->dev,
223154e4ee71SNavdeep Parhar 			    "failed to setup fl software descriptors: %d\n",
223254e4ee71SNavdeep Parhar 			    rc);
223354e4ee71SNavdeep Parhar 			return (rc);
223454e4ee71SNavdeep Parhar 		}
2235fb12416cSNavdeep Parhar 		fl->needed = fl->cap;
2236d14b0ac1SNavdeep Parhar 		fl->lowat = roundup2(sc->sge.fl_starve_threshold, 8);
223754e4ee71SNavdeep Parhar 
2238214c3582SNavdeep Parhar 		c.iqns_to_fl0congen |=
2239bc14b14dSNavdeep Parhar 		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
2240bc14b14dSNavdeep Parhar 			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
22411458bff9SNavdeep Parhar 			(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
22421458bff9SNavdeep Parhar 			(fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN :
22431458bff9SNavdeep Parhar 			    0));
2244bc14b14dSNavdeep Parhar 		if (cong >= 0) {
2245bc14b14dSNavdeep Parhar 			c.iqns_to_fl0congen |=
2246bc14b14dSNavdeep Parhar 				htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
2247bc14b14dSNavdeep Parhar 				    F_FW_IQ_CMD_FL0CONGCIF |
2248bc14b14dSNavdeep Parhar 				    F_FW_IQ_CMD_FL0CONGEN);
2249bc14b14dSNavdeep Parhar 		}
225054e4ee71SNavdeep Parhar 		c.fl0dcaen_to_fl0cidxfthresh =
225154e4ee71SNavdeep Parhar 		    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
225254e4ee71SNavdeep Parhar 			V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
225354e4ee71SNavdeep Parhar 		c.fl0size = htobe16(fl->qsize);
225454e4ee71SNavdeep Parhar 		c.fl0addr = htobe64(fl->ba);
225554e4ee71SNavdeep Parhar 	}
225654e4ee71SNavdeep Parhar 
225754e4ee71SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
225854e4ee71SNavdeep Parhar 	if (rc != 0) {
225954e4ee71SNavdeep Parhar 		device_printf(sc->dev,
226054e4ee71SNavdeep Parhar 		    "failed to create ingress queue: %d\n", rc);
226154e4ee71SNavdeep Parhar 		return (rc);
226254e4ee71SNavdeep Parhar 	}
226354e4ee71SNavdeep Parhar 
226454e4ee71SNavdeep Parhar 	iq->cdesc = iq->desc;
226554e4ee71SNavdeep Parhar 	iq->cidx = 0;
226654e4ee71SNavdeep Parhar 	iq->gen = 1;
226754e4ee71SNavdeep Parhar 	iq->intr_next = iq->intr_params;
226854e4ee71SNavdeep Parhar 	iq->cntxt_id = be16toh(c.iqid);
226954e4ee71SNavdeep Parhar 	iq->abs_id = be16toh(c.physiqid);
2270733b9277SNavdeep Parhar 	iq->flags |= IQ_ALLOCATED;
227154e4ee71SNavdeep Parhar 
227254e4ee71SNavdeep Parhar 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
2273733b9277SNavdeep Parhar 	if (cntxt_id >= sc->sge.niq) {
2274733b9277SNavdeep Parhar 		panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
2275733b9277SNavdeep Parhar 		    cntxt_id, sc->sge.niq - 1);
2276733b9277SNavdeep Parhar 	}
227754e4ee71SNavdeep Parhar 	sc->sge.iqmap[cntxt_id] = iq;
227854e4ee71SNavdeep Parhar 
227954e4ee71SNavdeep Parhar 	if (fl) {
228054e4ee71SNavdeep Parhar 		fl->cntxt_id = be16toh(c.fl0id);
228154e4ee71SNavdeep Parhar 		fl->pidx = fl->cidx = 0;
228254e4ee71SNavdeep Parhar 
22839f1f7ec9SNavdeep Parhar 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
2284733b9277SNavdeep Parhar 		if (cntxt_id >= sc->sge.neq) {
2285733b9277SNavdeep Parhar 			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
2286733b9277SNavdeep Parhar 			    __func__, cntxt_id, sc->sge.neq - 1);
2287733b9277SNavdeep Parhar 		}
228854e4ee71SNavdeep Parhar 		sc->sge.eqmap[cntxt_id] = (void *)fl;
228954e4ee71SNavdeep Parhar 
229054e4ee71SNavdeep Parhar 		FL_LOCK(fl);
2291733b9277SNavdeep Parhar 		/* Enough to make sure the SGE doesn't think it's starved */
2292733b9277SNavdeep Parhar 		refill_fl(sc, fl, fl->lowat);
229354e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
2294733b9277SNavdeep Parhar 
2295733b9277SNavdeep Parhar 		iq->flags |= IQ_HAS_FL;
229654e4ee71SNavdeep Parhar 	}
229754e4ee71SNavdeep Parhar 
2298ba41ec48SNavdeep Parhar 	if (is_t5(sc) && cong >= 0) {
2299ba41ec48SNavdeep Parhar 		uint32_t param, val;
2300ba41ec48SNavdeep Parhar 
2301ba41ec48SNavdeep Parhar 		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2302ba41ec48SNavdeep Parhar 		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
2303ba41ec48SNavdeep Parhar 		    V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
230473cd9220SNavdeep Parhar 		if (cong == 0)
230573cd9220SNavdeep Parhar 			val = 1 << 19;
230673cd9220SNavdeep Parhar 		else {
230773cd9220SNavdeep Parhar 			val = 2 << 19;
230873cd9220SNavdeep Parhar 			for (i = 0; i < 4; i++) {
230973cd9220SNavdeep Parhar 				if (cong & (1 << i))
231073cd9220SNavdeep Parhar 					val |= 1 << (i << 2);
231173cd9220SNavdeep Parhar 			}
231273cd9220SNavdeep Parhar 		}
231373cd9220SNavdeep Parhar 
2314ba41ec48SNavdeep Parhar 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2315ba41ec48SNavdeep Parhar 		if (rc != 0) {
2316ba41ec48SNavdeep Parhar 			/* report error but carry on */
2317ba41ec48SNavdeep Parhar 			device_printf(sc->dev,
2318ba41ec48SNavdeep Parhar 			    "failed to set congestion manager context for "
2319ba41ec48SNavdeep Parhar 			    "ingress queue %d: %d\n", iq->cntxt_id, rc);
2320ba41ec48SNavdeep Parhar 		}
2321ba41ec48SNavdeep Parhar 	}
2322ba41ec48SNavdeep Parhar 
232354e4ee71SNavdeep Parhar 	/* Enable IQ interrupts */
2324733b9277SNavdeep Parhar 	atomic_store_rel_int(&iq->state, IQS_IDLE);
232554e4ee71SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
232654e4ee71SNavdeep Parhar 	    V_INGRESSQID(iq->cntxt_id));
232754e4ee71SNavdeep Parhar 
232854e4ee71SNavdeep Parhar 	return (0);
232954e4ee71SNavdeep Parhar }
233054e4ee71SNavdeep Parhar 
233154e4ee71SNavdeep Parhar static int
233254e4ee71SNavdeep Parhar free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
233354e4ee71SNavdeep Parhar {
233454e4ee71SNavdeep Parhar 	int i, rc;
233554e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
233654e4ee71SNavdeep Parhar 	device_t dev;
233754e4ee71SNavdeep Parhar 
233854e4ee71SNavdeep Parhar 	if (sc == NULL)
233954e4ee71SNavdeep Parhar 		return (0);	/* nothing to do */
234054e4ee71SNavdeep Parhar 
234154e4ee71SNavdeep Parhar 	dev = pi ? pi->dev : sc->dev;
234254e4ee71SNavdeep Parhar 
234354e4ee71SNavdeep Parhar 	if (iq->flags & IQ_ALLOCATED) {
234454e4ee71SNavdeep Parhar 		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
234554e4ee71SNavdeep Parhar 		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
234654e4ee71SNavdeep Parhar 		    fl ? fl->cntxt_id : 0xffff, 0xffff);
234754e4ee71SNavdeep Parhar 		if (rc != 0) {
234854e4ee71SNavdeep Parhar 			device_printf(dev,
234954e4ee71SNavdeep Parhar 			    "failed to free queue %p: %d\n", iq, rc);
235054e4ee71SNavdeep Parhar 			return (rc);
235154e4ee71SNavdeep Parhar 		}
235254e4ee71SNavdeep Parhar 		iq->flags &= ~IQ_ALLOCATED;
235354e4ee71SNavdeep Parhar 	}
235454e4ee71SNavdeep Parhar 
235554e4ee71SNavdeep Parhar 	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
235654e4ee71SNavdeep Parhar 
235754e4ee71SNavdeep Parhar 	bzero(iq, sizeof(*iq));
235854e4ee71SNavdeep Parhar 
235954e4ee71SNavdeep Parhar 	if (fl) {
236054e4ee71SNavdeep Parhar 		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
236154e4ee71SNavdeep Parhar 		    fl->desc);
236254e4ee71SNavdeep Parhar 
2363aa9a5cc0SNavdeep Parhar 		if (fl->sdesc)
23641458bff9SNavdeep Parhar 			free_fl_sdesc(sc, fl);
23651458bff9SNavdeep Parhar 
23661458bff9SNavdeep Parhar 		for (i = 0; i < nitems(fl->mstash); i++) {
23671458bff9SNavdeep Parhar 			struct mbuf *m = fl->mstash[i];
23681458bff9SNavdeep Parhar 
23691458bff9SNavdeep Parhar 			if (m != NULL) {
23701458bff9SNavdeep Parhar 				m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
23711458bff9SNavdeep Parhar 				m_free(m);
23721458bff9SNavdeep Parhar 			}
23731458bff9SNavdeep Parhar 		}
237454e4ee71SNavdeep Parhar 
237554e4ee71SNavdeep Parhar 		if (mtx_initialized(&fl->fl_lock))
237654e4ee71SNavdeep Parhar 			mtx_destroy(&fl->fl_lock);
237754e4ee71SNavdeep Parhar 
23781458bff9SNavdeep Parhar 		for (i = 0; i < FL_BUF_SIZES(sc); i++) {
237954e4ee71SNavdeep Parhar 			if (fl->tag[i])
238054e4ee71SNavdeep Parhar 				bus_dma_tag_destroy(fl->tag[i]);
238154e4ee71SNavdeep Parhar 		}
238254e4ee71SNavdeep Parhar 
238354e4ee71SNavdeep Parhar 		bzero(fl, sizeof(*fl));
238454e4ee71SNavdeep Parhar 	}
238554e4ee71SNavdeep Parhar 
238654e4ee71SNavdeep Parhar 	return (0);
238754e4ee71SNavdeep Parhar }
238854e4ee71SNavdeep Parhar 
/*
 * Allocate the adapter's firmware event queue and publish a "fwq" sysctl
 * node exposing its absolute id, SGE context id, and consumer index.
 *
 * The fwq always takes its interrupts directly (IQ_INTR); it uses interrupt
 * vector 1 when the adapter has more than one vector, else vector 0.
 * Returns 0 on success or the error from alloc_iq_fl.
 */
static int
alloc_fwq(struct adapter *sc)
{
	int rc, intr_idx;
	struct sge_iq *fwq = &sc->sge.fwq;
	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE);
	fwq->flags |= IQ_INTR;	/* always */
	intr_idx = sc->intr_count > 1 ? 1 : 0;
	rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create firmware event queue: %d\n", rc);
		return (rc);
	}

	/* sysctl node for the fwq and its read-only state. */
	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD,
	    NULL, "firmware event queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I",
	    "absolute id of the queue");
	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I",
	    "SGE context id of the queue");
	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I",
	    "consumer index");

	return (0);
}
2423733b9277SNavdeep Parhar 
2424733b9277SNavdeep Parhar static int
2425733b9277SNavdeep Parhar free_fwq(struct adapter *sc)
2426733b9277SNavdeep Parhar {
2427733b9277SNavdeep Parhar 	return free_iq_fl(NULL, &sc->sge.fwq, NULL);
2428733b9277SNavdeep Parhar }
2429733b9277SNavdeep Parhar 
/*
 * Allocate the adapter's management queue: a control egress queue on port
 * 0's tx channel that uses the fwq's context id as its ingress queue, with
 * a "mgmtq" sysctl node attached to the adapter's sysctl tree.
 *
 * Returns 0 on success or the error from alloc_wrq.
 */
static int
alloc_mgmtq(struct adapter *sc)
{
	int rc;
	struct sge_wrq *mgmtq = &sc->sge.mgmtq;
	char name[16];
	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD,
	    NULL, "management queue");

	snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
	init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
	    sc->sge.fwq.cntxt_id, name);
	rc = alloc_wrq(sc, NULL, mgmtq, oid);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create management queue: %d\n", rc);
		return (rc);
	}

	return (0);
}
245454e4ee71SNavdeep Parhar 
245554e4ee71SNavdeep Parhar static int
2456733b9277SNavdeep Parhar free_mgmtq(struct adapter *sc)
2457733b9277SNavdeep Parhar {
245809fe6320SNavdeep Parhar 
2459733b9277SNavdeep Parhar 	return free_wrq(sc, &sc->sge.mgmtq);
2460733b9277SNavdeep Parhar }
2461733b9277SNavdeep Parhar 
24629fb8886bSNavdeep Parhar static inline int
24639fb8886bSNavdeep Parhar tnl_cong(struct port_info *pi)
24649fb8886bSNavdeep Parhar {
24659fb8886bSNavdeep Parhar 
24669fb8886bSNavdeep Parhar 	if (cong_drop == -1)
24679fb8886bSNavdeep Parhar 		return (-1);
24689fb8886bSNavdeep Parhar 	else if (cong_drop == 1)
24699fb8886bSNavdeep Parhar 		return (0);
24709fb8886bSNavdeep Parhar 	else
24719fb8886bSNavdeep Parhar 		return (1 << pi->tx_chan);
24729fb8886bSNavdeep Parhar }
24739fb8886bSNavdeep Parhar 
/*
 * Allocate one NIC rx queue (ingress queue + freelist), partially prefill
 * its freelist, set up LRO when compiled in, and publish a per-queue sysctl
 * node (with a nested "fl" node) under the given oid.
 *
 * intr_idx/idx: interrupt vector and queue index; oid: parent sysctl node.
 * Returns 0 on success or an error from alloc_iq_fl/tcp_lro_init.
 */
static int
alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	struct sysctl_oid_list *children;
	char name[16];

	rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(pi));
	if (rc != 0)
		return (rc);

	/* Prefill only 1/8 of what the freelist needs at this point. */
	FL_LOCK(&rxq->fl);
	refill_fl(pi->adapter, &rxq->fl, rxq->fl.needed / 8);
	FL_UNLOCK(&rxq->fl);

#if defined(INET) || defined(INET6)
	/*
	 * NOTE(review): if tcp_lro_init fails, the iq/fl allocated above are
	 * not freed here -- presumably the caller tears them down; verify.
	 */
	rc = tcp_lro_init(&rxq->lro);
	if (rc != 0)
		return (rc);
	rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */

	if (pi->ifp->if_capenable & IFCAP_LRO)
		rxq->iq.flags |= IQ_LRO_ENABLED;
#endif
	rxq->ifp = pi->ifp;

	children = SYSCTL_CHILDREN(oid);

	/* Per-queue sysctl node, named after the queue index. */
	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "rx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I",
	    "absolute id of the queue");
	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I",
	    "SGE context id of the queue");
	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I",
	    "consumer index");
#if defined(INET) || defined(INET6)
	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
	    &rxq->lro.lro_queued, 0, NULL);
	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
	    &rxq->lro.lro_flushed, 0, NULL);
#endif
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
	    &rxq->rxcsum, "# of times hardware assisted with checksum");
	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
	    CTLFLAG_RD, &rxq->vlan_extraction,
	    "# of times hardware extracted 802.1Q tag");

	/* Nested "fl" node for the freelist's state. */
	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
	    NULL, "freelist");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &rxq->fl.cntxt_id, 0, sysctl_uint16, "I",
	    "SGE context id of the queue");
	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
	    &rxq->fl.cidx, 0, "consumer index");
	if (rxq->fl.flags & FL_BUF_PACKING) {
		SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "rx_offset",
		    CTLFLAG_RD, &rxq->fl.rx_offset, 0, "packing rx offset");
	}
	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
	    &rxq->fl.pidx, 0, "producer index");

	return (rc);
}
254854e4ee71SNavdeep Parhar 
254954e4ee71SNavdeep Parhar static int
255054e4ee71SNavdeep Parhar free_rxq(struct port_info *pi, struct sge_rxq *rxq)
255154e4ee71SNavdeep Parhar {
255254e4ee71SNavdeep Parhar 	int rc;
255354e4ee71SNavdeep Parhar 
2554a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
255554e4ee71SNavdeep Parhar 	if (rxq->lro.ifp) {
255654e4ee71SNavdeep Parhar 		tcp_lro_free(&rxq->lro);
255754e4ee71SNavdeep Parhar 		rxq->lro.ifp = NULL;
255854e4ee71SNavdeep Parhar 	}
255954e4ee71SNavdeep Parhar #endif
256054e4ee71SNavdeep Parhar 
256154e4ee71SNavdeep Parhar 	rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
256254e4ee71SNavdeep Parhar 	if (rc == 0)
256354e4ee71SNavdeep Parhar 		bzero(rxq, sizeof(*rxq));
256454e4ee71SNavdeep Parhar 
256554e4ee71SNavdeep Parhar 	return (rc);
256654e4ee71SNavdeep Parhar }
256754e4ee71SNavdeep Parhar 
256809fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
/*
 * Allocate one offload rx queue (ingress queue + freelist) and publish a
 * per-queue sysctl node (with a nested "fl" node) under the given oid.
 * The congestion value is always this port's tx channel bit.
 *
 * Returns 0 on success or the error from alloc_iq_fl.
 */
static int
alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq,
    int intr_idx, int idx, struct sysctl_oid *oid)
{
	int rc;
	struct sysctl_oid_list *children;
	char name[16];

	rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
	    1 << pi->tx_chan);
	if (rc != 0)
		return (rc);

	children = SYSCTL_CHILDREN(oid);

	/* Per-queue sysctl node, named after the queue index. */
	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "rx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16,
	    "I", "absolute id of the queue");
	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16,
	    "I", "SGE context id of the queue");
	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I",
	    "consumer index");

	/* Nested "fl" node for the freelist's state. */
	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
	    NULL, "freelist");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->fl.cntxt_id, 0, sysctl_uint16,
	    "I", "SGE context id of the queue");
	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
	    &ofld_rxq->fl.cidx, 0, "consumer index");
	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
	    &ofld_rxq->fl.pidx, 0, "producer index");

	return (rc);
}
2614733b9277SNavdeep Parhar 
2615733b9277SNavdeep Parhar static int
2616733b9277SNavdeep Parhar free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq)
2617733b9277SNavdeep Parhar {
2618733b9277SNavdeep Parhar 	int rc;
2619733b9277SNavdeep Parhar 
2620733b9277SNavdeep Parhar 	rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl);
2621733b9277SNavdeep Parhar 	if (rc == 0)
2622733b9277SNavdeep Parhar 		bzero(ofld_rxq, sizeof(*ofld_rxq));
2623733b9277SNavdeep Parhar 
2624733b9277SNavdeep Parhar 	return (rc);
2625733b9277SNavdeep Parhar }
2626733b9277SNavdeep Parhar #endif
2627733b9277SNavdeep Parhar 
/*
 * Ask the firmware to allocate and start a control egress queue for eq.
 * On success, marks the eq EQ_ALLOCATED, records the context id the
 * firmware assigned, and registers the eq in the adapter's eqmap.
 *
 * Returns 0 on success or the (positive) firmware error.
 */
static int
ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
{
	int rc, cntxt_id;
	struct fw_eq_ctrl_cmd c;

	bzero(&c, sizeof(c));

	/* Build the FW_EQ_CTRL_CMD (all fields big-endian). */
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
	    V_FW_EQ_CTRL_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */
	c.physeqid_pkd = htobe32(0);
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
		V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
		F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
		V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
		V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
		V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create control queue %d: %d\n", eq->tx_chan, rc);
		return (rc);
	}
	eq->flags |= EQ_ALLOCATED;

	/* The firmware wrote the assigned eq id back into the command. */
	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
		cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = eq;

	return (rc);
}
2671f7dfe243SNavdeep Parhar 
/*
 * Ask the firmware to allocate and start an Ethernet (NIC tx) egress queue
 * for eq, bound to the port's virtual interface (viid).  On success, marks
 * the eq EQ_ALLOCATED, records the assigned context id, and registers the
 * eq in the adapter's eqmap.
 *
 * Returns 0 on success or the (positive) firmware error.
 */
static int
eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
{
	int rc, cntxt_id;
	struct fw_eq_eth_cmd c;

	bzero(&c, sizeof(c));

	/* Build the FW_EQ_ETH_CMD (all fields big-endian). */
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
		V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
		V_FW_EQ_ETH_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
		      V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
		      V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(pi->dev,
		    "failed to create Ethernet egress queue: %d\n", rc);
		return (rc);
	}
	eq->flags |= EQ_ALLOCATED;

	/* The firmware wrote the assigned eq id back into the command. */
	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
		cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = eq;

	return (rc);
}
271354e4ee71SNavdeep Parhar 
271409fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
2715733b9277SNavdeep Parhar static int
2716733b9277SNavdeep Parhar ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
2717733b9277SNavdeep Parhar {
2718733b9277SNavdeep Parhar 	int rc, cntxt_id;
2719733b9277SNavdeep Parhar 	struct fw_eq_ofld_cmd c;
272054e4ee71SNavdeep Parhar 
2721733b9277SNavdeep Parhar 	bzero(&c, sizeof(c));
2722733b9277SNavdeep Parhar 
2723733b9277SNavdeep Parhar 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
2724733b9277SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
2725733b9277SNavdeep Parhar 	    V_FW_EQ_OFLD_CMD_VFN(0));
2726733b9277SNavdeep Parhar 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
2727733b9277SNavdeep Parhar 	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2728733b9277SNavdeep Parhar 	c.fetchszm_to_iqid =
2729733b9277SNavdeep Parhar 		htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
2730733b9277SNavdeep Parhar 		    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
2731733b9277SNavdeep Parhar 		    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
2732733b9277SNavdeep Parhar 	c.dcaen_to_eqsize =
2733733b9277SNavdeep Parhar 	    htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
2734733b9277SNavdeep Parhar 		V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
2735733b9277SNavdeep Parhar 		V_FW_EQ_OFLD_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
2736733b9277SNavdeep Parhar 		V_FW_EQ_OFLD_CMD_EQSIZE(eq->qsize));
2737733b9277SNavdeep Parhar 	c.eqaddr = htobe64(eq->ba);
2738733b9277SNavdeep Parhar 
2739733b9277SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
2740733b9277SNavdeep Parhar 	if (rc != 0) {
2741733b9277SNavdeep Parhar 		device_printf(pi->dev,
2742733b9277SNavdeep Parhar 		    "failed to create egress queue for TCP offload: %d\n", rc);
2743733b9277SNavdeep Parhar 		return (rc);
2744733b9277SNavdeep Parhar 	}
2745733b9277SNavdeep Parhar 	eq->flags |= EQ_ALLOCATED;
2746733b9277SNavdeep Parhar 
2747733b9277SNavdeep Parhar 	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
274854e4ee71SNavdeep Parhar 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
2749733b9277SNavdeep Parhar 	if (cntxt_id >= sc->sge.neq)
2750733b9277SNavdeep Parhar 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
2751733b9277SNavdeep Parhar 		cntxt_id, sc->sge.neq - 1);
275254e4ee71SNavdeep Parhar 	sc->sge.eqmap[cntxt_id] = eq;
275354e4ee71SNavdeep Parhar 
2754733b9277SNavdeep Parhar 	return (rc);
2755733b9277SNavdeep Parhar }
2756733b9277SNavdeep Parhar #endif
2757733b9277SNavdeep Parhar 
/*
 * Common egress queue setup: allocates the descriptor ring (with the status
 * page at its end), initializes the software state, creates the queue in
 * the firmware according to its type (control/Ethernet/offload), and works
 * out the queue's user doorbell address if BAR2 doorbells are in use.
 *
 * Returns 0 on success or an error from alloc_ring or the type-specific
 * firmware allocation.
 */
static int
alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
{
	int rc;
	size_t len;

	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);

	len = eq->qsize * EQ_ESIZE;
	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
	    &eq->ba, (void **)&eq->desc);
	if (rc)
		return (rc);

	/* The last spg_len bytes of the ring hold the status page. */
	eq->cap = eq->qsize - spg_len / EQ_ESIZE;
	eq->spg = (void *)&eq->desc[eq->cap];
	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
	eq->pidx = eq->cidx = 0;
	eq->doorbells = sc->doorbells;

	/* Create the queue in the firmware, per its type. */
	switch (eq->flags & EQ_TYPEMASK) {
	case EQ_CTRL:
		rc = ctrl_eq_alloc(sc, eq);
		break;

	case EQ_ETH:
		rc = eth_eq_alloc(sc, pi, eq);
		break;

#ifdef TCP_OFFLOAD
	case EQ_OFLD:
		rc = ofld_eq_alloc(sc, pi, eq);
		break;
#endif

	default:
		panic("%s: invalid eq type %d.", __func__,
		    eq->flags & EQ_TYPEMASK);
	}
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to allocate egress queue(%d): %d",
		    eq->flags & EQ_TYPEMASK, rc);
	}

	eq->tx_callout.c_cpu = eq->cntxt_id % mp_ncpus;

	/*
	 * Locate this queue's slot within the BAR2 user doorbell region:
	 * page selected by cntxt_id >> eq_s_qpp, queue id within the page
	 * by the low bits.  If the queue falls beyond the per-page segment
	 * range, write-combined doorbell writes (WCWR) are disabled for it;
	 * otherwise point udb directly at the queue's segment.
	 */
	if (isset(&eq->doorbells, DOORBELL_UDB) ||
	    isset(&eq->doorbells, DOORBELL_UDBWC) ||
	    isset(&eq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT;	/* pg offset */
		eq->udb_qid = eq->cntxt_id & mask;		/* id in page */
		if (eq->udb_qid > PAGE_SIZE / UDBS_SEG_SIZE)
	    		clrbit(&eq->doorbells, DOORBELL_WCWR);
		else {
			udb += eq->udb_qid << UDBS_SEG_SHIFT;	/* seg offset */
			eq->udb_qid = 0;
		}
		eq->udb = (volatile void *)udb;
	}

	return (rc);
}
2826733b9277SNavdeep Parhar 
/*
 * Tear down an egress queue: frees the firmware context (if allocated)
 * using the call matching the queue's type, then the descriptor ring and
 * the lock, and zeroes the structure.
 *
 * Returns 0 on success, or the (positive) firmware error if the context
 * could not be freed, in which case nothing else is released.
 */
static int
free_eq(struct adapter *sc, struct sge_eq *eq)
{
	int rc;

	if (eq->flags & EQ_ALLOCATED) {
		switch (eq->flags & EQ_TYPEMASK) {
		case EQ_CTRL:
			rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0,
			    eq->cntxt_id);
			break;

		case EQ_ETH:
			rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0,
			    eq->cntxt_id);
			break;

#ifdef TCP_OFFLOAD
		case EQ_OFLD:
			rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0,
			    eq->cntxt_id);
			break;
#endif

		default:
			panic("%s: invalid eq type %d.", __func__,
			    eq->flags & EQ_TYPEMASK);
		}
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to free egress queue (%d): %d\n",
			    eq->flags & EQ_TYPEMASK, rc);
			return (rc);
		}
		eq->flags &= ~EQ_ALLOCATED;
	}

	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);

	if (mtx_initialized(&eq->eq_lock))
		mtx_destroy(&eq->eq_lock);

	bzero(eq, sizeof(*eq));
	return (0);
}
2872733b9277SNavdeep Parhar 
/*
 * Allocate a work request queue: sets up the underlying egress queue,
 * initializes the pending work request list, and attaches read-only
 * sysctls under the given oid.  pi may be NULL for an adapter-wide queue
 * (the sysctl context is chosen accordingly).
 *
 * Returns 0 on success or the error from alloc_eq.
 */
static int
alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq,
    struct sysctl_oid *oid)
{
	int rc;
	struct sysctl_ctx_list *ctx = pi ? &pi->ctx : &sc->ctx;
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	rc = alloc_eq(sc, pi, &wrq->eq);
	if (rc)
		return (rc);

	wrq->adapter = sc;
	STAILQ_INIT(&wrq->wr_list);

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &wrq->eq.cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I",
	    "consumer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx",
	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I",
	    "producer index");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs", CTLFLAG_RD,
	    &wrq->tx_wrs, "# of work requests");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
	    &wrq->no_desc, 0,
	    "# of times queue ran out of hardware descriptors");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD,
	    &wrq->eq.unstalled, 0, "# of times queue recovered after stall");


	return (rc);
}
2907733b9277SNavdeep Parhar 
2908733b9277SNavdeep Parhar static int
2909733b9277SNavdeep Parhar free_wrq(struct adapter *sc, struct sge_wrq *wrq)
2910733b9277SNavdeep Parhar {
2911733b9277SNavdeep Parhar 	int rc;
2912733b9277SNavdeep Parhar 
2913733b9277SNavdeep Parhar 	rc = free_eq(sc, &wrq->eq);
2914733b9277SNavdeep Parhar 	if (rc)
2915733b9277SNavdeep Parhar 		return (rc);
2916733b9277SNavdeep Parhar 
2917733b9277SNavdeep Parhar 	bzero(wrq, sizeof(*wrq));
2918733b9277SNavdeep Parhar 	return (0);
2919733b9277SNavdeep Parhar }
2920733b9277SNavdeep Parhar 
2921733b9277SNavdeep Parhar static int
2922733b9277SNavdeep Parhar alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx,
2923733b9277SNavdeep Parhar     struct sysctl_oid *oid)
2924733b9277SNavdeep Parhar {
2925733b9277SNavdeep Parhar 	int rc;
2926733b9277SNavdeep Parhar 	struct adapter *sc = pi->adapter;
2927733b9277SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
2928733b9277SNavdeep Parhar 	char name[16];
2929733b9277SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
2930733b9277SNavdeep Parhar 
2931733b9277SNavdeep Parhar 	rc = alloc_eq(sc, pi, eq);
2932733b9277SNavdeep Parhar 	if (rc)
2933733b9277SNavdeep Parhar 		return (rc);
2934733b9277SNavdeep Parhar 
2935733b9277SNavdeep Parhar 	txq->ifp = pi->ifp;
2936733b9277SNavdeep Parhar 
2937733b9277SNavdeep Parhar 	txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
2938733b9277SNavdeep Parhar 	    M_ZERO | M_WAITOK);
2939733b9277SNavdeep Parhar 	txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
2940733b9277SNavdeep Parhar 
2941733b9277SNavdeep Parhar 	rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
2942733b9277SNavdeep Parhar 	    BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
2943733b9277SNavdeep Parhar 	    BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag);
2944733b9277SNavdeep Parhar 	if (rc != 0) {
2945733b9277SNavdeep Parhar 		device_printf(sc->dev,
2946733b9277SNavdeep Parhar 		    "failed to create tx DMA tag: %d\n", rc);
2947733b9277SNavdeep Parhar 		return (rc);
2948733b9277SNavdeep Parhar 	}
2949733b9277SNavdeep Parhar 
2950733b9277SNavdeep Parhar 	/*
2951733b9277SNavdeep Parhar 	 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
2952733b9277SNavdeep Parhar 	 * limit for any WR).  txq->no_dmamap events shouldn't occur if maps is
2953733b9277SNavdeep Parhar 	 * sized for the worst case.
2954733b9277SNavdeep Parhar 	 */
2955733b9277SNavdeep Parhar 	rc = t4_alloc_tx_maps(&txq->txmaps, txq->tx_tag, eq->qsize * 10 / 8,
2956733b9277SNavdeep Parhar 	    M_WAITOK);
2957733b9277SNavdeep Parhar 	if (rc != 0) {
2958733b9277SNavdeep Parhar 		device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
2959733b9277SNavdeep Parhar 		return (rc);
2960733b9277SNavdeep Parhar 	}
296154e4ee71SNavdeep Parhar 
296254e4ee71SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
296354e4ee71SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
296454e4ee71SNavdeep Parhar 	    NULL, "tx queue");
296554e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
296654e4ee71SNavdeep Parhar 
296759bc8ce0SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
296859bc8ce0SNavdeep Parhar 	    &eq->cntxt_id, 0, "SGE context id of the queue");
296959bc8ce0SNavdeep Parhar 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
297059bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I",
297159bc8ce0SNavdeep Parhar 	    "consumer index");
297259bc8ce0SNavdeep Parhar 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx",
297359bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I",
297459bc8ce0SNavdeep Parhar 	    "producer index");
297559bc8ce0SNavdeep Parhar 
297654e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
297754e4ee71SNavdeep Parhar 	    &txq->txcsum, "# of times hardware assisted with checksum");
297854e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
297954e4ee71SNavdeep Parhar 	    CTLFLAG_RD, &txq->vlan_insertion,
298054e4ee71SNavdeep Parhar 	    "# of times hardware inserted 802.1Q tag");
298154e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
2982a1ea9a82SNavdeep Parhar 	    &txq->tso_wrs, "# of TSO work requests");
298354e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
298454e4ee71SNavdeep Parhar 	    &txq->imm_wrs, "# of work requests with immediate data");
298554e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
298654e4ee71SNavdeep Parhar 	    &txq->sgl_wrs, "# of work requests with direct SGL");
298754e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
298854e4ee71SNavdeep Parhar 	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
298954e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
299054e4ee71SNavdeep Parhar 	    &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
299154e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
299254e4ee71SNavdeep Parhar 	    &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");
299354e4ee71SNavdeep Parhar 
2994c25f3787SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "br_drops", CTLFLAG_RD,
2995c25f3787SNavdeep Parhar 	    &txq->br->br_drops, "# of drops in the buf_ring for this queue");
299654e4ee71SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
299754e4ee71SNavdeep Parhar 	    &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
299854e4ee71SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
299954e4ee71SNavdeep Parhar 	    &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
300054e4ee71SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
3001733b9277SNavdeep Parhar 	    &eq->egr_update, 0, "egress update notifications from the SGE");
3002733b9277SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD,
3003733b9277SNavdeep Parhar 	    &eq->unstalled, 0, "# of times txq recovered after stall");
300454e4ee71SNavdeep Parhar 
300554e4ee71SNavdeep Parhar 	return (rc);
300654e4ee71SNavdeep Parhar }
300754e4ee71SNavdeep Parhar 
300854e4ee71SNavdeep Parhar static int
300954e4ee71SNavdeep Parhar free_txq(struct port_info *pi, struct sge_txq *txq)
301054e4ee71SNavdeep Parhar {
301154e4ee71SNavdeep Parhar 	int rc;
301254e4ee71SNavdeep Parhar 	struct adapter *sc = pi->adapter;
301354e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
301454e4ee71SNavdeep Parhar 
3015733b9277SNavdeep Parhar 	rc = free_eq(sc, eq);
3016733b9277SNavdeep Parhar 	if (rc)
301754e4ee71SNavdeep Parhar 		return (rc);
301854e4ee71SNavdeep Parhar 
3019f7dfe243SNavdeep Parhar 	free(txq->sdesc, M_CXGBE);
302054e4ee71SNavdeep Parhar 
3021733b9277SNavdeep Parhar 	if (txq->txmaps.maps)
3022733b9277SNavdeep Parhar 		t4_free_tx_maps(&txq->txmaps, txq->tx_tag);
302354e4ee71SNavdeep Parhar 
3024f7dfe243SNavdeep Parhar 	buf_ring_free(txq->br, M_CXGBE);
302554e4ee71SNavdeep Parhar 
3026f7dfe243SNavdeep Parhar 	if (txq->tx_tag)
3027f7dfe243SNavdeep Parhar 		bus_dma_tag_destroy(txq->tx_tag);
302854e4ee71SNavdeep Parhar 
302954e4ee71SNavdeep Parhar 	bzero(txq, sizeof(*txq));
303054e4ee71SNavdeep Parhar 	return (0);
303154e4ee71SNavdeep Parhar }
303254e4ee71SNavdeep Parhar 
303354e4ee71SNavdeep Parhar static void
303454e4ee71SNavdeep Parhar oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
303554e4ee71SNavdeep Parhar {
303654e4ee71SNavdeep Parhar 	bus_addr_t *ba = arg;
303754e4ee71SNavdeep Parhar 
303854e4ee71SNavdeep Parhar 	KASSERT(nseg == 1,
303954e4ee71SNavdeep Parhar 	    ("%s meant for single segment mappings only.", __func__));
304054e4ee71SNavdeep Parhar 
304154e4ee71SNavdeep Parhar 	*ba = error ? 0 : segs->ds_addr;
304254e4ee71SNavdeep Parhar }
304354e4ee71SNavdeep Parhar 
304454e4ee71SNavdeep Parhar static inline bool
304554e4ee71SNavdeep Parhar is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
304654e4ee71SNavdeep Parhar {
304754e4ee71SNavdeep Parhar 	*ctrl = (void *)((uintptr_t)iq->cdesc +
304854e4ee71SNavdeep Parhar 	    (iq->esize - sizeof(struct rsp_ctrl)));
304954e4ee71SNavdeep Parhar 
305054e4ee71SNavdeep Parhar 	return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
305154e4ee71SNavdeep Parhar }
305254e4ee71SNavdeep Parhar 
305354e4ee71SNavdeep Parhar static inline void
305454e4ee71SNavdeep Parhar iq_next(struct sge_iq *iq)
305554e4ee71SNavdeep Parhar {
305654e4ee71SNavdeep Parhar 	iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
305754e4ee71SNavdeep Parhar 	if (__predict_false(++iq->cidx == iq->qsize - 1)) {
305854e4ee71SNavdeep Parhar 		iq->cidx = 0;
305954e4ee71SNavdeep Parhar 		iq->gen ^= 1;
306054e4ee71SNavdeep Parhar 		iq->cdesc = iq->desc;
306154e4ee71SNavdeep Parhar 	}
306254e4ee71SNavdeep Parhar }
306354e4ee71SNavdeep Parhar 
3064fb12416cSNavdeep Parhar #define FL_HW_IDX(x) ((x) >> 3)
306554e4ee71SNavdeep Parhar static inline void
306654e4ee71SNavdeep Parhar ring_fl_db(struct adapter *sc, struct sge_fl *fl)
306754e4ee71SNavdeep Parhar {
306854e4ee71SNavdeep Parhar 	int ndesc = fl->pending / 8;
3069d14b0ac1SNavdeep Parhar 	uint32_t v;
307054e4ee71SNavdeep Parhar 
3071fb12416cSNavdeep Parhar 	if (FL_HW_IDX(fl->pidx) == FL_HW_IDX(fl->cidx))
3072fb12416cSNavdeep Parhar 		ndesc--;	/* hold back one credit */
3073fb12416cSNavdeep Parhar 
3074fb12416cSNavdeep Parhar 	if (ndesc <= 0)
3075fb12416cSNavdeep Parhar 		return;		/* nothing to do */
307654e4ee71SNavdeep Parhar 
3077d14b0ac1SNavdeep Parhar 	v = F_DBPRIO | V_QID(fl->cntxt_id) | V_PIDX(ndesc);
3078d14b0ac1SNavdeep Parhar 	if (is_t5(sc))
3079d14b0ac1SNavdeep Parhar 		v |= F_DBTYPE;
3080d14b0ac1SNavdeep Parhar 
308154e4ee71SNavdeep Parhar 	wmb();
308254e4ee71SNavdeep Parhar 
3083d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), v);
3084fb12416cSNavdeep Parhar 	fl->pending -= ndesc * 8;
308554e4ee71SNavdeep Parhar }
308654e4ee71SNavdeep Parhar 
3087fb12416cSNavdeep Parhar /*
3088733b9277SNavdeep Parhar  * Fill up the freelist by upto nbufs and maybe ring its doorbell.
3089733b9277SNavdeep Parhar  *
3090733b9277SNavdeep Parhar  * Returns non-zero to indicate that it should be added to the list of starving
3091733b9277SNavdeep Parhar  * freelists.
3092fb12416cSNavdeep Parhar  */
3093733b9277SNavdeep Parhar static int
3094733b9277SNavdeep Parhar refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs)
309554e4ee71SNavdeep Parhar {
309654e4ee71SNavdeep Parhar 	__be64 *d = &fl->desc[fl->pidx];
309754e4ee71SNavdeep Parhar 	struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
309854e4ee71SNavdeep Parhar 	bus_dma_tag_t tag;
309954e4ee71SNavdeep Parhar 	bus_addr_t pa;
310054e4ee71SNavdeep Parhar 	caddr_t cl;
310154e4ee71SNavdeep Parhar 	int rc;
310254e4ee71SNavdeep Parhar 
310354e4ee71SNavdeep Parhar 	FL_LOCK_ASSERT_OWNED(fl);
31041458bff9SNavdeep Parhar #ifdef INVARIANTS
31051458bff9SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING)
31061458bff9SNavdeep Parhar 		KASSERT(sd->tag_idx == 0,
31071458bff9SNavdeep Parhar 		    ("%s: expected tag 0 but found tag %d at pidx %u instead",
31081458bff9SNavdeep Parhar 		    __func__, sd->tag_idx, fl->pidx));
31091458bff9SNavdeep Parhar #endif
311054e4ee71SNavdeep Parhar 
3111733b9277SNavdeep Parhar 	if (nbufs > fl->needed)
311254e4ee71SNavdeep Parhar 		nbufs = fl->needed;
311354e4ee71SNavdeep Parhar 
311454e4ee71SNavdeep Parhar 	while (nbufs--) {
311554e4ee71SNavdeep Parhar 
311654e4ee71SNavdeep Parhar 		if (sd->cl != NULL) {
311754e4ee71SNavdeep Parhar 
31181458bff9SNavdeep Parhar 			KASSERT(*d == sd->ba_hwtag,
311954e4ee71SNavdeep Parhar 			    ("%s: recyling problem at pidx %d",
312054e4ee71SNavdeep Parhar 			    __func__, fl->pidx));
312154e4ee71SNavdeep Parhar 
31221458bff9SNavdeep Parhar 			if (fl->flags & FL_BUF_PACKING) {
31231458bff9SNavdeep Parhar 				u_int *refcount = find_buf_refcnt(sd->cl);
31241458bff9SNavdeep Parhar 
31251458bff9SNavdeep Parhar 				if (atomic_fetchadd_int(refcount, -1) == 1) {
31261458bff9SNavdeep Parhar 					*refcount = 1;	/* reinstate */
312754e4ee71SNavdeep Parhar 					d++;
312854e4ee71SNavdeep Parhar 					goto recycled;
312954e4ee71SNavdeep Parhar 				}
31301458bff9SNavdeep Parhar 				sd->cl = NULL;	/* gave up my reference */
31311458bff9SNavdeep Parhar 			} else {
31321458bff9SNavdeep Parhar 				/*
31331458bff9SNavdeep Parhar 				 * This happens when a frame small enough to fit
31341458bff9SNavdeep Parhar 				 * entirely in an mbuf was received in cl last
31351458bff9SNavdeep Parhar 				 * time.  We'd held on to cl and can reuse it
31361458bff9SNavdeep Parhar 				 * now.  Note that we reuse a cluster of the old
31371458bff9SNavdeep Parhar 				 * size if fl->tag_idx is no longer the same as
31381458bff9SNavdeep Parhar 				 * sd->tag_idx.
31391458bff9SNavdeep Parhar 				 */
31401458bff9SNavdeep Parhar 				d++;
31411458bff9SNavdeep Parhar 				goto recycled;
31421458bff9SNavdeep Parhar 			}
31431458bff9SNavdeep Parhar 		}
314454e4ee71SNavdeep Parhar 
31451458bff9SNavdeep Parhar 		if (__predict_false(fl->tag_idx != sd->tag_idx)) {
314654e4ee71SNavdeep Parhar 			bus_dmamap_t map;
314754e4ee71SNavdeep Parhar 			bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
314854e4ee71SNavdeep Parhar 			bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];
314954e4ee71SNavdeep Parhar 
315054e4ee71SNavdeep Parhar 			/*
315154e4ee71SNavdeep Parhar 			 * An MTU change can get us here.  Discard the old map
315254e4ee71SNavdeep Parhar 			 * which was created with the old tag, but only if
315354e4ee71SNavdeep Parhar 			 * we're able to get a new one.
315454e4ee71SNavdeep Parhar 			 */
315554e4ee71SNavdeep Parhar 			rc = bus_dmamap_create(newtag, 0, &map);
315654e4ee71SNavdeep Parhar 			if (rc == 0) {
315754e4ee71SNavdeep Parhar 				bus_dmamap_destroy(oldtag, sd->map);
315854e4ee71SNavdeep Parhar 				sd->map = map;
315954e4ee71SNavdeep Parhar 				sd->tag_idx = fl->tag_idx;
316054e4ee71SNavdeep Parhar 			}
316154e4ee71SNavdeep Parhar 		}
316254e4ee71SNavdeep Parhar 
316354e4ee71SNavdeep Parhar 		tag = fl->tag[sd->tag_idx];
316454e4ee71SNavdeep Parhar 
31651458bff9SNavdeep Parhar 		cl = uma_zalloc(FL_BUF_ZONE(sc, sd->tag_idx), M_NOWAIT);
316654e4ee71SNavdeep Parhar 		if (cl == NULL)
316754e4ee71SNavdeep Parhar 			break;
31681458bff9SNavdeep Parhar 		if (fl->flags & FL_BUF_PACKING) {
31691458bff9SNavdeep Parhar 			*find_buf_refcnt(cl) = 1;
31701458bff9SNavdeep Parhar 			cl += MSIZE;
31711458bff9SNavdeep Parhar 		}
317254e4ee71SNavdeep Parhar 
31731458bff9SNavdeep Parhar 		rc = bus_dmamap_load(tag, sd->map, cl,
31741458bff9SNavdeep Parhar 		    FL_BUF_SIZE(sc, sd->tag_idx), oneseg_dma_callback, &pa, 0);
317554e4ee71SNavdeep Parhar 		if (rc != 0 || pa == 0) {
317654e4ee71SNavdeep Parhar 			fl->dmamap_failed++;
31771458bff9SNavdeep Parhar 			if (fl->flags & FL_BUF_PACKING)
31781458bff9SNavdeep Parhar 				cl -= MSIZE;
31791458bff9SNavdeep Parhar 			uma_zfree(FL_BUF_ZONE(sc, sd->tag_idx), cl);
318054e4ee71SNavdeep Parhar 			break;
318154e4ee71SNavdeep Parhar 		}
318254e4ee71SNavdeep Parhar 
318354e4ee71SNavdeep Parhar 		sd->cl = cl;
31841458bff9SNavdeep Parhar 		*d++ = htobe64(pa | FL_BUF_HWTAG(sc, sd->tag_idx));
318554e4ee71SNavdeep Parhar 
318654e4ee71SNavdeep Parhar #ifdef INVARIANTS
31871458bff9SNavdeep Parhar 		sd->ba_hwtag = htobe64(pa | FL_BUF_HWTAG(sc, sd->tag_idx));
318854e4ee71SNavdeep Parhar #endif
318954e4ee71SNavdeep Parhar 
31907d29df59SNavdeep Parhar recycled:
31917d29df59SNavdeep Parhar 		fl->pending++;
319254e4ee71SNavdeep Parhar 		fl->needed--;
319354e4ee71SNavdeep Parhar 		sd++;
319454e4ee71SNavdeep Parhar 		if (++fl->pidx == fl->cap) {
319554e4ee71SNavdeep Parhar 			fl->pidx = 0;
319654e4ee71SNavdeep Parhar 			sd = fl->sdesc;
319754e4ee71SNavdeep Parhar 			d = fl->desc;
319854e4ee71SNavdeep Parhar 		}
319954e4ee71SNavdeep Parhar 	}
3200fb12416cSNavdeep Parhar 
3201733b9277SNavdeep Parhar 	if (fl->pending >= 8)
3202fb12416cSNavdeep Parhar 		ring_fl_db(sc, fl);
3203733b9277SNavdeep Parhar 
3204733b9277SNavdeep Parhar 	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
3205733b9277SNavdeep Parhar }
3206733b9277SNavdeep Parhar 
3207733b9277SNavdeep Parhar /*
3208733b9277SNavdeep Parhar  * Attempt to refill all starving freelists.
3209733b9277SNavdeep Parhar  */
3210733b9277SNavdeep Parhar static void
3211733b9277SNavdeep Parhar refill_sfl(void *arg)
3212733b9277SNavdeep Parhar {
3213733b9277SNavdeep Parhar 	struct adapter *sc = arg;
3214733b9277SNavdeep Parhar 	struct sge_fl *fl, *fl_temp;
3215733b9277SNavdeep Parhar 
3216733b9277SNavdeep Parhar 	mtx_lock(&sc->sfl_lock);
3217733b9277SNavdeep Parhar 	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
3218733b9277SNavdeep Parhar 		FL_LOCK(fl);
3219733b9277SNavdeep Parhar 		refill_fl(sc, fl, 64);
3220733b9277SNavdeep Parhar 		if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
3221733b9277SNavdeep Parhar 			TAILQ_REMOVE(&sc->sfl, fl, link);
3222733b9277SNavdeep Parhar 			fl->flags &= ~FL_STARVING;
3223733b9277SNavdeep Parhar 		}
3224733b9277SNavdeep Parhar 		FL_UNLOCK(fl);
3225733b9277SNavdeep Parhar 	}
3226733b9277SNavdeep Parhar 
3227733b9277SNavdeep Parhar 	if (!TAILQ_EMPTY(&sc->sfl))
3228733b9277SNavdeep Parhar 		callout_schedule(&sc->sfl_callout, hz / 5);
3229733b9277SNavdeep Parhar 	mtx_unlock(&sc->sfl_lock);
323054e4ee71SNavdeep Parhar }
323154e4ee71SNavdeep Parhar 
323254e4ee71SNavdeep Parhar static int
323354e4ee71SNavdeep Parhar alloc_fl_sdesc(struct sge_fl *fl)
323454e4ee71SNavdeep Parhar {
323554e4ee71SNavdeep Parhar 	struct fl_sdesc *sd;
323654e4ee71SNavdeep Parhar 	bus_dma_tag_t tag;
323754e4ee71SNavdeep Parhar 	int i, rc;
323854e4ee71SNavdeep Parhar 
323954e4ee71SNavdeep Parhar 	fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
324054e4ee71SNavdeep Parhar 	    M_ZERO | M_WAITOK);
324154e4ee71SNavdeep Parhar 
324254e4ee71SNavdeep Parhar 	tag = fl->tag[fl->tag_idx];
324354e4ee71SNavdeep Parhar 	sd = fl->sdesc;
324454e4ee71SNavdeep Parhar 	for (i = 0; i < fl->cap; i++, sd++) {
324554e4ee71SNavdeep Parhar 
324654e4ee71SNavdeep Parhar 		sd->tag_idx = fl->tag_idx;
324754e4ee71SNavdeep Parhar 		rc = bus_dmamap_create(tag, 0, &sd->map);
324854e4ee71SNavdeep Parhar 		if (rc != 0)
324954e4ee71SNavdeep Parhar 			goto failed;
325054e4ee71SNavdeep Parhar 	}
325154e4ee71SNavdeep Parhar 
325254e4ee71SNavdeep Parhar 	return (0);
325354e4ee71SNavdeep Parhar failed:
325454e4ee71SNavdeep Parhar 	while (--i >= 0) {
325554e4ee71SNavdeep Parhar 		sd--;
325654e4ee71SNavdeep Parhar 		bus_dmamap_destroy(tag, sd->map);
325754e4ee71SNavdeep Parhar 	}
325854e4ee71SNavdeep Parhar 	KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));
325954e4ee71SNavdeep Parhar 
326054e4ee71SNavdeep Parhar 	free(fl->sdesc, M_CXGBE);
326154e4ee71SNavdeep Parhar 	fl->sdesc = NULL;
326254e4ee71SNavdeep Parhar 
326354e4ee71SNavdeep Parhar 	return (rc);
326454e4ee71SNavdeep Parhar }
326554e4ee71SNavdeep Parhar 
326654e4ee71SNavdeep Parhar static void
32671458bff9SNavdeep Parhar free_fl_sdesc(struct adapter *sc, struct sge_fl *fl)
326854e4ee71SNavdeep Parhar {
326954e4ee71SNavdeep Parhar 	struct fl_sdesc *sd;
327054e4ee71SNavdeep Parhar 	int i;
327154e4ee71SNavdeep Parhar 
327254e4ee71SNavdeep Parhar 	sd = fl->sdesc;
327354e4ee71SNavdeep Parhar 	for (i = 0; i < fl->cap; i++, sd++) {
327454e4ee71SNavdeep Parhar 
327554e4ee71SNavdeep Parhar 		if (sd->cl) {
327654e4ee71SNavdeep Parhar 			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
32771458bff9SNavdeep Parhar 			uma_zfree(FL_BUF_ZONE(sc, sd->tag_idx), sd->cl);
327854e4ee71SNavdeep Parhar 			sd->cl = NULL;
327954e4ee71SNavdeep Parhar 		}
328054e4ee71SNavdeep Parhar 
328154e4ee71SNavdeep Parhar 		bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
328254e4ee71SNavdeep Parhar 	}
328354e4ee71SNavdeep Parhar 
328454e4ee71SNavdeep Parhar 	free(fl->sdesc, M_CXGBE);
328554e4ee71SNavdeep Parhar 	fl->sdesc = NULL;
328654e4ee71SNavdeep Parhar }
328754e4ee71SNavdeep Parhar 
3288733b9277SNavdeep Parhar int
3289733b9277SNavdeep Parhar t4_alloc_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag, int count,
3290733b9277SNavdeep Parhar     int flags)
329154e4ee71SNavdeep Parhar {
329254e4ee71SNavdeep Parhar 	struct tx_map *txm;
3293733b9277SNavdeep Parhar 	int i, rc;
329454e4ee71SNavdeep Parhar 
3295733b9277SNavdeep Parhar 	txmaps->map_total = txmaps->map_avail = count;
3296733b9277SNavdeep Parhar 	txmaps->map_cidx = txmaps->map_pidx = 0;
329754e4ee71SNavdeep Parhar 
3298733b9277SNavdeep Parhar 	txmaps->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
3299733b9277SNavdeep Parhar 	    M_ZERO | flags);
330054e4ee71SNavdeep Parhar 
3301733b9277SNavdeep Parhar 	txm = txmaps->maps;
330254e4ee71SNavdeep Parhar 	for (i = 0; i < count; i++, txm++) {
3303733b9277SNavdeep Parhar 		rc = bus_dmamap_create(tx_tag, 0, &txm->map);
330454e4ee71SNavdeep Parhar 		if (rc != 0)
330554e4ee71SNavdeep Parhar 			goto failed;
330654e4ee71SNavdeep Parhar 	}
330754e4ee71SNavdeep Parhar 
330854e4ee71SNavdeep Parhar 	return (0);
330954e4ee71SNavdeep Parhar failed:
331054e4ee71SNavdeep Parhar 	while (--i >= 0) {
331154e4ee71SNavdeep Parhar 		txm--;
3312733b9277SNavdeep Parhar 		bus_dmamap_destroy(tx_tag, txm->map);
331354e4ee71SNavdeep Parhar 	}
3314733b9277SNavdeep Parhar 	KASSERT(txm == txmaps->maps, ("%s: EDOOFUS", __func__));
331554e4ee71SNavdeep Parhar 
3316733b9277SNavdeep Parhar 	free(txmaps->maps, M_CXGBE);
3317733b9277SNavdeep Parhar 	txmaps->maps = NULL;
331854e4ee71SNavdeep Parhar 
331954e4ee71SNavdeep Parhar 	return (rc);
332054e4ee71SNavdeep Parhar }
332154e4ee71SNavdeep Parhar 
3322733b9277SNavdeep Parhar void
3323733b9277SNavdeep Parhar t4_free_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag)
332454e4ee71SNavdeep Parhar {
332554e4ee71SNavdeep Parhar 	struct tx_map *txm;
332654e4ee71SNavdeep Parhar 	int i;
332754e4ee71SNavdeep Parhar 
3328733b9277SNavdeep Parhar 	txm = txmaps->maps;
3329733b9277SNavdeep Parhar 	for (i = 0; i < txmaps->map_total; i++, txm++) {
333054e4ee71SNavdeep Parhar 
333154e4ee71SNavdeep Parhar 		if (txm->m) {
3332733b9277SNavdeep Parhar 			bus_dmamap_unload(tx_tag, txm->map);
333354e4ee71SNavdeep Parhar 			m_freem(txm->m);
333454e4ee71SNavdeep Parhar 			txm->m = NULL;
333554e4ee71SNavdeep Parhar 		}
333654e4ee71SNavdeep Parhar 
3337733b9277SNavdeep Parhar 		bus_dmamap_destroy(tx_tag, txm->map);
333854e4ee71SNavdeep Parhar 	}
333954e4ee71SNavdeep Parhar 
3340733b9277SNavdeep Parhar 	free(txmaps->maps, M_CXGBE);
3341733b9277SNavdeep Parhar 	txmaps->maps = NULL;
334254e4ee71SNavdeep Parhar }
334354e4ee71SNavdeep Parhar 
334454e4ee71SNavdeep Parhar /*
334554e4ee71SNavdeep Parhar  * We'll do immediate data tx for non-TSO, but only when not coalescing.  We're
334654e4ee71SNavdeep Parhar  * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes
334754e4ee71SNavdeep Parhar  * of immediate data.
334854e4ee71SNavdeep Parhar  */
334954e4ee71SNavdeep Parhar #define IMM_LEN ( \
3350733b9277SNavdeep Parhar       2 * EQ_ESIZE \
335154e4ee71SNavdeep Parhar     - sizeof(struct fw_eth_tx_pkt_wr) \
335254e4ee71SNavdeep Parhar     - sizeof(struct cpl_tx_pkt_core))
335354e4ee71SNavdeep Parhar 
335454e4ee71SNavdeep Parhar /*
335554e4ee71SNavdeep Parhar  * Returns non-zero on failure, no need to cleanup anything in that case.
335654e4ee71SNavdeep Parhar  *
335754e4ee71SNavdeep Parhar  * Note 1: We always try to defrag the mbuf if required and return EFBIG only
335854e4ee71SNavdeep Parhar  * if the resulting chain still won't fit in a tx descriptor.
335954e4ee71SNavdeep Parhar  *
336054e4ee71SNavdeep Parhar  * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
336154e4ee71SNavdeep Parhar  * does not have the TCP header in it.
336254e4ee71SNavdeep Parhar  */
336354e4ee71SNavdeep Parhar static int
336454e4ee71SNavdeep Parhar get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
336554e4ee71SNavdeep Parhar     int sgl_only)
336654e4ee71SNavdeep Parhar {
336754e4ee71SNavdeep Parhar 	struct mbuf *m = *fp;
3368733b9277SNavdeep Parhar 	struct tx_maps *txmaps;
336954e4ee71SNavdeep Parhar 	struct tx_map *txm;
337054e4ee71SNavdeep Parhar 	int rc, defragged = 0, n;
337154e4ee71SNavdeep Parhar 
337254e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
337354e4ee71SNavdeep Parhar 
337454e4ee71SNavdeep Parhar 	if (m->m_pkthdr.tso_segsz)
337554e4ee71SNavdeep Parhar 		sgl_only = 1;	/* Do not allow immediate data with LSO */
337654e4ee71SNavdeep Parhar 
337754e4ee71SNavdeep Parhar start:	sgl->nsegs = 0;
337854e4ee71SNavdeep Parhar 
337954e4ee71SNavdeep Parhar 	if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
338054e4ee71SNavdeep Parhar 		return (0);	/* nsegs = 0 tells caller to use imm. tx */
338154e4ee71SNavdeep Parhar 
3382733b9277SNavdeep Parhar 	txmaps = &txq->txmaps;
3383733b9277SNavdeep Parhar 	if (txmaps->map_avail == 0) {
338454e4ee71SNavdeep Parhar 		txq->no_dmamap++;
338554e4ee71SNavdeep Parhar 		return (ENOMEM);
338654e4ee71SNavdeep Parhar 	}
3387733b9277SNavdeep Parhar 	txm = &txmaps->maps[txmaps->map_pidx];
338854e4ee71SNavdeep Parhar 
338954e4ee71SNavdeep Parhar 	if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
339054e4ee71SNavdeep Parhar 		*fp = m_pullup(m, 50);
339154e4ee71SNavdeep Parhar 		m = *fp;
339254e4ee71SNavdeep Parhar 		if (m == NULL)
339354e4ee71SNavdeep Parhar 			return (ENOBUFS);
339454e4ee71SNavdeep Parhar 	}
339554e4ee71SNavdeep Parhar 
3396f7dfe243SNavdeep Parhar 	rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
339754e4ee71SNavdeep Parhar 	    &sgl->nsegs, BUS_DMA_NOWAIT);
339854e4ee71SNavdeep Parhar 	if (rc == EFBIG && defragged == 0) {
3399c6499eccSGleb Smirnoff 		m = m_defrag(m, M_NOWAIT);
340054e4ee71SNavdeep Parhar 		if (m == NULL)
340154e4ee71SNavdeep Parhar 			return (EFBIG);
340254e4ee71SNavdeep Parhar 
340354e4ee71SNavdeep Parhar 		defragged = 1;
340454e4ee71SNavdeep Parhar 		*fp = m;
340554e4ee71SNavdeep Parhar 		goto start;
340654e4ee71SNavdeep Parhar 	}
340754e4ee71SNavdeep Parhar 	if (rc != 0)
340854e4ee71SNavdeep Parhar 		return (rc);
340954e4ee71SNavdeep Parhar 
341054e4ee71SNavdeep Parhar 	txm->m = m;
3411733b9277SNavdeep Parhar 	txmaps->map_avail--;
3412733b9277SNavdeep Parhar 	if (++txmaps->map_pidx == txmaps->map_total)
3413733b9277SNavdeep Parhar 		txmaps->map_pidx = 0;
341454e4ee71SNavdeep Parhar 
341554e4ee71SNavdeep Parhar 	KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
341654e4ee71SNavdeep Parhar 	    ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));
341754e4ee71SNavdeep Parhar 
341854e4ee71SNavdeep Parhar 	/*
341954e4ee71SNavdeep Parhar 	 * Store the # of flits required to hold this frame's SGL in nflits.  An
342054e4ee71SNavdeep Parhar 	 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
342154e4ee71SNavdeep Parhar 	 * multiple (len0 + len1, addr0, addr1) tuples.  If addr1 is not used
342254e4ee71SNavdeep Parhar 	 * then len1 must be set to 0.
342354e4ee71SNavdeep Parhar 	 */
342454e4ee71SNavdeep Parhar 	n = sgl->nsegs - 1;
342554e4ee71SNavdeep Parhar 	sgl->nflits = (3 * n) / 2 + (n & 1) + 2;
342654e4ee71SNavdeep Parhar 
342754e4ee71SNavdeep Parhar 	return (0);
342854e4ee71SNavdeep Parhar }
342954e4ee71SNavdeep Parhar 
343054e4ee71SNavdeep Parhar 
343154e4ee71SNavdeep Parhar /*
343254e4ee71SNavdeep Parhar  * Releases all the txq resources used up in the specified sgl.
343354e4ee71SNavdeep Parhar  */
343454e4ee71SNavdeep Parhar static int
343554e4ee71SNavdeep Parhar free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
343654e4ee71SNavdeep Parhar {
3437733b9277SNavdeep Parhar 	struct tx_maps *txmaps;
343854e4ee71SNavdeep Parhar 	struct tx_map *txm;
343954e4ee71SNavdeep Parhar 
344054e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
344154e4ee71SNavdeep Parhar 
344254e4ee71SNavdeep Parhar 	if (sgl->nsegs == 0)
344354e4ee71SNavdeep Parhar 		return (0);	/* didn't use any map */
344454e4ee71SNavdeep Parhar 
3445733b9277SNavdeep Parhar 	txmaps = &txq->txmaps;
3446733b9277SNavdeep Parhar 
344754e4ee71SNavdeep Parhar 	/* 1 pkt uses exactly 1 map, back it out */
344854e4ee71SNavdeep Parhar 
3449733b9277SNavdeep Parhar 	txmaps->map_avail++;
3450733b9277SNavdeep Parhar 	if (txmaps->map_pidx > 0)
3451733b9277SNavdeep Parhar 		txmaps->map_pidx--;
345254e4ee71SNavdeep Parhar 	else
3453733b9277SNavdeep Parhar 		txmaps->map_pidx = txmaps->map_total - 1;
345454e4ee71SNavdeep Parhar 
3455733b9277SNavdeep Parhar 	txm = &txmaps->maps[txmaps->map_pidx];
3456f7dfe243SNavdeep Parhar 	bus_dmamap_unload(txq->tx_tag, txm->map);
345754e4ee71SNavdeep Parhar 	txm->m = NULL;
345854e4ee71SNavdeep Parhar 
345954e4ee71SNavdeep Parhar 	return (0);
346054e4ee71SNavdeep Parhar }
346154e4ee71SNavdeep Parhar 
346254e4ee71SNavdeep Parhar static int
346354e4ee71SNavdeep Parhar write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
346454e4ee71SNavdeep Parhar     struct sgl *sgl)
346554e4ee71SNavdeep Parhar {
346654e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
346754e4ee71SNavdeep Parhar 	struct fw_eth_tx_pkt_wr *wr;
346854e4ee71SNavdeep Parhar 	struct cpl_tx_pkt_core *cpl;
346954e4ee71SNavdeep Parhar 	uint32_t ctrl;	/* used in many unrelated places */
347054e4ee71SNavdeep Parhar 	uint64_t ctrl1;
3471ecb79ca4SNavdeep Parhar 	int nflits, ndesc, pktlen;
347254e4ee71SNavdeep Parhar 	struct tx_sdesc *txsd;
347354e4ee71SNavdeep Parhar 	caddr_t dst;
347454e4ee71SNavdeep Parhar 
347554e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
347654e4ee71SNavdeep Parhar 
3477ecb79ca4SNavdeep Parhar 	pktlen = m->m_pkthdr.len;
3478ecb79ca4SNavdeep Parhar 
347954e4ee71SNavdeep Parhar 	/*
348054e4ee71SNavdeep Parhar 	 * Do we have enough flits to send this frame out?
348154e4ee71SNavdeep Parhar 	 */
348254e4ee71SNavdeep Parhar 	ctrl = sizeof(struct cpl_tx_pkt_core);
348354e4ee71SNavdeep Parhar 	if (m->m_pkthdr.tso_segsz) {
348454e4ee71SNavdeep Parhar 		nflits = TXPKT_LSO_WR_HDR;
34852a5f6b0eSNavdeep Parhar 		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
348654e4ee71SNavdeep Parhar 	} else
348754e4ee71SNavdeep Parhar 		nflits = TXPKT_WR_HDR;
348854e4ee71SNavdeep Parhar 	if (sgl->nsegs > 0)
348954e4ee71SNavdeep Parhar 		nflits += sgl->nflits;
349054e4ee71SNavdeep Parhar 	else {
3491ecb79ca4SNavdeep Parhar 		nflits += howmany(pktlen, 8);
3492ecb79ca4SNavdeep Parhar 		ctrl += pktlen;
349354e4ee71SNavdeep Parhar 	}
349454e4ee71SNavdeep Parhar 	ndesc = howmany(nflits, 8);
349554e4ee71SNavdeep Parhar 	if (ndesc > eq->avail)
349654e4ee71SNavdeep Parhar 		return (ENOMEM);
349754e4ee71SNavdeep Parhar 
349854e4ee71SNavdeep Parhar 	/* Firmware work request header */
349954e4ee71SNavdeep Parhar 	wr = (void *)&eq->desc[eq->pidx];
350054e4ee71SNavdeep Parhar 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
3501733b9277SNavdeep Parhar 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
350254e4ee71SNavdeep Parhar 	ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
3503733b9277SNavdeep Parhar 	if (eq->avail == ndesc) {
3504733b9277SNavdeep Parhar 		if (!(eq->flags & EQ_CRFLUSHED)) {
350554e4ee71SNavdeep Parhar 			ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
35066b49a4ecSNavdeep Parhar 			eq->flags |= EQ_CRFLUSHED;
35076b49a4ecSNavdeep Parhar 		}
3508733b9277SNavdeep Parhar 		eq->flags |= EQ_STALLED;
3509733b9277SNavdeep Parhar 	}
35106b49a4ecSNavdeep Parhar 
351154e4ee71SNavdeep Parhar 	wr->equiq_to_len16 = htobe32(ctrl);
351254e4ee71SNavdeep Parhar 	wr->r3 = 0;
351354e4ee71SNavdeep Parhar 
351454e4ee71SNavdeep Parhar 	if (m->m_pkthdr.tso_segsz) {
35152a5f6b0eSNavdeep Parhar 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
351654e4ee71SNavdeep Parhar 		struct ether_header *eh;
3517a1ea9a82SNavdeep Parhar 		void *l3hdr;
3518a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
351954e4ee71SNavdeep Parhar 		struct tcphdr *tcp;
3520a1ea9a82SNavdeep Parhar #endif
3521a1ea9a82SNavdeep Parhar 		uint16_t eh_type;
352254e4ee71SNavdeep Parhar 
352354e4ee71SNavdeep Parhar 		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
352454e4ee71SNavdeep Parhar 		    F_LSO_LAST_SLICE;
352554e4ee71SNavdeep Parhar 
352654e4ee71SNavdeep Parhar 		eh = mtod(m, struct ether_header *);
3527a1ea9a82SNavdeep Parhar 		eh_type = ntohs(eh->ether_type);
3528a1ea9a82SNavdeep Parhar 		if (eh_type == ETHERTYPE_VLAN) {
3529a1ea9a82SNavdeep Parhar 			struct ether_vlan_header *evh = (void *)eh;
3530a1ea9a82SNavdeep Parhar 
353154e4ee71SNavdeep Parhar 			ctrl |= V_LSO_ETHHDR_LEN(1);
3532a1ea9a82SNavdeep Parhar 			l3hdr = evh + 1;
3533a1ea9a82SNavdeep Parhar 			eh_type = ntohs(evh->evl_proto);
353454e4ee71SNavdeep Parhar 		} else
3535a1ea9a82SNavdeep Parhar 			l3hdr = eh + 1;
3536a1ea9a82SNavdeep Parhar 
3537a1ea9a82SNavdeep Parhar 		switch (eh_type) {
3538a1ea9a82SNavdeep Parhar #ifdef INET6
3539a1ea9a82SNavdeep Parhar 		case ETHERTYPE_IPV6:
3540a1ea9a82SNavdeep Parhar 		{
3541a1ea9a82SNavdeep Parhar 			struct ip6_hdr *ip6 = l3hdr;
3542a1ea9a82SNavdeep Parhar 
3543a1ea9a82SNavdeep Parhar 			/*
3544a1ea9a82SNavdeep Parhar 			 * XXX-BZ For now we do not pretend to support
3545a1ea9a82SNavdeep Parhar 			 * IPv6 extension headers.
3546a1ea9a82SNavdeep Parhar 			 */
3547a1ea9a82SNavdeep Parhar 			KASSERT(ip6->ip6_nxt == IPPROTO_TCP, ("%s: CSUM_TSO "
3548a1ea9a82SNavdeep Parhar 			    "with ip6_nxt != TCP: %u", __func__, ip6->ip6_nxt));
3549a1ea9a82SNavdeep Parhar 			tcp = (struct tcphdr *)(ip6 + 1);
3550a1ea9a82SNavdeep Parhar 			ctrl |= F_LSO_IPV6;
3551a1ea9a82SNavdeep Parhar 			ctrl |= V_LSO_IPHDR_LEN(sizeof(*ip6) >> 2) |
3552a1ea9a82SNavdeep Parhar 			    V_LSO_TCPHDR_LEN(tcp->th_off);
3553a1ea9a82SNavdeep Parhar 			break;
3554a1ea9a82SNavdeep Parhar 		}
3555a1ea9a82SNavdeep Parhar #endif
3556a1ea9a82SNavdeep Parhar #ifdef INET
3557a1ea9a82SNavdeep Parhar 		case ETHERTYPE_IP:
3558a1ea9a82SNavdeep Parhar 		{
3559a1ea9a82SNavdeep Parhar 			struct ip *ip = l3hdr;
356054e4ee71SNavdeep Parhar 
356154e4ee71SNavdeep Parhar 			tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
356254e4ee71SNavdeep Parhar 			ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
356354e4ee71SNavdeep Parhar 			    V_LSO_TCPHDR_LEN(tcp->th_off);
3564a1ea9a82SNavdeep Parhar 			break;
3565a1ea9a82SNavdeep Parhar 		}
3566a1ea9a82SNavdeep Parhar #endif
3567a1ea9a82SNavdeep Parhar 		default:
3568a1ea9a82SNavdeep Parhar 			panic("%s: CSUM_TSO but no supported IP version "
3569a1ea9a82SNavdeep Parhar 			    "(0x%04x)", __func__, eh_type);
3570a1ea9a82SNavdeep Parhar 		}
357154e4ee71SNavdeep Parhar 
357254e4ee71SNavdeep Parhar 		lso->lso_ctrl = htobe32(ctrl);
357354e4ee71SNavdeep Parhar 		lso->ipid_ofst = htobe16(0);
357454e4ee71SNavdeep Parhar 		lso->mss = htobe16(m->m_pkthdr.tso_segsz);
357554e4ee71SNavdeep Parhar 		lso->seqno_offset = htobe32(0);
3576ecb79ca4SNavdeep Parhar 		lso->len = htobe32(pktlen);
357754e4ee71SNavdeep Parhar 
357854e4ee71SNavdeep Parhar 		cpl = (void *)(lso + 1);
357954e4ee71SNavdeep Parhar 
358054e4ee71SNavdeep Parhar 		txq->tso_wrs++;
358154e4ee71SNavdeep Parhar 	} else
358254e4ee71SNavdeep Parhar 		cpl = (void *)(wr + 1);
358354e4ee71SNavdeep Parhar 
358454e4ee71SNavdeep Parhar 	/* Checksum offload */
358554e4ee71SNavdeep Parhar 	ctrl1 = 0;
3586b8531380SNavdeep Parhar 	if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)))
358754e4ee71SNavdeep Parhar 		ctrl1 |= F_TXPKT_IPCSUM_DIS;
35889600bf00SNavdeep Parhar 	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
3589b8531380SNavdeep Parhar 	    CSUM_TCP_IPV6 | CSUM_TSO)))
359054e4ee71SNavdeep Parhar 		ctrl1 |= F_TXPKT_L4CSUM_DIS;
35919600bf00SNavdeep Parhar 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
3592b8531380SNavdeep Parhar 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
359354e4ee71SNavdeep Parhar 		txq->txcsum++;	/* some hardware assistance provided */
359454e4ee71SNavdeep Parhar 
359554e4ee71SNavdeep Parhar 	/* VLAN tag insertion */
359654e4ee71SNavdeep Parhar 	if (m->m_flags & M_VLANTAG) {
359754e4ee71SNavdeep Parhar 		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
359854e4ee71SNavdeep Parhar 		txq->vlan_insertion++;
359954e4ee71SNavdeep Parhar 	}
360054e4ee71SNavdeep Parhar 
360154e4ee71SNavdeep Parhar 	/* CPL header */
360254e4ee71SNavdeep Parhar 	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
360354e4ee71SNavdeep Parhar 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
360454e4ee71SNavdeep Parhar 	cpl->pack = 0;
3605ecb79ca4SNavdeep Parhar 	cpl->len = htobe16(pktlen);
360654e4ee71SNavdeep Parhar 	cpl->ctrl1 = htobe64(ctrl1);
360754e4ee71SNavdeep Parhar 
360854e4ee71SNavdeep Parhar 	/* Software descriptor */
3609f7dfe243SNavdeep Parhar 	txsd = &txq->sdesc[eq->pidx];
361054e4ee71SNavdeep Parhar 	txsd->desc_used = ndesc;
361154e4ee71SNavdeep Parhar 
361254e4ee71SNavdeep Parhar 	eq->pending += ndesc;
361354e4ee71SNavdeep Parhar 	eq->avail -= ndesc;
361454e4ee71SNavdeep Parhar 	eq->pidx += ndesc;
361554e4ee71SNavdeep Parhar 	if (eq->pidx >= eq->cap)
361654e4ee71SNavdeep Parhar 		eq->pidx -= eq->cap;
361754e4ee71SNavdeep Parhar 
361854e4ee71SNavdeep Parhar 	/* SGL */
361954e4ee71SNavdeep Parhar 	dst = (void *)(cpl + 1);
362054e4ee71SNavdeep Parhar 	if (sgl->nsegs > 0) {
3621f7dfe243SNavdeep Parhar 		txsd->credits = 1;
362254e4ee71SNavdeep Parhar 		txq->sgl_wrs++;
362354e4ee71SNavdeep Parhar 		write_sgl_to_txd(eq, sgl, &dst);
362454e4ee71SNavdeep Parhar 	} else {
3625f7dfe243SNavdeep Parhar 		txsd->credits = 0;
362654e4ee71SNavdeep Parhar 		txq->imm_wrs++;
362754e4ee71SNavdeep Parhar 		for (; m; m = m->m_next) {
362854e4ee71SNavdeep Parhar 			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
3629ecb79ca4SNavdeep Parhar #ifdef INVARIANTS
3630ecb79ca4SNavdeep Parhar 			pktlen -= m->m_len;
3631ecb79ca4SNavdeep Parhar #endif
363254e4ee71SNavdeep Parhar 		}
3633ecb79ca4SNavdeep Parhar #ifdef INVARIANTS
3634ecb79ca4SNavdeep Parhar 		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
3635ecb79ca4SNavdeep Parhar #endif
3636ecb79ca4SNavdeep Parhar 
363754e4ee71SNavdeep Parhar 	}
363854e4ee71SNavdeep Parhar 
363954e4ee71SNavdeep Parhar 	txq->txpkt_wrs++;
364054e4ee71SNavdeep Parhar 	return (0);
364154e4ee71SNavdeep Parhar }
364254e4ee71SNavdeep Parhar 
364354e4ee71SNavdeep Parhar /*
364454e4ee71SNavdeep Parhar  * Returns 0 to indicate that m has been accepted into a coalesced tx work
364554e4ee71SNavdeep Parhar  * request.  It has either been folded into txpkts or txpkts was flushed and m
364654e4ee71SNavdeep Parhar  * has started a new coalesced work request (as the first frame in a fresh
364754e4ee71SNavdeep Parhar  * txpkts).
364854e4ee71SNavdeep Parhar  *
364954e4ee71SNavdeep Parhar  * Returns non-zero to indicate a failure - caller is responsible for
365054e4ee71SNavdeep Parhar  * transmitting m, if there was anything in txpkts it has been flushed.
365154e4ee71SNavdeep Parhar  */
365254e4ee71SNavdeep Parhar static int
365354e4ee71SNavdeep Parhar add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
365454e4ee71SNavdeep Parhar     struct mbuf *m, struct sgl *sgl)
365554e4ee71SNavdeep Parhar {
365654e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
365754e4ee71SNavdeep Parhar 	int can_coalesce;
365854e4ee71SNavdeep Parhar 	struct tx_sdesc *txsd;
365954e4ee71SNavdeep Parhar 	int flits;
366054e4ee71SNavdeep Parhar 
366154e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
366254e4ee71SNavdeep Parhar 
3663733b9277SNavdeep Parhar 	KASSERT(sgl->nsegs, ("%s: can't coalesce imm data", __func__));
3664733b9277SNavdeep Parhar 
366554e4ee71SNavdeep Parhar 	if (txpkts->npkt > 0) {
366654e4ee71SNavdeep Parhar 		flits = TXPKTS_PKT_HDR + sgl->nflits;
366754e4ee71SNavdeep Parhar 		can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
366854e4ee71SNavdeep Parhar 		    txpkts->nflits + flits <= TX_WR_FLITS &&
366954e4ee71SNavdeep Parhar 		    txpkts->nflits + flits <= eq->avail * 8 &&
367054e4ee71SNavdeep Parhar 		    txpkts->plen + m->m_pkthdr.len < 65536;
367154e4ee71SNavdeep Parhar 
367254e4ee71SNavdeep Parhar 		if (can_coalesce) {
367354e4ee71SNavdeep Parhar 			txpkts->npkt++;
367454e4ee71SNavdeep Parhar 			txpkts->nflits += flits;
367554e4ee71SNavdeep Parhar 			txpkts->plen += m->m_pkthdr.len;
367654e4ee71SNavdeep Parhar 
3677f7dfe243SNavdeep Parhar 			txsd = &txq->sdesc[eq->pidx];
3678f7dfe243SNavdeep Parhar 			txsd->credits++;
367954e4ee71SNavdeep Parhar 
368054e4ee71SNavdeep Parhar 			return (0);
368154e4ee71SNavdeep Parhar 		}
368254e4ee71SNavdeep Parhar 
368354e4ee71SNavdeep Parhar 		/*
368454e4ee71SNavdeep Parhar 		 * Couldn't coalesce m into txpkts.  The first order of business
368554e4ee71SNavdeep Parhar 		 * is to send txpkts on its way.  Then we'll revisit m.
368654e4ee71SNavdeep Parhar 		 */
368754e4ee71SNavdeep Parhar 		write_txpkts_wr(txq, txpkts);
368854e4ee71SNavdeep Parhar 	}
368954e4ee71SNavdeep Parhar 
369054e4ee71SNavdeep Parhar 	/*
369154e4ee71SNavdeep Parhar 	 * Check if we can start a new coalesced tx work request with m as
369254e4ee71SNavdeep Parhar 	 * the first packet in it.
369354e4ee71SNavdeep Parhar 	 */
369454e4ee71SNavdeep Parhar 
369554e4ee71SNavdeep Parhar 	KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));
369654e4ee71SNavdeep Parhar 
369754e4ee71SNavdeep Parhar 	flits = TXPKTS_WR_HDR + sgl->nflits;
369854e4ee71SNavdeep Parhar 	can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
369954e4ee71SNavdeep Parhar 	    flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
370054e4ee71SNavdeep Parhar 
370154e4ee71SNavdeep Parhar 	if (can_coalesce == 0)
370254e4ee71SNavdeep Parhar 		return (EINVAL);
370354e4ee71SNavdeep Parhar 
370454e4ee71SNavdeep Parhar 	/*
370554e4ee71SNavdeep Parhar 	 * Start a fresh coalesced tx WR with m as the first frame in it.
370654e4ee71SNavdeep Parhar 	 */
370754e4ee71SNavdeep Parhar 	txpkts->npkt = 1;
370854e4ee71SNavdeep Parhar 	txpkts->nflits = flits;
370954e4ee71SNavdeep Parhar 	txpkts->flitp = &eq->desc[eq->pidx].flit[2];
371054e4ee71SNavdeep Parhar 	txpkts->plen = m->m_pkthdr.len;
371154e4ee71SNavdeep Parhar 
3712f7dfe243SNavdeep Parhar 	txsd = &txq->sdesc[eq->pidx];
3713f7dfe243SNavdeep Parhar 	txsd->credits = 1;
371454e4ee71SNavdeep Parhar 
371554e4ee71SNavdeep Parhar 	return (0);
371654e4ee71SNavdeep Parhar }
371754e4ee71SNavdeep Parhar 
371854e4ee71SNavdeep Parhar /*
371954e4ee71SNavdeep Parhar  * Note that write_txpkts_wr can never run out of hardware descriptors (but
372054e4ee71SNavdeep Parhar  * write_txpkt_wr can).  add_to_txpkts ensures that a frame is accepted for
372154e4ee71SNavdeep Parhar  * coalescing only if sufficient hardware descriptors are available.
372254e4ee71SNavdeep Parhar  */
372354e4ee71SNavdeep Parhar static void
372454e4ee71SNavdeep Parhar write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
372554e4ee71SNavdeep Parhar {
372654e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
372754e4ee71SNavdeep Parhar 	struct fw_eth_tx_pkts_wr *wr;
372854e4ee71SNavdeep Parhar 	struct tx_sdesc *txsd;
372954e4ee71SNavdeep Parhar 	uint32_t ctrl;
373054e4ee71SNavdeep Parhar 	int ndesc;
373154e4ee71SNavdeep Parhar 
373254e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
373354e4ee71SNavdeep Parhar 
373454e4ee71SNavdeep Parhar 	ndesc = howmany(txpkts->nflits, 8);
373554e4ee71SNavdeep Parhar 
373654e4ee71SNavdeep Parhar 	wr = (void *)&eq->desc[eq->pidx];
3737733b9277SNavdeep Parhar 	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
373854e4ee71SNavdeep Parhar 	ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
3739733b9277SNavdeep Parhar 	if (eq->avail == ndesc) {
3740733b9277SNavdeep Parhar 		if (!(eq->flags & EQ_CRFLUSHED)) {
374154e4ee71SNavdeep Parhar 			ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
37426b49a4ecSNavdeep Parhar 			eq->flags |= EQ_CRFLUSHED;
37436b49a4ecSNavdeep Parhar 		}
3744733b9277SNavdeep Parhar 		eq->flags |= EQ_STALLED;
3745733b9277SNavdeep Parhar 	}
374654e4ee71SNavdeep Parhar 	wr->equiq_to_len16 = htobe32(ctrl);
374754e4ee71SNavdeep Parhar 	wr->plen = htobe16(txpkts->plen);
374854e4ee71SNavdeep Parhar 	wr->npkt = txpkts->npkt;
3749b400f1eaSNavdeep Parhar 	wr->r3 = wr->type = 0;
375054e4ee71SNavdeep Parhar 
375154e4ee71SNavdeep Parhar 	/* Everything else already written */
375254e4ee71SNavdeep Parhar 
3753f7dfe243SNavdeep Parhar 	txsd = &txq->sdesc[eq->pidx];
375454e4ee71SNavdeep Parhar 	txsd->desc_used = ndesc;
375554e4ee71SNavdeep Parhar 
37566b49a4ecSNavdeep Parhar 	KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));
375754e4ee71SNavdeep Parhar 
375854e4ee71SNavdeep Parhar 	eq->pending += ndesc;
375954e4ee71SNavdeep Parhar 	eq->avail -= ndesc;
376054e4ee71SNavdeep Parhar 	eq->pidx += ndesc;
376154e4ee71SNavdeep Parhar 	if (eq->pidx >= eq->cap)
376254e4ee71SNavdeep Parhar 		eq->pidx -= eq->cap;
376354e4ee71SNavdeep Parhar 
376454e4ee71SNavdeep Parhar 	txq->txpkts_pkts += txpkts->npkt;
376554e4ee71SNavdeep Parhar 	txq->txpkts_wrs++;
376654e4ee71SNavdeep Parhar 	txpkts->npkt = 0;	/* emptied */
376754e4ee71SNavdeep Parhar }
376854e4ee71SNavdeep Parhar 
376954e4ee71SNavdeep Parhar static inline void
377054e4ee71SNavdeep Parhar write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
377154e4ee71SNavdeep Parhar     struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
377254e4ee71SNavdeep Parhar {
377354e4ee71SNavdeep Parhar 	struct ulp_txpkt *ulpmc;
377454e4ee71SNavdeep Parhar 	struct ulptx_idata *ulpsc;
377554e4ee71SNavdeep Parhar 	struct cpl_tx_pkt_core *cpl;
377654e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
377754e4ee71SNavdeep Parhar 	uintptr_t flitp, start, end;
377854e4ee71SNavdeep Parhar 	uint64_t ctrl;
377954e4ee71SNavdeep Parhar 	caddr_t dst;
378054e4ee71SNavdeep Parhar 
378154e4ee71SNavdeep Parhar 	KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));
378254e4ee71SNavdeep Parhar 
378354e4ee71SNavdeep Parhar 	start = (uintptr_t)eq->desc;
378454e4ee71SNavdeep Parhar 	end = (uintptr_t)eq->spg;
378554e4ee71SNavdeep Parhar 
378654e4ee71SNavdeep Parhar 	/* Checksum offload */
378754e4ee71SNavdeep Parhar 	ctrl = 0;
3788b8531380SNavdeep Parhar 	if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)))
378954e4ee71SNavdeep Parhar 		ctrl |= F_TXPKT_IPCSUM_DIS;
3790b8531380SNavdeep Parhar 	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
3791b8531380SNavdeep Parhar 	    CSUM_TCP_IPV6 | CSUM_TSO)))
379254e4ee71SNavdeep Parhar 		ctrl |= F_TXPKT_L4CSUM_DIS;
3793b8531380SNavdeep Parhar 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
3794b8531380SNavdeep Parhar 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
379554e4ee71SNavdeep Parhar 		txq->txcsum++;	/* some hardware assistance provided */
379654e4ee71SNavdeep Parhar 
379754e4ee71SNavdeep Parhar 	/* VLAN tag insertion */
379854e4ee71SNavdeep Parhar 	if (m->m_flags & M_VLANTAG) {
379954e4ee71SNavdeep Parhar 		ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
380054e4ee71SNavdeep Parhar 		txq->vlan_insertion++;
380154e4ee71SNavdeep Parhar 	}
380254e4ee71SNavdeep Parhar 
380354e4ee71SNavdeep Parhar 	/*
380454e4ee71SNavdeep Parhar 	 * The previous packet's SGL must have ended at a 16 byte boundary (this
380554e4ee71SNavdeep Parhar 	 * is required by the firmware/hardware).  It follows that flitp cannot
380654e4ee71SNavdeep Parhar 	 * wrap around between the ULPTX master command and ULPTX subcommand (8
380754e4ee71SNavdeep Parhar 	 * bytes each), and that it can not wrap around in the middle of the
380854e4ee71SNavdeep Parhar 	 * cpl_tx_pkt_core either.
380954e4ee71SNavdeep Parhar 	 */
381054e4ee71SNavdeep Parhar 	flitp = (uintptr_t)txpkts->flitp;
381154e4ee71SNavdeep Parhar 	KASSERT((flitp & 0xf) == 0,
381254e4ee71SNavdeep Parhar 	    ("%s: last SGL did not end at 16 byte boundary: %p",
381354e4ee71SNavdeep Parhar 	    __func__, txpkts->flitp));
381454e4ee71SNavdeep Parhar 
381554e4ee71SNavdeep Parhar 	/* ULP master command */
381654e4ee71SNavdeep Parhar 	ulpmc = (void *)flitp;
3817aa2457e1SNavdeep Parhar 	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) |
3818aa2457e1SNavdeep Parhar 	    V_ULP_TXPKT_FID(eq->iqid));
381954e4ee71SNavdeep Parhar 	ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
382054e4ee71SNavdeep Parhar 	    sizeof(*cpl) + 8 * sgl->nflits, 16));
382154e4ee71SNavdeep Parhar 
382254e4ee71SNavdeep Parhar 	/* ULP subcommand */
382354e4ee71SNavdeep Parhar 	ulpsc = (void *)(ulpmc + 1);
382454e4ee71SNavdeep Parhar 	ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
382554e4ee71SNavdeep Parhar 	    F_ULP_TX_SC_MORE);
382654e4ee71SNavdeep Parhar 	ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
382754e4ee71SNavdeep Parhar 
382854e4ee71SNavdeep Parhar 	flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
382954e4ee71SNavdeep Parhar 	if (flitp == end)
383054e4ee71SNavdeep Parhar 		flitp = start;
383154e4ee71SNavdeep Parhar 
383254e4ee71SNavdeep Parhar 	/* CPL_TX_PKT */
383354e4ee71SNavdeep Parhar 	cpl = (void *)flitp;
383454e4ee71SNavdeep Parhar 	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
383554e4ee71SNavdeep Parhar 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
383654e4ee71SNavdeep Parhar 	cpl->pack = 0;
383754e4ee71SNavdeep Parhar 	cpl->len = htobe16(m->m_pkthdr.len);
383854e4ee71SNavdeep Parhar 	cpl->ctrl1 = htobe64(ctrl);
383954e4ee71SNavdeep Parhar 
384054e4ee71SNavdeep Parhar 	flitp += sizeof(*cpl);
384154e4ee71SNavdeep Parhar 	if (flitp == end)
384254e4ee71SNavdeep Parhar 		flitp = start;
384354e4ee71SNavdeep Parhar 
384454e4ee71SNavdeep Parhar 	/* SGL for this frame */
384554e4ee71SNavdeep Parhar 	dst = (caddr_t)flitp;
384654e4ee71SNavdeep Parhar 	txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
384754e4ee71SNavdeep Parhar 	txpkts->flitp = (void *)dst;
384854e4ee71SNavdeep Parhar 
384954e4ee71SNavdeep Parhar 	KASSERT(((uintptr_t)dst & 0xf) == 0,
385054e4ee71SNavdeep Parhar 	    ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
385154e4ee71SNavdeep Parhar }
385254e4ee71SNavdeep Parhar 
385354e4ee71SNavdeep Parhar /*
385454e4ee71SNavdeep Parhar  * If the SGL ends on an address that is not 16 byte aligned, this function will
385554e4ee71SNavdeep Parhar  * add a 0 filled flit at the end.  It returns 1 in that case.
385654e4ee71SNavdeep Parhar  */
385754e4ee71SNavdeep Parhar static int
385854e4ee71SNavdeep Parhar write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
385954e4ee71SNavdeep Parhar {
386054e4ee71SNavdeep Parhar 	__be64 *flitp, *end;
386154e4ee71SNavdeep Parhar 	struct ulptx_sgl *usgl;
386254e4ee71SNavdeep Parhar 	bus_dma_segment_t *seg;
386354e4ee71SNavdeep Parhar 	int i, padded;
386454e4ee71SNavdeep Parhar 
386554e4ee71SNavdeep Parhar 	KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
386654e4ee71SNavdeep Parhar 	    ("%s: bad SGL - nsegs=%d, nflits=%d",
386754e4ee71SNavdeep Parhar 	    __func__, sgl->nsegs, sgl->nflits));
386854e4ee71SNavdeep Parhar 
386954e4ee71SNavdeep Parhar 	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
387054e4ee71SNavdeep Parhar 	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
387154e4ee71SNavdeep Parhar 
387254e4ee71SNavdeep Parhar 	flitp = (__be64 *)(*to);
387354e4ee71SNavdeep Parhar 	end = flitp + sgl->nflits;
387454e4ee71SNavdeep Parhar 	seg = &sgl->seg[0];
387554e4ee71SNavdeep Parhar 	usgl = (void *)flitp;
387654e4ee71SNavdeep Parhar 
387754e4ee71SNavdeep Parhar 	/*
387854e4ee71SNavdeep Parhar 	 * We start at a 16 byte boundary somewhere inside the tx descriptor
387954e4ee71SNavdeep Parhar 	 * ring, so we're at least 16 bytes away from the status page.  There is
388054e4ee71SNavdeep Parhar 	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
388154e4ee71SNavdeep Parhar 	 */
388254e4ee71SNavdeep Parhar 
388354e4ee71SNavdeep Parhar 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
388454e4ee71SNavdeep Parhar 	    V_ULPTX_NSGE(sgl->nsegs));
388554e4ee71SNavdeep Parhar 	usgl->len0 = htobe32(seg->ds_len);
388654e4ee71SNavdeep Parhar 	usgl->addr0 = htobe64(seg->ds_addr);
388754e4ee71SNavdeep Parhar 	seg++;
388854e4ee71SNavdeep Parhar 
388954e4ee71SNavdeep Parhar 	if ((uintptr_t)end <= (uintptr_t)eq->spg) {
389054e4ee71SNavdeep Parhar 
389154e4ee71SNavdeep Parhar 		/* Won't wrap around at all */
389254e4ee71SNavdeep Parhar 
389354e4ee71SNavdeep Parhar 		for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
389454e4ee71SNavdeep Parhar 			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
389554e4ee71SNavdeep Parhar 			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
389654e4ee71SNavdeep Parhar 		}
389754e4ee71SNavdeep Parhar 		if (i & 1)
389854e4ee71SNavdeep Parhar 			usgl->sge[i / 2].len[1] = htobe32(0);
389954e4ee71SNavdeep Parhar 	} else {
390054e4ee71SNavdeep Parhar 
390154e4ee71SNavdeep Parhar 		/* Will wrap somewhere in the rest of the SGL */
390254e4ee71SNavdeep Parhar 
390354e4ee71SNavdeep Parhar 		/* 2 flits already written, write the rest flit by flit */
390454e4ee71SNavdeep Parhar 		flitp = (void *)(usgl + 1);
390554e4ee71SNavdeep Parhar 		for (i = 0; i < sgl->nflits - 2; i++) {
390654e4ee71SNavdeep Parhar 			if ((uintptr_t)flitp == (uintptr_t)eq->spg)
390754e4ee71SNavdeep Parhar 				flitp = (void *)eq->desc;
390854e4ee71SNavdeep Parhar 			*flitp++ = get_flit(seg, sgl->nsegs - 1, i);
390954e4ee71SNavdeep Parhar 		}
391054e4ee71SNavdeep Parhar 		end = flitp;
391154e4ee71SNavdeep Parhar 	}
391254e4ee71SNavdeep Parhar 
391354e4ee71SNavdeep Parhar 	if ((uintptr_t)end & 0xf) {
391454e4ee71SNavdeep Parhar 		*(uint64_t *)end = 0;
391554e4ee71SNavdeep Parhar 		end++;
391654e4ee71SNavdeep Parhar 		padded = 1;
391754e4ee71SNavdeep Parhar 	} else
391854e4ee71SNavdeep Parhar 		padded = 0;
391954e4ee71SNavdeep Parhar 
392054e4ee71SNavdeep Parhar 	if ((uintptr_t)end == (uintptr_t)eq->spg)
392154e4ee71SNavdeep Parhar 		*to = (void *)eq->desc;
392254e4ee71SNavdeep Parhar 	else
392354e4ee71SNavdeep Parhar 		*to = (void *)end;
392454e4ee71SNavdeep Parhar 
392554e4ee71SNavdeep Parhar 	return (padded);
392654e4ee71SNavdeep Parhar }
392754e4ee71SNavdeep Parhar 
392854e4ee71SNavdeep Parhar static inline void
392954e4ee71SNavdeep Parhar copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
393054e4ee71SNavdeep Parhar {
393109fe6320SNavdeep Parhar 	if (__predict_true((uintptr_t)(*to) + len <= (uintptr_t)eq->spg)) {
393254e4ee71SNavdeep Parhar 		bcopy(from, *to, len);
393354e4ee71SNavdeep Parhar 		(*to) += len;
393454e4ee71SNavdeep Parhar 	} else {
393554e4ee71SNavdeep Parhar 		int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
393654e4ee71SNavdeep Parhar 
393754e4ee71SNavdeep Parhar 		bcopy(from, *to, portion);
393854e4ee71SNavdeep Parhar 		from += portion;
393954e4ee71SNavdeep Parhar 		portion = len - portion;	/* remaining */
394054e4ee71SNavdeep Parhar 		bcopy(from, (void *)eq->desc, portion);
394154e4ee71SNavdeep Parhar 		(*to) = (caddr_t)eq->desc + portion;
394254e4ee71SNavdeep Parhar 	}
394354e4ee71SNavdeep Parhar }
394454e4ee71SNavdeep Parhar 
394554e4ee71SNavdeep Parhar static inline void
3946f7dfe243SNavdeep Parhar ring_eq_db(struct adapter *sc, struct sge_eq *eq)
394754e4ee71SNavdeep Parhar {
3948d14b0ac1SNavdeep Parhar 	u_int db, pending;
3949d14b0ac1SNavdeep Parhar 
3950d14b0ac1SNavdeep Parhar 	db = eq->doorbells;
3951d14b0ac1SNavdeep Parhar 	pending = eq->pending;
3952d14b0ac1SNavdeep Parhar 	if (pending > 1)
395377ad3c41SNavdeep Parhar 		clrbit(&db, DOORBELL_WCWR);
395454e4ee71SNavdeep Parhar 	eq->pending = 0;
3955d14b0ac1SNavdeep Parhar 	wmb();
3956d14b0ac1SNavdeep Parhar 
3957d14b0ac1SNavdeep Parhar 	switch (ffs(db) - 1) {
3958d14b0ac1SNavdeep Parhar 	case DOORBELL_UDB:
3959d14b0ac1SNavdeep Parhar 		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending));
3960d14b0ac1SNavdeep Parhar 		return;
3961d14b0ac1SNavdeep Parhar 
396277ad3c41SNavdeep Parhar 	case DOORBELL_WCWR: {
3963d14b0ac1SNavdeep Parhar 		volatile uint64_t *dst, *src;
3964d14b0ac1SNavdeep Parhar 		int i;
3965d14b0ac1SNavdeep Parhar 
3966d14b0ac1SNavdeep Parhar 		/*
3967d14b0ac1SNavdeep Parhar 		 * Queues whose 128B doorbell segment fits in the page do not
3968d14b0ac1SNavdeep Parhar 		 * use relative qid (udb_qid is always 0).  Only queues with
396977ad3c41SNavdeep Parhar 		 * doorbell segments can do WCWR.
3970d14b0ac1SNavdeep Parhar 		 */
3971d14b0ac1SNavdeep Parhar 		KASSERT(eq->udb_qid == 0 && pending == 1,
3972d14b0ac1SNavdeep Parhar 		    ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p",
3973d14b0ac1SNavdeep Parhar 		    __func__, eq->doorbells, pending, eq->pidx, eq));
3974d14b0ac1SNavdeep Parhar 
3975d14b0ac1SNavdeep Parhar 		dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET -
3976d14b0ac1SNavdeep Parhar 		    UDBS_DB_OFFSET);
3977d14b0ac1SNavdeep Parhar 		i = eq->pidx ? eq->pidx - 1 : eq->cap - 1;
3978d14b0ac1SNavdeep Parhar 		src = (void *)&eq->desc[i];
3979d14b0ac1SNavdeep Parhar 		while (src != (void *)&eq->desc[i + 1])
3980d14b0ac1SNavdeep Parhar 			*dst++ = *src++;
3981d14b0ac1SNavdeep Parhar 		wmb();
3982d14b0ac1SNavdeep Parhar 		return;
3983d14b0ac1SNavdeep Parhar 	}
3984d14b0ac1SNavdeep Parhar 
3985d14b0ac1SNavdeep Parhar 	case DOORBELL_UDBWC:
3986d14b0ac1SNavdeep Parhar 		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending));
3987d14b0ac1SNavdeep Parhar 		wmb();
3988d14b0ac1SNavdeep Parhar 		return;
3989d14b0ac1SNavdeep Parhar 
3990d14b0ac1SNavdeep Parhar 	case DOORBELL_KDB:
3991d14b0ac1SNavdeep Parhar 		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
3992d14b0ac1SNavdeep Parhar 		    V_QID(eq->cntxt_id) | V_PIDX(pending));
3993d14b0ac1SNavdeep Parhar 		return;
3994d14b0ac1SNavdeep Parhar 	}
399554e4ee71SNavdeep Parhar }
399654e4ee71SNavdeep Parhar 
3997e874ff7aSNavdeep Parhar static inline int
3998e874ff7aSNavdeep Parhar reclaimable(struct sge_eq *eq)
399954e4ee71SNavdeep Parhar {
4000e874ff7aSNavdeep Parhar 	unsigned int cidx;
400154e4ee71SNavdeep Parhar 
400254e4ee71SNavdeep Parhar 	cidx = eq->spg->cidx;	/* stable snapshot */
4003733b9277SNavdeep Parhar 	cidx = be16toh(cidx);
400454e4ee71SNavdeep Parhar 
400554e4ee71SNavdeep Parhar 	if (cidx >= eq->cidx)
4006e874ff7aSNavdeep Parhar 		return (cidx - eq->cidx);
400754e4ee71SNavdeep Parhar 	else
4008e874ff7aSNavdeep Parhar 		return (cidx + eq->cap - eq->cidx);
4009e874ff7aSNavdeep Parhar }
401054e4ee71SNavdeep Parhar 
4011e874ff7aSNavdeep Parhar /*
4012e874ff7aSNavdeep Parhar  * There are "can_reclaim" tx descriptors ready to be reclaimed.  Reclaim as
4013e874ff7aSNavdeep Parhar  * many as possible but stop when there are around "n" mbufs to free.
4014e874ff7aSNavdeep Parhar  *
4015e874ff7aSNavdeep Parhar  * The actual number reclaimed is provided as the return value.
4016e874ff7aSNavdeep Parhar  */
4017e874ff7aSNavdeep Parhar static int
4018f7dfe243SNavdeep Parhar reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n)
4019e874ff7aSNavdeep Parhar {
4020e874ff7aSNavdeep Parhar 	struct tx_sdesc *txsd;
4021733b9277SNavdeep Parhar 	struct tx_maps *txmaps;
4022e874ff7aSNavdeep Parhar 	struct tx_map *txm;
4023e874ff7aSNavdeep Parhar 	unsigned int reclaimed, maps;
4024f7dfe243SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
402554e4ee71SNavdeep Parhar 
4026733b9277SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
4027e874ff7aSNavdeep Parhar 
4028e874ff7aSNavdeep Parhar 	if (can_reclaim == 0)
4029e874ff7aSNavdeep Parhar 		can_reclaim = reclaimable(eq);
403054e4ee71SNavdeep Parhar 
403154e4ee71SNavdeep Parhar 	maps = reclaimed = 0;
4032e874ff7aSNavdeep Parhar 	while (can_reclaim && maps < n) {
403354e4ee71SNavdeep Parhar 		int ndesc;
403454e4ee71SNavdeep Parhar 
4035f7dfe243SNavdeep Parhar 		txsd = &txq->sdesc[eq->cidx];
403654e4ee71SNavdeep Parhar 		ndesc = txsd->desc_used;
403754e4ee71SNavdeep Parhar 
403854e4ee71SNavdeep Parhar 		/* Firmware doesn't return "partial" credits. */
403954e4ee71SNavdeep Parhar 		KASSERT(can_reclaim >= ndesc,
404054e4ee71SNavdeep Parhar 		    ("%s: unexpected number of credits: %d, %d",
404154e4ee71SNavdeep Parhar 		    __func__, can_reclaim, ndesc));
404254e4ee71SNavdeep Parhar 
4043f7dfe243SNavdeep Parhar 		maps += txsd->credits;
4044e874ff7aSNavdeep Parhar 
404554e4ee71SNavdeep Parhar 		reclaimed += ndesc;
404654e4ee71SNavdeep Parhar 		can_reclaim -= ndesc;
404754e4ee71SNavdeep Parhar 
4048e874ff7aSNavdeep Parhar 		eq->cidx += ndesc;
4049e874ff7aSNavdeep Parhar 		if (__predict_false(eq->cidx >= eq->cap))
4050e874ff7aSNavdeep Parhar 			eq->cidx -= eq->cap;
4051e874ff7aSNavdeep Parhar 	}
4052e874ff7aSNavdeep Parhar 
4053733b9277SNavdeep Parhar 	txmaps = &txq->txmaps;
4054733b9277SNavdeep Parhar 	txm = &txmaps->maps[txmaps->map_cidx];
4055e874ff7aSNavdeep Parhar 	if (maps)
4056e874ff7aSNavdeep Parhar 		prefetch(txm->m);
405754e4ee71SNavdeep Parhar 
405854e4ee71SNavdeep Parhar 	eq->avail += reclaimed;
405954e4ee71SNavdeep Parhar 	KASSERT(eq->avail < eq->cap,	/* avail tops out at (cap - 1) */
406054e4ee71SNavdeep Parhar 	    ("%s: too many descriptors available", __func__));
406154e4ee71SNavdeep Parhar 
4062733b9277SNavdeep Parhar 	txmaps->map_avail += maps;
4063733b9277SNavdeep Parhar 	KASSERT(txmaps->map_avail <= txmaps->map_total,
406454e4ee71SNavdeep Parhar 	    ("%s: too many maps available", __func__));
406554e4ee71SNavdeep Parhar 
406654e4ee71SNavdeep Parhar 	while (maps--) {
4067e874ff7aSNavdeep Parhar 		struct tx_map *next;
4068e874ff7aSNavdeep Parhar 
4069e874ff7aSNavdeep Parhar 		next = txm + 1;
4070733b9277SNavdeep Parhar 		if (__predict_false(txmaps->map_cidx + 1 == txmaps->map_total))
4071733b9277SNavdeep Parhar 			next = txmaps->maps;
4072e874ff7aSNavdeep Parhar 		prefetch(next->m);
407354e4ee71SNavdeep Parhar 
4074f7dfe243SNavdeep Parhar 		bus_dmamap_unload(txq->tx_tag, txm->map);
407554e4ee71SNavdeep Parhar 		m_freem(txm->m);
407654e4ee71SNavdeep Parhar 		txm->m = NULL;
407754e4ee71SNavdeep Parhar 
4078e874ff7aSNavdeep Parhar 		txm = next;
4079733b9277SNavdeep Parhar 		if (__predict_false(++txmaps->map_cidx == txmaps->map_total))
4080733b9277SNavdeep Parhar 			txmaps->map_cidx = 0;
408154e4ee71SNavdeep Parhar 	}
408254e4ee71SNavdeep Parhar 
408354e4ee71SNavdeep Parhar 	return (reclaimed);
408454e4ee71SNavdeep Parhar }
408554e4ee71SNavdeep Parhar 
408654e4ee71SNavdeep Parhar static void
408754e4ee71SNavdeep Parhar write_eqflush_wr(struct sge_eq *eq)
408854e4ee71SNavdeep Parhar {
408954e4ee71SNavdeep Parhar 	struct fw_eq_flush_wr *wr;
409054e4ee71SNavdeep Parhar 
409154e4ee71SNavdeep Parhar 	EQ_LOCK_ASSERT_OWNED(eq);
409254e4ee71SNavdeep Parhar 	KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
4093733b9277SNavdeep Parhar 	KASSERT(!(eq->flags & EQ_CRFLUSHED), ("%s: flushed already", __func__));
409454e4ee71SNavdeep Parhar 
409554e4ee71SNavdeep Parhar 	wr = (void *)&eq->desc[eq->pidx];
409654e4ee71SNavdeep Parhar 	bzero(wr, sizeof(*wr));
409754e4ee71SNavdeep Parhar 	wr->opcode = FW_EQ_FLUSH_WR;
409854e4ee71SNavdeep Parhar 	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
409954e4ee71SNavdeep Parhar 	    F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
410054e4ee71SNavdeep Parhar 
4101733b9277SNavdeep Parhar 	eq->flags |= (EQ_CRFLUSHED | EQ_STALLED);
410254e4ee71SNavdeep Parhar 	eq->pending++;
410354e4ee71SNavdeep Parhar 	eq->avail--;
410454e4ee71SNavdeep Parhar 	if (++eq->pidx == eq->cap)
410554e4ee71SNavdeep Parhar 		eq->pidx = 0;
410654e4ee71SNavdeep Parhar }
410754e4ee71SNavdeep Parhar 
410854e4ee71SNavdeep Parhar static __be64
410954e4ee71SNavdeep Parhar get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
411054e4ee71SNavdeep Parhar {
411154e4ee71SNavdeep Parhar 	int i = (idx / 3) * 2;
411254e4ee71SNavdeep Parhar 
411354e4ee71SNavdeep Parhar 	switch (idx % 3) {
411454e4ee71SNavdeep Parhar 	case 0: {
411554e4ee71SNavdeep Parhar 		__be64 rc;
411654e4ee71SNavdeep Parhar 
411754e4ee71SNavdeep Parhar 		rc = htobe32(sgl[i].ds_len);
411854e4ee71SNavdeep Parhar 		if (i + 1 < nsegs)
411954e4ee71SNavdeep Parhar 			rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;
412054e4ee71SNavdeep Parhar 
412154e4ee71SNavdeep Parhar 		return (rc);
412254e4ee71SNavdeep Parhar 	}
412354e4ee71SNavdeep Parhar 	case 1:
412454e4ee71SNavdeep Parhar 		return htobe64(sgl[i].ds_addr);
412554e4ee71SNavdeep Parhar 	case 2:
412654e4ee71SNavdeep Parhar 		return htobe64(sgl[i + 1].ds_addr);
412754e4ee71SNavdeep Parhar 	}
412854e4ee71SNavdeep Parhar 
412954e4ee71SNavdeep Parhar 	return (0);
413054e4ee71SNavdeep Parhar }
413154e4ee71SNavdeep Parhar 
41321458bff9SNavdeep Parhar /*
41331458bff9SNavdeep Parhar  * Find an SGE FL buffer size to use for the given bufsize.  Look for the the
41341458bff9SNavdeep Parhar  * smallest size that is large enough to hold bufsize or pick the largest size
41351458bff9SNavdeep Parhar  * if all sizes are less than bufsize.
41361458bff9SNavdeep Parhar  */
413754e4ee71SNavdeep Parhar static void
41381458bff9SNavdeep Parhar set_fl_tag_idx(struct adapter *sc, struct sge_fl *fl, int bufsize)
413954e4ee71SNavdeep Parhar {
41401458bff9SNavdeep Parhar 	int i, largest, best, delta, start;
414154e4ee71SNavdeep Parhar 
41421458bff9SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING) {
41431458bff9SNavdeep Parhar 		fl->tag_idx = 0;	/* first tag is the one for packing */
41441458bff9SNavdeep Parhar 		return;
414554e4ee71SNavdeep Parhar 	}
414654e4ee71SNavdeep Parhar 
41471458bff9SNavdeep Parhar 	start = sc->flags & BUF_PACKING_OK ? 1 : 0;
41481458bff9SNavdeep Parhar 	delta = FL_BUF_SIZE(sc, start) - bufsize;
41491458bff9SNavdeep Parhar 	if (delta == 0) {
41501458bff9SNavdeep Parhar 		fl->tag_idx = start;	/* ideal fit, look no further */
41511458bff9SNavdeep Parhar 		return;
41521458bff9SNavdeep Parhar 	}
41531458bff9SNavdeep Parhar 	best = start;
41541458bff9SNavdeep Parhar 	largest = start;
41551458bff9SNavdeep Parhar 
41561458bff9SNavdeep Parhar 	for (i = start + 1; i < FL_BUF_SIZES(sc); i++) {
41571458bff9SNavdeep Parhar 		int d, fl_buf_size;
41581458bff9SNavdeep Parhar 
41591458bff9SNavdeep Parhar 		fl_buf_size = FL_BUF_SIZE(sc, i);
41601458bff9SNavdeep Parhar 		d = fl_buf_size - bufsize;
41611458bff9SNavdeep Parhar 
41621458bff9SNavdeep Parhar 		if (d == 0) {
41631458bff9SNavdeep Parhar 			fl->tag_idx = i;	/* ideal fit, look no further */
41641458bff9SNavdeep Parhar 			return;
41651458bff9SNavdeep Parhar 		}
41661458bff9SNavdeep Parhar 		if (fl_buf_size > FL_BUF_SIZE(sc, largest))
41671458bff9SNavdeep Parhar 			largest = i;
41681458bff9SNavdeep Parhar 		if (d > 0 && (delta < 0 || delta > d)) {
41691458bff9SNavdeep Parhar 			delta = d;
41701458bff9SNavdeep Parhar 			best = i;
41711458bff9SNavdeep Parhar 		}
41721458bff9SNavdeep Parhar 	}
41731458bff9SNavdeep Parhar 
41741458bff9SNavdeep Parhar 	if (delta > 0)
41751458bff9SNavdeep Parhar 		fl->tag_idx = best;	/* Found a buf bigger than bufsize */
41761458bff9SNavdeep Parhar 	else
41771458bff9SNavdeep Parhar 		fl->tag_idx = largest;	/* No buf large enough for bufsize */
417854e4ee71SNavdeep Parhar }
4179ecb79ca4SNavdeep Parhar 
4180733b9277SNavdeep Parhar static void
4181733b9277SNavdeep Parhar add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
4182ecb79ca4SNavdeep Parhar {
4183733b9277SNavdeep Parhar 	mtx_lock(&sc->sfl_lock);
4184733b9277SNavdeep Parhar 	FL_LOCK(fl);
4185733b9277SNavdeep Parhar 	if ((fl->flags & FL_DOOMED) == 0) {
4186733b9277SNavdeep Parhar 		fl->flags |= FL_STARVING;
4187733b9277SNavdeep Parhar 		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
4188733b9277SNavdeep Parhar 		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
4189733b9277SNavdeep Parhar 	}
4190733b9277SNavdeep Parhar 	FL_UNLOCK(fl);
4191733b9277SNavdeep Parhar 	mtx_unlock(&sc->sfl_lock);
4192733b9277SNavdeep Parhar }
4193ecb79ca4SNavdeep Parhar 
4194733b9277SNavdeep Parhar static int
4195733b9277SNavdeep Parhar handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
4196733b9277SNavdeep Parhar     struct mbuf *m)
4197733b9277SNavdeep Parhar {
4198733b9277SNavdeep Parhar 	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
4199733b9277SNavdeep Parhar 	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
4200733b9277SNavdeep Parhar 	struct adapter *sc = iq->adapter;
4201733b9277SNavdeep Parhar 	struct sge *s = &sc->sge;
4202733b9277SNavdeep Parhar 	struct sge_eq *eq;
4203733b9277SNavdeep Parhar 
4204733b9277SNavdeep Parhar 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
4205733b9277SNavdeep Parhar 	    rss->opcode));
4206733b9277SNavdeep Parhar 
4207733b9277SNavdeep Parhar 	eq = s->eqmap[qid - s->eq_start];
4208733b9277SNavdeep Parhar 	EQ_LOCK(eq);
4209733b9277SNavdeep Parhar 	KASSERT(eq->flags & EQ_CRFLUSHED,
4210733b9277SNavdeep Parhar 	    ("%s: unsolicited egress update", __func__));
4211733b9277SNavdeep Parhar 	eq->flags &= ~EQ_CRFLUSHED;
4212733b9277SNavdeep Parhar 	eq->egr_update++;
4213733b9277SNavdeep Parhar 
4214733b9277SNavdeep Parhar 	if (__predict_false(eq->flags & EQ_DOOMED))
4215733b9277SNavdeep Parhar 		wakeup_one(eq);
4216733b9277SNavdeep Parhar 	else if (eq->flags & EQ_STALLED && can_resume_tx(eq))
4217733b9277SNavdeep Parhar 		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
4218733b9277SNavdeep Parhar 	EQ_UNLOCK(eq);
4219ecb79ca4SNavdeep Parhar 
4220ecb79ca4SNavdeep Parhar 	return (0);
4221ecb79ca4SNavdeep Parhar }
4222f7dfe243SNavdeep Parhar 
42230abd31e2SNavdeep Parhar /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */
42240abd31e2SNavdeep Parhar CTASSERT(offsetof(struct cpl_fw4_msg, data) == \
42250abd31e2SNavdeep Parhar     offsetof(struct cpl_fw6_msg, data));
42260abd31e2SNavdeep Parhar 
4227733b9277SNavdeep Parhar static int
42281b4cc91fSNavdeep Parhar handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
422956599263SNavdeep Parhar {
42301b4cc91fSNavdeep Parhar 	struct adapter *sc = iq->adapter;
423156599263SNavdeep Parhar 	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
423256599263SNavdeep Parhar 
4233733b9277SNavdeep Parhar 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
4234733b9277SNavdeep Parhar 	    rss->opcode));
4235733b9277SNavdeep Parhar 
42360abd31e2SNavdeep Parhar 	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
42370abd31e2SNavdeep Parhar 		const struct rss_header *rss2;
42380abd31e2SNavdeep Parhar 
42390abd31e2SNavdeep Parhar 		rss2 = (const struct rss_header *)&cpl->data[0];
42400abd31e2SNavdeep Parhar 		return (sc->cpl_handler[rss2->opcode](iq, rss2, m));
42410abd31e2SNavdeep Parhar 	}
42420abd31e2SNavdeep Parhar 
42431b4cc91fSNavdeep Parhar 	return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0]));
4244f7dfe243SNavdeep Parhar }
4245af49c942SNavdeep Parhar 
4246af49c942SNavdeep Parhar static int
424756599263SNavdeep Parhar sysctl_uint16(SYSCTL_HANDLER_ARGS)
4248af49c942SNavdeep Parhar {
4249af49c942SNavdeep Parhar 	uint16_t *id = arg1;
4250af49c942SNavdeep Parhar 	int i = *id;
4251af49c942SNavdeep Parhar 
4252af49c942SNavdeep Parhar 	return sysctl_handle_int(oidp, &i, 0, req);
4253af49c942SNavdeep Parhar }
4254