xref: /freebsd/sys/dev/cxgbe/t4_sge.c (revision 2f318252cbe8252d3d29cf21247127865e3d5714)
154e4ee71SNavdeep Parhar /*-
254e4ee71SNavdeep Parhar  * Copyright (c) 2011 Chelsio Communications, Inc.
354e4ee71SNavdeep Parhar  * All rights reserved.
454e4ee71SNavdeep Parhar  * Written by: Navdeep Parhar <np@FreeBSD.org>
554e4ee71SNavdeep Parhar  *
654e4ee71SNavdeep Parhar  * Redistribution and use in source and binary forms, with or without
754e4ee71SNavdeep Parhar  * modification, are permitted provided that the following conditions
854e4ee71SNavdeep Parhar  * are met:
954e4ee71SNavdeep Parhar  * 1. Redistributions of source code must retain the above copyright
1054e4ee71SNavdeep Parhar  *    notice, this list of conditions and the following disclaimer.
1154e4ee71SNavdeep Parhar  * 2. Redistributions in binary form must reproduce the above copyright
1254e4ee71SNavdeep Parhar  *    notice, this list of conditions and the following disclaimer in the
1354e4ee71SNavdeep Parhar  *    documentation and/or other materials provided with the distribution.
1454e4ee71SNavdeep Parhar  *
1554e4ee71SNavdeep Parhar  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1654e4ee71SNavdeep Parhar  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1754e4ee71SNavdeep Parhar  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1854e4ee71SNavdeep Parhar  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
1954e4ee71SNavdeep Parhar  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2054e4ee71SNavdeep Parhar  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2154e4ee71SNavdeep Parhar  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2254e4ee71SNavdeep Parhar  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2354e4ee71SNavdeep Parhar  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2454e4ee71SNavdeep Parhar  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2554e4ee71SNavdeep Parhar  * SUCH DAMAGE.
2654e4ee71SNavdeep Parhar  */
2754e4ee71SNavdeep Parhar 
2854e4ee71SNavdeep Parhar #include <sys/cdefs.h>
2954e4ee71SNavdeep Parhar __FBSDID("$FreeBSD$");
3054e4ee71SNavdeep Parhar 
3154e4ee71SNavdeep Parhar #include "opt_inet.h"
32a1ea9a82SNavdeep Parhar #include "opt_inet6.h"
3354e4ee71SNavdeep Parhar 
3454e4ee71SNavdeep Parhar #include <sys/types.h>
35c3322cb9SGleb Smirnoff #include <sys/eventhandler.h>
3654e4ee71SNavdeep Parhar #include <sys/mbuf.h>
3754e4ee71SNavdeep Parhar #include <sys/socket.h>
3854e4ee71SNavdeep Parhar #include <sys/kernel.h>
39ecb79ca4SNavdeep Parhar #include <sys/malloc.h>
40ecb79ca4SNavdeep Parhar #include <sys/queue.h>
4138035ed6SNavdeep Parhar #include <sys/sbuf.h>
42ecb79ca4SNavdeep Parhar #include <sys/taskqueue.h>
43480e603cSNavdeep Parhar #include <sys/time.h>
447951040fSNavdeep Parhar #include <sys/sglist.h>
4554e4ee71SNavdeep Parhar #include <sys/sysctl.h>
46733b9277SNavdeep Parhar #include <sys/smp.h>
4782eff304SNavdeep Parhar #include <sys/counter.h>
4854e4ee71SNavdeep Parhar #include <net/bpf.h>
4954e4ee71SNavdeep Parhar #include <net/ethernet.h>
5054e4ee71SNavdeep Parhar #include <net/if.h>
5154e4ee71SNavdeep Parhar #include <net/if_vlan_var.h>
5254e4ee71SNavdeep Parhar #include <netinet/in.h>
5354e4ee71SNavdeep Parhar #include <netinet/ip.h>
54a1ea9a82SNavdeep Parhar #include <netinet/ip6.h>
5554e4ee71SNavdeep Parhar #include <netinet/tcp.h>
566af45170SJohn Baldwin #include <machine/in_cksum.h>
5764db8966SDimitry Andric #include <machine/md_var.h>
5838035ed6SNavdeep Parhar #include <vm/vm.h>
5938035ed6SNavdeep Parhar #include <vm/pmap.h>
60298d969cSNavdeep Parhar #ifdef DEV_NETMAP
61298d969cSNavdeep Parhar #include <machine/bus.h>
62298d969cSNavdeep Parhar #include <sys/selinfo.h>
63298d969cSNavdeep Parhar #include <net/if_var.h>
64298d969cSNavdeep Parhar #include <net/netmap.h>
65298d969cSNavdeep Parhar #include <dev/netmap/netmap_kern.h>
66298d969cSNavdeep Parhar #endif
6754e4ee71SNavdeep Parhar 
6854e4ee71SNavdeep Parhar #include "common/common.h"
6954e4ee71SNavdeep Parhar #include "common/t4_regs.h"
7054e4ee71SNavdeep Parhar #include "common/t4_regs_values.h"
7154e4ee71SNavdeep Parhar #include "common/t4_msg.h"
72671bf2b8SNavdeep Parhar #include "t4_l2t.h"
737951040fSNavdeep Parhar #include "t4_mp_ring.h"
7454e4ee71SNavdeep Parhar 
75d14b0ac1SNavdeep Parhar #ifdef T4_PKT_TIMESTAMP
76d14b0ac1SNavdeep Parhar #define RX_COPY_THRESHOLD (MINCLSIZE - 8)
77d14b0ac1SNavdeep Parhar #else
78d14b0ac1SNavdeep Parhar #define RX_COPY_THRESHOLD MINCLSIZE
79d14b0ac1SNavdeep Parhar #endif
80d14b0ac1SNavdeep Parhar 
819fb8886bSNavdeep Parhar /*
829fb8886bSNavdeep Parhar  * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
839fb8886bSNavdeep Parhar  * 0-7 are valid values.
849fb8886bSNavdeep Parhar  */
8529c229e9SJohn Baldwin static int fl_pktshift = 2;
869fb8886bSNavdeep Parhar TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift);
8754e4ee71SNavdeep Parhar 
889fb8886bSNavdeep Parhar /*
899fb8886bSNavdeep Parhar  * Pad ethernet payload up to this boundary.
909fb8886bSNavdeep Parhar  * -1: driver should figure out a good value.
911458bff9SNavdeep Parhar  *  0: disable padding.
921458bff9SNavdeep Parhar  *  Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
939fb8886bSNavdeep Parhar  */
94298d969cSNavdeep Parhar int fl_pad = -1;
959fb8886bSNavdeep Parhar TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad);
969fb8886bSNavdeep Parhar 
979fb8886bSNavdeep Parhar /*
989fb8886bSNavdeep Parhar  * Status page length.
999fb8886bSNavdeep Parhar  * -1: driver should figure out a good value.
1009fb8886bSNavdeep Parhar  *  64 or 128 are the only other valid values.
1019fb8886bSNavdeep Parhar  */
10229c229e9SJohn Baldwin static int spg_len = -1;
1039fb8886bSNavdeep Parhar TUNABLE_INT("hw.cxgbe.spg_len", &spg_len);
1049fb8886bSNavdeep Parhar 
1059fb8886bSNavdeep Parhar /*
1069fb8886bSNavdeep Parhar  * Congestion drops.
1079fb8886bSNavdeep Parhar  * -1: no congestion feedback (not recommended).
1089fb8886bSNavdeep Parhar  *  0: backpressure the channel instead of dropping packets right away.
1099fb8886bSNavdeep Parhar  *  1: no backpressure, drop packets for the congested queue immediately.
1109fb8886bSNavdeep Parhar  */
1119fb8886bSNavdeep Parhar static int cong_drop = 0;
1129fb8886bSNavdeep Parhar TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop);
11354e4ee71SNavdeep Parhar 
1141458bff9SNavdeep Parhar /*
1151458bff9SNavdeep Parhar  * Deliver multiple frames in the same free list buffer if they fit.
1161458bff9SNavdeep Parhar  * -1: let the driver decide whether to enable buffer packing or not.
1171458bff9SNavdeep Parhar  *  0: disable buffer packing.
1181458bff9SNavdeep Parhar  *  1: enable buffer packing.
1191458bff9SNavdeep Parhar  */
1201458bff9SNavdeep Parhar static int buffer_packing = -1;
1211458bff9SNavdeep Parhar TUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing);
1221458bff9SNavdeep Parhar 
1231458bff9SNavdeep Parhar /*
1241458bff9SNavdeep Parhar  * Start next frame in a packed buffer at this boundary.
1251458bff9SNavdeep Parhar  * -1: driver should figure out a good value.
126e3207e19SNavdeep Parhar  * T4: driver will ignore this and use the same value as fl_pad above.
127e3207e19SNavdeep Parhar  * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
1281458bff9SNavdeep Parhar  */
1291458bff9SNavdeep Parhar static int fl_pack = -1;
1301458bff9SNavdeep Parhar TUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack);
1311458bff9SNavdeep Parhar 
13238035ed6SNavdeep Parhar /*
13338035ed6SNavdeep Parhar  * Allow the driver to create mbuf(s) in a cluster allocated for rx.
13438035ed6SNavdeep Parhar  * 0: never; always allocate mbufs from the zone_mbuf UMA zone.
13538035ed6SNavdeep Parhar  * 1: ok to create mbuf(s) within a cluster if there is room.
13638035ed6SNavdeep Parhar  */
13738035ed6SNavdeep Parhar static int allow_mbufs_in_cluster = 1;
13838035ed6SNavdeep Parhar TUNABLE_INT("hw.cxgbe.allow_mbufs_in_cluster", &allow_mbufs_in_cluster);
13938035ed6SNavdeep Parhar 
14038035ed6SNavdeep Parhar /*
14138035ed6SNavdeep Parhar  * Largest rx cluster size that the driver is allowed to allocate.
14238035ed6SNavdeep Parhar  */
14338035ed6SNavdeep Parhar static int largest_rx_cluster = MJUM16BYTES;
14438035ed6SNavdeep Parhar TUNABLE_INT("hw.cxgbe.largest_rx_cluster", &largest_rx_cluster);
14538035ed6SNavdeep Parhar 
14638035ed6SNavdeep Parhar /*
14738035ed6SNavdeep Parhar  * Size of cluster allocation that's most likely to succeed.  The driver will
14838035ed6SNavdeep Parhar  * fall back to this size if it fails to allocate clusters larger than this.
14938035ed6SNavdeep Parhar  */
15038035ed6SNavdeep Parhar static int safest_rx_cluster = PAGE_SIZE;
15138035ed6SNavdeep Parhar TUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster);
15238035ed6SNavdeep Parhar 
153d491f8caSNavdeep Parhar /*
154d491f8caSNavdeep Parhar  * The interrupt holdoff timers are multiplied by this value on T6+.
155d491f8caSNavdeep Parhar  * 1 and 3-17 (both inclusive) are legal values.
156d491f8caSNavdeep Parhar  */
157d491f8caSNavdeep Parhar static int tscale = 1;
158d491f8caSNavdeep Parhar TUNABLE_INT("hw.cxgbe.tscale", &tscale);
159d491f8caSNavdeep Parhar 
16046f48ee5SNavdeep Parhar /*
16146f48ee5SNavdeep Parhar  * Number of LRO entries in the lro_ctrl structure per rx queue.
16246f48ee5SNavdeep Parhar  */
16346f48ee5SNavdeep Parhar static int lro_entries = TCP_LRO_ENTRIES;
16446f48ee5SNavdeep Parhar TUNABLE_INT("hw.cxgbe.lro_entries", &lro_entries);
16546f48ee5SNavdeep Parhar 
16646f48ee5SNavdeep Parhar /*
16746f48ee5SNavdeep Parhar  * This enables presorting of frames before they're fed into tcp_lro_rx.
16846f48ee5SNavdeep Parhar  */
16946f48ee5SNavdeep Parhar static int lro_mbufs = 0;
17046f48ee5SNavdeep Parhar TUNABLE_INT("hw.cxgbe.lro_mbufs", &lro_mbufs);
17146f48ee5SNavdeep Parhar 
/*
 * Accumulator for one in-progress TXPKTS coalescing work request.  Tracks the
 * running totals needed to decide whether another frame still fits.
 */
struct txpkts {
	u_int wr_type;		/* type 0 or type 1 */
	u_int npkt;		/* # of packets in this work request */
	u_int plen;		/* total payload (sum of all packets) */
	u_int len16;		/* # of 16B pieces used by this work request */
};
17854e4ee71SNavdeep Parhar 
/* A packet's SGL.  This + m_pkthdr has all info needed for tx */
struct sgl {
	struct sglist sg;			/* sglist header; points at seg[] below */
	struct sglist_seg seg[TX_SGL_SEGS];	/* inline storage for up to TX_SGL_SEGS DMA segments */
};
18454e4ee71SNavdeep Parhar 
185733b9277SNavdeep Parhar static int service_iq(struct sge_iq *, int);
1864d6db4e0SNavdeep Parhar static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
187733b9277SNavdeep Parhar static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
188b2daa9a9SNavdeep Parhar static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
189e3207e19SNavdeep Parhar static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
19090e7434aSNavdeep Parhar static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
19190e7434aSNavdeep Parhar     uint16_t, char *);
19254e4ee71SNavdeep Parhar static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
19354e4ee71SNavdeep Parhar     bus_addr_t *, void **);
19454e4ee71SNavdeep Parhar static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
19554e4ee71SNavdeep Parhar     void *);
196fe2ebb76SJohn Baldwin static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
197bc14b14dSNavdeep Parhar     int, int);
198fe2ebb76SJohn Baldwin static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *);
199aa93b99aSNavdeep Parhar static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
200aa93b99aSNavdeep Parhar     struct sysctl_oid *, struct sge_fl *);
201733b9277SNavdeep Parhar static int alloc_fwq(struct adapter *);
202733b9277SNavdeep Parhar static int free_fwq(struct adapter *);
203733b9277SNavdeep Parhar static int alloc_mgmtq(struct adapter *);
204733b9277SNavdeep Parhar static int free_mgmtq(struct adapter *);
205fe2ebb76SJohn Baldwin static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
206733b9277SNavdeep Parhar     struct sysctl_oid *);
207fe2ebb76SJohn Baldwin static int free_rxq(struct vi_info *, struct sge_rxq *);
20809fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
209fe2ebb76SJohn Baldwin static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
210733b9277SNavdeep Parhar     struct sysctl_oid *);
211fe2ebb76SJohn Baldwin static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
212733b9277SNavdeep Parhar #endif
213298d969cSNavdeep Parhar #ifdef DEV_NETMAP
214fe2ebb76SJohn Baldwin static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int,
215298d969cSNavdeep Parhar     struct sysctl_oid *);
216fe2ebb76SJohn Baldwin static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
217fe2ebb76SJohn Baldwin static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int,
218298d969cSNavdeep Parhar     struct sysctl_oid *);
219fe2ebb76SJohn Baldwin static int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
220298d969cSNavdeep Parhar #endif
221733b9277SNavdeep Parhar static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
222fe2ebb76SJohn Baldwin static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
22309fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
224fe2ebb76SJohn Baldwin static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
225733b9277SNavdeep Parhar #endif
226fe2ebb76SJohn Baldwin static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *);
227733b9277SNavdeep Parhar static int free_eq(struct adapter *, struct sge_eq *);
228fe2ebb76SJohn Baldwin static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
229733b9277SNavdeep Parhar     struct sysctl_oid *);
230733b9277SNavdeep Parhar static int free_wrq(struct adapter *, struct sge_wrq *);
231fe2ebb76SJohn Baldwin static int alloc_txq(struct vi_info *, struct sge_txq *, int,
232733b9277SNavdeep Parhar     struct sysctl_oid *);
233fe2ebb76SJohn Baldwin static int free_txq(struct vi_info *, struct sge_txq *);
23454e4ee71SNavdeep Parhar static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
23554e4ee71SNavdeep Parhar static inline void ring_fl_db(struct adapter *, struct sge_fl *);
236733b9277SNavdeep Parhar static int refill_fl(struct adapter *, struct sge_fl *, int);
237733b9277SNavdeep Parhar static void refill_sfl(void *);
23854e4ee71SNavdeep Parhar static int alloc_fl_sdesc(struct sge_fl *);
2391458bff9SNavdeep Parhar static void free_fl_sdesc(struct adapter *, struct sge_fl *);
24038035ed6SNavdeep Parhar static void find_best_refill_source(struct adapter *, struct sge_fl *, int);
24138035ed6SNavdeep Parhar static void find_safe_refill_source(struct adapter *, struct sge_fl *);
242733b9277SNavdeep Parhar static void add_fl_to_sfl(struct adapter *, struct sge_fl *);
24354e4ee71SNavdeep Parhar 
2447951040fSNavdeep Parhar static inline void get_pkt_gl(struct mbuf *, struct sglist *);
2457951040fSNavdeep Parhar static inline u_int txpkt_len16(u_int, u_int);
2466af45170SJohn Baldwin static inline u_int txpkt_vm_len16(u_int, u_int);
2477951040fSNavdeep Parhar static inline u_int txpkts0_len16(u_int);
2487951040fSNavdeep Parhar static inline u_int txpkts1_len16(void);
2497951040fSNavdeep Parhar static u_int write_txpkt_wr(struct sge_txq *, struct fw_eth_tx_pkt_wr *,
2507951040fSNavdeep Parhar     struct mbuf *, u_int);
251472a6004SNavdeep Parhar static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
252472a6004SNavdeep Parhar     struct fw_eth_tx_pkt_vm_wr *, struct mbuf *, u_int);
2537951040fSNavdeep Parhar static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int);
2547951040fSNavdeep Parhar static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int);
2557951040fSNavdeep Parhar static u_int write_txpkts_wr(struct sge_txq *, struct fw_eth_tx_pkts_wr *,
2567951040fSNavdeep Parhar     struct mbuf *, const struct txpkts *, u_int);
2577951040fSNavdeep Parhar static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
25854e4ee71SNavdeep Parhar static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
2597951040fSNavdeep Parhar static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
2607951040fSNavdeep Parhar static inline uint16_t read_hw_cidx(struct sge_eq *);
2617951040fSNavdeep Parhar static inline u_int reclaimable_tx_desc(struct sge_eq *);
2627951040fSNavdeep Parhar static inline u_int total_available_tx_desc(struct sge_eq *);
2637951040fSNavdeep Parhar static u_int reclaim_tx_descs(struct sge_txq *, u_int);
2647951040fSNavdeep Parhar static void tx_reclaim(void *, int);
2657951040fSNavdeep Parhar static __be64 get_flit(struct sglist_seg *, int, int);
266733b9277SNavdeep Parhar static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
267733b9277SNavdeep Parhar     struct mbuf *);
2681b4cc91fSNavdeep Parhar static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
269733b9277SNavdeep Parhar     struct mbuf *);
270069af0ebSJohn Baldwin static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
2717951040fSNavdeep Parhar static void wrq_tx_drain(void *, int);
2727951040fSNavdeep Parhar static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);
27354e4ee71SNavdeep Parhar 
27456599263SNavdeep Parhar static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
27538035ed6SNavdeep Parhar static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
27602f972e8SNavdeep Parhar static int sysctl_tc(SYSCTL_HANDLER_ARGS);
277f7dfe243SNavdeep Parhar 
27882eff304SNavdeep Parhar static counter_u64_t extfree_refs;
27982eff304SNavdeep Parhar static counter_u64_t extfree_rels;
28082eff304SNavdeep Parhar 
281671bf2b8SNavdeep Parhar an_handler_t t4_an_handler;
282671bf2b8SNavdeep Parhar fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
283671bf2b8SNavdeep Parhar cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
284671bf2b8SNavdeep Parhar 
285671bf2b8SNavdeep Parhar 
/*
 * Default async-notification handler.  Nothing in the driver expects async
 * notifications, so receiving one is a bug: panic under INVARIANTS, otherwise
 * log and carry on.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
298671bf2b8SNavdeep Parhar 
299671bf2b8SNavdeep Parhar int
300671bf2b8SNavdeep Parhar t4_register_an_handler(an_handler_t h)
301671bf2b8SNavdeep Parhar {
302671bf2b8SNavdeep Parhar 	uintptr_t *loc, new;
303671bf2b8SNavdeep Parhar 
304671bf2b8SNavdeep Parhar 	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
305671bf2b8SNavdeep Parhar 	loc = (uintptr_t *) &t4_an_handler;
306671bf2b8SNavdeep Parhar 	atomic_store_rel_ptr(loc, new);
307671bf2b8SNavdeep Parhar 
308671bf2b8SNavdeep Parhar 	return (0);
309671bf2b8SNavdeep Parhar }
310671bf2b8SNavdeep Parhar 
/*
 * Default firmware-message handler: no handler was registered for this fw_msg
 * type, which indicates a driver bug.  Panic under INVARIANTS, otherwise log.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	/* rpl points into the payload of a cpl_fw6_msg; recover the CPL. */
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
324671bf2b8SNavdeep Parhar 
325671bf2b8SNavdeep Parhar int
326671bf2b8SNavdeep Parhar t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
327671bf2b8SNavdeep Parhar {
328671bf2b8SNavdeep Parhar 	uintptr_t *loc, new;
329671bf2b8SNavdeep Parhar 
330671bf2b8SNavdeep Parhar 	if (type >= nitems(t4_fw_msg_handler))
331671bf2b8SNavdeep Parhar 		return (EINVAL);
332671bf2b8SNavdeep Parhar 
333671bf2b8SNavdeep Parhar 	/*
334671bf2b8SNavdeep Parhar 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
335671bf2b8SNavdeep Parhar 	 * handler dispatch table.  Reject any attempt to install a handler for
336671bf2b8SNavdeep Parhar 	 * this subtype.
337671bf2b8SNavdeep Parhar 	 */
338671bf2b8SNavdeep Parhar 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
339671bf2b8SNavdeep Parhar 		return (EINVAL);
340671bf2b8SNavdeep Parhar 
341671bf2b8SNavdeep Parhar 	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
342671bf2b8SNavdeep Parhar 	loc = (uintptr_t *) &t4_fw_msg_handler[type];
343671bf2b8SNavdeep Parhar 	atomic_store_rel_ptr(loc, new);
344671bf2b8SNavdeep Parhar 
345671bf2b8SNavdeep Parhar 	return (0);
346671bf2b8SNavdeep Parhar }
347671bf2b8SNavdeep Parhar 
/*
 * Default CPL handler: no handler was registered for this opcode, which is a
 * driver bug.  Panic under INVARIANTS; otherwise log and free the mbuf (the
 * panic path never returns, so the leak there is moot).
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);
#endif
	return (EDOOFUS);
}
362671bf2b8SNavdeep Parhar 
363671bf2b8SNavdeep Parhar int
364671bf2b8SNavdeep Parhar t4_register_cpl_handler(int opcode, cpl_handler_t h)
365671bf2b8SNavdeep Parhar {
366671bf2b8SNavdeep Parhar 	uintptr_t *loc, new;
367671bf2b8SNavdeep Parhar 
368671bf2b8SNavdeep Parhar 	if (opcode >= nitems(t4_cpl_handler))
369671bf2b8SNavdeep Parhar 		return (EINVAL);
370671bf2b8SNavdeep Parhar 
371671bf2b8SNavdeep Parhar 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
372671bf2b8SNavdeep Parhar 	loc = (uintptr_t *) &t4_cpl_handler[opcode];
373671bf2b8SNavdeep Parhar 	atomic_store_rel_ptr(loc, new);
374671bf2b8SNavdeep Parhar 
375671bf2b8SNavdeep Parhar 	return (0);
376671bf2b8SNavdeep Parhar }
377671bf2b8SNavdeep Parhar 
/*
 * Called on MOD_LOAD.  Validates and calculates the SGE tunables, allocates
 * the extfree counters, and initializes the global dispatch tables with the
 * handlers that are known at module load time.
 */
void
t4_sge_modload(void)
{
	int i;

	/* fl_pktshift: DMA offset into the fl buffer; hardware allows 0-7. */
	if (fl_pktshift < 0 || fl_pktshift > 7) {
		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
		    " using 2 instead.\n", fl_pktshift);
		fl_pktshift = 2;
	}

	/* spg_len: status page size; only 64 and 128 are valid. */
	if (spg_len != 64 && spg_len != 128) {
		int len;

#if defined(__i386__) || defined(__amd64__)
		/* Prefer a whole cache line where the CPU flushes 128B. */
		len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
		len = 64;
#endif
		/* -1 means "auto", so complain only about explicit bad values. */
		if (spg_len != -1) {
			printf("Invalid hw.cxgbe.spg_len value (%d),"
			    " using %d instead.\n", spg_len, len);
		}
		spg_len = len;
	}

	/* cong_drop: -1 (no feedback), 0 (backpressure), or 1 (drop). */
	if (cong_drop < -1 || cong_drop > 1) {
		printf("Invalid hw.cxgbe.cong_drop value (%d),"
		    " using 0 instead.\n", cong_drop);
		cong_drop = 0;
	}

	/* tscale (T6+ holdoff timer multiplier): 1 or 3-17 only. */
	if (tscale != 1 && (tscale < 3 || tscale > 17)) {
		printf("Invalid hw.cxgbe.tscale value (%d),"
		    " using 1 instead.\n", tscale);
		tscale = 1;
	}

	/* Per-CPU counters that track outstanding external mbuf refs. */
	extfree_refs = counter_u64_alloc(M_WAITOK);
	extfree_rels = counter_u64_alloc(M_WAITOK);
	counter_u64_zero(extfree_refs);
	counter_u64_zero(extfree_rels);

	/* Start every dispatch slot at its complain-loudly default... */
	t4_an_handler = an_not_handled;
	for (i = 0; i < nitems(t4_fw_msg_handler); i++)
		t4_fw_msg_handler[i] = fw_msg_not_handled;
	for (i = 0; i < nitems(t4_cpl_handler); i++)
		t4_cpl_handler[i] = cpl_not_handled;

	/* ...then install the handlers this file provides. */
	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
	t4_register_cpl_handler(CPL_RX_PKT, t4_eth_rx);
	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
}
43782eff304SNavdeep Parhar 
43882eff304SNavdeep Parhar void
43982eff304SNavdeep Parhar t4_sge_modunload(void)
44082eff304SNavdeep Parhar {
44182eff304SNavdeep Parhar 
44282eff304SNavdeep Parhar 	counter_u64_free(extfree_refs);
44382eff304SNavdeep Parhar 	counter_u64_free(extfree_rels);
44482eff304SNavdeep Parhar }
44582eff304SNavdeep Parhar 
44682eff304SNavdeep Parhar uint64_t
44782eff304SNavdeep Parhar t4_sge_extfree_refs(void)
44882eff304SNavdeep Parhar {
44982eff304SNavdeep Parhar 	uint64_t refs, rels;
45082eff304SNavdeep Parhar 
45182eff304SNavdeep Parhar 	rels = counter_u64_fetch(extfree_rels);
45282eff304SNavdeep Parhar 	refs = counter_u64_fetch(extfree_refs);
45382eff304SNavdeep Parhar 
45482eff304SNavdeep Parhar 	return (refs - rels);
45594586193SNavdeep Parhar }
45694586193SNavdeep Parhar 
/*
 * Validate the fl_pad and fl_pack tunables and program the chip's ingress
 * padding (SGE_CONTROL) and packing (SGE_CONTROL2) boundaries accordingly.
 * On T4 there is no separate packing boundary; the pad boundary is used for
 * both.
 */
static inline void
setup_pad_and_pack_boundaries(struct adapter *sc)
{
	uint32_t v, m;
	int pad, pack, pad_shift;

	/* T6+ encodes the pad boundary with a different shift than T4/T5. */
	pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
	    X_INGPADBOUNDARY_SHIFT;
	pad = fl_pad;
	/* Reject values outside the encodable range or not a power of 2. */
	if (fl_pad < (1 << pad_shift) ||
	    fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
	    !powerof2(fl_pad)) {
		/*
		 * If there is any chance that we might use buffer packing and
		 * the chip is a T4, then pick 64 as the pad/pack boundary.  Set
		 * it to the minimum allowed in all other cases.
		 */
		pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;

		/*
		 * For fl_pad = 0 we'll still write a reasonable value to the
		 * register but all the freelists will opt out of padding.
		 * We'll complain here only if the user tried to set it to a
		 * value greater than 0 that was invalid.
		 */
		if (fl_pad > 0) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
			    " (%d), using %d instead.\n", fl_pad, pad);
		}
	}
	/* Register field holds log2(pad) - pad_shift. */
	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
	v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	if (is_t4(sc)) {
		/* T4 uses the pad boundary for packing too; fl_pack is moot. */
		if (fl_pack != -1 && fl_pack != pad) {
			/* Complain but carry on. */
			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
			    " using %d instead.\n", fl_pack, pad);
		}
		return;
	}

	/* T5+: valid pack boundaries are 16 or a power of 2 from 64 to 4096. */
	pack = fl_pack;
	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
	    !powerof2(fl_pack)) {
		/* Auto-select: at least the PCIe max payload / cache line. */
		pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
		MPASS(powerof2(pack));
		if (pack < 16)
			pack = 16;
		if (pack == 32)
			pack = 64;	/* 32 is not encodable, round up */
		if (pack > 4096)
			pack = 4096;
		if (fl_pack != -1) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
			    " (%d), using %d instead.\n", fl_pack, pack);
		}
	}
	/* Encoding: 0 means 16B, otherwise log2(pack) - 5. */
	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
	if (pack == 16)
		v = V_INGPACKBOUNDARY(0);
	else
		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);

	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
}
525e3207e19SNavdeep Parhar 
526cf738022SNavdeep Parhar /*
527cf738022SNavdeep Parhar  * adap->params.vpd.cclk must be set up before this is called.
528cf738022SNavdeep Parhar  */
529d14b0ac1SNavdeep Parhar void
530d14b0ac1SNavdeep Parhar t4_tweak_chip_settings(struct adapter *sc)
531d14b0ac1SNavdeep Parhar {
532d14b0ac1SNavdeep Parhar 	int i;
533d14b0ac1SNavdeep Parhar 	uint32_t v, m;
534d14b0ac1SNavdeep Parhar 	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
535cf738022SNavdeep Parhar 	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
536d14b0ac1SNavdeep Parhar 	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
537d14b0ac1SNavdeep Parhar 	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
53838035ed6SNavdeep Parhar 	static int sge_flbuf_sizes[] = {
5391458bff9SNavdeep Parhar 		MCLBYTES,
5401458bff9SNavdeep Parhar #if MJUMPAGESIZE != MCLBYTES
5411458bff9SNavdeep Parhar 		MJUMPAGESIZE,
54238035ed6SNavdeep Parhar 		MJUMPAGESIZE - CL_METADATA_SIZE,
54338035ed6SNavdeep Parhar 		MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE,
5441458bff9SNavdeep Parhar #endif
5451458bff9SNavdeep Parhar 		MJUM9BYTES,
5461458bff9SNavdeep Parhar 		MJUM16BYTES,
54738035ed6SNavdeep Parhar 		MCLBYTES - MSIZE - CL_METADATA_SIZE,
54838035ed6SNavdeep Parhar 		MJUM9BYTES - CL_METADATA_SIZE,
54938035ed6SNavdeep Parhar 		MJUM16BYTES - CL_METADATA_SIZE,
5501458bff9SNavdeep Parhar 	};
551d14b0ac1SNavdeep Parhar 
552d14b0ac1SNavdeep Parhar 	KASSERT(sc->flags & MASTER_PF,
553d14b0ac1SNavdeep Parhar 	    ("%s: trying to change chip settings when not master.", __func__));
554d14b0ac1SNavdeep Parhar 
5551458bff9SNavdeep Parhar 	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
556d14b0ac1SNavdeep Parhar 	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
5574defc81bSNavdeep Parhar 	    V_EGRSTATUSPAGESIZE(spg_len == 128);
558d14b0ac1SNavdeep Parhar 	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
55954e4ee71SNavdeep Parhar 
560e3207e19SNavdeep Parhar 	setup_pad_and_pack_boundaries(sc);
5611458bff9SNavdeep Parhar 
562d14b0ac1SNavdeep Parhar 	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
563733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
564733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
565733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
566733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
567733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
568733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
569733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
570d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);
571733b9277SNavdeep Parhar 
57238035ed6SNavdeep Parhar 	KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES,
57338035ed6SNavdeep Parhar 	    ("%s: hw buffer size table too big", __func__));
57438035ed6SNavdeep Parhar 	for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) {
57554e4ee71SNavdeep Parhar 		t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
57638035ed6SNavdeep Parhar 		    sge_flbuf_sizes[i]);
57754e4ee71SNavdeep Parhar 	}
57854e4ee71SNavdeep Parhar 
579d14b0ac1SNavdeep Parhar 	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
580d14b0ac1SNavdeep Parhar 	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
581d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);
58254e4ee71SNavdeep Parhar 
583cf738022SNavdeep Parhar 	KASSERT(intr_timer[0] <= timer_max,
584cf738022SNavdeep Parhar 	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
585cf738022SNavdeep Parhar 	    timer_max));
586cf738022SNavdeep Parhar 	for (i = 1; i < nitems(intr_timer); i++) {
587cf738022SNavdeep Parhar 		KASSERT(intr_timer[i] >= intr_timer[i - 1],
588cf738022SNavdeep Parhar 		    ("%s: timers not listed in increasing order (%d)",
589cf738022SNavdeep Parhar 		    __func__, i));
590cf738022SNavdeep Parhar 
591cf738022SNavdeep Parhar 		while (intr_timer[i] > timer_max) {
592cf738022SNavdeep Parhar 			if (i == nitems(intr_timer) - 1) {
593cf738022SNavdeep Parhar 				intr_timer[i] = timer_max;
594cf738022SNavdeep Parhar 				break;
595cf738022SNavdeep Parhar 			}
596cf738022SNavdeep Parhar 			intr_timer[i] += intr_timer[i - 1];
597cf738022SNavdeep Parhar 			intr_timer[i] /= 2;
598cf738022SNavdeep Parhar 		}
599cf738022SNavdeep Parhar 	}
600cf738022SNavdeep Parhar 
601d14b0ac1SNavdeep Parhar 	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
602d14b0ac1SNavdeep Parhar 	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
603d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
604d14b0ac1SNavdeep Parhar 	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
605d14b0ac1SNavdeep Parhar 	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
606d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
607d14b0ac1SNavdeep Parhar 	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
608d14b0ac1SNavdeep Parhar 	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
609d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);
61086e02bf2SNavdeep Parhar 
611d491f8caSNavdeep Parhar 	if (chip_id(sc) >= CHELSIO_T6) {
612d491f8caSNavdeep Parhar 		m = V_TSCALE(M_TSCALE);
613d491f8caSNavdeep Parhar 		if (tscale == 1)
614d491f8caSNavdeep Parhar 			v = 0;
615d491f8caSNavdeep Parhar 		else
616d491f8caSNavdeep Parhar 			v = V_TSCALE(tscale - 2);
617d491f8caSNavdeep Parhar 		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);
618*2f318252SNavdeep Parhar 
619*2f318252SNavdeep Parhar 		if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
620*2f318252SNavdeep Parhar 			m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
621*2f318252SNavdeep Parhar 			    V_WRTHRTHRESH(M_WRTHRTHRESH);
622*2f318252SNavdeep Parhar 			t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
623*2f318252SNavdeep Parhar 			v &= ~m;
624*2f318252SNavdeep Parhar 			v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
625*2f318252SNavdeep Parhar 			    V_WRTHRTHRESH(16);
626*2f318252SNavdeep Parhar 			t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
627*2f318252SNavdeep Parhar 		}
628d491f8caSNavdeep Parhar 	}
629d491f8caSNavdeep Parhar 
6307cba15b1SNavdeep Parhar 	/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
631d14b0ac1SNavdeep Parhar 	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
632d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);
633d14b0ac1SNavdeep Parhar 
6347cba15b1SNavdeep Parhar 	/*
6357cba15b1SNavdeep Parhar 	 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP.  These have been
6367cba15b1SNavdeep Parhar 	 * chosen with MAXPHYS = 128K in mind.  The largest DDP buffer that we
6377cba15b1SNavdeep Parhar 	 * may have to deal with is MAXPHYS + 1 page.
6387cba15b1SNavdeep Parhar 	 */
6397cba15b1SNavdeep Parhar 	v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
6407cba15b1SNavdeep Parhar 	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);
6417cba15b1SNavdeep Parhar 
6427cba15b1SNavdeep Parhar 	/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
6437cba15b1SNavdeep Parhar 	m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
644d14b0ac1SNavdeep Parhar 	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);
645d14b0ac1SNavdeep Parhar 
646d14b0ac1SNavdeep Parhar 	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
647d14b0ac1SNavdeep Parhar 	    F_RESETDDPOFFSET;
648d14b0ac1SNavdeep Parhar 	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
649d14b0ac1SNavdeep Parhar 	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
650d14b0ac1SNavdeep Parhar }
651d14b0ac1SNavdeep Parhar 
652d14b0ac1SNavdeep Parhar /*
653e3207e19SNavdeep Parhar  * SGE wants the buffer to be at least 64B and then a multiple of 16.  If
6548f6690d3SJohn Baldwin  * padding is in use, the buffer's start and end need to be aligned to the pad
655b741402cSNavdeep Parhar  * boundary as well.  We'll just make sure that the size is a multiple of the
656b741402cSNavdeep Parhar  * boundary here, it is up to the buffer allocation code to make sure the start
657b741402cSNavdeep Parhar  * of the buffer is aligned as well.
65838035ed6SNavdeep Parhar  */
65938035ed6SNavdeep Parhar static inline int
660e3207e19SNavdeep Parhar hwsz_ok(struct adapter *sc, int hwsz)
66138035ed6SNavdeep Parhar {
66290e7434aSNavdeep Parhar 	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;
66338035ed6SNavdeep Parhar 
664b741402cSNavdeep Parhar 	return (hwsz >= 64 && (hwsz & mask) == 0);
66538035ed6SNavdeep Parhar }
66638035ed6SNavdeep Parhar 
66738035ed6SNavdeep Parhar /*
668d14b0ac1SNavdeep Parhar  * XXX: driver really should be able to deal with unexpected settings.
669d14b0ac1SNavdeep Parhar  */
670d14b0ac1SNavdeep Parhar int
671d14b0ac1SNavdeep Parhar t4_read_chip_settings(struct adapter *sc)
672d14b0ac1SNavdeep Parhar {
673d14b0ac1SNavdeep Parhar 	struct sge *s = &sc->sge;
67490e7434aSNavdeep Parhar 	struct sge_params *sp = &sc->params.sge;
6751458bff9SNavdeep Parhar 	int i, j, n, rc = 0;
676d14b0ac1SNavdeep Parhar 	uint32_t m, v, r;
677d14b0ac1SNavdeep Parhar 	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
67838035ed6SNavdeep Parhar 	static int sw_buf_sizes[] = {	/* Sorted by size */
6791458bff9SNavdeep Parhar 		MCLBYTES,
6801458bff9SNavdeep Parhar #if MJUMPAGESIZE != MCLBYTES
6811458bff9SNavdeep Parhar 		MJUMPAGESIZE,
6821458bff9SNavdeep Parhar #endif
6831458bff9SNavdeep Parhar 		MJUM9BYTES,
6841458bff9SNavdeep Parhar 		MJUM16BYTES
6851458bff9SNavdeep Parhar 	};
68638035ed6SNavdeep Parhar 	struct sw_zone_info *swz, *safe_swz;
68738035ed6SNavdeep Parhar 	struct hw_buf_info *hwb;
688d14b0ac1SNavdeep Parhar 
68990e7434aSNavdeep Parhar 	m = F_RXPKTCPLMODE;
69090e7434aSNavdeep Parhar 	v = F_RXPKTCPLMODE;
69159c1e950SJohn Baldwin 	r = sc->params.sge.sge_control;
692d14b0ac1SNavdeep Parhar 	if ((r & m) != v) {
693d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
694733b9277SNavdeep Parhar 		rc = EINVAL;
695733b9277SNavdeep Parhar 	}
696733b9277SNavdeep Parhar 
69790e7434aSNavdeep Parhar 	/*
69890e7434aSNavdeep Parhar 	 * If this changes then every single use of PAGE_SHIFT in the driver
69990e7434aSNavdeep Parhar 	 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
70090e7434aSNavdeep Parhar 	 */
70190e7434aSNavdeep Parhar 	if (sp->page_shift != PAGE_SHIFT) {
702d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
703733b9277SNavdeep Parhar 		rc = EINVAL;
704733b9277SNavdeep Parhar 	}
705733b9277SNavdeep Parhar 
70638035ed6SNavdeep Parhar 	/* Filter out unusable hw buffer sizes entirely (mark with -2). */
70738035ed6SNavdeep Parhar 	hwb = &s->hw_buf_info[0];
70838035ed6SNavdeep Parhar 	for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) {
70959c1e950SJohn Baldwin 		r = sc->params.sge.sge_fl_buffer_size[i];
71038035ed6SNavdeep Parhar 		hwb->size = r;
711e3207e19SNavdeep Parhar 		hwb->zidx = hwsz_ok(sc, r) ? -1 : -2;
71238035ed6SNavdeep Parhar 		hwb->next = -1;
7131458bff9SNavdeep Parhar 	}
71438035ed6SNavdeep Parhar 
71538035ed6SNavdeep Parhar 	/*
71638035ed6SNavdeep Parhar 	 * Create a sorted list in decreasing order of hw buffer sizes (and so
71738035ed6SNavdeep Parhar 	 * increasing order of spare area) for each software zone.
718e3207e19SNavdeep Parhar 	 *
719e3207e19SNavdeep Parhar 	 * If padding is enabled then the start and end of the buffer must align
720e3207e19SNavdeep Parhar 	 * to the pad boundary; if packing is enabled then they must align with
721e3207e19SNavdeep Parhar 	 * the pack boundary as well.  Allocations from the cluster zones are
722e3207e19SNavdeep Parhar 	 * aligned to min(size, 4K), so the buffer starts at that alignment and
723e3207e19SNavdeep Parhar 	 * ends at hwb->size alignment.  If mbuf inlining is allowed the
724e3207e19SNavdeep Parhar 	 * starting alignment will be reduced to MSIZE and the driver will
725e3207e19SNavdeep Parhar 	 * exercise appropriate caution when deciding on the best buffer layout
726e3207e19SNavdeep Parhar 	 * to use.
72738035ed6SNavdeep Parhar 	 */
72838035ed6SNavdeep Parhar 	n = 0;	/* no usable buffer size to begin with */
72938035ed6SNavdeep Parhar 	swz = &s->sw_zone_info[0];
73038035ed6SNavdeep Parhar 	safe_swz = NULL;
73138035ed6SNavdeep Parhar 	for (i = 0; i < SW_ZONE_SIZES; i++, swz++) {
73238035ed6SNavdeep Parhar 		int8_t head = -1, tail = -1;
73338035ed6SNavdeep Parhar 
73438035ed6SNavdeep Parhar 		swz->size = sw_buf_sizes[i];
73538035ed6SNavdeep Parhar 		swz->zone = m_getzone(swz->size);
73638035ed6SNavdeep Parhar 		swz->type = m_gettype(swz->size);
73738035ed6SNavdeep Parhar 
738e3207e19SNavdeep Parhar 		if (swz->size < PAGE_SIZE) {
739e3207e19SNavdeep Parhar 			MPASS(powerof2(swz->size));
74090e7434aSNavdeep Parhar 			if (fl_pad && (swz->size % sp->pad_boundary != 0))
741e3207e19SNavdeep Parhar 				continue;
742e3207e19SNavdeep Parhar 		}
743e3207e19SNavdeep Parhar 
74438035ed6SNavdeep Parhar 		if (swz->size == safest_rx_cluster)
74538035ed6SNavdeep Parhar 			safe_swz = swz;
74638035ed6SNavdeep Parhar 
74738035ed6SNavdeep Parhar 		hwb = &s->hw_buf_info[0];
74838035ed6SNavdeep Parhar 		for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) {
74938035ed6SNavdeep Parhar 			if (hwb->zidx != -1 || hwb->size > swz->size)
7501458bff9SNavdeep Parhar 				continue;
751e3207e19SNavdeep Parhar #ifdef INVARIANTS
752e3207e19SNavdeep Parhar 			if (fl_pad)
75390e7434aSNavdeep Parhar 				MPASS(hwb->size % sp->pad_boundary == 0);
754e3207e19SNavdeep Parhar #endif
75538035ed6SNavdeep Parhar 			hwb->zidx = i;
75638035ed6SNavdeep Parhar 			if (head == -1)
75738035ed6SNavdeep Parhar 				head = tail = j;
75838035ed6SNavdeep Parhar 			else if (hwb->size < s->hw_buf_info[tail].size) {
75938035ed6SNavdeep Parhar 				s->hw_buf_info[tail].next = j;
76038035ed6SNavdeep Parhar 				tail = j;
76138035ed6SNavdeep Parhar 			} else {
76238035ed6SNavdeep Parhar 				int8_t *cur;
76338035ed6SNavdeep Parhar 				struct hw_buf_info *t;
76438035ed6SNavdeep Parhar 
76538035ed6SNavdeep Parhar 				for (cur = &head; *cur != -1; cur = &t->next) {
76638035ed6SNavdeep Parhar 					t = &s->hw_buf_info[*cur];
76738035ed6SNavdeep Parhar 					if (hwb->size == t->size) {
76838035ed6SNavdeep Parhar 						hwb->zidx = -2;
7691458bff9SNavdeep Parhar 						break;
7701458bff9SNavdeep Parhar 					}
77138035ed6SNavdeep Parhar 					if (hwb->size > t->size) {
77238035ed6SNavdeep Parhar 						hwb->next = *cur;
77338035ed6SNavdeep Parhar 						*cur = j;
77438035ed6SNavdeep Parhar 						break;
77538035ed6SNavdeep Parhar 					}
77638035ed6SNavdeep Parhar 				}
77738035ed6SNavdeep Parhar 			}
77838035ed6SNavdeep Parhar 		}
77938035ed6SNavdeep Parhar 		swz->head_hwidx = head;
78038035ed6SNavdeep Parhar 		swz->tail_hwidx = tail;
78138035ed6SNavdeep Parhar 
78238035ed6SNavdeep Parhar 		if (tail != -1) {
78338035ed6SNavdeep Parhar 			n++;
78438035ed6SNavdeep Parhar 			if (swz->size - s->hw_buf_info[tail].size >=
78538035ed6SNavdeep Parhar 			    CL_METADATA_SIZE)
78638035ed6SNavdeep Parhar 				sc->flags |= BUF_PACKING_OK;
78738035ed6SNavdeep Parhar 		}
7881458bff9SNavdeep Parhar 	}
7891458bff9SNavdeep Parhar 	if (n == 0) {
7901458bff9SNavdeep Parhar 		device_printf(sc->dev, "no usable SGE FL buffer size.\n");
7911458bff9SNavdeep Parhar 		rc = EINVAL;
792733b9277SNavdeep Parhar 	}
79338035ed6SNavdeep Parhar 
79438035ed6SNavdeep Parhar 	s->safe_hwidx1 = -1;
79538035ed6SNavdeep Parhar 	s->safe_hwidx2 = -1;
79638035ed6SNavdeep Parhar 	if (safe_swz != NULL) {
79738035ed6SNavdeep Parhar 		s->safe_hwidx1 = safe_swz->head_hwidx;
79838035ed6SNavdeep Parhar 		for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) {
79938035ed6SNavdeep Parhar 			int spare;
80038035ed6SNavdeep Parhar 
80138035ed6SNavdeep Parhar 			hwb = &s->hw_buf_info[i];
802e3207e19SNavdeep Parhar #ifdef INVARIANTS
803e3207e19SNavdeep Parhar 			if (fl_pad)
80490e7434aSNavdeep Parhar 				MPASS(hwb->size % sp->pad_boundary == 0);
805e3207e19SNavdeep Parhar #endif
80638035ed6SNavdeep Parhar 			spare = safe_swz->size - hwb->size;
807e3207e19SNavdeep Parhar 			if (spare >= CL_METADATA_SIZE) {
80838035ed6SNavdeep Parhar 				s->safe_hwidx2 = i;
80938035ed6SNavdeep Parhar 				break;
81038035ed6SNavdeep Parhar 			}
81138035ed6SNavdeep Parhar 		}
812e3207e19SNavdeep Parhar 	}
813733b9277SNavdeep Parhar 
8146af45170SJohn Baldwin 	if (sc->flags & IS_VF)
8156af45170SJohn Baldwin 		return (0);
8166af45170SJohn Baldwin 
817d14b0ac1SNavdeep Parhar 	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
818d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
819d14b0ac1SNavdeep Parhar 	if (r != v) {
820d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
821d14b0ac1SNavdeep Parhar 		rc = EINVAL;
822d14b0ac1SNavdeep Parhar 	}
823733b9277SNavdeep Parhar 
824d14b0ac1SNavdeep Parhar 	m = v = F_TDDPTAGTCB;
825d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_ULP_RX_CTL);
826d14b0ac1SNavdeep Parhar 	if ((r & m) != v) {
827d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
828d14b0ac1SNavdeep Parhar 		rc = EINVAL;
829d14b0ac1SNavdeep Parhar 	}
830d14b0ac1SNavdeep Parhar 
831d14b0ac1SNavdeep Parhar 	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
832d14b0ac1SNavdeep Parhar 	    F_RESETDDPOFFSET;
833d14b0ac1SNavdeep Parhar 	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
834d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_TP_PARA_REG5);
835d14b0ac1SNavdeep Parhar 	if ((r & m) != v) {
836d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
837d14b0ac1SNavdeep Parhar 		rc = EINVAL;
838d14b0ac1SNavdeep Parhar 	}
839d14b0ac1SNavdeep Parhar 
840c45b1868SNavdeep Parhar 	t4_init_tp_params(sc, 1);
841d14b0ac1SNavdeep Parhar 
842d14b0ac1SNavdeep Parhar 	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
843d14b0ac1SNavdeep Parhar 	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);
844d14b0ac1SNavdeep Parhar 
845733b9277SNavdeep Parhar 	return (rc);
84654e4ee71SNavdeep Parhar }
84754e4ee71SNavdeep Parhar 
84854e4ee71SNavdeep Parhar int
84954e4ee71SNavdeep Parhar t4_create_dma_tag(struct adapter *sc)
85054e4ee71SNavdeep Parhar {
85154e4ee71SNavdeep Parhar 	int rc;
85254e4ee71SNavdeep Parhar 
85354e4ee71SNavdeep Parhar 	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
85454e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
85554e4ee71SNavdeep Parhar 	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
85654e4ee71SNavdeep Parhar 	    NULL, &sc->dmat);
85754e4ee71SNavdeep Parhar 	if (rc != 0) {
85854e4ee71SNavdeep Parhar 		device_printf(sc->dev,
85954e4ee71SNavdeep Parhar 		    "failed to create main DMA tag: %d\n", rc);
86054e4ee71SNavdeep Parhar 	}
86154e4ee71SNavdeep Parhar 
86254e4ee71SNavdeep Parhar 	return (rc);
86354e4ee71SNavdeep Parhar }
86454e4ee71SNavdeep Parhar 
8656e22f9f3SNavdeep Parhar void
8666e22f9f3SNavdeep Parhar t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
8676e22f9f3SNavdeep Parhar     struct sysctl_oid_list *children)
8686e22f9f3SNavdeep Parhar {
86990e7434aSNavdeep Parhar 	struct sge_params *sp = &sc->params.sge;
8706e22f9f3SNavdeep Parhar 
87138035ed6SNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
87238035ed6SNavdeep Parhar 	    CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A",
87338035ed6SNavdeep Parhar 	    "freelist buffer sizes");
87438035ed6SNavdeep Parhar 
8756e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
87690e7434aSNavdeep Parhar 	    NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");
8776e22f9f3SNavdeep Parhar 
8786e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
87990e7434aSNavdeep Parhar 	    NULL, sp->pad_boundary, "payload pad boundary (bytes)");
8806e22f9f3SNavdeep Parhar 
8816e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
88290e7434aSNavdeep Parhar 	    NULL, sp->spg_len, "status page size (bytes)");
8836e22f9f3SNavdeep Parhar 
8846e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
8856e22f9f3SNavdeep Parhar 	    NULL, cong_drop, "congestion drop setting");
8861458bff9SNavdeep Parhar 
8871458bff9SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
88890e7434aSNavdeep Parhar 	    NULL, sp->pack_boundary, "payload pack boundary (bytes)");
8896e22f9f3SNavdeep Parhar }
8906e22f9f3SNavdeep Parhar 
89154e4ee71SNavdeep Parhar int
89254e4ee71SNavdeep Parhar t4_destroy_dma_tag(struct adapter *sc)
89354e4ee71SNavdeep Parhar {
89454e4ee71SNavdeep Parhar 	if (sc->dmat)
89554e4ee71SNavdeep Parhar 		bus_dma_tag_destroy(sc->dmat);
89654e4ee71SNavdeep Parhar 
89754e4ee71SNavdeep Parhar 	return (0);
89854e4ee71SNavdeep Parhar }
89954e4ee71SNavdeep Parhar 
90054e4ee71SNavdeep Parhar /*
901733b9277SNavdeep Parhar  * Allocate and initialize the firmware event queue and the management queue.
90254e4ee71SNavdeep Parhar  *
90354e4ee71SNavdeep Parhar  * Returns errno on failure.  Resources allocated up to that point may still be
90454e4ee71SNavdeep Parhar  * allocated.  Caller is responsible for cleanup in case this function fails.
90554e4ee71SNavdeep Parhar  */
90654e4ee71SNavdeep Parhar int
907f7dfe243SNavdeep Parhar t4_setup_adapter_queues(struct adapter *sc)
90854e4ee71SNavdeep Parhar {
909733b9277SNavdeep Parhar 	int rc;
91054e4ee71SNavdeep Parhar 
91154e4ee71SNavdeep Parhar 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
91254e4ee71SNavdeep Parhar 
913733b9277SNavdeep Parhar 	sysctl_ctx_init(&sc->ctx);
914733b9277SNavdeep Parhar 	sc->flags |= ADAP_SYSCTL_CTX;
91554e4ee71SNavdeep Parhar 
91656599263SNavdeep Parhar 	/*
91756599263SNavdeep Parhar 	 * Firmware event queue
91856599263SNavdeep Parhar 	 */
919733b9277SNavdeep Parhar 	rc = alloc_fwq(sc);
920aa95b653SNavdeep Parhar 	if (rc != 0)
921f7dfe243SNavdeep Parhar 		return (rc);
922f7dfe243SNavdeep Parhar 
923f7dfe243SNavdeep Parhar 	/*
924733b9277SNavdeep Parhar 	 * Management queue.  This is just a control queue that uses the fwq as
925733b9277SNavdeep Parhar 	 * its associated iq.
926f7dfe243SNavdeep Parhar 	 */
9276af45170SJohn Baldwin 	if (!(sc->flags & IS_VF))
928733b9277SNavdeep Parhar 		rc = alloc_mgmtq(sc);
92954e4ee71SNavdeep Parhar 
93054e4ee71SNavdeep Parhar 	return (rc);
93154e4ee71SNavdeep Parhar }
93254e4ee71SNavdeep Parhar 
93354e4ee71SNavdeep Parhar /*
93454e4ee71SNavdeep Parhar  * Idempotent
93554e4ee71SNavdeep Parhar  */
93654e4ee71SNavdeep Parhar int
937f7dfe243SNavdeep Parhar t4_teardown_adapter_queues(struct adapter *sc)
93854e4ee71SNavdeep Parhar {
93954e4ee71SNavdeep Parhar 
94054e4ee71SNavdeep Parhar 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
94154e4ee71SNavdeep Parhar 
942733b9277SNavdeep Parhar 	/* Do this before freeing the queue */
943733b9277SNavdeep Parhar 	if (sc->flags & ADAP_SYSCTL_CTX) {
944f7dfe243SNavdeep Parhar 		sysctl_ctx_free(&sc->ctx);
945733b9277SNavdeep Parhar 		sc->flags &= ~ADAP_SYSCTL_CTX;
946f7dfe243SNavdeep Parhar 	}
947f7dfe243SNavdeep Parhar 
948733b9277SNavdeep Parhar 	free_mgmtq(sc);
949733b9277SNavdeep Parhar 	free_fwq(sc);
95054e4ee71SNavdeep Parhar 
95154e4ee71SNavdeep Parhar 	return (0);
95254e4ee71SNavdeep Parhar }
95354e4ee71SNavdeep Parhar 
954733b9277SNavdeep Parhar static inline int
955fe2ebb76SJohn Baldwin first_vector(struct vi_info *vi)
956298d969cSNavdeep Parhar {
957fe2ebb76SJohn Baldwin 	struct adapter *sc = vi->pi->adapter;
95854e4ee71SNavdeep Parhar 
959733b9277SNavdeep Parhar 	if (sc->intr_count == 1)
960733b9277SNavdeep Parhar 		return (0);
96154e4ee71SNavdeep Parhar 
962fe2ebb76SJohn Baldwin 	return (vi->first_intr);
963733b9277SNavdeep Parhar }
964733b9277SNavdeep Parhar 
965733b9277SNavdeep Parhar /*
966733b9277SNavdeep Parhar  * Given an arbitrary "index," come up with an iq that can be used by other
967fe2ebb76SJohn Baldwin  * queues (of this VI) for interrupt forwarding, SGE egress updates, etc.
968733b9277SNavdeep Parhar  * The iq returned is guaranteed to be something that takes direct interrupts.
969733b9277SNavdeep Parhar  */
970733b9277SNavdeep Parhar static struct sge_iq *
971fe2ebb76SJohn Baldwin vi_intr_iq(struct vi_info *vi, int idx)
972733b9277SNavdeep Parhar {
973fe2ebb76SJohn Baldwin 	struct adapter *sc = vi->pi->adapter;
974733b9277SNavdeep Parhar 	struct sge *s = &sc->sge;
975733b9277SNavdeep Parhar 	struct sge_iq *iq = NULL;
976298d969cSNavdeep Parhar 	int nintr, i;
977733b9277SNavdeep Parhar 
978733b9277SNavdeep Parhar 	if (sc->intr_count == 1)
979733b9277SNavdeep Parhar 		return (&sc->sge.fwq);
980733b9277SNavdeep Parhar 
981fe2ebb76SJohn Baldwin 	nintr = vi->nintr;
982c8da9163SNavdeep Parhar #ifdef DEV_NETMAP
983c8da9163SNavdeep Parhar 	/* Do not consider any netmap-only interrupts */
984c8da9163SNavdeep Parhar 	if (vi->flags & INTR_RXQ && vi->nnmrxq > vi->nrxq)
985c8da9163SNavdeep Parhar 		nintr -= vi->nnmrxq - vi->nrxq;
986c8da9163SNavdeep Parhar #endif
987298d969cSNavdeep Parhar 	KASSERT(nintr != 0,
988fe2ebb76SJohn Baldwin 	    ("%s: vi %p has no exclusive interrupts, total interrupts = %d",
989fe2ebb76SJohn Baldwin 	    __func__, vi, sc->intr_count));
990298d969cSNavdeep Parhar 	i = idx % nintr;
991733b9277SNavdeep Parhar 
992fe2ebb76SJohn Baldwin 	if (vi->flags & INTR_RXQ) {
993fe2ebb76SJohn Baldwin 	       	if (i < vi->nrxq) {
994fe2ebb76SJohn Baldwin 			iq = &s->rxq[vi->first_rxq + i].iq;
995298d969cSNavdeep Parhar 			goto done;
996298d969cSNavdeep Parhar 		}
997fe2ebb76SJohn Baldwin 		i -= vi->nrxq;
998298d969cSNavdeep Parhar 	}
999298d969cSNavdeep Parhar #ifdef TCP_OFFLOAD
1000fe2ebb76SJohn Baldwin 	if (vi->flags & INTR_OFLD_RXQ) {
1001fe2ebb76SJohn Baldwin 	       	if (i < vi->nofldrxq) {
1002fe2ebb76SJohn Baldwin 			iq = &s->ofld_rxq[vi->first_ofld_rxq + i].iq;
1003298d969cSNavdeep Parhar 			goto done;
1004298d969cSNavdeep Parhar 		}
1005fe2ebb76SJohn Baldwin 		i -= vi->nofldrxq;
1006298d969cSNavdeep Parhar 	}
1007298d969cSNavdeep Parhar #endif
1008fe2ebb76SJohn Baldwin 	panic("%s: vi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__,
1009fe2ebb76SJohn Baldwin 	    vi, vi->flags & INTR_ALL, idx, nintr);
1010298d969cSNavdeep Parhar done:
1011298d969cSNavdeep Parhar 	MPASS(iq != NULL);
1012298d969cSNavdeep Parhar 	KASSERT(iq->flags & IQ_INTR,
1013fe2ebb76SJohn Baldwin 	    ("%s: iq %p (vi %p, intr_flags 0x%lx, idx %d)", __func__, iq, vi,
1014fe2ebb76SJohn Baldwin 	    vi->flags & INTR_ALL, idx));
1015733b9277SNavdeep Parhar 	return (iq);
1016733b9277SNavdeep Parhar }
1017733b9277SNavdeep Parhar 
101838035ed6SNavdeep Parhar /* Maximum payload that can be delivered with a single iq descriptor */
10198340ece5SNavdeep Parhar static inline int
102038035ed6SNavdeep Parhar mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
10218340ece5SNavdeep Parhar {
102238035ed6SNavdeep Parhar 	int payload;
10238340ece5SNavdeep Parhar 
10246eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD
102538035ed6SNavdeep Parhar 	if (toe) {
102638035ed6SNavdeep Parhar 		payload = sc->tt.rx_coalesce ?
102738035ed6SNavdeep Parhar 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)) : mtu;
102838035ed6SNavdeep Parhar 	} else {
102938035ed6SNavdeep Parhar #endif
103038035ed6SNavdeep Parhar 		/* large enough even when hw VLAN extraction is disabled */
103190e7434aSNavdeep Parhar 		payload = sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
103290e7434aSNavdeep Parhar 		    ETHER_VLAN_ENCAP_LEN + mtu;
103338035ed6SNavdeep Parhar #ifdef TCP_OFFLOAD
10346eb3180fSNavdeep Parhar 	}
10356eb3180fSNavdeep Parhar #endif
103638035ed6SNavdeep Parhar 
103738035ed6SNavdeep Parhar 	return (payload);
103838035ed6SNavdeep Parhar }
10396eb3180fSNavdeep Parhar 
1040733b9277SNavdeep Parhar int
1041fe2ebb76SJohn Baldwin t4_setup_vi_queues(struct vi_info *vi)
1042733b9277SNavdeep Parhar {
1043733b9277SNavdeep Parhar 	int rc = 0, i, j, intr_idx, iqid;
1044733b9277SNavdeep Parhar 	struct sge_rxq *rxq;
1045733b9277SNavdeep Parhar 	struct sge_txq *txq;
1046733b9277SNavdeep Parhar 	struct sge_wrq *ctrlq;
104709fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1048733b9277SNavdeep Parhar 	struct sge_ofld_rxq *ofld_rxq;
1049733b9277SNavdeep Parhar 	struct sge_wrq *ofld_txq;
1050298d969cSNavdeep Parhar #endif
1051298d969cSNavdeep Parhar #ifdef DEV_NETMAP
105262291463SNavdeep Parhar 	int saved_idx;
1053298d969cSNavdeep Parhar 	struct sge_nm_rxq *nm_rxq;
1054298d969cSNavdeep Parhar 	struct sge_nm_txq *nm_txq;
1055733b9277SNavdeep Parhar #endif
1056733b9277SNavdeep Parhar 	char name[16];
1057fe2ebb76SJohn Baldwin 	struct port_info *pi = vi->pi;
1058733b9277SNavdeep Parhar 	struct adapter *sc = pi->adapter;
1059fe2ebb76SJohn Baldwin 	struct ifnet *ifp = vi->ifp;
1060fe2ebb76SJohn Baldwin 	struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev);
1061733b9277SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
1062e3207e19SNavdeep Parhar 	int maxp, mtu = ifp->if_mtu;
1063733b9277SNavdeep Parhar 
1064733b9277SNavdeep Parhar 	/* Interrupt vector to start from (when using multiple vectors) */
1065fe2ebb76SJohn Baldwin 	intr_idx = first_vector(vi);
1066fe2ebb76SJohn Baldwin 
1067fe2ebb76SJohn Baldwin #ifdef DEV_NETMAP
106862291463SNavdeep Parhar 	saved_idx = intr_idx;
106962291463SNavdeep Parhar 	if (ifp->if_capabilities & IFCAP_NETMAP) {
107062291463SNavdeep Parhar 
107162291463SNavdeep Parhar 		/* netmap is supported with direct interrupts only. */
107262291463SNavdeep Parhar 		MPASS(vi->flags & INTR_RXQ);
107362291463SNavdeep Parhar 
1074fe2ebb76SJohn Baldwin 		/*
1075fe2ebb76SJohn Baldwin 		 * We don't have buffers to back the netmap rx queues
1076fe2ebb76SJohn Baldwin 		 * right now so we create the queues in a way that
1077fe2ebb76SJohn Baldwin 		 * doesn't set off any congestion signal in the chip.
1078fe2ebb76SJohn Baldwin 		 */
107962291463SNavdeep Parhar 		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq",
1080fe2ebb76SJohn Baldwin 		    CTLFLAG_RD, NULL, "rx queues");
1081fe2ebb76SJohn Baldwin 		for_each_nm_rxq(vi, i, nm_rxq) {
1082fe2ebb76SJohn Baldwin 			rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid);
1083fe2ebb76SJohn Baldwin 			if (rc != 0)
1084fe2ebb76SJohn Baldwin 				goto done;
1085fe2ebb76SJohn Baldwin 			intr_idx++;
1086fe2ebb76SJohn Baldwin 		}
1087fe2ebb76SJohn Baldwin 
108862291463SNavdeep Parhar 		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
1089fe2ebb76SJohn Baldwin 		    CTLFLAG_RD, NULL, "tx queues");
1090fe2ebb76SJohn Baldwin 		for_each_nm_txq(vi, i, nm_txq) {
109162291463SNavdeep Parhar 			iqid = vi->first_nm_rxq + (i % vi->nnmrxq);
1092fe2ebb76SJohn Baldwin 			rc = alloc_nm_txq(vi, nm_txq, iqid, i, oid);
1093fe2ebb76SJohn Baldwin 			if (rc != 0)
1094fe2ebb76SJohn Baldwin 				goto done;
1095fe2ebb76SJohn Baldwin 		}
1096fe2ebb76SJohn Baldwin 	}
109762291463SNavdeep Parhar 
109862291463SNavdeep Parhar 	/* Normal rx queues and netmap rx queues share the same interrupts. */
109962291463SNavdeep Parhar 	intr_idx = saved_idx;
1100fe2ebb76SJohn Baldwin #endif
1101733b9277SNavdeep Parhar 
1102733b9277SNavdeep Parhar 	/*
1103298d969cSNavdeep Parhar 	 * First pass over all NIC and TOE rx queues:
1104733b9277SNavdeep Parhar 	 * a) initialize iq and fl
1105733b9277SNavdeep Parhar 	 * b) allocate queue iff it will take direct interrupts.
1106733b9277SNavdeep Parhar 	 */
110738035ed6SNavdeep Parhar 	maxp = mtu_to_max_payload(sc, mtu, 0);
1108fe2ebb76SJohn Baldwin 	if (vi->flags & INTR_RXQ) {
1109fe2ebb76SJohn Baldwin 		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
1110298d969cSNavdeep Parhar 		    CTLFLAG_RD, NULL, "rx queues");
1111298d969cSNavdeep Parhar 	}
1112fe2ebb76SJohn Baldwin 	for_each_rxq(vi, i, rxq) {
111354e4ee71SNavdeep Parhar 
1114fe2ebb76SJohn Baldwin 		init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);
111554e4ee71SNavdeep Parhar 
111654e4ee71SNavdeep Parhar 		snprintf(name, sizeof(name), "%s rxq%d-fl",
1117fe2ebb76SJohn Baldwin 		    device_get_nameunit(vi->dev), i);
1118fe2ebb76SJohn Baldwin 		init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);
111954e4ee71SNavdeep Parhar 
1120fe2ebb76SJohn Baldwin 		if (vi->flags & INTR_RXQ) {
1121733b9277SNavdeep Parhar 			rxq->iq.flags |= IQ_INTR;
1122fe2ebb76SJohn Baldwin 			rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
112354e4ee71SNavdeep Parhar 			if (rc != 0)
112454e4ee71SNavdeep Parhar 				goto done;
1125733b9277SNavdeep Parhar 			intr_idx++;
1126733b9277SNavdeep Parhar 		}
112754e4ee71SNavdeep Parhar 	}
112862291463SNavdeep Parhar #ifdef DEV_NETMAP
112962291463SNavdeep Parhar 	if (ifp->if_capabilities & IFCAP_NETMAP)
113062291463SNavdeep Parhar 		intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
113162291463SNavdeep Parhar #endif
113209fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
113338035ed6SNavdeep Parhar 	maxp = mtu_to_max_payload(sc, mtu, 1);
1134fe2ebb76SJohn Baldwin 	if (vi->flags & INTR_OFLD_RXQ) {
1135fe2ebb76SJohn Baldwin 		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
1136298d969cSNavdeep Parhar 		    CTLFLAG_RD, NULL,
1137298d969cSNavdeep Parhar 		    "rx queues for offloaded TCP connections");
1138298d969cSNavdeep Parhar 	}
1139fe2ebb76SJohn Baldwin 	for_each_ofld_rxq(vi, i, ofld_rxq) {
1140733b9277SNavdeep Parhar 
1141fe2ebb76SJohn Baldwin 		init_iq(&ofld_rxq->iq, sc, vi->tmr_idx, vi->pktc_idx,
1142fe2ebb76SJohn Baldwin 		    vi->qsize_rxq);
1143733b9277SNavdeep Parhar 
1144733b9277SNavdeep Parhar 		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
1145fe2ebb76SJohn Baldwin 		    device_get_nameunit(vi->dev), i);
1146fe2ebb76SJohn Baldwin 		init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);
1147733b9277SNavdeep Parhar 
1148fe2ebb76SJohn Baldwin 		if (vi->flags & INTR_OFLD_RXQ) {
1149733b9277SNavdeep Parhar 			ofld_rxq->iq.flags |= IQ_INTR;
1150fe2ebb76SJohn Baldwin 			rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
1151733b9277SNavdeep Parhar 			if (rc != 0)
1152733b9277SNavdeep Parhar 				goto done;
1153733b9277SNavdeep Parhar 			intr_idx++;
1154733b9277SNavdeep Parhar 		}
1155733b9277SNavdeep Parhar 	}
1156733b9277SNavdeep Parhar #endif
1157733b9277SNavdeep Parhar 
1158733b9277SNavdeep Parhar 	/*
1159298d969cSNavdeep Parhar 	 * Second pass over all NIC and TOE rx queues.  The queues forwarding
1160733b9277SNavdeep Parhar 	 * their interrupts are allocated now.
1161733b9277SNavdeep Parhar 	 */
1162733b9277SNavdeep Parhar 	j = 0;
1163fe2ebb76SJohn Baldwin 	if (!(vi->flags & INTR_RXQ)) {
1164fe2ebb76SJohn Baldwin 		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
1165298d969cSNavdeep Parhar 		    CTLFLAG_RD, NULL, "rx queues");
1166fe2ebb76SJohn Baldwin 		for_each_rxq(vi, i, rxq) {
1167298d969cSNavdeep Parhar 			MPASS(!(rxq->iq.flags & IQ_INTR));
1168733b9277SNavdeep Parhar 
1169fe2ebb76SJohn Baldwin 			intr_idx = vi_intr_iq(vi, j)->abs_id;
1170733b9277SNavdeep Parhar 
1171fe2ebb76SJohn Baldwin 			rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
1172733b9277SNavdeep Parhar 			if (rc != 0)
1173733b9277SNavdeep Parhar 				goto done;
1174733b9277SNavdeep Parhar 			j++;
1175733b9277SNavdeep Parhar 		}
1176298d969cSNavdeep Parhar 	}
117709fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1178fe2ebb76SJohn Baldwin 	if (vi->nofldrxq != 0 && !(vi->flags & INTR_OFLD_RXQ)) {
1179fe2ebb76SJohn Baldwin 		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
1180298d969cSNavdeep Parhar 		    CTLFLAG_RD, NULL,
1181298d969cSNavdeep Parhar 		    "rx queues for offloaded TCP connections");
1182fe2ebb76SJohn Baldwin 		for_each_ofld_rxq(vi, i, ofld_rxq) {
1183298d969cSNavdeep Parhar 			MPASS(!(ofld_rxq->iq.flags & IQ_INTR));
1184733b9277SNavdeep Parhar 
1185fe2ebb76SJohn Baldwin 			intr_idx = vi_intr_iq(vi, j)->abs_id;
1186733b9277SNavdeep Parhar 
1187fe2ebb76SJohn Baldwin 			rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
1188733b9277SNavdeep Parhar 			if (rc != 0)
1189733b9277SNavdeep Parhar 				goto done;
1190733b9277SNavdeep Parhar 			j++;
1191733b9277SNavdeep Parhar 		}
1192298d969cSNavdeep Parhar 	}
1193298d969cSNavdeep Parhar #endif
1194733b9277SNavdeep Parhar 
1195733b9277SNavdeep Parhar 	/*
1196733b9277SNavdeep Parhar 	 * Now the tx queues.  Only one pass needed.
1197733b9277SNavdeep Parhar 	 */
1198fe2ebb76SJohn Baldwin 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
1199733b9277SNavdeep Parhar 	    NULL, "tx queues");
1200733b9277SNavdeep Parhar 	j = 0;
1201fe2ebb76SJohn Baldwin 	for_each_txq(vi, i, txq) {
1202fe2ebb76SJohn Baldwin 		iqid = vi_intr_iq(vi, j)->cntxt_id;
120354e4ee71SNavdeep Parhar 		snprintf(name, sizeof(name), "%s txq%d",
1204fe2ebb76SJohn Baldwin 		    device_get_nameunit(vi->dev), i);
120590e7434aSNavdeep Parhar 		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, iqid,
1206733b9277SNavdeep Parhar 		    name);
120754e4ee71SNavdeep Parhar 
1208fe2ebb76SJohn Baldwin 		rc = alloc_txq(vi, txq, i, oid);
120954e4ee71SNavdeep Parhar 		if (rc != 0)
121054e4ee71SNavdeep Parhar 			goto done;
1211733b9277SNavdeep Parhar 		j++;
121254e4ee71SNavdeep Parhar 	}
121309fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1214fe2ebb76SJohn Baldwin 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
1215733b9277SNavdeep Parhar 	    CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections");
1216fe2ebb76SJohn Baldwin 	for_each_ofld_txq(vi, i, ofld_txq) {
1217298d969cSNavdeep Parhar 		struct sysctl_oid *oid2;
1218733b9277SNavdeep Parhar 
1219fe2ebb76SJohn Baldwin 		iqid = vi_intr_iq(vi, j)->cntxt_id;
1220733b9277SNavdeep Parhar 		snprintf(name, sizeof(name), "%s ofld_txq%d",
1221fe2ebb76SJohn Baldwin 		    device_get_nameunit(vi->dev), i);
122290e7434aSNavdeep Parhar 		init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
1223733b9277SNavdeep Parhar 		    iqid, name);
1224733b9277SNavdeep Parhar 
1225733b9277SNavdeep Parhar 		snprintf(name, sizeof(name), "%d", i);
1226fe2ebb76SJohn Baldwin 		oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
1227733b9277SNavdeep Parhar 		    name, CTLFLAG_RD, NULL, "offload tx queue");
1228733b9277SNavdeep Parhar 
1229fe2ebb76SJohn Baldwin 		rc = alloc_wrq(sc, vi, ofld_txq, oid2);
1230298d969cSNavdeep Parhar 		if (rc != 0)
1231298d969cSNavdeep Parhar 			goto done;
1232298d969cSNavdeep Parhar 		j++;
1233298d969cSNavdeep Parhar 	}
1234298d969cSNavdeep Parhar #endif
1235733b9277SNavdeep Parhar 
1236733b9277SNavdeep Parhar 	/*
1237733b9277SNavdeep Parhar 	 * Finally, the control queue.
1238733b9277SNavdeep Parhar 	 */
12396af45170SJohn Baldwin 	if (!IS_MAIN_VI(vi) || sc->flags & IS_VF)
1240fe2ebb76SJohn Baldwin 		goto done;
1241fe2ebb76SJohn Baldwin 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
1242733b9277SNavdeep Parhar 	    NULL, "ctrl queue");
1243733b9277SNavdeep Parhar 	ctrlq = &sc->sge.ctrlq[pi->port_id];
1244fe2ebb76SJohn Baldwin 	iqid = vi_intr_iq(vi, 0)->cntxt_id;
1245fe2ebb76SJohn Baldwin 	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
124690e7434aSNavdeep Parhar 	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid,
124790e7434aSNavdeep Parhar 	    name);
1248fe2ebb76SJohn Baldwin 	rc = alloc_wrq(sc, vi, ctrlq, oid);
1249733b9277SNavdeep Parhar 
125054e4ee71SNavdeep Parhar done:
125154e4ee71SNavdeep Parhar 	if (rc)
1252fe2ebb76SJohn Baldwin 		t4_teardown_vi_queues(vi);
125354e4ee71SNavdeep Parhar 
125454e4ee71SNavdeep Parhar 	return (rc);
125554e4ee71SNavdeep Parhar }
125654e4ee71SNavdeep Parhar 
/*
 * Idempotent: frees whatever subset of this VI's queues currently exists and
 * may safely be called again (or called after a partially failed setup).
 */
int
t4_teardown_vi_queues(struct vi_info *vi)
{
	int i;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif

	/*
	 * Tear down the sysctl context before freeing the queues so that no
	 * sysctl node is left pointing at freed queue state.
	 */
	if (vi->flags & VI_SYSCTL_CTX) {
		sysctl_ctx_free(&vi->ctx);
		vi->flags &= ~VI_SYSCTL_CTX;
	}

#ifdef DEV_NETMAP
	/* Netmap queues are independent of the rest; free them first. */
	if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
		for_each_nm_txq(vi, i, nm_txq) {
			free_nm_txq(vi, nm_txq);
		}

		for_each_nm_rxq(vi, i, nm_rxq) {
			free_nm_rxq(vi, nm_rxq);
		}
	}
#endif

	/*
	 * Take down all the tx queues first, as they reference the rx queues
	 * (for egress updates, etc.).
	 */

	/* The ctrl queue exists only for the main VI on a PF. */
	if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
		free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);

	for_each_txq(vi, i, txq) {
		free_txq(vi, txq);
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_txq(vi, i, ofld_txq) {
		free_wrq(sc, ofld_txq);
	}
#endif

	/*
	 * Then take down the rx queues that forward their interrupts, as they
	 * reference other rx queues.
	 */

	for_each_rxq(vi, i, rxq) {
		if ((rxq->iq.flags & IQ_INTR) == 0)
			free_rxq(vi, rxq);
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
			free_ofld_rxq(vi, ofld_rxq);
	}
#endif

	/*
	 * Then take down the rx queues that take direct interrupts.
	 */

	for_each_rxq(vi, i, rxq) {
		if (rxq->iq.flags & IQ_INTR)
			free_rxq(vi, rxq);
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		if (ofld_rxq->iq.flags & IQ_INTR)
			free_ofld_rxq(vi, ofld_rxq);
	}
#endif

	return (0);
}
134554e4ee71SNavdeep Parhar 
1346733b9277SNavdeep Parhar /*
1347733b9277SNavdeep Parhar  * Deals with errors and the firmware event queue.  All data rx queues forward
1348733b9277SNavdeep Parhar  * their interrupt to the firmware event queue.
1349733b9277SNavdeep Parhar  */
135054e4ee71SNavdeep Parhar void
135154e4ee71SNavdeep Parhar t4_intr_all(void *arg)
135254e4ee71SNavdeep Parhar {
135354e4ee71SNavdeep Parhar 	struct adapter *sc = arg;
1354733b9277SNavdeep Parhar 	struct sge_iq *fwq = &sc->sge.fwq;
135554e4ee71SNavdeep Parhar 
135654e4ee71SNavdeep Parhar 	t4_intr_err(arg);
1357733b9277SNavdeep Parhar 	if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) {
1358733b9277SNavdeep Parhar 		service_iq(fwq, 0);
1359733b9277SNavdeep Parhar 		atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE);
136054e4ee71SNavdeep Parhar 	}
136154e4ee71SNavdeep Parhar }
136254e4ee71SNavdeep Parhar 
/* Deals with error interrupts */
void
t4_intr_err(void *arg)
{
	struct adapter *sc = arg;

	/* Clear this PF's interrupt cause before running the slow handler. */
	t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
	t4_slow_intr_handler(sc);
}
137254e4ee71SNavdeep Parhar 
137354e4ee71SNavdeep Parhar void
137454e4ee71SNavdeep Parhar t4_intr_evt(void *arg)
137554e4ee71SNavdeep Parhar {
137654e4ee71SNavdeep Parhar 	struct sge_iq *iq = arg;
13772be67d29SNavdeep Parhar 
1378733b9277SNavdeep Parhar 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
1379733b9277SNavdeep Parhar 		service_iq(iq, 0);
1380733b9277SNavdeep Parhar 		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
13812be67d29SNavdeep Parhar 	}
13822be67d29SNavdeep Parhar }
13832be67d29SNavdeep Parhar 
1384733b9277SNavdeep Parhar void
1385733b9277SNavdeep Parhar t4_intr(void *arg)
13862be67d29SNavdeep Parhar {
13872be67d29SNavdeep Parhar 	struct sge_iq *iq = arg;
1388733b9277SNavdeep Parhar 
1389733b9277SNavdeep Parhar 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
1390733b9277SNavdeep Parhar 		service_iq(iq, 0);
1391733b9277SNavdeep Parhar 		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
1392733b9277SNavdeep Parhar 	}
1393733b9277SNavdeep Parhar }
1394733b9277SNavdeep Parhar 
/*
 * Interrupt handler for an irq that may be shared between a netmap rx queue
 * and a NIC rx queue on the same VI.
 */
void
t4_vi_intr(void *arg)
{
	struct irq *irq = arg;

#ifdef DEV_NETMAP
	/* Service the netmap queue only if netmap is active on this irq. */
	if (atomic_cmpset_int(&irq->nm_state, NM_ON, NM_BUSY)) {
		t4_nm_intr(irq->nm_rxq);
		atomic_cmpset_int(&irq->nm_state, NM_BUSY, NM_ON);
	}
#endif
	/* The NIC rxq, if any, is serviced through the normal path. */
	if (irq->rxq != NULL)
		t4_intr(irq->rxq);
}
140962291463SNavdeep Parhar 
141046f48ee5SNavdeep Parhar static inline int
141146f48ee5SNavdeep Parhar sort_before_lro(struct lro_ctrl *lro)
141246f48ee5SNavdeep Parhar {
141346f48ee5SNavdeep Parhar 
141446f48ee5SNavdeep Parhar 	return (lro->lro_mbuf_max != 0);
141546f48ee5SNavdeep Parhar }
141646f48ee5SNavdeep Parhar 
/*
 * Deals with anything and everything on the given ingress queue.
 *
 * Processes responses until the queue is empty or, if budget is nonzero,
 * until roughly that many descriptors have been consumed (in which case it
 * returns EINPROGRESS and the caller is expected to come back).  Returns 0
 * when the queue has been drained.
 */
static int
service_iq(struct sge_iq *iq, int budget)
{
	struct sge_iq *q;
	struct sge_rxq *rxq = iq_to_rxq(iq);	/* Use iff iq is part of rxq */
	struct sge_fl *fl;			/* Use iff IQ_HAS_FL */
	struct adapter *sc = iq->adapter;
	struct iq_desc *d = &iq->desc[iq->cidx];
	int ndescs = 0, limit;
	int rsp_type, refill;
	uint32_t lq;
	uint16_t fl_hw_cidx;
	struct mbuf *m0;
	/* Queues whose forwarded interrupts could not be serviced inline. */
	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
#if defined(INET) || defined(INET6)
	const struct timeval lro_timeout = {0, sc->lro_timeout};
	struct lro_ctrl *lro = &rxq->lro;
#endif

	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));

	/* Descriptor budget per pass; default to 1/16 of the ring. */
	limit = budget ? budget : iq->qsize / 16;

	if (iq->flags & IQ_HAS_FL) {
		fl = &rxq->fl;
		fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
	} else {
		fl = NULL;
		fl_hw_cidx = 0;			/* to silence gcc warning */
	}

#if defined(INET) || defined(INET6)
	/*
	 * A credit was held back on the previous pass (to avoid flushing LRO
	 * state); account for it now.  If there is nothing new to process,
	 * flush LRO, return the credit, and rearm the queue.
	 */
	if (iq->flags & IQ_ADJ_CREDIT) {
		MPASS(sort_before_lro(lro));
		iq->flags &= ~IQ_ADJ_CREDIT;
		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
			tcp_lro_flush_all(lro);
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
			    V_INGRESSQID((u32)iq->cntxt_id) |
			    V_SEINTARM(iq->intr_params));
			return (0);
		}
		ndescs = 1;
	}
#else
	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
#endif

	/*
	 * We always come back and check the descriptor ring for new indirect
	 * interrupts and other responses after running a single handler.
	 */
	for (;;) {
		while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {

			/* Descriptor contents must be read after the gen bit. */
			rmb();

			refill = 0;
			m0 = NULL;
			rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
			lq = be32toh(d->rsp.pldbuflen_qid);

			switch (rsp_type) {
			case X_RSPD_TYPE_FLBUF:

				KASSERT(iq->flags & IQ_HAS_FL,
				    ("%s: data for an iq (%p) with no freelist",
				    __func__, iq));

				m0 = get_fl_payload(sc, fl, lq);
				if (__predict_false(m0 == NULL))
					goto process_iql;
				/* Refill once we're more than 2 bufs behind. */
				refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2;
#ifdef T4_PKT_TIMESTAMP
				/*
				 * 60 bit timestamp for the payload is
				 * *(uint64_t *)m0->m_pktdat.  Note that it is
				 * in the leading free-space in the mbuf.  The
				 * kernel can clobber it during a pullup,
				 * m_copymdata, etc.  You need to make sure that
				 * the mbuf reaches you unmolested if you care
				 * about the timestamp.
				 */
				*(uint64_t *)m0->m_pktdat =
				    be64toh(ctrl->u.last_flit) &
				    0xfffffffffffffff;
#endif

				/* fall through */

			case X_RSPD_TYPE_CPL:
				KASSERT(d->rss.opcode < NUM_CPL_CMDS,
				    ("%s: bad opcode %02x.", __func__,
				    d->rss.opcode));
				/* Dispatch via the per-opcode handler table. */
				t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
				break;

			case X_RSPD_TYPE_INTR:

				/*
				 * Interrupts should be forwarded only to queues
				 * that are not forwarding their interrupts.
				 * This means service_iq can recurse but only 1
				 * level deep.
				 */
				KASSERT(budget == 0,
				    ("%s: budget %u, rsp_type %u", __func__,
				    budget, rsp_type));

				/*
				 * There are 1K interrupt-capable queues (qids 0
				 * through 1023).  A response type indicating a
				 * forwarded interrupt with a qid >= 1K is an
				 * iWARP async notification.
				 */
				if (lq >= 1024) {
					t4_an_handler(iq, &d->rsp);
					break;
				}

				q = sc->sge.iqmap[lq - sc->sge.iq_start -
				    sc->sge.iq_base];
				if (atomic_cmpset_int(&q->state, IQS_IDLE,
				    IQS_BUSY)) {
					if (service_iq(q, q->qsize / 16) == 0) {
						atomic_cmpset_int(&q->state,
						    IQS_BUSY, IQS_IDLE);
					} else {
						/* Not done; revisit later. */
						STAILQ_INSERT_TAIL(&iql, q,
						    link);
					}
				}
				break;

			default:
				KASSERT(0,
				    ("%s: illegal response type %d on iq %p",
				    __func__, rsp_type, iq));
				log(LOG_ERR,
				    "%s: illegal response type %d on iq %p",
				    device_get_nameunit(sc->dev), rsp_type, iq);
				break;
			}

			/* Advance to the next descriptor, wrapping the ring. */
			d++;
			if (__predict_false(++iq->cidx == iq->sidx)) {
				iq->cidx = 0;
				iq->gen ^= F_RSPD_GEN;
				d = &iq->desc[0];
			}
			if (__predict_false(++ndescs == limit)) {
				/* Return credits without rearming interrupts. */
				t4_write_reg(sc, sc->sge_gts_reg,
				    V_CIDXINC(ndescs) |
				    V_INGRESSQID(iq->cntxt_id) |
				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
				ndescs = 0;

#if defined(INET) || defined(INET6)
				if (iq->flags & IQ_LRO_ENABLED &&
				    !sort_before_lro(lro) &&
				    sc->lro_timeout != 0) {
					tcp_lro_flush_inactive(lro,
					    &lro_timeout);
				}
#endif

				if (budget) {
					/* Budget exhausted; top up the fl and
					 * tell the caller to come back. */
					if (iq->flags & IQ_HAS_FL) {
						FL_LOCK(fl);
						refill_fl(sc, fl, 32);
						FL_UNLOCK(fl);
					}
					return (EINPROGRESS);
				}
			}
			if (refill) {
				FL_LOCK(fl);
				refill_fl(sc, fl, 32);
				FL_UNLOCK(fl);
				fl_hw_cidx = fl->hw_cidx;
			}
		}

process_iql:
		if (STAILQ_EMPTY(&iql))
			break;

		/*
		 * Process the head only, and send it to the back of the list if
		 * it's still not done.
		 */
		q = STAILQ_FIRST(&iql);
		STAILQ_REMOVE_HEAD(&iql, link);
		if (service_iq(q, q->qsize / 8) == 0)
			atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
		else
			STAILQ_INSERT_TAIL(&iql, q, link);
	}

#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
			MPASS(sort_before_lro(lro));
			/* hold back one credit and don't flush LRO state */
			iq->flags |= IQ_ADJ_CREDIT;
			ndescs--;
		} else {
			tcp_lro_flush_all(lro);
		}
	}
#endif

	/* Return all remaining credits and rearm the interrupt. */
	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	if (iq->flags & IQ_HAS_FL) {
		int starved;

		FL_LOCK(fl);
		starved = refill_fl(sc, fl, 64);
		FL_UNLOCK(fl);
		/* A starved freelist goes on the list for periodic refills. */
		if (__predict_false(starved != 0))
			add_fl_to_sfl(sc, fl);
	}

	return (0);
}
1647733b9277SNavdeep Parhar 
164838035ed6SNavdeep Parhar static inline int
164938035ed6SNavdeep Parhar cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll)
16501458bff9SNavdeep Parhar {
165138035ed6SNavdeep Parhar 	int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0;
16521458bff9SNavdeep Parhar 
165338035ed6SNavdeep Parhar 	if (rc)
165438035ed6SNavdeep Parhar 		MPASS(cll->region3 >= CL_METADATA_SIZE);
165538035ed6SNavdeep Parhar 
165638035ed6SNavdeep Parhar 	return (rc);
16571458bff9SNavdeep Parhar }
16581458bff9SNavdeep Parhar 
165938035ed6SNavdeep Parhar static inline struct cluster_metadata *
166038035ed6SNavdeep Parhar cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll,
166138035ed6SNavdeep Parhar     caddr_t cl)
16621458bff9SNavdeep Parhar {
16631458bff9SNavdeep Parhar 
166438035ed6SNavdeep Parhar 	if (cl_has_metadata(fl, cll)) {
166538035ed6SNavdeep Parhar 		struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx];
16661458bff9SNavdeep Parhar 
166738035ed6SNavdeep Parhar 		return ((struct cluster_metadata *)(cl + swz->size) - 1);
16681458bff9SNavdeep Parhar 	}
166938035ed6SNavdeep Parhar 	return (NULL);
16701458bff9SNavdeep Parhar }
16711458bff9SNavdeep Parhar 
167215c28f87SGleb Smirnoff static void
16731458bff9SNavdeep Parhar rxb_free(struct mbuf *m, void *arg1, void *arg2)
16741458bff9SNavdeep Parhar {
16751458bff9SNavdeep Parhar 	uma_zone_t zone = arg1;
16761458bff9SNavdeep Parhar 	caddr_t cl = arg2;
16771458bff9SNavdeep Parhar 
16781458bff9SNavdeep Parhar 	uma_zfree(zone, cl);
167982eff304SNavdeep Parhar 	counter_u64_add(extfree_rels, 1);
16801458bff9SNavdeep Parhar }
16811458bff9SNavdeep Parhar 
168238035ed6SNavdeep Parhar /*
168338035ed6SNavdeep Parhar  * The mbuf returned by this function could be allocated from zone_mbuf or
168438035ed6SNavdeep Parhar  * constructed in spare room in the cluster.
168538035ed6SNavdeep Parhar  *
168638035ed6SNavdeep Parhar  * The mbuf carries the payload in one of these ways
168738035ed6SNavdeep Parhar  * a) frame inside the mbuf (mbuf from zone_mbuf)
168838035ed6SNavdeep Parhar  * b) m_cljset (for clusters without metadata) zone_mbuf
168938035ed6SNavdeep Parhar  * c) m_extaddref (cluster with metadata) inline mbuf
169038035ed6SNavdeep Parhar  * d) m_extaddref (cluster with metadata) zone_mbuf
169138035ed6SNavdeep Parhar  */
16921458bff9SNavdeep Parhar static struct mbuf *
1693b741402cSNavdeep Parhar get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
1694b741402cSNavdeep Parhar     int remaining)
169538035ed6SNavdeep Parhar {
169638035ed6SNavdeep Parhar 	struct mbuf *m;
169738035ed6SNavdeep Parhar 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
169838035ed6SNavdeep Parhar 	struct cluster_layout *cll = &sd->cll;
169938035ed6SNavdeep Parhar 	struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx];
170038035ed6SNavdeep Parhar 	struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx];
170138035ed6SNavdeep Parhar 	struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl);
1702b741402cSNavdeep Parhar 	int len, blen;
170338035ed6SNavdeep Parhar 	caddr_t payload;
170438035ed6SNavdeep Parhar 
1705b741402cSNavdeep Parhar 	blen = hwb->size - fl->rx_offset;	/* max possible in this buf */
1706b741402cSNavdeep Parhar 	len = min(remaining, blen);
170738035ed6SNavdeep Parhar 	payload = sd->cl + cll->region1 + fl->rx_offset;
1708e3207e19SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING) {
1709b741402cSNavdeep Parhar 		const u_int l = fr_offset + len;
1710b741402cSNavdeep Parhar 		const u_int pad = roundup2(l, fl->buf_boundary) - l;
1711b741402cSNavdeep Parhar 
1712b741402cSNavdeep Parhar 		if (fl->rx_offset + len + pad < hwb->size)
1713b741402cSNavdeep Parhar 			blen = len + pad;
1714b741402cSNavdeep Parhar 		MPASS(fl->rx_offset + blen <= hwb->size);
1715e3207e19SNavdeep Parhar 	} else {
1716e3207e19SNavdeep Parhar 		MPASS(fl->rx_offset == 0);	/* not packing */
1717e3207e19SNavdeep Parhar 	}
171838035ed6SNavdeep Parhar 
1719b741402cSNavdeep Parhar 
172038035ed6SNavdeep Parhar 	if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {
172138035ed6SNavdeep Parhar 
172238035ed6SNavdeep Parhar 		/*
172338035ed6SNavdeep Parhar 		 * Copy payload into a freshly allocated mbuf.
172438035ed6SNavdeep Parhar 		 */
172538035ed6SNavdeep Parhar 
1726b741402cSNavdeep Parhar 		m = fr_offset == 0 ?
172738035ed6SNavdeep Parhar 		    m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA);
172838035ed6SNavdeep Parhar 		if (m == NULL)
172938035ed6SNavdeep Parhar 			return (NULL);
173038035ed6SNavdeep Parhar 		fl->mbuf_allocated++;
173138035ed6SNavdeep Parhar #ifdef T4_PKT_TIMESTAMP
173238035ed6SNavdeep Parhar 		/* Leave room for a timestamp */
173338035ed6SNavdeep Parhar 		m->m_data += 8;
173438035ed6SNavdeep Parhar #endif
173538035ed6SNavdeep Parhar 		/* copy data to mbuf */
173638035ed6SNavdeep Parhar 		bcopy(payload, mtod(m, caddr_t), len);
173738035ed6SNavdeep Parhar 
1738c3fb7725SNavdeep Parhar 	} else if (sd->nmbuf * MSIZE < cll->region1) {
173938035ed6SNavdeep Parhar 
174038035ed6SNavdeep Parhar 		/*
174138035ed6SNavdeep Parhar 		 * There's spare room in the cluster for an mbuf.  Create one
1742ccc69b2fSNavdeep Parhar 		 * and associate it with the payload that's in the cluster.
174338035ed6SNavdeep Parhar 		 */
174438035ed6SNavdeep Parhar 
174538035ed6SNavdeep Parhar 		MPASS(clm != NULL);
1746c3fb7725SNavdeep Parhar 		m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE);
174738035ed6SNavdeep Parhar 		/* No bzero required */
1748b4b12e52SGleb Smirnoff 		if (m_init(m, M_NOWAIT, MT_DATA,
1749b741402cSNavdeep Parhar 		    fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE))
175038035ed6SNavdeep Parhar 			return (NULL);
175138035ed6SNavdeep Parhar 		fl->mbuf_inlined++;
1752b741402cSNavdeep Parhar 		m_extaddref(m, payload, blen, &clm->refcount, rxb_free,
175338035ed6SNavdeep Parhar 		    swz->zone, sd->cl);
175482eff304SNavdeep Parhar 		if (sd->nmbuf++ == 0)
175582eff304SNavdeep Parhar 			counter_u64_add(extfree_refs, 1);
175638035ed6SNavdeep Parhar 
175738035ed6SNavdeep Parhar 	} else {
175838035ed6SNavdeep Parhar 
175938035ed6SNavdeep Parhar 		/*
176038035ed6SNavdeep Parhar 		 * Grab an mbuf from zone_mbuf and associate it with the
176138035ed6SNavdeep Parhar 		 * payload in the cluster.
176238035ed6SNavdeep Parhar 		 */
176338035ed6SNavdeep Parhar 
1764b741402cSNavdeep Parhar 		m = fr_offset == 0 ?
176538035ed6SNavdeep Parhar 		    m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA);
176638035ed6SNavdeep Parhar 		if (m == NULL)
176738035ed6SNavdeep Parhar 			return (NULL);
176838035ed6SNavdeep Parhar 		fl->mbuf_allocated++;
1769ccc69b2fSNavdeep Parhar 		if (clm != NULL) {
1770b741402cSNavdeep Parhar 			m_extaddref(m, payload, blen, &clm->refcount,
177138035ed6SNavdeep Parhar 			    rxb_free, swz->zone, sd->cl);
177282eff304SNavdeep Parhar 			if (sd->nmbuf++ == 0)
177382eff304SNavdeep Parhar 				counter_u64_add(extfree_refs, 1);
1774ccc69b2fSNavdeep Parhar 		} else {
177538035ed6SNavdeep Parhar 			m_cljset(m, sd->cl, swz->type);
177638035ed6SNavdeep Parhar 			sd->cl = NULL;	/* consumed, not a recycle candidate */
177738035ed6SNavdeep Parhar 		}
177838035ed6SNavdeep Parhar 	}
1779b741402cSNavdeep Parhar 	if (fr_offset == 0)
1780b741402cSNavdeep Parhar 		m->m_pkthdr.len = remaining;
178138035ed6SNavdeep Parhar 	m->m_len = len;
178238035ed6SNavdeep Parhar 
178338035ed6SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING) {
1784b741402cSNavdeep Parhar 		fl->rx_offset += blen;
178538035ed6SNavdeep Parhar 		MPASS(fl->rx_offset <= hwb->size);
178638035ed6SNavdeep Parhar 		if (fl->rx_offset < hwb->size)
178738035ed6SNavdeep Parhar 			return (m);	/* without advancing the cidx */
178838035ed6SNavdeep Parhar 	}
178938035ed6SNavdeep Parhar 
17904d6db4e0SNavdeep Parhar 	if (__predict_false(++fl->cidx % 8 == 0)) {
17914d6db4e0SNavdeep Parhar 		uint16_t cidx = fl->cidx / 8;
17924d6db4e0SNavdeep Parhar 
17934d6db4e0SNavdeep Parhar 		if (__predict_false(cidx == fl->sidx))
17944d6db4e0SNavdeep Parhar 			fl->cidx = cidx = 0;
17954d6db4e0SNavdeep Parhar 		fl->hw_cidx = cidx;
17964d6db4e0SNavdeep Parhar 	}
179738035ed6SNavdeep Parhar 	fl->rx_offset = 0;
179838035ed6SNavdeep Parhar 
179938035ed6SNavdeep Parhar 	return (m);
180038035ed6SNavdeep Parhar }
180138035ed6SNavdeep Parhar 
/*
 * Pull one frame's payload off the freelist and assemble it into an mbuf
 * chain.  'len_newbuf' is the raw RSPD length field: the total payload
 * length plus the F_RSPD_NEWBUF flag.  Returns NULL if an mbuf allocation
 * fails mid-chain; in that case the partial chain and progress are parked
 * in the fl (FL_BUF_RESUME) so the next call resumes from the same spot.
 */
static struct mbuf *
get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf)
{
	struct mbuf *m0, *m, **pnext;
	u_int remaining;
	const u_int total = G_RSPD_LEN(len_newbuf);

	if (__predict_false(fl->flags & FL_BUF_RESUME)) {
		/* Resume a frame that ran out of mbufs on a previous call. */
		M_ASSERTPKTHDR(fl->m0);
		MPASS(fl->m0->m_pkthdr.len == total);
		MPASS(fl->remaining < total);

		m0 = fl->m0;
		pnext = fl->pnext;
		remaining = fl->remaining;
		fl->flags &= ~FL_BUF_RESUME;
		goto get_segment;
	}

	if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) {
		/*
		 * The hw started this frame in a fresh buffer; retire the
		 * partially consumed previous buffer first.  fl->cidx counts
		 * individual buffers; the hw cidx advances one descriptor
		 * per 8 buffer indices.
		 */
		fl->rx_offset = 0;
		if (__predict_false(++fl->cidx % 8 == 0)) {
			uint16_t cidx = fl->cidx / 8;

			if (__predict_false(cidx == fl->sidx))
				fl->cidx = cidx = 0;	/* ring wrap */
			fl->hw_cidx = cidx;
		}
	}

	/*
	 * Payload starts at rx_offset in the current hw buffer.  Its length is
	 * 'len' and it may span multiple hw buffers.
	 */

	m0 = get_scatter_segment(sc, fl, 0, total);
	if (m0 == NULL)
		return (NULL);
	remaining = total - m0->m_len;
	pnext = &m0->m_next;
	while (remaining > 0) {
get_segment:
		MPASS(fl->rx_offset == 0);
		m = get_scatter_segment(sc, fl, total - remaining, remaining);
		if (__predict_false(m == NULL)) {
			/* Out of mbufs: save state and let the caller retry. */
			fl->m0 = m0;
			fl->pnext = pnext;
			fl->remaining = remaining;
			fl->flags |= FL_BUF_RESUME;
			return (NULL);
		}
		*pnext = m;
		pnext = &m->m_next;
		remaining -= m->m_len;
	}
	*pnext = NULL;

	M_ASSERTPKTHDR(m0);
	return (m0);
}
1862733b9277SNavdeep Parhar 
/*
 * Handler for an ingress CPL_RX_PKT: fix up the received frame (strip the
 * chip-inserted pad, record flowid and rx checksum/VLAN offload results)
 * and hand it to LRO or directly to the ifnet input routine.
 * Always returns 0.
 */
static int
t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
{
	struct sge_rxq *rxq = iq_to_rxq(iq);
	struct ifnet *ifp = rxq->ifp;
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_pkt *cpl = (const void *)(rss + 1);
#if defined(INET) || defined(INET6)
	struct lro_ctrl *lro = &rxq->lro;
#endif
	/* Map hw RSS hash_type x ipv6 to the stack's M_HASHTYPE_* values. */
	static const int sw_hashtype[4][2] = {
		{M_HASHTYPE_NONE, M_HASHTYPE_NONE},
		{M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6},
		{M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6},
		{M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6},
	};

	KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	/* Trim fl_pktshift bytes of padding from the front of the frame. */
	m0->m_pkthdr.len -= sc->params.sge.fl_pktshift;
	m0->m_len -= sc->params.sge.fl_pktshift;
	m0->m_data += sc->params.sge.fl_pktshift;

	m0->m_pkthdr.rcvif = ifp;
	M_HASHTYPE_SET(m0, sw_hashtype[rss->hash_type][rss->ipv6]);
	m0->m_pkthdr.flowid = be32toh(rss->hash_val);

	/* Only trust the hw checksum if it was computed without errors. */
	if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) {
		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    cpl->l2info & htobe32(F_RXF_IP)) {
			m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			rxq->rxcsum++;
		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
		    cpl->l2info & htobe32(F_RXF_IP6)) {
			m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
			    CSUM_PSEUDO_HDR);
			rxq->rxcsum++;
		}

		/*
		 * IP fragments carry the raw partial checksum; complete
		 * datagrams report the "fully verified" value (0xffff).
		 */
		if (__predict_false(cpl->ip_frag))
			m0->m_pkthdr.csum_data = be16toh(cpl->csum);
		else
			m0->m_pkthdr.csum_data = 0xffff;
	}

	if (cpl->vlan_ex) {
		/* VLAN tag was extracted by the hw; reattach it in sw form. */
		m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
		m0->m_flags |= M_VLANTAG;
		rxq->vlan_extraction++;
	}

#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		if (sort_before_lro(lro)) {
			tcp_lro_queue_mbuf(lro, m0);
			return (0); /* queued for sort, then LRO */
		}
		if (tcp_lro_rx(lro, m0, 0) == 0)
			return (0); /* queued for LRO */
	}
#endif
	ifp->if_input(ifp, m0);

	return (0);
}
193054e4ee71SNavdeep Parhar 
1931733b9277SNavdeep Parhar /*
19327951040fSNavdeep Parhar  * Must drain the wrq or make sure that someone else will.
19337951040fSNavdeep Parhar  */
19347951040fSNavdeep Parhar static void
19357951040fSNavdeep Parhar wrq_tx_drain(void *arg, int n)
19367951040fSNavdeep Parhar {
19377951040fSNavdeep Parhar 	struct sge_wrq *wrq = arg;
19387951040fSNavdeep Parhar 	struct sge_eq *eq = &wrq->eq;
19397951040fSNavdeep Parhar 
19407951040fSNavdeep Parhar 	EQ_LOCK(eq);
19417951040fSNavdeep Parhar 	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
19427951040fSNavdeep Parhar 		drain_wrq_wr_list(wrq->adapter, wrq);
19437951040fSNavdeep Parhar 	EQ_UNLOCK(eq);
19447951040fSNavdeep Parhar }
19457951040fSNavdeep Parhar 
/*
 * Copy as many queued work requests as will fit from wrq->wr_list into the
 * hardware descriptor ring and ring the doorbell.  Called with the EQ lock
 * held, no incomplete (in-ring) WRs outstanding, and at least one WR queued.
 */
static void
drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq)
{
	struct sge_eq *eq = &wrq->eq;
	u_int available, dbdiff;	/* # of hardware descriptors */
	u_int n;
	struct wrqe *wr;
	struct fw_eth_tx_pkt_wr *dst;	/* any fw WR struct will do */

	EQ_LOCK_ASSERT_OWNED(eq);
	MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
	wr = STAILQ_FIRST(&wrq->wr_list);
	MPASS(wr != NULL);	/* Must be called with something useful to do */
	MPASS(eq->pidx == eq->dbidx);
	dbdiff = 0;

	do {
		/* Recompute free space; keep one descriptor in reserve. */
		eq->cidx = read_hw_cidx(eq);
		if (eq->pidx == eq->cidx)
			available = eq->sidx - 1;
		else
			available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;

		MPASS(wr->wrq == wrq);
		n = howmany(wr->wr_len, EQ_ESIZE);
		if (available < n)
			break;	/* WR stays queued; try again later. */

		dst = (void *)&eq->desc[eq->pidx];
		if (__predict_true(eq->sidx - eq->pidx > n)) {
			/* Won't wrap, won't end exactly at the status page. */
			bcopy(&wr->wr[0], dst, wr->wr_len);
			eq->pidx += n;
		} else {
			/* WR wraps around the end of the ring: two copies. */
			int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE;

			bcopy(&wr->wr[0], dst, first_portion);
			if (wr->wr_len > first_portion) {
				bcopy(&wr->wr[first_portion], &eq->desc[0],
				    wr->wr_len - first_portion);
			}
			eq->pidx = n - (eq->sidx - eq->pidx);
		}
		wrq->tx_wrs_copied++;

		/*
		 * Ask the hw for an egress update: unconditionally when the
		 * ring is getting full (< 1/4 free), otherwise at least every
		 * 32 descriptors.
		 */
		if (available < eq->sidx / 4 &&
		    atomic_cmpset_int(&eq->equiq, 0, 1)) {
			dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
			    F_FW_WR_EQUEQ);
			eq->equeqidx = eq->pidx;
		} else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) {
			dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			eq->equeqidx = eq->pidx;
		}

		/* Batch doorbell rings: at most one per 16 descriptors. */
		dbdiff += n;
		if (dbdiff >= 16) {
			ring_eq_db(sc, eq, dbdiff);
			dbdiff = 0;
		}

		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
		free_wrqe(wr);
		MPASS(wrq->nwr_pending > 0);
		wrq->nwr_pending--;
		MPASS(wrq->ndesc_needed >= n);
		wrq->ndesc_needed -= n;
	} while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL);

	/* Flush any doorbell credit still pending. */
	if (dbdiff)
		ring_eq_db(sc, eq, dbdiff);
}
20187951040fSNavdeep Parhar 
/*
 * Enqueue a work request on the wrq and try to push it (plus anything else
 * already queued) out to the hardware.  Doesn't fail: WRs that can't be
 * written to the descriptor ring right away stay on wr_list until there is
 * room.  Called with the EQ lock held.
 */
void
t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
{
#ifdef INVARIANTS
	struct sge_eq *eq = &wrq->eq;
#endif

	EQ_LOCK_ASSERT_OWNED(eq);
	MPASS(wr != NULL);
	MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN);
	MPASS((wr->wr_len & 0x7) == 0);	/* WR length must be a multiple of 8 */

	STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);
	wrq->nwr_pending++;
	wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE);

	if (!TAILQ_EMPTY(&wrq->incomplete_wrs))
		return;	/* commit_wrq_wr will drain wr_list as well. */

	drain_wrq_wr_list(sc, wrq);

	/* Doorbell must have caught up to the pidx. */
	MPASS(eq->pidx == eq->dbidx);
}
204654e4ee71SNavdeep Parhar 
204754e4ee71SNavdeep Parhar void
204854e4ee71SNavdeep Parhar t4_update_fl_bufsize(struct ifnet *ifp)
204954e4ee71SNavdeep Parhar {
2050fe2ebb76SJohn Baldwin 	struct vi_info *vi = ifp->if_softc;
2051fe2ebb76SJohn Baldwin 	struct adapter *sc = vi->pi->adapter;
205254e4ee71SNavdeep Parhar 	struct sge_rxq *rxq;
20536eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD
20546eb3180fSNavdeep Parhar 	struct sge_ofld_rxq *ofld_rxq;
20556eb3180fSNavdeep Parhar #endif
205654e4ee71SNavdeep Parhar 	struct sge_fl *fl;
205738035ed6SNavdeep Parhar 	int i, maxp, mtu = ifp->if_mtu;
205854e4ee71SNavdeep Parhar 
205938035ed6SNavdeep Parhar 	maxp = mtu_to_max_payload(sc, mtu, 0);
2060fe2ebb76SJohn Baldwin 	for_each_rxq(vi, i, rxq) {
206154e4ee71SNavdeep Parhar 		fl = &rxq->fl;
206254e4ee71SNavdeep Parhar 
206354e4ee71SNavdeep Parhar 		FL_LOCK(fl);
206438035ed6SNavdeep Parhar 		find_best_refill_source(sc, fl, maxp);
206554e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
206654e4ee71SNavdeep Parhar 	}
20676eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD
206838035ed6SNavdeep Parhar 	maxp = mtu_to_max_payload(sc, mtu, 1);
2069fe2ebb76SJohn Baldwin 	for_each_ofld_rxq(vi, i, ofld_rxq) {
20706eb3180fSNavdeep Parhar 		fl = &ofld_rxq->fl;
20716eb3180fSNavdeep Parhar 
20726eb3180fSNavdeep Parhar 		FL_LOCK(fl);
207338035ed6SNavdeep Parhar 		find_best_refill_source(sc, fl, maxp);
20746eb3180fSNavdeep Parhar 		FL_UNLOCK(fl);
20756eb3180fSNavdeep Parhar 	}
20766eb3180fSNavdeep Parhar #endif
207754e4ee71SNavdeep Parhar }
207854e4ee71SNavdeep Parhar 
20797951040fSNavdeep Parhar static inline int
20807951040fSNavdeep Parhar mbuf_nsegs(struct mbuf *m)
2081733b9277SNavdeep Parhar {
20820835ddc7SNavdeep Parhar 
20837951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
20847951040fSNavdeep Parhar 	KASSERT(m->m_pkthdr.l5hlen > 0,
20857951040fSNavdeep Parhar 	    ("%s: mbuf %p missing information on # of segments.", __func__, m));
20867951040fSNavdeep Parhar 
20877951040fSNavdeep Parhar 	return (m->m_pkthdr.l5hlen);
20887951040fSNavdeep Parhar }
20897951040fSNavdeep Parhar 
20907951040fSNavdeep Parhar static inline void
20917951040fSNavdeep Parhar set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs)
20927951040fSNavdeep Parhar {
20937951040fSNavdeep Parhar 
20947951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
20957951040fSNavdeep Parhar 	m->m_pkthdr.l5hlen = nsegs;
20967951040fSNavdeep Parhar }
20977951040fSNavdeep Parhar 
20987951040fSNavdeep Parhar static inline int
20997951040fSNavdeep Parhar mbuf_len16(struct mbuf *m)
21007951040fSNavdeep Parhar {
21017951040fSNavdeep Parhar 	int n;
21027951040fSNavdeep Parhar 
21037951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
21047951040fSNavdeep Parhar 	n = m->m_pkthdr.PH_loc.eight[0];
21057951040fSNavdeep Parhar 	MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);
21067951040fSNavdeep Parhar 
21077951040fSNavdeep Parhar 	return (n);
21087951040fSNavdeep Parhar }
21097951040fSNavdeep Parhar 
21107951040fSNavdeep Parhar static inline void
21117951040fSNavdeep Parhar set_mbuf_len16(struct mbuf *m, uint8_t len16)
21127951040fSNavdeep Parhar {
21137951040fSNavdeep Parhar 
21147951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
21157951040fSNavdeep Parhar 	m->m_pkthdr.PH_loc.eight[0] = len16;
21167951040fSNavdeep Parhar }
21177951040fSNavdeep Parhar 
21187951040fSNavdeep Parhar static inline int
21197951040fSNavdeep Parhar needs_tso(struct mbuf *m)
21207951040fSNavdeep Parhar {
21217951040fSNavdeep Parhar 
21227951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
21237951040fSNavdeep Parhar 
21247951040fSNavdeep Parhar 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
21257951040fSNavdeep Parhar 		KASSERT(m->m_pkthdr.tso_segsz > 0,
21267951040fSNavdeep Parhar 		    ("%s: TSO requested in mbuf %p but MSS not provided",
21277951040fSNavdeep Parhar 		    __func__, m));
21287951040fSNavdeep Parhar 		return (1);
21297951040fSNavdeep Parhar 	}
21307951040fSNavdeep Parhar 
21317951040fSNavdeep Parhar 	return (0);
21327951040fSNavdeep Parhar }
21337951040fSNavdeep Parhar 
21347951040fSNavdeep Parhar static inline int
21357951040fSNavdeep Parhar needs_l3_csum(struct mbuf *m)
21367951040fSNavdeep Parhar {
21377951040fSNavdeep Parhar 
21387951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
21397951040fSNavdeep Parhar 
21407951040fSNavdeep Parhar 	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
21417951040fSNavdeep Parhar 		return (1);
21427951040fSNavdeep Parhar 	return (0);
21437951040fSNavdeep Parhar }
21447951040fSNavdeep Parhar 
21457951040fSNavdeep Parhar static inline int
21467951040fSNavdeep Parhar needs_l4_csum(struct mbuf *m)
21477951040fSNavdeep Parhar {
21487951040fSNavdeep Parhar 
21497951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
21507951040fSNavdeep Parhar 
21517951040fSNavdeep Parhar 	if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
21527951040fSNavdeep Parhar 	    CSUM_TCP_IPV6 | CSUM_TSO))
21537951040fSNavdeep Parhar 		return (1);
21547951040fSNavdeep Parhar 	return (0);
21557951040fSNavdeep Parhar }
21567951040fSNavdeep Parhar 
21577951040fSNavdeep Parhar static inline int
21587951040fSNavdeep Parhar needs_vlan_insertion(struct mbuf *m)
21597951040fSNavdeep Parhar {
21607951040fSNavdeep Parhar 
21617951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
21627951040fSNavdeep Parhar 
21637951040fSNavdeep Parhar 	if (m->m_flags & M_VLANTAG) {
21647951040fSNavdeep Parhar 		KASSERT(m->m_pkthdr.ether_vtag != 0,
21657951040fSNavdeep Parhar 		    ("%s: HWVLAN requested in mbuf %p but tag not provided",
21667951040fSNavdeep Parhar 		    __func__, m));
21677951040fSNavdeep Parhar 		return (1);
21687951040fSNavdeep Parhar 	}
21697951040fSNavdeep Parhar 	return (0);
21707951040fSNavdeep Parhar }
21717951040fSNavdeep Parhar 
21727951040fSNavdeep Parhar static void *
21737951040fSNavdeep Parhar m_advance(struct mbuf **pm, int *poffset, int len)
21747951040fSNavdeep Parhar {
21757951040fSNavdeep Parhar 	struct mbuf *m = *pm;
21767951040fSNavdeep Parhar 	int offset = *poffset;
21777951040fSNavdeep Parhar 	uintptr_t p = 0;
21787951040fSNavdeep Parhar 
21797951040fSNavdeep Parhar 	MPASS(len > 0);
21807951040fSNavdeep Parhar 
2181e06ab612SJohn Baldwin 	for (;;) {
21827951040fSNavdeep Parhar 		if (offset + len < m->m_len) {
21837951040fSNavdeep Parhar 			offset += len;
21847951040fSNavdeep Parhar 			p = mtod(m, uintptr_t) + offset;
21857951040fSNavdeep Parhar 			break;
21867951040fSNavdeep Parhar 		}
21877951040fSNavdeep Parhar 		len -= m->m_len - offset;
21887951040fSNavdeep Parhar 		m = m->m_next;
21897951040fSNavdeep Parhar 		offset = 0;
21907951040fSNavdeep Parhar 		MPASS(m != NULL);
21917951040fSNavdeep Parhar 	}
21927951040fSNavdeep Parhar 	*poffset = offset;
21937951040fSNavdeep Parhar 	*pm = m;
21947951040fSNavdeep Parhar 	return ((void *)p);
21957951040fSNavdeep Parhar }
21967951040fSNavdeep Parhar 
/*
 * Count the DMA (scatter/gather) segments needed to describe the chain's
 * data.  Can deal with empty mbufs in the chain that have m_len = 0, but the
 * chain must have at least one mbuf that's not empty.
 */
static inline int
count_mbuf_nsegs(struct mbuf *m)
{
	vm_paddr_t lastb, next;	/* phys addr of prev last byte / cur first */
	vm_offset_t va;
	int len, nsegs;

	MPASS(m != NULL);

	nsegs = 0;
	lastb = 0;
	for (; m; m = m->m_next) {

		len = m->m_len;
		if (__predict_false(len == 0))
			continue;	/* empty mbufs contribute nothing */
		va = mtod(m, vm_offset_t);
		next = pmap_kextract(va);
		nsegs += sglist_count(m->m_data, len);
		/*
		 * If this mbuf's data starts at the physical address right
		 * after the previous mbuf's last byte, the two coalesce into
		 * a single segment: undo the double count.
		 */
		if (lastb + 1 == next)
			nsegs--;
		lastb = pmap_kextract(va + len - 1);
	}

	MPASS(nsegs > 0);
	return (nsegs);
}
22287951040fSNavdeep Parhar 
/*
 * Analyze the mbuf to determine its tx needs.  The mbuf passed in may change:
 * a) caller can assume it's been freed if this function returns with an error.
 * b) it may get defragged up if the gather list is too long for the hardware.
 *
 * Records the segment count and WR length in the mbuf's pkthdr scratch
 * space, and (when header offsets are needed) fills in l2hlen/l3hlen/l4hlen.
 * Returns 0 on success or an errno (EINVAL/EFBIG) with *mp set to NULL.
 */
int
parse_pkt(struct adapter *sc, struct mbuf **mp)
{
	struct mbuf *m0 = *mp, *m;
	int rc, nsegs, defragged = 0, offset;
	struct ether_header *eh;
	void *l3hdr;
#if defined(INET) || defined(INET6)
	struct tcphdr *tcp;
#endif
	uint16_t eh_type;

	M_ASSERTPKTHDR(m0);
	if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) {
		rc = EINVAL;
fail:
		/* Common error exit: the mbuf is consumed on failure. */
		m_freem(m0);
		*mp = NULL;
		return (rc);
	}
restart:
	/*
	 * First count the number of gather list segments in the payload.
	 * Defrag the mbuf if nsegs exceeds the hardware limit.
	 */
	M_ASSERTPKTHDR(m0);
	MPASS(m0->m_pkthdr.len > 0);
	nsegs = count_mbuf_nsegs(m0);
	if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) {
		/* One defrag attempt only; give up if it doesn't help. */
		if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) {
			rc = EFBIG;
			goto fail;
		}
		*mp = m0 = m;	/* update caller's copy after defrag */
		goto restart;
	}

	/* Small but fragmented frame: pull it into one mbuf instead. */
	if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN)) {
		m0 = m_pullup(m0, m0->m_pkthdr.len);
		if (m0 == NULL) {
			/* Should have left well enough alone. */
			rc = EFBIG;
			goto fail;
		}
		*mp = m0;	/* update caller's copy after pullup */
		goto restart;
	}
	set_mbuf_nsegs(m0, nsegs);
	/* The VF driver uses a different (larger) tx WR format. */
	if (sc->flags & IS_VF)
		set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0)));
	else
		set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0)));

	/*
	 * Header parsing below is needed for TSO always, and for plain
	 * checksum offload only on the VF.
	 */
	if (!needs_tso(m0) &&
	    !(sc->flags & IS_VF && (needs_l3_csum(m0) || needs_l4_csum(m0))))
		return (0);

	m = m0;
	eh = mtod(m, struct ether_header *);
	eh_type = ntohs(eh->ether_type);
	if (eh_type == ETHERTYPE_VLAN) {
		/* Tagged frame: the real ethertype follows the VLAN header. */
		struct ether_vlan_header *evh = (void *)eh;

		eh_type = ntohs(evh->evl_proto);
		m0->m_pkthdr.l2hlen = sizeof(*evh);
	} else
		m0->m_pkthdr.l2hlen = sizeof(*eh);

	offset = 0;
	l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen);

	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6 = l3hdr;

		/* No support for TSO over v6 extension headers here. */
		MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP);

		m0->m_pkthdr.l3hlen = sizeof(*ip6);
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct ip *ip = l3hdr;

		m0->m_pkthdr.l3hlen = ip->ip_hl * 4;
		break;
	}
#endif
	default:
		panic("%s: ethertype 0x%04x unknown.  if_cxgbe must be compiled"
		    " with the same INET/INET6 options as the kernel.",
		    __func__, eh_type);
	}

#if defined(INET) || defined(INET6)
	if (needs_tso(m0)) {
		/* TSO needs the TCP header length too. */
		tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen);
		m0->m_pkthdr.l4hlen = tcp->th_off * 4;
	}
#endif
	MPASS(m0 == *mp);
	return (0);
}
23417951040fSNavdeep Parhar 
/*
 * Reserve space for a work request of 'len16' 16-byte units and return a
 * pointer the caller can build the WR into; commit_wrq_wr finishes the
 * operation using the state recorded in '*cookie'.  Fast path: the WR is
 * built in place in the hardware descriptor ring.  Slow path (ring busy or
 * full): a heap-allocated wrqe is handed back instead, flagged by
 * cookie->pidx == -1.  Returns NULL only if that allocation fails.
 */
void *
start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie)
{
	struct sge_eq *eq = &wrq->eq;
	struct adapter *sc = wrq->adapter;
	int ndesc, available;
	struct wrqe *wr;
	void *w;

	MPASS(len16 > 0);
	ndesc = howmany(len16, EQ_ESIZE / 16);
	MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC);

	EQ_LOCK(eq);

	/* Try to clear the backlog so in-place WRs stay in order. */
	if (!STAILQ_EMPTY(&wrq->wr_list))
		drain_wrq_wr_list(sc, wrq);

	if (!STAILQ_EMPTY(&wrq->wr_list)) {
slowpath:
		/* Backlog remains (or ring full): fall back to a wrqe. */
		EQ_UNLOCK(eq);
		wr = alloc_wrqe(len16 * 16, wrq);
		if (__predict_false(wr == NULL))
			return (NULL);
		cookie->pidx = -1;	/* marks the slow path for commit */
		cookie->ndesc = ndesc;
		return (&wr->wr);
	}

	/* Free descriptors in the ring, keeping one in reserve. */
	eq->cidx = read_hw_cidx(eq);
	if (eq->pidx == eq->cidx)
		available = eq->sidx - 1;
	else
		available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
	if (available < ndesc)
		goto slowpath;

	cookie->pidx = eq->pidx;
	cookie->ndesc = ndesc;
	TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link);

	w = &eq->desc[eq->pidx];
	IDXINCR(eq->pidx, ndesc, eq->sidx);
	if (__predict_false(cookie->pidx + ndesc > eq->sidx)) {
		/*
		 * The WR would wrap around the end of the ring.  Hand back
		 * the spill-over scratch buffer instead; commit_wrq_wr will
		 * copy it into the ring in two pieces.
		 */
		w = &wrq->ss[0];
		wrq->ss_pidx = cookie->pidx;
		wrq->ss_len = len16 * 16;
	}

	EQ_UNLOCK(eq);

	return (w);
}
23957951040fSNavdeep Parhar 
23967951040fSNavdeep Parhar void
23977951040fSNavdeep Parhar commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie)
23987951040fSNavdeep Parhar {
23997951040fSNavdeep Parhar 	struct sge_eq *eq = &wrq->eq;
24007951040fSNavdeep Parhar 	struct adapter *sc = wrq->adapter;
24017951040fSNavdeep Parhar 	int ndesc, pidx;
24027951040fSNavdeep Parhar 	struct wrq_cookie *prev, *next;
24037951040fSNavdeep Parhar 
24047951040fSNavdeep Parhar 	if (cookie->pidx == -1) {
24057951040fSNavdeep Parhar 		struct wrqe *wr = __containerof(w, struct wrqe, wr);
24067951040fSNavdeep Parhar 
24077951040fSNavdeep Parhar 		t4_wrq_tx(sc, wr);
24087951040fSNavdeep Parhar 		return;
24097951040fSNavdeep Parhar 	}
24107951040fSNavdeep Parhar 
24117951040fSNavdeep Parhar 	ndesc = cookie->ndesc;	/* Can be more than SGE_MAX_WR_NDESC here. */
24127951040fSNavdeep Parhar 	pidx = cookie->pidx;
24137951040fSNavdeep Parhar 	MPASS(pidx >= 0 && pidx < eq->sidx);
24147951040fSNavdeep Parhar 	if (__predict_false(w == &wrq->ss[0])) {
24157951040fSNavdeep Parhar 		int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE;
24167951040fSNavdeep Parhar 
24177951040fSNavdeep Parhar 		MPASS(wrq->ss_len > n);	/* WR had better wrap around. */
24187951040fSNavdeep Parhar 		bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n);
24197951040fSNavdeep Parhar 		bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n);
24207951040fSNavdeep Parhar 		wrq->tx_wrs_ss++;
24217951040fSNavdeep Parhar 	} else
24227951040fSNavdeep Parhar 		wrq->tx_wrs_direct++;
24237951040fSNavdeep Parhar 
24247951040fSNavdeep Parhar 	EQ_LOCK(eq);
24257951040fSNavdeep Parhar 	prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link);
24267951040fSNavdeep Parhar 	next = TAILQ_NEXT(cookie, link);
24277951040fSNavdeep Parhar 	if (prev == NULL) {
24287951040fSNavdeep Parhar 		MPASS(pidx == eq->dbidx);
24297951040fSNavdeep Parhar 		if (next == NULL || ndesc >= 16)
24307951040fSNavdeep Parhar 			ring_eq_db(wrq->adapter, eq, ndesc);
24317951040fSNavdeep Parhar 		else {
24327951040fSNavdeep Parhar 			MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc);
24337951040fSNavdeep Parhar 			next->pidx = pidx;
24347951040fSNavdeep Parhar 			next->ndesc += ndesc;
24357951040fSNavdeep Parhar 		}
24367951040fSNavdeep Parhar 	} else {
24377951040fSNavdeep Parhar 		MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc);
24387951040fSNavdeep Parhar 		prev->ndesc += ndesc;
24397951040fSNavdeep Parhar 	}
24407951040fSNavdeep Parhar 	TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link);
24417951040fSNavdeep Parhar 
24427951040fSNavdeep Parhar 	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
24437951040fSNavdeep Parhar 		drain_wrq_wr_list(sc, wrq);
24447951040fSNavdeep Parhar 
24457951040fSNavdeep Parhar #ifdef INVARIANTS
24467951040fSNavdeep Parhar 	if (TAILQ_EMPTY(&wrq->incomplete_wrs)) {
24477951040fSNavdeep Parhar 		/* Doorbell must have caught up to the pidx. */
24487951040fSNavdeep Parhar 		MPASS(wrq->eq.pidx == wrq->eq.dbidx);
24497951040fSNavdeep Parhar 	}
24507951040fSNavdeep Parhar #endif
24517951040fSNavdeep Parhar 	EQ_UNLOCK(eq);
24527951040fSNavdeep Parhar }
24537951040fSNavdeep Parhar 
24547951040fSNavdeep Parhar static u_int
24557951040fSNavdeep Parhar can_resume_eth_tx(struct mp_ring *r)
24567951040fSNavdeep Parhar {
24577951040fSNavdeep Parhar 	struct sge_eq *eq = r->cookie;
24587951040fSNavdeep Parhar 
24597951040fSNavdeep Parhar 	return (total_available_tx_desc(eq) > eq->sidx / 8);
24607951040fSNavdeep Parhar }
24617951040fSNavdeep Parhar 
24627951040fSNavdeep Parhar static inline int
24637951040fSNavdeep Parhar cannot_use_txpkts(struct mbuf *m)
24647951040fSNavdeep Parhar {
24657951040fSNavdeep Parhar 	/* maybe put a GL limit too, to avoid silliness? */
24667951040fSNavdeep Parhar 
24677951040fSNavdeep Parhar 	return (needs_tso(m));
24687951040fSNavdeep Parhar }
24697951040fSNavdeep Parhar 
24701404daa7SNavdeep Parhar static inline int
24711404daa7SNavdeep Parhar discard_tx(struct sge_eq *eq)
24721404daa7SNavdeep Parhar {
24731404daa7SNavdeep Parhar 
24741404daa7SNavdeep Parhar 	return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED);
24751404daa7SNavdeep Parhar }
24761404daa7SNavdeep Parhar 
24777951040fSNavdeep Parhar /*
24787951040fSNavdeep Parhar  * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
24797951040fSNavdeep Parhar  * be consumed.  Return the actual number consumed.  0 indicates a stall.
24807951040fSNavdeep Parhar  */
24817951040fSNavdeep Parhar static u_int
24827951040fSNavdeep Parhar eth_tx(struct mp_ring *r, u_int cidx, u_int pidx)
24837951040fSNavdeep Parhar {
24847951040fSNavdeep Parhar 	struct sge_txq *txq = r->cookie;
24857951040fSNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
24867951040fSNavdeep Parhar 	struct ifnet *ifp = txq->ifp;
2487fe2ebb76SJohn Baldwin 	struct vi_info *vi = ifp->if_softc;
2488fe2ebb76SJohn Baldwin 	struct port_info *pi = vi->pi;
24897951040fSNavdeep Parhar 	struct adapter *sc = pi->adapter;
24907951040fSNavdeep Parhar 	u_int total, remaining;		/* # of packets */
24917951040fSNavdeep Parhar 	u_int available, dbdiff;	/* # of hardware descriptors */
24927951040fSNavdeep Parhar 	u_int n, next_cidx;
24937951040fSNavdeep Parhar 	struct mbuf *m0, *tail;
24947951040fSNavdeep Parhar 	struct txpkts txp;
24957951040fSNavdeep Parhar 	struct fw_eth_tx_pkts_wr *wr;	/* any fw WR struct will do */
24967951040fSNavdeep Parhar 
24977951040fSNavdeep Parhar 	remaining = IDXDIFF(pidx, cidx, r->size);
24987951040fSNavdeep Parhar 	MPASS(remaining > 0);	/* Must not be called without work to do. */
24997951040fSNavdeep Parhar 	total = 0;
25007951040fSNavdeep Parhar 
25017951040fSNavdeep Parhar 	TXQ_LOCK(txq);
25021404daa7SNavdeep Parhar 	if (__predict_false(discard_tx(eq))) {
25037951040fSNavdeep Parhar 		while (cidx != pidx) {
25047951040fSNavdeep Parhar 			m0 = r->items[cidx];
25057951040fSNavdeep Parhar 			m_freem(m0);
25067951040fSNavdeep Parhar 			if (++cidx == r->size)
25077951040fSNavdeep Parhar 				cidx = 0;
25087951040fSNavdeep Parhar 		}
25097951040fSNavdeep Parhar 		reclaim_tx_descs(txq, 2048);
25107951040fSNavdeep Parhar 		total = remaining;
25117951040fSNavdeep Parhar 		goto done;
25127951040fSNavdeep Parhar 	}
25137951040fSNavdeep Parhar 
25147951040fSNavdeep Parhar 	/* How many hardware descriptors do we have readily available. */
25157951040fSNavdeep Parhar 	if (eq->pidx == eq->cidx)
25167951040fSNavdeep Parhar 		available = eq->sidx - 1;
25177951040fSNavdeep Parhar 	else
25187951040fSNavdeep Parhar 		available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
25197951040fSNavdeep Parhar 	dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx);
25207951040fSNavdeep Parhar 
25217951040fSNavdeep Parhar 	while (remaining > 0) {
25227951040fSNavdeep Parhar 
25237951040fSNavdeep Parhar 		m0 = r->items[cidx];
25247951040fSNavdeep Parhar 		M_ASSERTPKTHDR(m0);
25257951040fSNavdeep Parhar 		MPASS(m0->m_nextpkt == NULL);
25267951040fSNavdeep Parhar 
25277951040fSNavdeep Parhar 		if (available < SGE_MAX_WR_NDESC) {
25287951040fSNavdeep Parhar 			available += reclaim_tx_descs(txq, 64);
25297951040fSNavdeep Parhar 			if (available < howmany(mbuf_len16(m0), EQ_ESIZE / 16))
25307951040fSNavdeep Parhar 				break;	/* out of descriptors */
25317951040fSNavdeep Parhar 		}
25327951040fSNavdeep Parhar 
25337951040fSNavdeep Parhar 		next_cidx = cidx + 1;
25347951040fSNavdeep Parhar 		if (__predict_false(next_cidx == r->size))
25357951040fSNavdeep Parhar 			next_cidx = 0;
25367951040fSNavdeep Parhar 
25377951040fSNavdeep Parhar 		wr = (void *)&eq->desc[eq->pidx];
25386af45170SJohn Baldwin 		if (sc->flags & IS_VF) {
25396af45170SJohn Baldwin 			total++;
25406af45170SJohn Baldwin 			remaining--;
25416af45170SJohn Baldwin 			ETHER_BPF_MTAP(ifp, m0);
2542472a6004SNavdeep Parhar 			n = write_txpkt_vm_wr(sc, txq, (void *)wr, m0,
2543472a6004SNavdeep Parhar 			    available);
25446af45170SJohn Baldwin 		} else if (remaining > 1 &&
25457951040fSNavdeep Parhar 		    try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) {
25467951040fSNavdeep Parhar 
25477951040fSNavdeep Parhar 			/* pkts at cidx, next_cidx should both be in txp. */
25487951040fSNavdeep Parhar 			MPASS(txp.npkt == 2);
25497951040fSNavdeep Parhar 			tail = r->items[next_cidx];
25507951040fSNavdeep Parhar 			MPASS(tail->m_nextpkt == NULL);
25517951040fSNavdeep Parhar 			ETHER_BPF_MTAP(ifp, m0);
25527951040fSNavdeep Parhar 			ETHER_BPF_MTAP(ifp, tail);
25537951040fSNavdeep Parhar 			m0->m_nextpkt = tail;
25547951040fSNavdeep Parhar 
25557951040fSNavdeep Parhar 			if (__predict_false(++next_cidx == r->size))
25567951040fSNavdeep Parhar 				next_cidx = 0;
25577951040fSNavdeep Parhar 
25587951040fSNavdeep Parhar 			while (next_cidx != pidx) {
25597951040fSNavdeep Parhar 				if (add_to_txpkts(r->items[next_cidx], &txp,
25607951040fSNavdeep Parhar 				    available) != 0)
25617951040fSNavdeep Parhar 					break;
25627951040fSNavdeep Parhar 				tail->m_nextpkt = r->items[next_cidx];
25637951040fSNavdeep Parhar 				tail = tail->m_nextpkt;
25647951040fSNavdeep Parhar 				ETHER_BPF_MTAP(ifp, tail);
25657951040fSNavdeep Parhar 				if (__predict_false(++next_cidx == r->size))
25667951040fSNavdeep Parhar 					next_cidx = 0;
25677951040fSNavdeep Parhar 			}
25687951040fSNavdeep Parhar 
25697951040fSNavdeep Parhar 			n = write_txpkts_wr(txq, wr, m0, &txp, available);
25707951040fSNavdeep Parhar 			total += txp.npkt;
25717951040fSNavdeep Parhar 			remaining -= txp.npkt;
25727951040fSNavdeep Parhar 		} else {
25737951040fSNavdeep Parhar 			total++;
25747951040fSNavdeep Parhar 			remaining--;
25757951040fSNavdeep Parhar 			ETHER_BPF_MTAP(ifp, m0);
257678552b23SNavdeep Parhar 			n = write_txpkt_wr(txq, (void *)wr, m0, available);
25777951040fSNavdeep Parhar 		}
25787951040fSNavdeep Parhar 		MPASS(n >= 1 && n <= available && n <= SGE_MAX_WR_NDESC);
25797951040fSNavdeep Parhar 
25807951040fSNavdeep Parhar 		available -= n;
25817951040fSNavdeep Parhar 		dbdiff += n;
25827951040fSNavdeep Parhar 		IDXINCR(eq->pidx, n, eq->sidx);
25837951040fSNavdeep Parhar 
25847951040fSNavdeep Parhar 		if (total_available_tx_desc(eq) < eq->sidx / 4 &&
25857951040fSNavdeep Parhar 		    atomic_cmpset_int(&eq->equiq, 0, 1)) {
25867951040fSNavdeep Parhar 			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
25877951040fSNavdeep Parhar 			    F_FW_WR_EQUEQ);
25887951040fSNavdeep Parhar 			eq->equeqidx = eq->pidx;
25897951040fSNavdeep Parhar 		} else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) {
25907951040fSNavdeep Parhar 			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
25917951040fSNavdeep Parhar 			eq->equeqidx = eq->pidx;
25927951040fSNavdeep Parhar 		}
25937951040fSNavdeep Parhar 
25947951040fSNavdeep Parhar 		if (dbdiff >= 16 && remaining >= 4) {
25957951040fSNavdeep Parhar 			ring_eq_db(sc, eq, dbdiff);
25967951040fSNavdeep Parhar 			available += reclaim_tx_descs(txq, 4 * dbdiff);
25977951040fSNavdeep Parhar 			dbdiff = 0;
25987951040fSNavdeep Parhar 		}
25997951040fSNavdeep Parhar 
26007951040fSNavdeep Parhar 		cidx = next_cidx;
26017951040fSNavdeep Parhar 	}
26027951040fSNavdeep Parhar 	if (dbdiff != 0) {
26037951040fSNavdeep Parhar 		ring_eq_db(sc, eq, dbdiff);
26047951040fSNavdeep Parhar 		reclaim_tx_descs(txq, 32);
26057951040fSNavdeep Parhar 	}
26067951040fSNavdeep Parhar done:
26077951040fSNavdeep Parhar 	TXQ_UNLOCK(txq);
26087951040fSNavdeep Parhar 
26097951040fSNavdeep Parhar 	return (total);
2610733b9277SNavdeep Parhar }
2611733b9277SNavdeep Parhar 
261254e4ee71SNavdeep Parhar static inline void
261354e4ee71SNavdeep Parhar init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
2614b2daa9a9SNavdeep Parhar     int qsize)
261554e4ee71SNavdeep Parhar {
2616b2daa9a9SNavdeep Parhar 
261754e4ee71SNavdeep Parhar 	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
261854e4ee71SNavdeep Parhar 	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
261954e4ee71SNavdeep Parhar 	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
262054e4ee71SNavdeep Parhar 	    ("%s: bad pktc_idx %d", __func__, pktc_idx));
262154e4ee71SNavdeep Parhar 
262254e4ee71SNavdeep Parhar 	iq->flags = 0;
262354e4ee71SNavdeep Parhar 	iq->adapter = sc;
26247a32954cSNavdeep Parhar 	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
26257a32954cSNavdeep Parhar 	iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
26267a32954cSNavdeep Parhar 	if (pktc_idx >= 0) {
26277a32954cSNavdeep Parhar 		iq->intr_params |= F_QINTR_CNT_EN;
262854e4ee71SNavdeep Parhar 		iq->intr_pktc_idx = pktc_idx;
26297a32954cSNavdeep Parhar 	}
2630d14b0ac1SNavdeep Parhar 	iq->qsize = roundup2(qsize, 16);	/* See FW_IQ_CMD/iqsize */
263190e7434aSNavdeep Parhar 	iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE;
263254e4ee71SNavdeep Parhar }
263354e4ee71SNavdeep Parhar 
263454e4ee71SNavdeep Parhar static inline void
2635e3207e19SNavdeep Parhar init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name)
263654e4ee71SNavdeep Parhar {
26371458bff9SNavdeep Parhar 
263854e4ee71SNavdeep Parhar 	fl->qsize = qsize;
263990e7434aSNavdeep Parhar 	fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
264054e4ee71SNavdeep Parhar 	strlcpy(fl->lockname, name, sizeof(fl->lockname));
2641e3207e19SNavdeep Parhar 	if (sc->flags & BUF_PACKING_OK &&
2642e3207e19SNavdeep Parhar 	    ((!is_t4(sc) && buffer_packing) ||	/* T5+: enabled unless 0 */
2643e3207e19SNavdeep Parhar 	    (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */
26441458bff9SNavdeep Parhar 		fl->flags |= FL_BUF_PACKING;
264538035ed6SNavdeep Parhar 	find_best_refill_source(sc, fl, maxp);
264638035ed6SNavdeep Parhar 	find_safe_refill_source(sc, fl);
264754e4ee71SNavdeep Parhar }
264854e4ee71SNavdeep Parhar 
264954e4ee71SNavdeep Parhar static inline void
265090e7434aSNavdeep Parhar init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
265190e7434aSNavdeep Parhar     uint8_t tx_chan, uint16_t iqid, char *name)
265254e4ee71SNavdeep Parhar {
2653733b9277SNavdeep Parhar 	KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype));
2654733b9277SNavdeep Parhar 
2655733b9277SNavdeep Parhar 	eq->flags = eqtype & EQ_TYPEMASK;
2656733b9277SNavdeep Parhar 	eq->tx_chan = tx_chan;
2657733b9277SNavdeep Parhar 	eq->iqid = iqid;
265890e7434aSNavdeep Parhar 	eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
2659f7dfe243SNavdeep Parhar 	strlcpy(eq->lockname, name, sizeof(eq->lockname));
266054e4ee71SNavdeep Parhar }
266154e4ee71SNavdeep Parhar 
266254e4ee71SNavdeep Parhar static int
266354e4ee71SNavdeep Parhar alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
266454e4ee71SNavdeep Parhar     bus_dmamap_t *map, bus_addr_t *pa, void **va)
266554e4ee71SNavdeep Parhar {
266654e4ee71SNavdeep Parhar 	int rc;
266754e4ee71SNavdeep Parhar 
266854e4ee71SNavdeep Parhar 	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
266954e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
267054e4ee71SNavdeep Parhar 	if (rc != 0) {
267154e4ee71SNavdeep Parhar 		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
267254e4ee71SNavdeep Parhar 		goto done;
267354e4ee71SNavdeep Parhar 	}
267454e4ee71SNavdeep Parhar 
267554e4ee71SNavdeep Parhar 	rc = bus_dmamem_alloc(*tag, va,
267654e4ee71SNavdeep Parhar 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
267754e4ee71SNavdeep Parhar 	if (rc != 0) {
267854e4ee71SNavdeep Parhar 		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
267954e4ee71SNavdeep Parhar 		goto done;
268054e4ee71SNavdeep Parhar 	}
268154e4ee71SNavdeep Parhar 
268254e4ee71SNavdeep Parhar 	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
268354e4ee71SNavdeep Parhar 	if (rc != 0) {
268454e4ee71SNavdeep Parhar 		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
268554e4ee71SNavdeep Parhar 		goto done;
268654e4ee71SNavdeep Parhar 	}
268754e4ee71SNavdeep Parhar done:
268854e4ee71SNavdeep Parhar 	if (rc)
268954e4ee71SNavdeep Parhar 		free_ring(sc, *tag, *map, *pa, *va);
269054e4ee71SNavdeep Parhar 
269154e4ee71SNavdeep Parhar 	return (rc);
269254e4ee71SNavdeep Parhar }
269354e4ee71SNavdeep Parhar 
269454e4ee71SNavdeep Parhar static int
269554e4ee71SNavdeep Parhar free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
269654e4ee71SNavdeep Parhar     bus_addr_t pa, void *va)
269754e4ee71SNavdeep Parhar {
269854e4ee71SNavdeep Parhar 	if (pa)
269954e4ee71SNavdeep Parhar 		bus_dmamap_unload(tag, map);
270054e4ee71SNavdeep Parhar 	if (va)
270154e4ee71SNavdeep Parhar 		bus_dmamem_free(tag, va, map);
270254e4ee71SNavdeep Parhar 	if (tag)
270354e4ee71SNavdeep Parhar 		bus_dma_tag_destroy(tag);
270454e4ee71SNavdeep Parhar 
270554e4ee71SNavdeep Parhar 	return (0);
270654e4ee71SNavdeep Parhar }
270754e4ee71SNavdeep Parhar 
270854e4ee71SNavdeep Parhar /*
270954e4ee71SNavdeep Parhar  * Allocates the ring for an ingress queue and an optional freelist.  If the
271054e4ee71SNavdeep Parhar  * freelist is specified it will be allocated and then associated with the
271154e4ee71SNavdeep Parhar  * ingress queue.
271254e4ee71SNavdeep Parhar  *
271354e4ee71SNavdeep Parhar  * Returns errno on failure.  Resources allocated up to that point may still be
271454e4ee71SNavdeep Parhar  * allocated.  Caller is responsible for cleanup in case this function fails.
271554e4ee71SNavdeep Parhar  *
2716733b9277SNavdeep Parhar  * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then
271754e4ee71SNavdeep Parhar  * the intr_idx specifies the vector, starting from 0.  Otherwise it specifies
2718733b9277SNavdeep Parhar  * the abs_id of the ingress queue to which its interrupts should be forwarded.
271954e4ee71SNavdeep Parhar  */
272054e4ee71SNavdeep Parhar static int
2721fe2ebb76SJohn Baldwin alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
2722bc14b14dSNavdeep Parhar     int intr_idx, int cong)
272354e4ee71SNavdeep Parhar {
272454e4ee71SNavdeep Parhar 	int rc, i, cntxt_id;
272554e4ee71SNavdeep Parhar 	size_t len;
272654e4ee71SNavdeep Parhar 	struct fw_iq_cmd c;
2727fe2ebb76SJohn Baldwin 	struct port_info *pi = vi->pi;
272854e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
272990e7434aSNavdeep Parhar 	struct sge_params *sp = &sc->params.sge;
273054e4ee71SNavdeep Parhar 	__be32 v = 0;
273154e4ee71SNavdeep Parhar 
2732b2daa9a9SNavdeep Parhar 	len = iq->qsize * IQ_ESIZE;
273354e4ee71SNavdeep Parhar 	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
273454e4ee71SNavdeep Parhar 	    (void **)&iq->desc);
273554e4ee71SNavdeep Parhar 	if (rc != 0)
273654e4ee71SNavdeep Parhar 		return (rc);
273754e4ee71SNavdeep Parhar 
273854e4ee71SNavdeep Parhar 	bzero(&c, sizeof(c));
273954e4ee71SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
274054e4ee71SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
274154e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_VFN(0));
274254e4ee71SNavdeep Parhar 
274354e4ee71SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
274454e4ee71SNavdeep Parhar 	    FW_LEN16(c));
274554e4ee71SNavdeep Parhar 
274654e4ee71SNavdeep Parhar 	/* Special handling for firmware event queue */
274754e4ee71SNavdeep Parhar 	if (iq == &sc->sge.fwq)
274854e4ee71SNavdeep Parhar 		v |= F_FW_IQ_CMD_IQASYNCH;
274954e4ee71SNavdeep Parhar 
2750733b9277SNavdeep Parhar 	if (iq->flags & IQ_INTR) {
275154e4ee71SNavdeep Parhar 		KASSERT(intr_idx < sc->intr_count,
275254e4ee71SNavdeep Parhar 		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
2753733b9277SNavdeep Parhar 	} else
2754733b9277SNavdeep Parhar 		v |= F_FW_IQ_CMD_IQANDST;
275554e4ee71SNavdeep Parhar 	v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
275654e4ee71SNavdeep Parhar 
275754e4ee71SNavdeep Parhar 	c.type_to_iqandstindex = htobe32(v |
275854e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
2759fe2ebb76SJohn Baldwin 	    V_FW_IQ_CMD_VIID(vi->viid) |
276054e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
276154e4ee71SNavdeep Parhar 	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
276254e4ee71SNavdeep Parhar 	    F_FW_IQ_CMD_IQGTSMODE |
276354e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
2764b2daa9a9SNavdeep Parhar 	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
276554e4ee71SNavdeep Parhar 	c.iqsize = htobe16(iq->qsize);
276654e4ee71SNavdeep Parhar 	c.iqaddr = htobe64(iq->ba);
2767bc14b14dSNavdeep Parhar 	if (cong >= 0)
2768bc14b14dSNavdeep Parhar 		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
276954e4ee71SNavdeep Parhar 
277054e4ee71SNavdeep Parhar 	if (fl) {
277154e4ee71SNavdeep Parhar 		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
277254e4ee71SNavdeep Parhar 
2773b2daa9a9SNavdeep Parhar 		len = fl->qsize * EQ_ESIZE;
277454e4ee71SNavdeep Parhar 		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
277554e4ee71SNavdeep Parhar 		    &fl->ba, (void **)&fl->desc);
277654e4ee71SNavdeep Parhar 		if (rc)
277754e4ee71SNavdeep Parhar 			return (rc);
277854e4ee71SNavdeep Parhar 
277954e4ee71SNavdeep Parhar 		/* Allocate space for one software descriptor per buffer. */
278054e4ee71SNavdeep Parhar 		rc = alloc_fl_sdesc(fl);
278154e4ee71SNavdeep Parhar 		if (rc != 0) {
278254e4ee71SNavdeep Parhar 			device_printf(sc->dev,
278354e4ee71SNavdeep Parhar 			    "failed to setup fl software descriptors: %d\n",
278454e4ee71SNavdeep Parhar 			    rc);
278554e4ee71SNavdeep Parhar 			return (rc);
278654e4ee71SNavdeep Parhar 		}
27874d6db4e0SNavdeep Parhar 
27884d6db4e0SNavdeep Parhar 		if (fl->flags & FL_BUF_PACKING) {
278990e7434aSNavdeep Parhar 			fl->lowat = roundup2(sp->fl_starve_threshold2, 8);
279090e7434aSNavdeep Parhar 			fl->buf_boundary = sp->pack_boundary;
27914d6db4e0SNavdeep Parhar 		} else {
279290e7434aSNavdeep Parhar 			fl->lowat = roundup2(sp->fl_starve_threshold, 8);
2793e3207e19SNavdeep Parhar 			fl->buf_boundary = 16;
27944d6db4e0SNavdeep Parhar 		}
279590e7434aSNavdeep Parhar 		if (fl_pad && fl->buf_boundary < sp->pad_boundary)
279690e7434aSNavdeep Parhar 			fl->buf_boundary = sp->pad_boundary;
279754e4ee71SNavdeep Parhar 
2798214c3582SNavdeep Parhar 		c.iqns_to_fl0congen |=
2799bc14b14dSNavdeep Parhar 		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
2800bc14b14dSNavdeep Parhar 			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
28011458bff9SNavdeep Parhar 			(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
28021458bff9SNavdeep Parhar 			(fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN :
28031458bff9SNavdeep Parhar 			    0));
2804bc14b14dSNavdeep Parhar 		if (cong >= 0) {
2805bc14b14dSNavdeep Parhar 			c.iqns_to_fl0congen |=
2806bc14b14dSNavdeep Parhar 				htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
2807bc14b14dSNavdeep Parhar 				    F_FW_IQ_CMD_FL0CONGCIF |
2808bc14b14dSNavdeep Parhar 				    F_FW_IQ_CMD_FL0CONGEN);
2809bc14b14dSNavdeep Parhar 		}
281054e4ee71SNavdeep Parhar 		c.fl0dcaen_to_fl0cidxfthresh =
2811ed7e5640SNavdeep Parhar 		    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
2812ed7e5640SNavdeep Parhar 			X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) |
2813ed7e5640SNavdeep Parhar 			V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
2814ed7e5640SNavdeep Parhar 			X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
281554e4ee71SNavdeep Parhar 		c.fl0size = htobe16(fl->qsize);
281654e4ee71SNavdeep Parhar 		c.fl0addr = htobe64(fl->ba);
281754e4ee71SNavdeep Parhar 	}
281854e4ee71SNavdeep Parhar 
281954e4ee71SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
282054e4ee71SNavdeep Parhar 	if (rc != 0) {
282154e4ee71SNavdeep Parhar 		device_printf(sc->dev,
282254e4ee71SNavdeep Parhar 		    "failed to create ingress queue: %d\n", rc);
282354e4ee71SNavdeep Parhar 		return (rc);
282454e4ee71SNavdeep Parhar 	}
282554e4ee71SNavdeep Parhar 
282654e4ee71SNavdeep Parhar 	iq->cidx = 0;
2827b2daa9a9SNavdeep Parhar 	iq->gen = F_RSPD_GEN;
282854e4ee71SNavdeep Parhar 	iq->intr_next = iq->intr_params;
282954e4ee71SNavdeep Parhar 	iq->cntxt_id = be16toh(c.iqid);
283054e4ee71SNavdeep Parhar 	iq->abs_id = be16toh(c.physiqid);
2831733b9277SNavdeep Parhar 	iq->flags |= IQ_ALLOCATED;
283254e4ee71SNavdeep Parhar 
283354e4ee71SNavdeep Parhar 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
2834733b9277SNavdeep Parhar 	if (cntxt_id >= sc->sge.niq) {
2835733b9277SNavdeep Parhar 		panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
2836733b9277SNavdeep Parhar 		    cntxt_id, sc->sge.niq - 1);
2837733b9277SNavdeep Parhar 	}
283854e4ee71SNavdeep Parhar 	sc->sge.iqmap[cntxt_id] = iq;
283954e4ee71SNavdeep Parhar 
284054e4ee71SNavdeep Parhar 	if (fl) {
28414d6db4e0SNavdeep Parhar 		u_int qid;
28424d6db4e0SNavdeep Parhar 
28434d6db4e0SNavdeep Parhar 		iq->flags |= IQ_HAS_FL;
284454e4ee71SNavdeep Parhar 		fl->cntxt_id = be16toh(c.fl0id);
284554e4ee71SNavdeep Parhar 		fl->pidx = fl->cidx = 0;
284654e4ee71SNavdeep Parhar 
28479f1f7ec9SNavdeep Parhar 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
2848733b9277SNavdeep Parhar 		if (cntxt_id >= sc->sge.neq) {
2849733b9277SNavdeep Parhar 			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
2850733b9277SNavdeep Parhar 			    __func__, cntxt_id, sc->sge.neq - 1);
2851733b9277SNavdeep Parhar 		}
285254e4ee71SNavdeep Parhar 		sc->sge.eqmap[cntxt_id] = (void *)fl;
285354e4ee71SNavdeep Parhar 
28544d6db4e0SNavdeep Parhar 		qid = fl->cntxt_id;
28554d6db4e0SNavdeep Parhar 		if (isset(&sc->doorbells, DOORBELL_UDB)) {
285690e7434aSNavdeep Parhar 			uint32_t s_qpp = sc->params.sge.eq_s_qpp;
28574d6db4e0SNavdeep Parhar 			uint32_t mask = (1 << s_qpp) - 1;
28584d6db4e0SNavdeep Parhar 			volatile uint8_t *udb;
28594d6db4e0SNavdeep Parhar 
28604d6db4e0SNavdeep Parhar 			udb = sc->udbs_base + UDBS_DB_OFFSET;
28614d6db4e0SNavdeep Parhar 			udb += (qid >> s_qpp) << PAGE_SHIFT;
28624d6db4e0SNavdeep Parhar 			qid &= mask;
28634d6db4e0SNavdeep Parhar 			if (qid < PAGE_SIZE / UDBS_SEG_SIZE) {
28644d6db4e0SNavdeep Parhar 				udb += qid << UDBS_SEG_SHIFT;
28654d6db4e0SNavdeep Parhar 				qid = 0;
28664d6db4e0SNavdeep Parhar 			}
28674d6db4e0SNavdeep Parhar 			fl->udb = (volatile void *)udb;
28684d6db4e0SNavdeep Parhar 		}
2869d1205d09SNavdeep Parhar 		fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db;
28704d6db4e0SNavdeep Parhar 
287154e4ee71SNavdeep Parhar 		FL_LOCK(fl);
2872733b9277SNavdeep Parhar 		/* Enough to make sure the SGE doesn't think it's starved */
2873733b9277SNavdeep Parhar 		refill_fl(sc, fl, fl->lowat);
287454e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
287554e4ee71SNavdeep Parhar 	}
287654e4ee71SNavdeep Parhar 
28778c0ca00bSNavdeep Parhar 	if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) {
2878ba41ec48SNavdeep Parhar 		uint32_t param, val;
2879ba41ec48SNavdeep Parhar 
2880ba41ec48SNavdeep Parhar 		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2881ba41ec48SNavdeep Parhar 		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
2882ba41ec48SNavdeep Parhar 		    V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
288373cd9220SNavdeep Parhar 		if (cong == 0)
288473cd9220SNavdeep Parhar 			val = 1 << 19;
288573cd9220SNavdeep Parhar 		else {
288673cd9220SNavdeep Parhar 			val = 2 << 19;
288773cd9220SNavdeep Parhar 			for (i = 0; i < 4; i++) {
288873cd9220SNavdeep Parhar 				if (cong & (1 << i))
288973cd9220SNavdeep Parhar 					val |= 1 << (i << 2);
289073cd9220SNavdeep Parhar 			}
289173cd9220SNavdeep Parhar 		}
289273cd9220SNavdeep Parhar 
2893ba41ec48SNavdeep Parhar 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2894ba41ec48SNavdeep Parhar 		if (rc != 0) {
2895ba41ec48SNavdeep Parhar 			/* report error but carry on */
2896ba41ec48SNavdeep Parhar 			device_printf(sc->dev,
2897ba41ec48SNavdeep Parhar 			    "failed to set congestion manager context for "
2898ba41ec48SNavdeep Parhar 			    "ingress queue %d: %d\n", iq->cntxt_id, rc);
2899ba41ec48SNavdeep Parhar 		}
2900ba41ec48SNavdeep Parhar 	}
2901ba41ec48SNavdeep Parhar 
290254e4ee71SNavdeep Parhar 	/* Enable IQ interrupts */
2903733b9277SNavdeep Parhar 	atomic_store_rel_int(&iq->state, IQS_IDLE);
2904315048f2SJohn Baldwin 	t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
290554e4ee71SNavdeep Parhar 	    V_INGRESSQID(iq->cntxt_id));
290654e4ee71SNavdeep Parhar 
290754e4ee71SNavdeep Parhar 	return (0);
290854e4ee71SNavdeep Parhar }
290954e4ee71SNavdeep Parhar 
291054e4ee71SNavdeep Parhar static int
2911fe2ebb76SJohn Baldwin free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
291254e4ee71SNavdeep Parhar {
291338035ed6SNavdeep Parhar 	int rc;
291454e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
291554e4ee71SNavdeep Parhar 	device_t dev;
291654e4ee71SNavdeep Parhar 
291754e4ee71SNavdeep Parhar 	if (sc == NULL)
291854e4ee71SNavdeep Parhar 		return (0);	/* nothing to do */
291954e4ee71SNavdeep Parhar 
2920fe2ebb76SJohn Baldwin 	dev = vi ? vi->dev : sc->dev;
292154e4ee71SNavdeep Parhar 
292254e4ee71SNavdeep Parhar 	if (iq->flags & IQ_ALLOCATED) {
292354e4ee71SNavdeep Parhar 		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
292454e4ee71SNavdeep Parhar 		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
292554e4ee71SNavdeep Parhar 		    fl ? fl->cntxt_id : 0xffff, 0xffff);
292654e4ee71SNavdeep Parhar 		if (rc != 0) {
292754e4ee71SNavdeep Parhar 			device_printf(dev,
292854e4ee71SNavdeep Parhar 			    "failed to free queue %p: %d\n", iq, rc);
292954e4ee71SNavdeep Parhar 			return (rc);
293054e4ee71SNavdeep Parhar 		}
293154e4ee71SNavdeep Parhar 		iq->flags &= ~IQ_ALLOCATED;
293254e4ee71SNavdeep Parhar 	}
293354e4ee71SNavdeep Parhar 
293454e4ee71SNavdeep Parhar 	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
293554e4ee71SNavdeep Parhar 
293654e4ee71SNavdeep Parhar 	bzero(iq, sizeof(*iq));
293754e4ee71SNavdeep Parhar 
293854e4ee71SNavdeep Parhar 	if (fl) {
293954e4ee71SNavdeep Parhar 		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
294054e4ee71SNavdeep Parhar 		    fl->desc);
294154e4ee71SNavdeep Parhar 
2942aa9a5cc0SNavdeep Parhar 		if (fl->sdesc)
29431458bff9SNavdeep Parhar 			free_fl_sdesc(sc, fl);
29441458bff9SNavdeep Parhar 
294554e4ee71SNavdeep Parhar 		if (mtx_initialized(&fl->fl_lock))
294654e4ee71SNavdeep Parhar 			mtx_destroy(&fl->fl_lock);
294754e4ee71SNavdeep Parhar 
294854e4ee71SNavdeep Parhar 		bzero(fl, sizeof(*fl));
294954e4ee71SNavdeep Parhar 	}
295054e4ee71SNavdeep Parhar 
295154e4ee71SNavdeep Parhar 	return (0);
295254e4ee71SNavdeep Parhar }
295354e4ee71SNavdeep Parhar 
295438035ed6SNavdeep Parhar static void
2955aa93b99aSNavdeep Parhar add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
2956aa93b99aSNavdeep Parhar     struct sysctl_oid *oid, struct sge_fl *fl)
295738035ed6SNavdeep Parhar {
295838035ed6SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
295938035ed6SNavdeep Parhar 
296038035ed6SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL,
296138035ed6SNavdeep Parhar 	    "freelist");
296238035ed6SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
296338035ed6SNavdeep Parhar 
2964aa93b99aSNavdeep Parhar 	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
2965aa93b99aSNavdeep Parhar 	    &fl->ba, "bus address of descriptor ring");
2966aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
2967aa93b99aSNavdeep Parhar 	    fl->sidx * EQ_ESIZE + sc->params.sge.spg_len,
2968aa93b99aSNavdeep Parhar 	    "desc ring size in bytes");
296938035ed6SNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
297038035ed6SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I",
297138035ed6SNavdeep Parhar 	    "SGE context id of the freelist");
2972e3207e19SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL,
2973e3207e19SNavdeep Parhar 	    fl_pad ? 1 : 0, "padding enabled");
2974e3207e19SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL,
2975e3207e19SNavdeep Parhar 	    fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled");
297638035ed6SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx,
297738035ed6SNavdeep Parhar 	    0, "consumer index");
297838035ed6SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING) {
297938035ed6SNavdeep Parhar 		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset",
298038035ed6SNavdeep Parhar 		    CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset");
298138035ed6SNavdeep Parhar 	}
298238035ed6SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx,
298338035ed6SNavdeep Parhar 	    0, "producer index");
298438035ed6SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated",
298538035ed6SNavdeep Parhar 	    CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated");
298638035ed6SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined",
298738035ed6SNavdeep Parhar 	    CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters");
298838035ed6SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated",
298938035ed6SNavdeep Parhar 	    CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated");
299038035ed6SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled",
299138035ed6SNavdeep Parhar 	    CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled");
299238035ed6SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled",
299338035ed6SNavdeep Parhar 	    CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)");
299438035ed6SNavdeep Parhar }
299538035ed6SNavdeep Parhar 
/*
 * Create the adapter's firmware event queue (an ingress queue with no
 * freelist) and attach its sysctl nodes under the device's tree.
 * Returns 0 on success or an errno from alloc_iq_fl.
 */
static int
alloc_fwq(struct adapter *sc)
{
	int rc, intr_idx;
	struct sge_iq *fwq = &sc->sge.fwq;
	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
	fwq->flags |= IQ_INTR;	/* always */
	if (sc->flags & IS_VF)
		intr_idx = 0;
	else {
		/*
		 * Use vector 1 when there are multiple vectors; only a PF
		 * handles filter and L2T write replies, so the handlers are
		 * installed here and not in the IS_VF case.
		 */
		intr_idx = sc->intr_count > 1 ? 1 : 0;
		fwq->set_tcb_rpl = t4_filter_rpl;
		fwq->l2t_write_rpl = do_l2t_write_rpl;
	}
	/* NULL fl: no freelist; -1: no congestion management for this iq. */
	rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create firmware event queue: %d\n", rc);
		return (rc);
	}

	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD,
	    NULL, "firmware event queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(&sc->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &fwq->ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(&sc->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    fwq->qsize * IQ_ESIZE, "descriptor ring size in bytes");
	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I",
	    "absolute id of the queue");
	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I",
	    "SGE context id of the queue");
	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I",
	    "consumer index");

	return (0);
}
3040733b9277SNavdeep Parhar 
3041733b9277SNavdeep Parhar static int
3042733b9277SNavdeep Parhar free_fwq(struct adapter *sc)
3043733b9277SNavdeep Parhar {
3044733b9277SNavdeep Parhar 	return free_iq_fl(NULL, &sc->sge.fwq, NULL);
3045733b9277SNavdeep Parhar }
3046733b9277SNavdeep Parhar 
/*
 * Create the adapter-wide management (control) work request queue.  Its
 * completions are delivered to the firmware event queue, so the fwq must
 * exist before this is called.  Returns 0 or an errno from alloc_wrq.
 */
static int
alloc_mgmtq(struct adapter *sc)
{
	int rc;
	struct sge_wrq *mgmtq = &sc->sge.mgmtq;
	char name[16];
	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD,
	    NULL, "management queue");

	snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
	/* EQ_CTRL type, first port's tx channel, fwq as the associated iq. */
	init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
	    sc->sge.fwq.cntxt_id, name);
	rc = alloc_wrq(sc, NULL, mgmtq, oid);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create management queue: %d\n", rc);
		return (rc);
	}

	return (0);
}
307154e4ee71SNavdeep Parhar 
307254e4ee71SNavdeep Parhar static int
3073733b9277SNavdeep Parhar free_mgmtq(struct adapter *sc)
3074733b9277SNavdeep Parhar {
307509fe6320SNavdeep Parhar 
3076733b9277SNavdeep Parhar 	return free_wrq(sc, &sc->sge.mgmtq);
3077733b9277SNavdeep Parhar }
3078733b9277SNavdeep Parhar 
/*
 * Map the cong_drop policy ("drop") to the congestion argument passed to
 * alloc_iq_fl for an NIC rx queue:
 *   drop == -1: return -1 (no congestion management for the queue)
 *   drop ==  1: return 0  (empty channel map)
 *   otherwise : return the port's rx channel map.
 * NOTE(review): the exact firmware semantics of 0 vs. rx_chan_map here are
 * inferred from their use as alloc_iq_fl's cong argument — confirm.
 */
int
tnl_cong(struct port_info *pi, int drop)
{

	if (drop == -1)
		return (-1);
	else if (drop == 1)
		return (0);
	else
		return (pi->rx_chan_map);
}
30909fb8886bSNavdeep Parhar 
/*
 * Create one NIC rx queue (ingress queue + freelist) for the given vi,
 * initialize LRO state for it, and attach its sysctl nodes under oid.
 * idx is the queue's index within the vi; intr_idx selects the interrupt
 * vector.  Returns 0 on success or an errno.
 */
static int
alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	struct adapter *sc = vi->pi->adapter;
	struct sysctl_oid_list *children;
	char name[16];

	rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx,
	    tnl_cong(vi->pi, cong_drop));
	if (rc != 0)
		return (rc);

	/*
	 * The first queue establishes the abs_id <-> cntxt_id offset; every
	 * subsequent queue must use the same offset.  A PF's offset is 0.
	 */
	if (idx == 0)
		sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id;
	else
		KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id,
		    ("iq_base mismatch"));
	KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF,
	    ("PF with non-zero iq_base"));

	/*
	 * The freelist is just barely above the starvation threshold right now,
	 * fill it up a bit more.
	 */
	FL_LOCK(&rxq->fl);
	refill_fl(sc, &rxq->fl, 128);
	FL_UNLOCK(&rxq->fl);

#if defined(INET) || defined(INET6)
	/*
	 * NOTE(review): on tcp_lro_init_args failure the iq/fl created above
	 * are not freed here; presumably the caller's teardown path handles
	 * partially constructed queues — confirm.
	 */
	rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs);
	if (rc != 0)
		return (rc);
	MPASS(rxq->lro.ifp == vi->ifp);	/* also indicates LRO init'ed */

	if (vi->ifp->if_capenable & IFCAP_LRO)
		rxq->iq.flags |= IQ_LRO_ENABLED;
#endif
	rxq->ifp = vi->ifp;

	children = SYSCTL_CHILDREN(oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "rx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &rxq->iq.ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    rxq->iq.qsize * IQ_ESIZE, "descriptor ring size in bytes");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I",
	    "absolute id of the queue");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I",
	    "SGE context id of the queue");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I",
	    "consumer index");
#if defined(INET) || defined(INET6)
	SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
	    &rxq->lro.lro_queued, 0, NULL);
	SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
	    &rxq->lro.lro_flushed, 0, NULL);
#endif
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
	    &rxq->rxcsum, "# of times hardware assisted with checksum");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction",
	    CTLFLAG_RD, &rxq->vlan_extraction,
	    "# of times hardware extracted 802.1Q tag");

	add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl);

	return (rc);
}
316854e4ee71SNavdeep Parhar 
316954e4ee71SNavdeep Parhar static int
3170fe2ebb76SJohn Baldwin free_rxq(struct vi_info *vi, struct sge_rxq *rxq)
317154e4ee71SNavdeep Parhar {
317254e4ee71SNavdeep Parhar 	int rc;
317354e4ee71SNavdeep Parhar 
3174a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
317554e4ee71SNavdeep Parhar 	if (rxq->lro.ifp) {
317654e4ee71SNavdeep Parhar 		tcp_lro_free(&rxq->lro);
317754e4ee71SNavdeep Parhar 		rxq->lro.ifp = NULL;
317854e4ee71SNavdeep Parhar 	}
317954e4ee71SNavdeep Parhar #endif
318054e4ee71SNavdeep Parhar 
3181fe2ebb76SJohn Baldwin 	rc = free_iq_fl(vi, &rxq->iq, &rxq->fl);
318254e4ee71SNavdeep Parhar 	if (rc == 0)
318354e4ee71SNavdeep Parhar 		bzero(rxq, sizeof(*rxq));
318454e4ee71SNavdeep Parhar 
318554e4ee71SNavdeep Parhar 	return (rc);
318654e4ee71SNavdeep Parhar }
318754e4ee71SNavdeep Parhar 
318809fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
/*
 * Create one TOE rx queue (ingress queue + freelist) for the given vi and
 * attach its sysctl nodes under oid.  Unlike NIC queues, offload queues
 * always use the port's rx channel map for congestion management.
 */
static int
alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq,
    int intr_idx, int idx, struct sysctl_oid *oid)
{
	struct port_info *pi = vi->pi;
	int rc;
	struct sysctl_oid_list *children;
	char name[16];

	rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
	    pi->rx_chan_map);
	if (rc != 0)
		return (rc);

	children = SYSCTL_CHILDREN(oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "rx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &ofld_rxq->iq.ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    ofld_rxq->iq.qsize * IQ_ESIZE, "descriptor ring size in bytes");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16,
	    "I", "absolute id of the queue");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16,
	    "I", "SGE context id of the queue");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I",
	    "consumer index");

	add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl);

	return (rc);
}
3228733b9277SNavdeep Parhar 
3229733b9277SNavdeep Parhar static int
3230fe2ebb76SJohn Baldwin free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
3231733b9277SNavdeep Parhar {
3232733b9277SNavdeep Parhar 	int rc;
3233733b9277SNavdeep Parhar 
3234fe2ebb76SJohn Baldwin 	rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl);
3235733b9277SNavdeep Parhar 	if (rc == 0)
3236733b9277SNavdeep Parhar 		bzero(ofld_rxq, sizeof(*ofld_rxq));
3237733b9277SNavdeep Parhar 
3238733b9277SNavdeep Parhar 	return (rc);
3239733b9277SNavdeep Parhar }
3240733b9277SNavdeep Parhar #endif
3241733b9277SNavdeep Parhar 
3242298d969cSNavdeep Parhar #ifdef DEV_NETMAP
/*
 * Allocate the DMA rings and initialize the software state for one netmap
 * rx queue (ingress queue + freelist), and attach its sysctl nodes.  The
 * hardware contexts are not created here; iq_cntxt_id is left invalid.
 */
static int
alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
    int idx, struct sysctl_oid *oid)
{
	int rc;
	struct sysctl_oid_list *children;
	struct sysctl_ctx_list *ctx;
	char name[16];
	size_t len;
	struct adapter *sc = vi->pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);

	MPASS(na != NULL);

	len = vi->qsize_rxq * IQ_ESIZE;
	rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map,
	    &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc);
	if (rc != 0)
		return (rc);

	/* Freelist ring sized to netmap's rx ring, plus the status page. */
	len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map,
	    &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc);
	if (rc != 0)
		return (rc);
	/*
	 * NOTE(review): the iq ring allocated above is not freed on this
	 * failure path; presumably free_nm_rxq runs during teardown of a
	 * partially constructed queue — confirm.
	 */

	nm_rxq->vi = vi;
	nm_rxq->nid = idx;
	nm_rxq->iq_cidx = 0;
	/* Usable iq slots exclude the status page at the end of the ring. */
	nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE;
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	nm_rxq->fl_sidx = na->num_rx_desc;
	nm_rxq->intr_idx = intr_idx;
	nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;

	ctx = &vi->ctx;
	children = SYSCTL_CHILDREN(oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL,
	    "rx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16,
	    "I", "absolute id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16,
	    "I", "SGE context id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I",
	    "consumer index");

	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL,
	    "freelist");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16,
	    "I", "SGE context id of the freelist");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
	    &nm_rxq->fl_cidx, 0, "consumer index");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
	    &nm_rxq->fl_pidx, 0, "producer index");

	return (rc);
}
3312298d969cSNavdeep Parhar 
3313298d969cSNavdeep Parhar 
3314298d969cSNavdeep Parhar static int
3315fe2ebb76SJohn Baldwin free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
3316298d969cSNavdeep Parhar {
3317fe2ebb76SJohn Baldwin 	struct adapter *sc = vi->pi->adapter;
3318298d969cSNavdeep Parhar 
33190fa7560dSNavdeep Parhar 	if (vi->flags & VI_INIT_DONE)
3320a8c4fcb9SNavdeep Parhar 		MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID);
33210fa7560dSNavdeep Parhar 	else
33220fa7560dSNavdeep Parhar 		MPASS(nm_rxq->iq_cntxt_id == 0);
3323a8c4fcb9SNavdeep Parhar 
3324298d969cSNavdeep Parhar 	free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba,
3325298d969cSNavdeep Parhar 	    nm_rxq->iq_desc);
3326298d969cSNavdeep Parhar 	free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba,
3327298d969cSNavdeep Parhar 	    nm_rxq->fl_desc);
3328298d969cSNavdeep Parhar 
3329298d969cSNavdeep Parhar 	return (0);
3330298d969cSNavdeep Parhar }
3331298d969cSNavdeep Parhar 
/*
 * Allocate the DMA ring and initialize the software state for one netmap
 * tx queue, and attach its sysctl nodes.  iqidx names the netmap rx queue
 * whose iq will take this eq's completions.  The hardware context is not
 * created here; cntxt_id is left invalid.
 */
static int
alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	size_t len;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	char name[16];
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	/* Ring sized to netmap's tx ring, plus the status page. */
	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
	    &nm_txq->ba, (void **)&nm_txq->desc);
	if (rc)
		return (rc);

	nm_txq->pidx = nm_txq->cidx = 0;
	nm_txq->sidx = na->num_tx_desc;
	nm_txq->nid = idx;
	nm_txq->iqidx = iqidx;
	/* Precomputed CPL_TX_PKT ctrl0: channel, PF/VF and VI identity. */
	nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) |
	    V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) |
	    V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid)));
	nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "netmap tx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &nm_txq->cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I",
	    "consumer index");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I",
	    "producer index");

	return (rc);
}
3376298d969cSNavdeep Parhar 
static int
free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->pi->adapter;

	/*
	 * The hardware context must already be gone: marked invalid if the
	 * VI completed initialization, or still zero (never assigned) if it
	 * did not.
	 */
	if (vi->flags & VI_INIT_DONE)
		MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID);
	else
		MPASS(nm_txq->cntxt_id == 0);

	free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
	    nm_txq->desc);

	return (0);
}
3392298d969cSNavdeep Parhar #endif
3393298d969cSNavdeep Parhar 
3394733b9277SNavdeep Parhar static int
3395733b9277SNavdeep Parhar ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
3396733b9277SNavdeep Parhar {
3397733b9277SNavdeep Parhar 	int rc, cntxt_id;
3398733b9277SNavdeep Parhar 	struct fw_eq_ctrl_cmd c;
339990e7434aSNavdeep Parhar 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
3400f7dfe243SNavdeep Parhar 
3401f7dfe243SNavdeep Parhar 	bzero(&c, sizeof(c));
3402f7dfe243SNavdeep Parhar 
3403f7dfe243SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
3404f7dfe243SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
3405f7dfe243SNavdeep Parhar 	    V_FW_EQ_CTRL_CMD_VFN(0));
3406f7dfe243SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
3407f7dfe243SNavdeep Parhar 	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
34087951040fSNavdeep Parhar 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
3409f7dfe243SNavdeep Parhar 	c.physeqid_pkd = htobe32(0);
3410f7dfe243SNavdeep Parhar 	c.fetchszm_to_iqid =
341187b027baSNavdeep Parhar 	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
3412733b9277SNavdeep Parhar 		V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
341356599263SNavdeep Parhar 		F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
3414f7dfe243SNavdeep Parhar 	c.dcaen_to_eqsize =
3415f7dfe243SNavdeep Parhar 	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
3416f7dfe243SNavdeep Parhar 		V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
341787b027baSNavdeep Parhar 		V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
34187951040fSNavdeep Parhar 		V_FW_EQ_CTRL_CMD_EQSIZE(qsize));
3419f7dfe243SNavdeep Parhar 	c.eqaddr = htobe64(eq->ba);
3420f7dfe243SNavdeep Parhar 
3421f7dfe243SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
3422f7dfe243SNavdeep Parhar 	if (rc != 0) {
3423f7dfe243SNavdeep Parhar 		device_printf(sc->dev,
3424733b9277SNavdeep Parhar 		    "failed to create control queue %d: %d\n", eq->tx_chan, rc);
3425f7dfe243SNavdeep Parhar 		return (rc);
3426f7dfe243SNavdeep Parhar 	}
3427733b9277SNavdeep Parhar 	eq->flags |= EQ_ALLOCATED;
3428f7dfe243SNavdeep Parhar 
3429f7dfe243SNavdeep Parhar 	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
3430f7dfe243SNavdeep Parhar 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
3431733b9277SNavdeep Parhar 	if (cntxt_id >= sc->sge.neq)
3432733b9277SNavdeep Parhar 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
3433733b9277SNavdeep Parhar 		cntxt_id, sc->sge.neq - 1);
3434f7dfe243SNavdeep Parhar 	sc->sge.eqmap[cntxt_id] = eq;
3435f7dfe243SNavdeep Parhar 
3436f7dfe243SNavdeep Parhar 	return (rc);
3437f7dfe243SNavdeep Parhar }
3438f7dfe243SNavdeep Parhar 
/*
 * Issue FW_EQ_ETH_CMD to have the firmware create an Ethernet (NIC) egress
 * queue whose ring has already been allocated (eq->ba).  On success the
 * queue is marked EQ_ALLOCATED and recorded in the adapter's eqmap.
 */
static int
eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
	int rc, cntxt_id;
	struct fw_eq_eth_cmd c;
	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;

	bzero(&c, sizeof(c));

	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
		V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
		V_FW_EQ_ETH_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create Ethernet egress queue: %d\n", rc);
		return (rc);
	}
	eq->flags |= EQ_ALLOCATED;

	/* The firmware wrote both the queue id and its physical id back. */
	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
		cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = eq;

	return (rc);
}
348254e4ee71SNavdeep Parhar 
348309fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
3484733b9277SNavdeep Parhar static int
3485fe2ebb76SJohn Baldwin ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
3486733b9277SNavdeep Parhar {
3487733b9277SNavdeep Parhar 	int rc, cntxt_id;
3488733b9277SNavdeep Parhar 	struct fw_eq_ofld_cmd c;
348990e7434aSNavdeep Parhar 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
349054e4ee71SNavdeep Parhar 
3491733b9277SNavdeep Parhar 	bzero(&c, sizeof(c));
3492733b9277SNavdeep Parhar 
3493733b9277SNavdeep Parhar 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
3494733b9277SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
3495733b9277SNavdeep Parhar 	    V_FW_EQ_OFLD_CMD_VFN(0));
3496733b9277SNavdeep Parhar 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
3497733b9277SNavdeep Parhar 	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
3498733b9277SNavdeep Parhar 	c.fetchszm_to_iqid =
34997951040fSNavdeep Parhar 		htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
3500733b9277SNavdeep Parhar 		    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
3501733b9277SNavdeep Parhar 		    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
3502733b9277SNavdeep Parhar 	c.dcaen_to_eqsize =
3503733b9277SNavdeep Parhar 	    htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
3504733b9277SNavdeep Parhar 		V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
35057951040fSNavdeep Parhar 		V_FW_EQ_OFLD_CMD_EQSIZE(qsize));
3506733b9277SNavdeep Parhar 	c.eqaddr = htobe64(eq->ba);
3507733b9277SNavdeep Parhar 
3508733b9277SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
3509733b9277SNavdeep Parhar 	if (rc != 0) {
3510fe2ebb76SJohn Baldwin 		device_printf(vi->dev,
3511733b9277SNavdeep Parhar 		    "failed to create egress queue for TCP offload: %d\n", rc);
3512733b9277SNavdeep Parhar 		return (rc);
3513733b9277SNavdeep Parhar 	}
3514733b9277SNavdeep Parhar 	eq->flags |= EQ_ALLOCATED;
3515733b9277SNavdeep Parhar 
3516733b9277SNavdeep Parhar 	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
351754e4ee71SNavdeep Parhar 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
3518733b9277SNavdeep Parhar 	if (cntxt_id >= sc->sge.neq)
3519733b9277SNavdeep Parhar 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
3520733b9277SNavdeep Parhar 		cntxt_id, sc->sge.neq - 1);
352154e4ee71SNavdeep Parhar 	sc->sge.eqmap[cntxt_id] = eq;
352254e4ee71SNavdeep Parhar 
3523733b9277SNavdeep Parhar 	return (rc);
3524733b9277SNavdeep Parhar }
3525733b9277SNavdeep Parhar #endif
3526733b9277SNavdeep Parhar 
3527733b9277SNavdeep Parhar static int
3528fe2ebb76SJohn Baldwin alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
3529733b9277SNavdeep Parhar {
35307951040fSNavdeep Parhar 	int rc, qsize;
3531733b9277SNavdeep Parhar 	size_t len;
3532733b9277SNavdeep Parhar 
3533733b9277SNavdeep Parhar 	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
3534733b9277SNavdeep Parhar 
353590e7434aSNavdeep Parhar 	qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
35367951040fSNavdeep Parhar 	len = qsize * EQ_ESIZE;
3537733b9277SNavdeep Parhar 	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
3538733b9277SNavdeep Parhar 	    &eq->ba, (void **)&eq->desc);
3539733b9277SNavdeep Parhar 	if (rc)
3540733b9277SNavdeep Parhar 		return (rc);
3541733b9277SNavdeep Parhar 
3542733b9277SNavdeep Parhar 	eq->pidx = eq->cidx = 0;
35437951040fSNavdeep Parhar 	eq->equeqidx = eq->dbidx = 0;
3544d14b0ac1SNavdeep Parhar 	eq->doorbells = sc->doorbells;
3545733b9277SNavdeep Parhar 
3546733b9277SNavdeep Parhar 	switch (eq->flags & EQ_TYPEMASK) {
3547733b9277SNavdeep Parhar 	case EQ_CTRL:
3548733b9277SNavdeep Parhar 		rc = ctrl_eq_alloc(sc, eq);
3549733b9277SNavdeep Parhar 		break;
3550733b9277SNavdeep Parhar 
3551733b9277SNavdeep Parhar 	case EQ_ETH:
3552fe2ebb76SJohn Baldwin 		rc = eth_eq_alloc(sc, vi, eq);
3553733b9277SNavdeep Parhar 		break;
3554733b9277SNavdeep Parhar 
355509fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
3556733b9277SNavdeep Parhar 	case EQ_OFLD:
3557fe2ebb76SJohn Baldwin 		rc = ofld_eq_alloc(sc, vi, eq);
3558733b9277SNavdeep Parhar 		break;
3559733b9277SNavdeep Parhar #endif
3560733b9277SNavdeep Parhar 
3561733b9277SNavdeep Parhar 	default:
3562733b9277SNavdeep Parhar 		panic("%s: invalid eq type %d.", __func__,
3563733b9277SNavdeep Parhar 		    eq->flags & EQ_TYPEMASK);
3564733b9277SNavdeep Parhar 	}
3565733b9277SNavdeep Parhar 	if (rc != 0) {
3566733b9277SNavdeep Parhar 		device_printf(sc->dev,
3567c086e3d1SNavdeep Parhar 		    "failed to allocate egress queue(%d): %d\n",
3568733b9277SNavdeep Parhar 		    eq->flags & EQ_TYPEMASK, rc);
3569733b9277SNavdeep Parhar 	}
3570733b9277SNavdeep Parhar 
3571d14b0ac1SNavdeep Parhar 	if (isset(&eq->doorbells, DOORBELL_UDB) ||
3572d14b0ac1SNavdeep Parhar 	    isset(&eq->doorbells, DOORBELL_UDBWC) ||
357377ad3c41SNavdeep Parhar 	    isset(&eq->doorbells, DOORBELL_WCWR)) {
357490e7434aSNavdeep Parhar 		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
3575d14b0ac1SNavdeep Parhar 		uint32_t mask = (1 << s_qpp) - 1;
3576d14b0ac1SNavdeep Parhar 		volatile uint8_t *udb;
3577d14b0ac1SNavdeep Parhar 
3578d14b0ac1SNavdeep Parhar 		udb = sc->udbs_base + UDBS_DB_OFFSET;
3579d14b0ac1SNavdeep Parhar 		udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT;	/* pg offset */
3580d14b0ac1SNavdeep Parhar 		eq->udb_qid = eq->cntxt_id & mask;		/* id in page */
3581f10405b3SNavdeep Parhar 		if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
358277ad3c41SNavdeep Parhar 	    		clrbit(&eq->doorbells, DOORBELL_WCWR);
3583d14b0ac1SNavdeep Parhar 		else {
3584d14b0ac1SNavdeep Parhar 			udb += eq->udb_qid << UDBS_SEG_SHIFT;	/* seg offset */
3585d14b0ac1SNavdeep Parhar 			eq->udb_qid = 0;
3586d14b0ac1SNavdeep Parhar 		}
3587d14b0ac1SNavdeep Parhar 		eq->udb = (volatile void *)udb;
3588d14b0ac1SNavdeep Parhar 	}
3589d14b0ac1SNavdeep Parhar 
3590733b9277SNavdeep Parhar 	return (rc);
3591733b9277SNavdeep Parhar }
3592733b9277SNavdeep Parhar 
3593733b9277SNavdeep Parhar static int
3594733b9277SNavdeep Parhar free_eq(struct adapter *sc, struct sge_eq *eq)
3595733b9277SNavdeep Parhar {
3596733b9277SNavdeep Parhar 	int rc;
3597733b9277SNavdeep Parhar 
3598733b9277SNavdeep Parhar 	if (eq->flags & EQ_ALLOCATED) {
3599733b9277SNavdeep Parhar 		switch (eq->flags & EQ_TYPEMASK) {
3600733b9277SNavdeep Parhar 		case EQ_CTRL:
3601733b9277SNavdeep Parhar 			rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0,
3602733b9277SNavdeep Parhar 			    eq->cntxt_id);
3603733b9277SNavdeep Parhar 			break;
3604733b9277SNavdeep Parhar 
3605733b9277SNavdeep Parhar 		case EQ_ETH:
3606733b9277SNavdeep Parhar 			rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0,
3607733b9277SNavdeep Parhar 			    eq->cntxt_id);
3608733b9277SNavdeep Parhar 			break;
3609733b9277SNavdeep Parhar 
361009fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
3611733b9277SNavdeep Parhar 		case EQ_OFLD:
3612733b9277SNavdeep Parhar 			rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0,
3613733b9277SNavdeep Parhar 			    eq->cntxt_id);
3614733b9277SNavdeep Parhar 			break;
3615733b9277SNavdeep Parhar #endif
3616733b9277SNavdeep Parhar 
3617733b9277SNavdeep Parhar 		default:
3618733b9277SNavdeep Parhar 			panic("%s: invalid eq type %d.", __func__,
3619733b9277SNavdeep Parhar 			    eq->flags & EQ_TYPEMASK);
3620733b9277SNavdeep Parhar 		}
3621733b9277SNavdeep Parhar 		if (rc != 0) {
3622733b9277SNavdeep Parhar 			device_printf(sc->dev,
3623733b9277SNavdeep Parhar 			    "failed to free egress queue (%d): %d\n",
3624733b9277SNavdeep Parhar 			    eq->flags & EQ_TYPEMASK, rc);
3625733b9277SNavdeep Parhar 			return (rc);
3626733b9277SNavdeep Parhar 		}
3627733b9277SNavdeep Parhar 		eq->flags &= ~EQ_ALLOCATED;
3628733b9277SNavdeep Parhar 	}
3629733b9277SNavdeep Parhar 
3630733b9277SNavdeep Parhar 	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
3631733b9277SNavdeep Parhar 
3632733b9277SNavdeep Parhar 	if (mtx_initialized(&eq->eq_lock))
3633733b9277SNavdeep Parhar 		mtx_destroy(&eq->eq_lock);
3634733b9277SNavdeep Parhar 
3635733b9277SNavdeep Parhar 	bzero(eq, sizeof(*eq));
3636733b9277SNavdeep Parhar 	return (0);
3637733b9277SNavdeep Parhar }
3638733b9277SNavdeep Parhar 
3639733b9277SNavdeep Parhar static int
3640fe2ebb76SJohn Baldwin alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq,
3641733b9277SNavdeep Parhar     struct sysctl_oid *oid)
3642733b9277SNavdeep Parhar {
3643733b9277SNavdeep Parhar 	int rc;
3644fe2ebb76SJohn Baldwin 	struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx;
3645733b9277SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
3646733b9277SNavdeep Parhar 
3647fe2ebb76SJohn Baldwin 	rc = alloc_eq(sc, vi, &wrq->eq);
3648733b9277SNavdeep Parhar 	if (rc)
3649733b9277SNavdeep Parhar 		return (rc);
3650733b9277SNavdeep Parhar 
3651733b9277SNavdeep Parhar 	wrq->adapter = sc;
36527951040fSNavdeep Parhar 	TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq);
36537951040fSNavdeep Parhar 	TAILQ_INIT(&wrq->incomplete_wrs);
365409fe6320SNavdeep Parhar 	STAILQ_INIT(&wrq->wr_list);
36557951040fSNavdeep Parhar 	wrq->nwr_pending = 0;
36567951040fSNavdeep Parhar 	wrq->ndesc_needed = 0;
3657733b9277SNavdeep Parhar 
3658aa93b99aSNavdeep Parhar 	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
3659aa93b99aSNavdeep Parhar 	    &wrq->eq.ba, "bus address of descriptor ring");
3660aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
3661aa93b99aSNavdeep Parhar 	    wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len,
3662aa93b99aSNavdeep Parhar 	    "desc ring size in bytes");
3663733b9277SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
3664733b9277SNavdeep Parhar 	    &wrq->eq.cntxt_id, 0, "SGE context id of the queue");
3665733b9277SNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
3666733b9277SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I",
3667733b9277SNavdeep Parhar 	    "consumer index");
3668733b9277SNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx",
3669733b9277SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I",
3670733b9277SNavdeep Parhar 	    "producer index");
3671aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
3672aa93b99aSNavdeep Parhar 	    wrq->eq.sidx, "status page index");
36737951040fSNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD,
36747951040fSNavdeep Parhar 	    &wrq->tx_wrs_direct, "# of work requests (direct)");
36757951040fSNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD,
36767951040fSNavdeep Parhar 	    &wrq->tx_wrs_copied, "# of work requests (copied)");
36770459a175SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD,
36780459a175SNavdeep Parhar 	    &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)");
3679733b9277SNavdeep Parhar 
3680733b9277SNavdeep Parhar 	return (rc);
3681733b9277SNavdeep Parhar }
3682733b9277SNavdeep Parhar 
3683733b9277SNavdeep Parhar static int
3684733b9277SNavdeep Parhar free_wrq(struct adapter *sc, struct sge_wrq *wrq)
3685733b9277SNavdeep Parhar {
3686733b9277SNavdeep Parhar 	int rc;
3687733b9277SNavdeep Parhar 
3688733b9277SNavdeep Parhar 	rc = free_eq(sc, &wrq->eq);
3689733b9277SNavdeep Parhar 	if (rc)
3690733b9277SNavdeep Parhar 		return (rc);
3691733b9277SNavdeep Parhar 
3692733b9277SNavdeep Parhar 	bzero(wrq, sizeof(*wrq));
3693733b9277SNavdeep Parhar 	return (0);
3694733b9277SNavdeep Parhar }
3695733b9277SNavdeep Parhar 
3696733b9277SNavdeep Parhar static int
3697fe2ebb76SJohn Baldwin alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
3698733b9277SNavdeep Parhar     struct sysctl_oid *oid)
3699733b9277SNavdeep Parhar {
3700733b9277SNavdeep Parhar 	int rc;
3701fe2ebb76SJohn Baldwin 	struct port_info *pi = vi->pi;
3702733b9277SNavdeep Parhar 	struct adapter *sc = pi->adapter;
3703733b9277SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
3704733b9277SNavdeep Parhar 	char name[16];
3705733b9277SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
3706733b9277SNavdeep Parhar 
37077951040fSNavdeep Parhar 	rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx,
37087951040fSNavdeep Parhar 	    M_CXGBE, M_WAITOK);
37097951040fSNavdeep Parhar 	if (rc != 0) {
37107951040fSNavdeep Parhar 		device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc);
37117951040fSNavdeep Parhar 		return (rc);
37127951040fSNavdeep Parhar 	}
37137951040fSNavdeep Parhar 
3714fe2ebb76SJohn Baldwin 	rc = alloc_eq(sc, vi, eq);
37157951040fSNavdeep Parhar 	if (rc != 0) {
37167951040fSNavdeep Parhar 		mp_ring_free(txq->r);
37177951040fSNavdeep Parhar 		txq->r = NULL;
3718733b9277SNavdeep Parhar 		return (rc);
37197951040fSNavdeep Parhar 	}
3720733b9277SNavdeep Parhar 
37217951040fSNavdeep Parhar 	/* Can't fail after this point. */
37227951040fSNavdeep Parhar 
3723ec55567cSJohn Baldwin 	if (idx == 0)
3724ec55567cSJohn Baldwin 		sc->sge.eq_base = eq->abs_id - eq->cntxt_id;
3725ec55567cSJohn Baldwin 	else
3726ec55567cSJohn Baldwin 		KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id,
3727ec55567cSJohn Baldwin 		    ("eq_base mismatch"));
3728ec55567cSJohn Baldwin 	KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF,
3729ec55567cSJohn Baldwin 	    ("PF with non-zero eq_base"));
3730ec55567cSJohn Baldwin 
37317951040fSNavdeep Parhar 	TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq);
3732fe2ebb76SJohn Baldwin 	txq->ifp = vi->ifp;
37337951040fSNavdeep Parhar 	txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
37346af45170SJohn Baldwin 	if (sc->flags & IS_VF)
37356af45170SJohn Baldwin 		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
37366af45170SJohn Baldwin 		    V_TXPKT_INTF(pi->tx_chan));
37376af45170SJohn Baldwin 	else
37387951040fSNavdeep Parhar 		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
373997f2919dSNavdeep Parhar 		    V_TXPKT_INTF(pi->tx_chan) |
374097f2919dSNavdeep Parhar 		    V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) |
374197f2919dSNavdeep Parhar 		    V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) |
374297f2919dSNavdeep Parhar 		    V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid)));
374302f972e8SNavdeep Parhar 	txq->tc_idx = -1;
37447951040fSNavdeep Parhar 	txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE,
3745733b9277SNavdeep Parhar 	    M_ZERO | M_WAITOK);
374654e4ee71SNavdeep Parhar 
374754e4ee71SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
3748fe2ebb76SJohn Baldwin 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
374954e4ee71SNavdeep Parhar 	    NULL, "tx queue");
375054e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
375154e4ee71SNavdeep Parhar 
3752aa93b99aSNavdeep Parhar 	SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
3753aa93b99aSNavdeep Parhar 	    &eq->ba, "bus address of descriptor ring");
3754aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
3755aa93b99aSNavdeep Parhar 	    eq->sidx * EQ_ESIZE + sc->params.sge.spg_len,
3756aa93b99aSNavdeep Parhar 	    "desc ring size in bytes");
3757ec55567cSJohn Baldwin 	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
3758ec55567cSJohn Baldwin 	    &eq->abs_id, 0, "absolute id of the queue");
3759fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
376059bc8ce0SNavdeep Parhar 	    &eq->cntxt_id, 0, "SGE context id of the queue");
3761fe2ebb76SJohn Baldwin 	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
376259bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I",
376359bc8ce0SNavdeep Parhar 	    "consumer index");
3764fe2ebb76SJohn Baldwin 	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
376559bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I",
376659bc8ce0SNavdeep Parhar 	    "producer index");
3767aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
3768aa93b99aSNavdeep Parhar 	    eq->sidx, "status page index");
376959bc8ce0SNavdeep Parhar 
377002f972e8SNavdeep Parhar 	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc",
377102f972e8SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RW, vi, idx, sysctl_tc, "I",
377202f972e8SNavdeep Parhar 	    "traffic class (-1 means none)");
377302f972e8SNavdeep Parhar 
3774fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
377554e4ee71SNavdeep Parhar 	    &txq->txcsum, "# of times hardware assisted with checksum");
3776fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion",
377754e4ee71SNavdeep Parhar 	    CTLFLAG_RD, &txq->vlan_insertion,
377854e4ee71SNavdeep Parhar 	    "# of times hardware inserted 802.1Q tag");
3779fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
3780a1ea9a82SNavdeep Parhar 	    &txq->tso_wrs, "# of TSO work requests");
3781fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
378254e4ee71SNavdeep Parhar 	    &txq->imm_wrs, "# of work requests with immediate data");
3783fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
378454e4ee71SNavdeep Parhar 	    &txq->sgl_wrs, "# of work requests with direct SGL");
3785fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
378654e4ee71SNavdeep Parhar 	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
3787fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs",
37887951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->txpkts0_wrs,
37897951040fSNavdeep Parhar 	    "# of txpkts (type 0) work requests");
3790fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs",
37917951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->txpkts1_wrs,
37927951040fSNavdeep Parhar 	    "# of txpkts (type 1) work requests");
3793fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts",
37947951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->txpkts0_pkts,
37957951040fSNavdeep Parhar 	    "# of frames tx'd using type0 txpkts work requests");
3796fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts",
37977951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->txpkts1_pkts,
37987951040fSNavdeep Parhar 	    "# of frames tx'd using type1 txpkts work requests");
379954e4ee71SNavdeep Parhar 
3800fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_enqueues",
38017951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->enqueues,
38027951040fSNavdeep Parhar 	    "# of enqueues to the mp_ring for this queue");
3803fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_drops",
38047951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->drops,
38057951040fSNavdeep Parhar 	    "# of drops in the mp_ring for this queue");
3806fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_starts",
38077951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->starts,
38087951040fSNavdeep Parhar 	    "# of normal consumer starts in the mp_ring for this queue");
3809fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_stalls",
38107951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->stalls,
38117951040fSNavdeep Parhar 	    "# of consumer stalls in the mp_ring for this queue");
3812fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_restarts",
38137951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->restarts,
38147951040fSNavdeep Parhar 	    "# of consumer restarts in the mp_ring for this queue");
3815fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_abdications",
38167951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->abdications,
38177951040fSNavdeep Parhar 	    "# of consumer abdications in the mp_ring for this queue");
381854e4ee71SNavdeep Parhar 
38197951040fSNavdeep Parhar 	return (0);
382054e4ee71SNavdeep Parhar }
382154e4ee71SNavdeep Parhar 
382254e4ee71SNavdeep Parhar static int
3823fe2ebb76SJohn Baldwin free_txq(struct vi_info *vi, struct sge_txq *txq)
382454e4ee71SNavdeep Parhar {
382554e4ee71SNavdeep Parhar 	int rc;
3826fe2ebb76SJohn Baldwin 	struct adapter *sc = vi->pi->adapter;
382754e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
382854e4ee71SNavdeep Parhar 
3829733b9277SNavdeep Parhar 	rc = free_eq(sc, eq);
3830733b9277SNavdeep Parhar 	if (rc)
383154e4ee71SNavdeep Parhar 		return (rc);
383254e4ee71SNavdeep Parhar 
38337951040fSNavdeep Parhar 	sglist_free(txq->gl);
3834f7dfe243SNavdeep Parhar 	free(txq->sdesc, M_CXGBE);
38357951040fSNavdeep Parhar 	mp_ring_free(txq->r);
383654e4ee71SNavdeep Parhar 
383754e4ee71SNavdeep Parhar 	bzero(txq, sizeof(*txq));
383854e4ee71SNavdeep Parhar 	return (0);
383954e4ee71SNavdeep Parhar }
384054e4ee71SNavdeep Parhar 
384154e4ee71SNavdeep Parhar static void
384254e4ee71SNavdeep Parhar oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
384354e4ee71SNavdeep Parhar {
384454e4ee71SNavdeep Parhar 	bus_addr_t *ba = arg;
384554e4ee71SNavdeep Parhar 
384654e4ee71SNavdeep Parhar 	KASSERT(nseg == 1,
384754e4ee71SNavdeep Parhar 	    ("%s meant for single segment mappings only.", __func__));
384854e4ee71SNavdeep Parhar 
384954e4ee71SNavdeep Parhar 	*ba = error ? 0 : segs->ds_addr;
385054e4ee71SNavdeep Parhar }
385154e4ee71SNavdeep Parhar 
385254e4ee71SNavdeep Parhar static inline void
385354e4ee71SNavdeep Parhar ring_fl_db(struct adapter *sc, struct sge_fl *fl)
385454e4ee71SNavdeep Parhar {
38554d6db4e0SNavdeep Parhar 	uint32_t n, v;
385654e4ee71SNavdeep Parhar 
38574d6db4e0SNavdeep Parhar 	n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx);
38584d6db4e0SNavdeep Parhar 	MPASS(n > 0);
3859d14b0ac1SNavdeep Parhar 
386054e4ee71SNavdeep Parhar 	wmb();
38614d6db4e0SNavdeep Parhar 	v = fl->dbval | V_PIDX(n);
38624d6db4e0SNavdeep Parhar 	if (fl->udb)
38634d6db4e0SNavdeep Parhar 		*fl->udb = htole32(v);
38644d6db4e0SNavdeep Parhar 	else
3865315048f2SJohn Baldwin 		t4_write_reg(sc, sc->sge_kdoorbell_reg, v);
38664d6db4e0SNavdeep Parhar 	IDXINCR(fl->dbidx, n, fl->sidx);
386754e4ee71SNavdeep Parhar }
386854e4ee71SNavdeep Parhar 
3869fb12416cSNavdeep Parhar /*
38704d6db4e0SNavdeep Parhar  * Fills up the freelist by allocating up to 'n' buffers.  Buffers that are
38714d6db4e0SNavdeep Parhar  * recycled do not count towards this allocation budget.
3872733b9277SNavdeep Parhar  *
38734d6db4e0SNavdeep Parhar  * Returns non-zero to indicate that this freelist should be added to the list
38744d6db4e0SNavdeep Parhar  * of starving freelists.
3875fb12416cSNavdeep Parhar  */
3876733b9277SNavdeep Parhar static int
38774d6db4e0SNavdeep Parhar refill_fl(struct adapter *sc, struct sge_fl *fl, int n)
387854e4ee71SNavdeep Parhar {
38794d6db4e0SNavdeep Parhar 	__be64 *d;
38804d6db4e0SNavdeep Parhar 	struct fl_sdesc *sd;
388138035ed6SNavdeep Parhar 	uintptr_t pa;
388254e4ee71SNavdeep Parhar 	caddr_t cl;
38834d6db4e0SNavdeep Parhar 	struct cluster_layout *cll;
38844d6db4e0SNavdeep Parhar 	struct sw_zone_info *swz;
388538035ed6SNavdeep Parhar 	struct cluster_metadata *clm;
38864d6db4e0SNavdeep Parhar 	uint16_t max_pidx;
38874d6db4e0SNavdeep Parhar 	uint16_t hw_cidx = fl->hw_cidx;		/* stable snapshot */
388854e4ee71SNavdeep Parhar 
388954e4ee71SNavdeep Parhar 	FL_LOCK_ASSERT_OWNED(fl);
389054e4ee71SNavdeep Parhar 
38914d6db4e0SNavdeep Parhar 	/*
3892453130d9SPedro F. Giffuni 	 * We always stop at the beginning of the hardware descriptor that's just
38934d6db4e0SNavdeep Parhar 	 * before the one with the hw cidx.  This is to avoid hw pidx = hw cidx,
38944d6db4e0SNavdeep Parhar 	 * which would mean an empty freelist to the chip.
38954d6db4e0SNavdeep Parhar 	 */
38964d6db4e0SNavdeep Parhar 	max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1;
38974d6db4e0SNavdeep Parhar 	if (fl->pidx == max_pidx * 8)
38984d6db4e0SNavdeep Parhar 		return (0);
389954e4ee71SNavdeep Parhar 
39004d6db4e0SNavdeep Parhar 	d = &fl->desc[fl->pidx];
39014d6db4e0SNavdeep Parhar 	sd = &fl->sdesc[fl->pidx];
39024d6db4e0SNavdeep Parhar 	cll = &fl->cll_def;	/* default layout */
39034d6db4e0SNavdeep Parhar 	swz = &sc->sge.sw_zone_info[cll->zidx];
39044d6db4e0SNavdeep Parhar 
39054d6db4e0SNavdeep Parhar 	while (n > 0) {
390654e4ee71SNavdeep Parhar 
390754e4ee71SNavdeep Parhar 		if (sd->cl != NULL) {
390854e4ee71SNavdeep Parhar 
3909c3fb7725SNavdeep Parhar 			if (sd->nmbuf == 0) {
391038035ed6SNavdeep Parhar 				/*
391138035ed6SNavdeep Parhar 				 * Fast recycle without involving any atomics on
391238035ed6SNavdeep Parhar 				 * the cluster's metadata (if the cluster has
391338035ed6SNavdeep Parhar 				 * metadata).  This happens when all frames
391438035ed6SNavdeep Parhar 				 * received in the cluster were small enough to
391538035ed6SNavdeep Parhar 				 * fit within a single mbuf each.
391638035ed6SNavdeep Parhar 				 */
391738035ed6SNavdeep Parhar 				fl->cl_fast_recycled++;
3918ccc69b2fSNavdeep Parhar #ifdef INVARIANTS
3919ccc69b2fSNavdeep Parhar 				clm = cl_metadata(sc, fl, &sd->cll, sd->cl);
3920ccc69b2fSNavdeep Parhar 				if (clm != NULL)
3921ccc69b2fSNavdeep Parhar 					MPASS(clm->refcount == 1);
3922ccc69b2fSNavdeep Parhar #endif
392338035ed6SNavdeep Parhar 				goto recycled_fast;
392438035ed6SNavdeep Parhar 			}
392554e4ee71SNavdeep Parhar 
392638035ed6SNavdeep Parhar 			/*
392738035ed6SNavdeep Parhar 			 * Cluster is guaranteed to have metadata.  Clusters
392838035ed6SNavdeep Parhar 			 * without metadata always take the fast recycle path
392938035ed6SNavdeep Parhar 			 * when they're recycled.
393038035ed6SNavdeep Parhar 			 */
393138035ed6SNavdeep Parhar 			clm = cl_metadata(sc, fl, &sd->cll, sd->cl);
393238035ed6SNavdeep Parhar 			MPASS(clm != NULL);
39331458bff9SNavdeep Parhar 
393438035ed6SNavdeep Parhar 			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
393538035ed6SNavdeep Parhar 				fl->cl_recycled++;
393682eff304SNavdeep Parhar 				counter_u64_add(extfree_rels, 1);
393754e4ee71SNavdeep Parhar 				goto recycled;
393854e4ee71SNavdeep Parhar 			}
39391458bff9SNavdeep Parhar 			sd->cl = NULL;	/* gave up my reference */
39401458bff9SNavdeep Parhar 		}
394138035ed6SNavdeep Parhar 		MPASS(sd->cl == NULL);
394238035ed6SNavdeep Parhar alloc:
394338035ed6SNavdeep Parhar 		cl = uma_zalloc(swz->zone, M_NOWAIT);
394438035ed6SNavdeep Parhar 		if (__predict_false(cl == NULL)) {
394538035ed6SNavdeep Parhar 			if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 ||
394638035ed6SNavdeep Parhar 			    fl->cll_def.zidx == fl->cll_alt.zidx)
394754e4ee71SNavdeep Parhar 				break;
394854e4ee71SNavdeep Parhar 
394938035ed6SNavdeep Parhar 			/* fall back to the safe zone */
395038035ed6SNavdeep Parhar 			cll = &fl->cll_alt;
395138035ed6SNavdeep Parhar 			swz = &sc->sge.sw_zone_info[cll->zidx];
395238035ed6SNavdeep Parhar 			goto alloc;
395354e4ee71SNavdeep Parhar 		}
395438035ed6SNavdeep Parhar 		fl->cl_allocated++;
39554d6db4e0SNavdeep Parhar 		n--;
395654e4ee71SNavdeep Parhar 
395738035ed6SNavdeep Parhar 		pa = pmap_kextract((vm_offset_t)cl);
395838035ed6SNavdeep Parhar 		pa += cll->region1;
395954e4ee71SNavdeep Parhar 		sd->cl = cl;
396038035ed6SNavdeep Parhar 		sd->cll = *cll;
396138035ed6SNavdeep Parhar 		*d = htobe64(pa | cll->hwidx);
396238035ed6SNavdeep Parhar 		clm = cl_metadata(sc, fl, cll, cl);
396338035ed6SNavdeep Parhar 		if (clm != NULL) {
39647d29df59SNavdeep Parhar recycled:
396538035ed6SNavdeep Parhar #ifdef INVARIANTS
396638035ed6SNavdeep Parhar 			clm->sd = sd;
396738035ed6SNavdeep Parhar #endif
396838035ed6SNavdeep Parhar 			clm->refcount = 1;
396938035ed6SNavdeep Parhar 		}
3970c3fb7725SNavdeep Parhar 		sd->nmbuf = 0;
397138035ed6SNavdeep Parhar recycled_fast:
397238035ed6SNavdeep Parhar 		d++;
397354e4ee71SNavdeep Parhar 		sd++;
39744d6db4e0SNavdeep Parhar 		if (__predict_false(++fl->pidx % 8 == 0)) {
39754d6db4e0SNavdeep Parhar 			uint16_t pidx = fl->pidx / 8;
39764d6db4e0SNavdeep Parhar 
39774d6db4e0SNavdeep Parhar 			if (__predict_false(pidx == fl->sidx)) {
397854e4ee71SNavdeep Parhar 				fl->pidx = 0;
39794d6db4e0SNavdeep Parhar 				pidx = 0;
398054e4ee71SNavdeep Parhar 				sd = fl->sdesc;
398154e4ee71SNavdeep Parhar 				d = fl->desc;
398254e4ee71SNavdeep Parhar 			}
39834d6db4e0SNavdeep Parhar 			if (pidx == max_pidx)
39844d6db4e0SNavdeep Parhar 				break;
39854d6db4e0SNavdeep Parhar 
39864d6db4e0SNavdeep Parhar 			if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4)
39874d6db4e0SNavdeep Parhar 				ring_fl_db(sc, fl);
39884d6db4e0SNavdeep Parhar 		}
398954e4ee71SNavdeep Parhar 	}
3990fb12416cSNavdeep Parhar 
39914d6db4e0SNavdeep Parhar 	if (fl->pidx / 8 != fl->dbidx)
3992fb12416cSNavdeep Parhar 		ring_fl_db(sc, fl);
3993733b9277SNavdeep Parhar 
3994733b9277SNavdeep Parhar 	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
3995733b9277SNavdeep Parhar }
3996733b9277SNavdeep Parhar 
3997733b9277SNavdeep Parhar /*
3998733b9277SNavdeep Parhar  * Attempt to refill all starving freelists.
3999733b9277SNavdeep Parhar  */
4000733b9277SNavdeep Parhar static void
4001733b9277SNavdeep Parhar refill_sfl(void *arg)
4002733b9277SNavdeep Parhar {
4003733b9277SNavdeep Parhar 	struct adapter *sc = arg;
4004733b9277SNavdeep Parhar 	struct sge_fl *fl, *fl_temp;
4005733b9277SNavdeep Parhar 
4006fe2ebb76SJohn Baldwin 	mtx_assert(&sc->sfl_lock, MA_OWNED);
4007733b9277SNavdeep Parhar 	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
4008733b9277SNavdeep Parhar 		FL_LOCK(fl);
4009733b9277SNavdeep Parhar 		refill_fl(sc, fl, 64);
4010733b9277SNavdeep Parhar 		if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
4011733b9277SNavdeep Parhar 			TAILQ_REMOVE(&sc->sfl, fl, link);
4012733b9277SNavdeep Parhar 			fl->flags &= ~FL_STARVING;
4013733b9277SNavdeep Parhar 		}
4014733b9277SNavdeep Parhar 		FL_UNLOCK(fl);
4015733b9277SNavdeep Parhar 	}
4016733b9277SNavdeep Parhar 
4017733b9277SNavdeep Parhar 	if (!TAILQ_EMPTY(&sc->sfl))
4018733b9277SNavdeep Parhar 		callout_schedule(&sc->sfl_callout, hz / 5);
401954e4ee71SNavdeep Parhar }
402054e4ee71SNavdeep Parhar 
402154e4ee71SNavdeep Parhar static int
402254e4ee71SNavdeep Parhar alloc_fl_sdesc(struct sge_fl *fl)
402354e4ee71SNavdeep Parhar {
402454e4ee71SNavdeep Parhar 
40254d6db4e0SNavdeep Parhar 	fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE,
402654e4ee71SNavdeep Parhar 	    M_ZERO | M_WAITOK);
402754e4ee71SNavdeep Parhar 
402854e4ee71SNavdeep Parhar 	return (0);
402954e4ee71SNavdeep Parhar }
403054e4ee71SNavdeep Parhar 
403154e4ee71SNavdeep Parhar static void
40321458bff9SNavdeep Parhar free_fl_sdesc(struct adapter *sc, struct sge_fl *fl)
403354e4ee71SNavdeep Parhar {
403454e4ee71SNavdeep Parhar 	struct fl_sdesc *sd;
403538035ed6SNavdeep Parhar 	struct cluster_metadata *clm;
403638035ed6SNavdeep Parhar 	struct cluster_layout *cll;
403754e4ee71SNavdeep Parhar 	int i;
403854e4ee71SNavdeep Parhar 
403954e4ee71SNavdeep Parhar 	sd = fl->sdesc;
40404d6db4e0SNavdeep Parhar 	for (i = 0; i < fl->sidx * 8; i++, sd++) {
404138035ed6SNavdeep Parhar 		if (sd->cl == NULL)
404238035ed6SNavdeep Parhar 			continue;
404354e4ee71SNavdeep Parhar 
404438035ed6SNavdeep Parhar 		cll = &sd->cll;
404538035ed6SNavdeep Parhar 		clm = cl_metadata(sc, fl, cll, sd->cl);
404682eff304SNavdeep Parhar 		if (sd->nmbuf == 0)
404738035ed6SNavdeep Parhar 			uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
404882eff304SNavdeep Parhar 		else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) {
404982eff304SNavdeep Parhar 			uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
405082eff304SNavdeep Parhar 			counter_u64_add(extfree_rels, 1);
405154e4ee71SNavdeep Parhar 		}
405238035ed6SNavdeep Parhar 		sd->cl = NULL;
405354e4ee71SNavdeep Parhar 	}
405454e4ee71SNavdeep Parhar 
405554e4ee71SNavdeep Parhar 	free(fl->sdesc, M_CXGBE);
405654e4ee71SNavdeep Parhar 	fl->sdesc = NULL;
405754e4ee71SNavdeep Parhar }
405854e4ee71SNavdeep Parhar 
40597951040fSNavdeep Parhar static inline void
40607951040fSNavdeep Parhar get_pkt_gl(struct mbuf *m, struct sglist *gl)
406154e4ee71SNavdeep Parhar {
40627951040fSNavdeep Parhar 	int rc;
406354e4ee71SNavdeep Parhar 
40647951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
406554e4ee71SNavdeep Parhar 
40667951040fSNavdeep Parhar 	sglist_reset(gl);
40677951040fSNavdeep Parhar 	rc = sglist_append_mbuf(gl, m);
40687951040fSNavdeep Parhar 	if (__predict_false(rc != 0)) {
40697951040fSNavdeep Parhar 		panic("%s: mbuf %p (%d segs) was vetted earlier but now fails "
40707951040fSNavdeep Parhar 		    "with %d.", __func__, m, mbuf_nsegs(m), rc);
407154e4ee71SNavdeep Parhar 	}
407254e4ee71SNavdeep Parhar 
40737951040fSNavdeep Parhar 	KASSERT(gl->sg_nseg == mbuf_nsegs(m),
40747951040fSNavdeep Parhar 	    ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m,
40757951040fSNavdeep Parhar 	    mbuf_nsegs(m), gl->sg_nseg));
40767951040fSNavdeep Parhar 	KASSERT(gl->sg_nseg > 0 &&
40777951040fSNavdeep Parhar 	    gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS),
40787951040fSNavdeep Parhar 	    ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__,
40797951040fSNavdeep Parhar 		gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS));
408054e4ee71SNavdeep Parhar }
408154e4ee71SNavdeep Parhar 
408254e4ee71SNavdeep Parhar /*
40837951040fSNavdeep Parhar  * len16 for a txpkt WR with a GL.  Includes the firmware work request header.
408454e4ee71SNavdeep Parhar  */
40857951040fSNavdeep Parhar static inline u_int
40867951040fSNavdeep Parhar txpkt_len16(u_int nsegs, u_int tso)
40877951040fSNavdeep Parhar {
40887951040fSNavdeep Parhar 	u_int n;
40897951040fSNavdeep Parhar 
40907951040fSNavdeep Parhar 	MPASS(nsegs > 0);
40917951040fSNavdeep Parhar 
40927951040fSNavdeep Parhar 	nsegs--; /* first segment is part of ulptx_sgl */
40937951040fSNavdeep Parhar 	n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) +
40947951040fSNavdeep Parhar 	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
40957951040fSNavdeep Parhar 	if (tso)
40967951040fSNavdeep Parhar 		n += sizeof(struct cpl_tx_pkt_lso_core);
40977951040fSNavdeep Parhar 
40987951040fSNavdeep Parhar 	return (howmany(n, 16));
40997951040fSNavdeep Parhar }
410054e4ee71SNavdeep Parhar 
410154e4ee71SNavdeep Parhar /*
41026af45170SJohn Baldwin  * len16 for a txpkt_vm WR with a GL.  Includes the firmware work
41036af45170SJohn Baldwin  * request header.
41046af45170SJohn Baldwin  */
41056af45170SJohn Baldwin static inline u_int
41066af45170SJohn Baldwin txpkt_vm_len16(u_int nsegs, u_int tso)
41076af45170SJohn Baldwin {
41086af45170SJohn Baldwin 	u_int n;
41096af45170SJohn Baldwin 
41106af45170SJohn Baldwin 	MPASS(nsegs > 0);
41116af45170SJohn Baldwin 
41126af45170SJohn Baldwin 	nsegs--; /* first segment is part of ulptx_sgl */
41136af45170SJohn Baldwin 	n = sizeof(struct fw_eth_tx_pkt_vm_wr) +
41146af45170SJohn Baldwin 	    sizeof(struct cpl_tx_pkt_core) +
41156af45170SJohn Baldwin 	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
41166af45170SJohn Baldwin 	if (tso)
41176af45170SJohn Baldwin 		n += sizeof(struct cpl_tx_pkt_lso_core);
41186af45170SJohn Baldwin 
41196af45170SJohn Baldwin 	return (howmany(n, 16));
41206af45170SJohn Baldwin }
41216af45170SJohn Baldwin 
41226af45170SJohn Baldwin /*
41237951040fSNavdeep Parhar  * len16 for a txpkts type 0 WR with a GL.  Does not include the firmware work
41247951040fSNavdeep Parhar  * request header.
41257951040fSNavdeep Parhar  */
41267951040fSNavdeep Parhar static inline u_int
41277951040fSNavdeep Parhar txpkts0_len16(u_int nsegs)
41287951040fSNavdeep Parhar {
41297951040fSNavdeep Parhar 	u_int n;
41307951040fSNavdeep Parhar 
41317951040fSNavdeep Parhar 	MPASS(nsegs > 0);
41327951040fSNavdeep Parhar 
41337951040fSNavdeep Parhar 	nsegs--; /* first segment is part of ulptx_sgl */
41347951040fSNavdeep Parhar 	n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) +
41357951040fSNavdeep Parhar 	    sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) +
41367951040fSNavdeep Parhar 	    8 * ((3 * nsegs) / 2 + (nsegs & 1));
41377951040fSNavdeep Parhar 
41387951040fSNavdeep Parhar 	return (howmany(n, 16));
41397951040fSNavdeep Parhar }
41407951040fSNavdeep Parhar 
41417951040fSNavdeep Parhar /*
41427951040fSNavdeep Parhar  * len16 for a txpkts type 1 WR with a GL.  Does not include the firmware work
41437951040fSNavdeep Parhar  * request header.
41447951040fSNavdeep Parhar  */
41457951040fSNavdeep Parhar static inline u_int
41467951040fSNavdeep Parhar txpkts1_len16(void)
41477951040fSNavdeep Parhar {
41487951040fSNavdeep Parhar 	u_int n;
41497951040fSNavdeep Parhar 
41507951040fSNavdeep Parhar 	n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl);
41517951040fSNavdeep Parhar 
41527951040fSNavdeep Parhar 	return (howmany(n, 16));
41537951040fSNavdeep Parhar }
41547951040fSNavdeep Parhar 
41557951040fSNavdeep Parhar static inline u_int
41567951040fSNavdeep Parhar imm_payload(u_int ndesc)
41577951040fSNavdeep Parhar {
41587951040fSNavdeep Parhar 	u_int n;
41597951040fSNavdeep Parhar 
41607951040fSNavdeep Parhar 	n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) -
41617951040fSNavdeep Parhar 	    sizeof(struct cpl_tx_pkt_core);
41627951040fSNavdeep Parhar 
41637951040fSNavdeep Parhar 	return (n);
41647951040fSNavdeep Parhar }
41657951040fSNavdeep Parhar 
/*
 * Write a VM txpkt WR for this packet to the hardware descriptors, update the
 * software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * Unlike the plain txpkt WR, this WR carries a copy of the packet's Ethernet
 * header inside the work request itself (presumably so the firmware can
 * classify/switch the packet between virtual interfaces -- see the
 * m_copydata below).
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq,
    struct fw_eth_tx_pkt_vm_wr *wr, struct mbuf *m0, u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;	/* used in many unrelated places */
	uint64_t ctrl1;
	int csum_type, len16, ndesc, pktlen, nsegs;
	caddr_t dst;

	TXQ_LOCK_ASSERT_OWNED(txq);
	M_ASSERTPKTHDR(m0);
	MPASS(available > 0 && available < eq->sidx);

	/* len16/nsegs were computed and cached when the mbuf was vetted. */
	len16 = mbuf_len16(m0);
	nsegs = mbuf_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	/* ctrl = immediate-data length carried in the WR header. */
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	ndesc = howmany(len16, EQ_ESIZE / 16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	MPASS(wr == (void *)&eq->desc[eq->pidx]);
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3[0] = 0;
	wr->r3[1] = 0;

	/*
	 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci.
	 * vlantci is ignored unless the ethtype is 0x8100, so it's
	 * simpler to always copy it rather than making it
	 * conditional.  Also, it seems that we do not have to set
	 * vlantci or fake the ethtype when doing VLAN tag insertion.
	 */
	m_copydata(m0, 0, sizeof(struct ether_header) + 2, wr->ethmacdst);

	/* csum_type < 0 means no L4 checksum offload (see ctrl1 below). */
	csum_type = -1;
	if (needs_tso(m0)) {
		/* The LSO CPL precedes the TX_PKT CPL in the descriptor. */
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
		    m0->m_pkthdr.l4hlen > 0,
		    ("%s: mbuf %p needs TSO but missing header lengths",
			__func__, m0));

		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
		    F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2)
		    | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
		if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header))
			ctrl |= V_LSO_ETHHDR_LEN(1);
		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
			ctrl |= F_LSO_IPV6;

		lso->lso_ctrl = htobe32(ctrl);
		lso->ipid_ofst = htobe16(0);
		lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
		lso->seqno_offset = htobe32(0);
		lso->len = htobe32(pktlen);

		/* TSO implies TCP checksum offload; pick v4/v6 by l3hlen. */
		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
			csum_type = TX_CSUM_TCPIP6;
		else
			csum_type = TX_CSUM_TCPIP;

		cpl = (void *)(lso + 1);

		txq->tso_wrs++;
	} else {
		/* Map the requested csum_flags to a hardware csum type. */
		if (m0->m_pkthdr.csum_flags & CSUM_IP_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (m0->m_pkthdr.csum_flags & CSUM_IP_UDP)
			csum_type = TX_CSUM_UDPIP;
		else if (m0->m_pkthdr.csum_flags & CSUM_IP6_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (m0->m_pkthdr.csum_flags & CSUM_IP6_UDP)
			csum_type = TX_CSUM_UDPIP6;
#if defined(INET)
		else if (m0->m_pkthdr.csum_flags & CSUM_IP) {
			/*
			 * XXX: The firmware appears to stomp on the
			 * fragment/flags field of the IP header when
			 * using TX_CSUM_IP.  Fall back to doing
			 * software checksums.
			 */
			u_short *sump;
			struct mbuf *m;
			int offset;

			m = m0;
			offset = 0;
			/* Locate the ip_sum field in the mbuf chain. */
			sump = m_advance(&m, &offset, m0->m_pkthdr.l2hlen +
			    offsetof(struct ip, ip_sum));
			*sump = in_cksum_skip(m0, m0->m_pkthdr.l2hlen +
			    m0->m_pkthdr.l3hlen, m0->m_pkthdr.l2hlen);
			/* Checksum is now in the packet; clear the request. */
			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
		}
#endif

		cpl = (void *)(wr + 1);
	}

	/* Checksum offload */
	ctrl1 = 0;
	if (needs_l3_csum(m0) == 0)
		ctrl1 |= F_TXPKT_IPCSUM_DIS;
	if (csum_type >= 0) {
		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0,
	    ("%s: mbuf %p needs checksum offload but missing header lengths",
			__func__, m0));

		/* T6 moved the Ethernet-header-length field in ctrl1. */
		if (chip_id(sc) <= CHELSIO_T5) {
			ctrl1 |= V_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen -
			    ETHER_HDR_LEN);
		} else {
			ctrl1 |= V_T6_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen -
			    ETHER_HDR_LEN);
		}
		ctrl1 |= V_TXPKT_IPHDR_LEN(m0->m_pkthdr.l3hlen);
		ctrl1 |= V_TXPKT_CSUM_TYPE(csum_type);
	} else
		ctrl1 |= F_TXPKT_L4CSUM_DIS;
	if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
		txq->txcsum++;	/* some hardware assistance provided */

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
		txq->vlan_insertion++;
	}

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* SGL */
	dst = (void *)(cpl + 1);

	/*
	 * A packet using TSO will use up an entire descriptor for the
	 * firmware work request header, LSO CPL, and TX_PKT_XT CPL.
	 * If this descriptor is the last descriptor in the ring, wrap
	 * around to the front of the ring explicitly for the start of
	 * the sgl.
	 */
	if (dst == (void *)&eq->desc[eq->sidx]) {
		dst = (void *)&eq->desc[0];
		write_gl_to_txd(txq, m0, &dst, 0);
	} else
		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
	txq->sgl_wrs++;

	txq->txpkt_wrs++;

	/* Record the mbuf in the software descriptor for this pidx. */
	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}
43446af45170SJohn Baldwin 
43456af45170SJohn Baldwin /*
43467951040fSNavdeep Parhar  * Write a txpkt WR for this packet to the hardware descriptors, update the
43477951040fSNavdeep Parhar  * software descriptor, and advance the pidx.  It is guaranteed that enough
43487951040fSNavdeep Parhar  * descriptors are available.
434954e4ee71SNavdeep Parhar  *
43507951040fSNavdeep Parhar  * The return value is the # of hardware descriptors used.
435154e4ee71SNavdeep Parhar  */
43527951040fSNavdeep Parhar static u_int
43537951040fSNavdeep Parhar write_txpkt_wr(struct sge_txq *txq, struct fw_eth_tx_pkt_wr *wr,
43547951040fSNavdeep Parhar     struct mbuf *m0, u_int available)
435554e4ee71SNavdeep Parhar {
435654e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
43577951040fSNavdeep Parhar 	struct tx_sdesc *txsd;
435854e4ee71SNavdeep Parhar 	struct cpl_tx_pkt_core *cpl;
435954e4ee71SNavdeep Parhar 	uint32_t ctrl;	/* used in many unrelated places */
436054e4ee71SNavdeep Parhar 	uint64_t ctrl1;
43617951040fSNavdeep Parhar 	int len16, ndesc, pktlen, nsegs;
436254e4ee71SNavdeep Parhar 	caddr_t dst;
436354e4ee71SNavdeep Parhar 
436454e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
43657951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m0);
43667951040fSNavdeep Parhar 	MPASS(available > 0 && available < eq->sidx);
436754e4ee71SNavdeep Parhar 
43687951040fSNavdeep Parhar 	len16 = mbuf_len16(m0);
43697951040fSNavdeep Parhar 	nsegs = mbuf_nsegs(m0);
43707951040fSNavdeep Parhar 	pktlen = m0->m_pkthdr.len;
437154e4ee71SNavdeep Parhar 	ctrl = sizeof(struct cpl_tx_pkt_core);
43727951040fSNavdeep Parhar 	if (needs_tso(m0))
43732a5f6b0eSNavdeep Parhar 		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
43747951040fSNavdeep Parhar 	else if (pktlen <= imm_payload(2) && available >= 2) {
43757951040fSNavdeep Parhar 		/* Immediate data.  Recalculate len16 and set nsegs to 0. */
4376ecb79ca4SNavdeep Parhar 		ctrl += pktlen;
43777951040fSNavdeep Parhar 		len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) +
43787951040fSNavdeep Parhar 		    sizeof(struct cpl_tx_pkt_core) + pktlen, 16);
43797951040fSNavdeep Parhar 		nsegs = 0;
438054e4ee71SNavdeep Parhar 	}
43817951040fSNavdeep Parhar 	ndesc = howmany(len16, EQ_ESIZE / 16);
43827951040fSNavdeep Parhar 	MPASS(ndesc <= available);
438354e4ee71SNavdeep Parhar 
438454e4ee71SNavdeep Parhar 	/* Firmware work request header */
43857951040fSNavdeep Parhar 	MPASS(wr == (void *)&eq->desc[eq->pidx]);
438654e4ee71SNavdeep Parhar 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
4387733b9277SNavdeep Parhar 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
43886b49a4ecSNavdeep Parhar 
43897951040fSNavdeep Parhar 	ctrl = V_FW_WR_LEN16(len16);
439054e4ee71SNavdeep Parhar 	wr->equiq_to_len16 = htobe32(ctrl);
439154e4ee71SNavdeep Parhar 	wr->r3 = 0;
439254e4ee71SNavdeep Parhar 
43937951040fSNavdeep Parhar 	if (needs_tso(m0)) {
43942a5f6b0eSNavdeep Parhar 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
43957951040fSNavdeep Parhar 
43967951040fSNavdeep Parhar 		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
43977951040fSNavdeep Parhar 		    m0->m_pkthdr.l4hlen > 0,
43987951040fSNavdeep Parhar 		    ("%s: mbuf %p needs TSO but missing header lengths",
43997951040fSNavdeep Parhar 			__func__, m0));
440054e4ee71SNavdeep Parhar 
440154e4ee71SNavdeep Parhar 		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
44027951040fSNavdeep Parhar 		    F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2)
44037951040fSNavdeep Parhar 		    | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
44047951040fSNavdeep Parhar 		if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header))
440554e4ee71SNavdeep Parhar 			ctrl |= V_LSO_ETHHDR_LEN(1);
44067951040fSNavdeep Parhar 		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
4407a1ea9a82SNavdeep Parhar 			ctrl |= F_LSO_IPV6;
440854e4ee71SNavdeep Parhar 
440954e4ee71SNavdeep Parhar 		lso->lso_ctrl = htobe32(ctrl);
441054e4ee71SNavdeep Parhar 		lso->ipid_ofst = htobe16(0);
44117951040fSNavdeep Parhar 		lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
441254e4ee71SNavdeep Parhar 		lso->seqno_offset = htobe32(0);
4413ecb79ca4SNavdeep Parhar 		lso->len = htobe32(pktlen);
441454e4ee71SNavdeep Parhar 
441554e4ee71SNavdeep Parhar 		cpl = (void *)(lso + 1);
441654e4ee71SNavdeep Parhar 
441754e4ee71SNavdeep Parhar 		txq->tso_wrs++;
441854e4ee71SNavdeep Parhar 	} else
441954e4ee71SNavdeep Parhar 		cpl = (void *)(wr + 1);
442054e4ee71SNavdeep Parhar 
442154e4ee71SNavdeep Parhar 	/* Checksum offload */
442254e4ee71SNavdeep Parhar 	ctrl1 = 0;
44237951040fSNavdeep Parhar 	if (needs_l3_csum(m0) == 0)
442454e4ee71SNavdeep Parhar 		ctrl1 |= F_TXPKT_IPCSUM_DIS;
44257951040fSNavdeep Parhar 	if (needs_l4_csum(m0) == 0)
442654e4ee71SNavdeep Parhar 		ctrl1 |= F_TXPKT_L4CSUM_DIS;
44277951040fSNavdeep Parhar 	if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
4428b8531380SNavdeep Parhar 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
442954e4ee71SNavdeep Parhar 		txq->txcsum++;	/* some hardware assistance provided */
443054e4ee71SNavdeep Parhar 
443154e4ee71SNavdeep Parhar 	/* VLAN tag insertion */
44327951040fSNavdeep Parhar 	if (needs_vlan_insertion(m0)) {
44337951040fSNavdeep Parhar 		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
443454e4ee71SNavdeep Parhar 		txq->vlan_insertion++;
443554e4ee71SNavdeep Parhar 	}
443654e4ee71SNavdeep Parhar 
443754e4ee71SNavdeep Parhar 	/* CPL header */
44387951040fSNavdeep Parhar 	cpl->ctrl0 = txq->cpl_ctrl0;
443954e4ee71SNavdeep Parhar 	cpl->pack = 0;
4440ecb79ca4SNavdeep Parhar 	cpl->len = htobe16(pktlen);
444154e4ee71SNavdeep Parhar 	cpl->ctrl1 = htobe64(ctrl1);
444254e4ee71SNavdeep Parhar 
444354e4ee71SNavdeep Parhar 	/* SGL */
444454e4ee71SNavdeep Parhar 	dst = (void *)(cpl + 1);
44457951040fSNavdeep Parhar 	if (nsegs > 0) {
44467951040fSNavdeep Parhar 
44477951040fSNavdeep Parhar 		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
444854e4ee71SNavdeep Parhar 		txq->sgl_wrs++;
444954e4ee71SNavdeep Parhar 	} else {
44507951040fSNavdeep Parhar 		struct mbuf *m;
44517951040fSNavdeep Parhar 
44527951040fSNavdeep Parhar 		for (m = m0; m != NULL; m = m->m_next) {
445354e4ee71SNavdeep Parhar 			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
4454ecb79ca4SNavdeep Parhar #ifdef INVARIANTS
4455ecb79ca4SNavdeep Parhar 			pktlen -= m->m_len;
4456ecb79ca4SNavdeep Parhar #endif
445754e4ee71SNavdeep Parhar 		}
4458ecb79ca4SNavdeep Parhar #ifdef INVARIANTS
4459ecb79ca4SNavdeep Parhar 		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
4460ecb79ca4SNavdeep Parhar #endif
44617951040fSNavdeep Parhar 		txq->imm_wrs++;
446254e4ee71SNavdeep Parhar 	}
446354e4ee71SNavdeep Parhar 
446454e4ee71SNavdeep Parhar 	txq->txpkt_wrs++;
446554e4ee71SNavdeep Parhar 
4466f7dfe243SNavdeep Parhar 	txsd = &txq->sdesc[eq->pidx];
44677951040fSNavdeep Parhar 	txsd->m = m0;
446854e4ee71SNavdeep Parhar 	txsd->desc_used = ndesc;
446954e4ee71SNavdeep Parhar 
44707951040fSNavdeep Parhar 	return (ndesc);
447154e4ee71SNavdeep Parhar }
447254e4ee71SNavdeep Parhar 
44737951040fSNavdeep Parhar static int
44747951040fSNavdeep Parhar try_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available)
447554e4ee71SNavdeep Parhar {
44767951040fSNavdeep Parhar 	u_int needed, nsegs1, nsegs2, l1, l2;
44777951040fSNavdeep Parhar 
44787951040fSNavdeep Parhar 	if (cannot_use_txpkts(m) || cannot_use_txpkts(n))
44797951040fSNavdeep Parhar 		return (1);
44807951040fSNavdeep Parhar 
44817951040fSNavdeep Parhar 	nsegs1 = mbuf_nsegs(m);
44827951040fSNavdeep Parhar 	nsegs2 = mbuf_nsegs(n);
44837951040fSNavdeep Parhar 	if (nsegs1 + nsegs2 == 2) {
44847951040fSNavdeep Parhar 		txp->wr_type = 1;
44857951040fSNavdeep Parhar 		l1 = l2 = txpkts1_len16();
44867951040fSNavdeep Parhar 	} else {
44877951040fSNavdeep Parhar 		txp->wr_type = 0;
44887951040fSNavdeep Parhar 		l1 = txpkts0_len16(nsegs1);
44897951040fSNavdeep Parhar 		l2 = txpkts0_len16(nsegs2);
44907951040fSNavdeep Parhar 	}
44917951040fSNavdeep Parhar 	txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2;
44927951040fSNavdeep Parhar 	needed = howmany(txp->len16, EQ_ESIZE / 16);
44937951040fSNavdeep Parhar 	if (needed > SGE_MAX_WR_NDESC || needed > available)
44947951040fSNavdeep Parhar 		return (1);
44957951040fSNavdeep Parhar 
44967951040fSNavdeep Parhar 	txp->plen = m->m_pkthdr.len + n->m_pkthdr.len;
44977951040fSNavdeep Parhar 	if (txp->plen > 65535)
44987951040fSNavdeep Parhar 		return (1);
44997951040fSNavdeep Parhar 
45007951040fSNavdeep Parhar 	txp->npkt = 2;
45017951040fSNavdeep Parhar 	set_mbuf_len16(m, l1);
45027951040fSNavdeep Parhar 	set_mbuf_len16(n, l2);
45037951040fSNavdeep Parhar 
45047951040fSNavdeep Parhar 	return (0);
45057951040fSNavdeep Parhar }
45067951040fSNavdeep Parhar 
45077951040fSNavdeep Parhar static int
45087951040fSNavdeep Parhar add_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available)
45097951040fSNavdeep Parhar {
45107951040fSNavdeep Parhar 	u_int plen, len16, needed, nsegs;
45117951040fSNavdeep Parhar 
45127951040fSNavdeep Parhar 	MPASS(txp->wr_type == 0 || txp->wr_type == 1);
45137951040fSNavdeep Parhar 
45147951040fSNavdeep Parhar 	nsegs = mbuf_nsegs(m);
45157951040fSNavdeep Parhar 	if (needs_tso(m) || (txp->wr_type == 1 && nsegs != 1))
45167951040fSNavdeep Parhar 		return (1);
45177951040fSNavdeep Parhar 
45187951040fSNavdeep Parhar 	plen = txp->plen + m->m_pkthdr.len;
45197951040fSNavdeep Parhar 	if (plen > 65535)
45207951040fSNavdeep Parhar 		return (1);
45217951040fSNavdeep Parhar 
45227951040fSNavdeep Parhar 	if (txp->wr_type == 0)
45237951040fSNavdeep Parhar 		len16 = txpkts0_len16(nsegs);
45247951040fSNavdeep Parhar 	else
45257951040fSNavdeep Parhar 		len16 = txpkts1_len16();
45267951040fSNavdeep Parhar 	needed = howmany(txp->len16 + len16, EQ_ESIZE / 16);
45277951040fSNavdeep Parhar 	if (needed > SGE_MAX_WR_NDESC || needed > available)
45287951040fSNavdeep Parhar 		return (1);
45297951040fSNavdeep Parhar 
45307951040fSNavdeep Parhar 	txp->npkt++;
45317951040fSNavdeep Parhar 	txp->plen = plen;
45327951040fSNavdeep Parhar 	txp->len16 += len16;
45337951040fSNavdeep Parhar 	set_mbuf_len16(m, len16);
45347951040fSNavdeep Parhar 
45357951040fSNavdeep Parhar 	return (0);
45367951040fSNavdeep Parhar }
45377951040fSNavdeep Parhar 
/*
 * Write a txpkts WR for the packets in txp to the hardware descriptors, update
 * the software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * m0 is the head of a chain of packets linked via m_nextpkt.  For wr_type 0
 * each packet is wrapped in a ulp_txpkt + ulptx_idata header; for wr_type 1
 * the CPLs are laid out back to back.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkts_wr(struct sge_txq *txq, struct fw_eth_tx_pkts_wr *wr,
    struct mbuf *m0, const struct txpkts *txp, u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;
	uint64_t ctrl1;
	int ndesc, checkwrap;
	struct mbuf *m;
	void *flitp;

	TXQ_LOCK_ASSERT_OWNED(txq);
	MPASS(txp->npkt > 0);
	MPASS(txp->plen < 65536);
	MPASS(m0 != NULL);
	MPASS(m0->m_nextpkt != NULL);
	MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));
	MPASS(available > 0 && available < eq->sidx);

	ndesc = howmany(txp->len16, EQ_ESIZE / 16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	MPASS(wr == (void *)&eq->desc[eq->pidx]);
	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
	ctrl = V_FW_WR_LEN16(txp->len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->plen = htobe16(txp->plen);
	wr->npkt = txp->npkt;
	wr->r3 = 0;
	wr->type = txp->wr_type;
	flitp = wr + 1;

	/*
	 * At this point we are 16B into a hardware descriptor.  If checkwrap is
	 * set then we know the WR is going to wrap around somewhere.  We'll
	 * check for that at appropriate points.
	 */
	checkwrap = eq->sidx - ndesc < eq->pidx;
	for (m = m0; m != NULL; m = m->m_nextpkt) {
		if (txp->wr_type == 0) {
			struct ulp_txpkt *ulpmc;
			struct ulptx_idata *ulpsc;

			/* ULP master command */
			ulpmc = flitp;
			ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
			    V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid));
			ulpmc->len = htobe32(mbuf_len16(m));

			/* ULP subcommand */
			ulpsc = (void *)(ulpmc + 1);
			ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
			    F_ULP_TX_SC_MORE);
			ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));

			/* The CPL may land exactly on the end of the ring. */
			cpl = (void *)(ulpsc + 1);
			if (checkwrap &&
			    (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx])
				cpl = (void *)&eq->desc[0];
		} else {
			cpl = flitp;
		}

		/* Checksum offload */
		ctrl1 = 0;
		if (needs_l3_csum(m) == 0)
			ctrl1 |= F_TXPKT_IPCSUM_DIS;
		if (needs_l4_csum(m) == 0)
			ctrl1 |= F_TXPKT_L4CSUM_DIS;
		if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
		    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
			txq->txcsum++;	/* some hardware assistance provided */

		/* VLAN tag insertion */
		if (needs_vlan_insertion(m)) {
			ctrl1 |= F_TXPKT_VLAN_VLD |
			    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
			txq->vlan_insertion++;
		}

		/* CPL header */
		cpl->ctrl0 = txq->cpl_ctrl0;
		cpl->pack = 0;
		cpl->len = htobe16(m->m_pkthdr.len);
		cpl->ctrl1 = htobe64(ctrl1);

		/* The SGL may also start exactly at the end of the ring. */
		flitp = cpl + 1;
		if (checkwrap &&
		    (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
			flitp = (void *)&eq->desc[0];

		write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap);

	}

	if (txp->wr_type == 0) {
		txq->txpkts0_pkts += txp->npkt;
		txq->txpkts0_wrs++;
	} else {
		txq->txpkts1_pkts += txp->npkt;
		txq->txpkts1_wrs++;
	}

	/* Record the head of the packet chain in the software descriptor. */
	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}
465654e4ee71SNavdeep Parhar 
/*
 * Write the ulptx_sgl for mbuf m (its gather list is rebuilt into txq->gl)
 * at *to, advancing *to past the SGL.  checkwrap must be non-zero if the SGL
 * might cross the end of the descriptor ring; wrap handling is skipped
 * entirely otherwise.
 *
 * If the SGL ends on an address that is not 16 byte aligned, this function will
 * add a 0 filled flit at the end.
 */
static void
write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap)
{
	struct sge_eq *eq = &txq->eq;
	struct sglist *gl = txq->gl;
	struct sglist_seg *seg;
	__be64 *flitp, *wrap;
	struct ulptx_sgl *usgl;
	int i, nflits, nsegs;

	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);

	get_pkt_gl(m, gl);
	nsegs = gl->sg_nseg;
	MPASS(nsegs > 0);

	/*
	 * 2 flits for the ulptx_sgl header + addr0/len0, then 1.5 flits per
	 * remaining segment (rounded up to a whole flit for an odd count).
	 */
	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
	flitp = (__be64 *)(*to);
	wrap = (__be64 *)(&eq->desc[eq->sidx]);
	seg = &gl->sg_segs[0];
	usgl = (void *)flitp;

	/*
	 * We start at a 16 byte boundary somewhere inside the tx descriptor
	 * ring, so we're at least 16 bytes away from the status page.  There is
	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
	 */

	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(seg->ss_len);
	usgl->addr0 = htobe64(seg->ss_paddr);
	seg++;

	if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) {

		/* Won't wrap around at all */

		for (i = 0; i < nsegs - 1; i++, seg++) {
			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
		}
		/* Odd segment count: zero the unused trailing len slot. */
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
		flitp += nflits;
	} else {

		/* Will wrap somewhere in the rest of the SGL */

		/* 2 flits already written, write the rest flit by flit */
		flitp = (void *)(usgl + 1);
		for (i = 0; i < nflits - 2; i++) {
			if (flitp == wrap)
				flitp = (void *)eq->desc;
			*flitp++ = get_flit(seg, nsegs - 1, i);
		}
	}

	/* Pad with a zero flit so the SGL ends on a 16 byte boundary. */
	if (nflits & 1) {
		MPASS(((uintptr_t)flitp) & 0xf);
		*flitp++ = 0;
	}

	MPASS((((uintptr_t)flitp) & 0xf) == 0);
	if (__predict_false(flitp == wrap))
		*to = (void *)eq->desc;
	else
		*to = (void *)flitp;
}
473354e4ee71SNavdeep Parhar 
473454e4ee71SNavdeep Parhar static inline void
473554e4ee71SNavdeep Parhar copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
473654e4ee71SNavdeep Parhar {
47377951040fSNavdeep Parhar 
47387951040fSNavdeep Parhar 	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
47397951040fSNavdeep Parhar 	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
47407951040fSNavdeep Parhar 
47417951040fSNavdeep Parhar 	if (__predict_true((uintptr_t)(*to) + len <=
47427951040fSNavdeep Parhar 	    (uintptr_t)&eq->desc[eq->sidx])) {
474354e4ee71SNavdeep Parhar 		bcopy(from, *to, len);
474454e4ee71SNavdeep Parhar 		(*to) += len;
474554e4ee71SNavdeep Parhar 	} else {
47467951040fSNavdeep Parhar 		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
474754e4ee71SNavdeep Parhar 
474854e4ee71SNavdeep Parhar 		bcopy(from, *to, portion);
474954e4ee71SNavdeep Parhar 		from += portion;
475054e4ee71SNavdeep Parhar 		portion = len - portion;	/* remaining */
475154e4ee71SNavdeep Parhar 		bcopy(from, (void *)eq->desc, portion);
475254e4ee71SNavdeep Parhar 		(*to) = (caddr_t)eq->desc + portion;
475354e4ee71SNavdeep Parhar 	}
475454e4ee71SNavdeep Parhar }
475554e4ee71SNavdeep Parhar 
475654e4ee71SNavdeep Parhar static inline void
47577951040fSNavdeep Parhar ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n)
475854e4ee71SNavdeep Parhar {
47597951040fSNavdeep Parhar 	u_int db;
47607951040fSNavdeep Parhar 
47617951040fSNavdeep Parhar 	MPASS(n > 0);
4762d14b0ac1SNavdeep Parhar 
4763d14b0ac1SNavdeep Parhar 	db = eq->doorbells;
47647951040fSNavdeep Parhar 	if (n > 1)
476577ad3c41SNavdeep Parhar 		clrbit(&db, DOORBELL_WCWR);
4766d14b0ac1SNavdeep Parhar 	wmb();
4767d14b0ac1SNavdeep Parhar 
4768d14b0ac1SNavdeep Parhar 	switch (ffs(db) - 1) {
4769d14b0ac1SNavdeep Parhar 	case DOORBELL_UDB:
47707951040fSNavdeep Parhar 		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
47717951040fSNavdeep Parhar 		break;
4772d14b0ac1SNavdeep Parhar 
477377ad3c41SNavdeep Parhar 	case DOORBELL_WCWR: {
4774d14b0ac1SNavdeep Parhar 		volatile uint64_t *dst, *src;
4775d14b0ac1SNavdeep Parhar 		int i;
4776d14b0ac1SNavdeep Parhar 
4777d14b0ac1SNavdeep Parhar 		/*
4778d14b0ac1SNavdeep Parhar 		 * Queues whose 128B doorbell segment fits in the page do not
4779d14b0ac1SNavdeep Parhar 		 * use relative qid (udb_qid is always 0).  Only queues with
478077ad3c41SNavdeep Parhar 		 * doorbell segments can do WCWR.
4781d14b0ac1SNavdeep Parhar 		 */
47827951040fSNavdeep Parhar 		KASSERT(eq->udb_qid == 0 && n == 1,
4783d14b0ac1SNavdeep Parhar 		    ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p",
47847951040fSNavdeep Parhar 		    __func__, eq->doorbells, n, eq->dbidx, eq));
4785d14b0ac1SNavdeep Parhar 
4786d14b0ac1SNavdeep Parhar 		dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET -
4787d14b0ac1SNavdeep Parhar 		    UDBS_DB_OFFSET);
47887951040fSNavdeep Parhar 		i = eq->dbidx;
4789d14b0ac1SNavdeep Parhar 		src = (void *)&eq->desc[i];
4790d14b0ac1SNavdeep Parhar 		while (src != (void *)&eq->desc[i + 1])
4791d14b0ac1SNavdeep Parhar 			*dst++ = *src++;
4792d14b0ac1SNavdeep Parhar 		wmb();
47937951040fSNavdeep Parhar 		break;
4794d14b0ac1SNavdeep Parhar 	}
4795d14b0ac1SNavdeep Parhar 
4796d14b0ac1SNavdeep Parhar 	case DOORBELL_UDBWC:
47977951040fSNavdeep Parhar 		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
4798d14b0ac1SNavdeep Parhar 		wmb();
47997951040fSNavdeep Parhar 		break;
4800d14b0ac1SNavdeep Parhar 
4801d14b0ac1SNavdeep Parhar 	case DOORBELL_KDB:
4802315048f2SJohn Baldwin 		t4_write_reg(sc, sc->sge_kdoorbell_reg,
48037951040fSNavdeep Parhar 		    V_QID(eq->cntxt_id) | V_PIDX(n));
48047951040fSNavdeep Parhar 		break;
480554e4ee71SNavdeep Parhar 	}
480654e4ee71SNavdeep Parhar 
48077951040fSNavdeep Parhar 	IDXINCR(eq->dbidx, n, eq->sidx);
48087951040fSNavdeep Parhar }
48097951040fSNavdeep Parhar 
48107951040fSNavdeep Parhar static inline u_int
48117951040fSNavdeep Parhar reclaimable_tx_desc(struct sge_eq *eq)
481254e4ee71SNavdeep Parhar {
48137951040fSNavdeep Parhar 	uint16_t hw_cidx;
481454e4ee71SNavdeep Parhar 
48157951040fSNavdeep Parhar 	hw_cidx = read_hw_cidx(eq);
48167951040fSNavdeep Parhar 	return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx));
48177951040fSNavdeep Parhar }
481854e4ee71SNavdeep Parhar 
48197951040fSNavdeep Parhar static inline u_int
48207951040fSNavdeep Parhar total_available_tx_desc(struct sge_eq *eq)
48217951040fSNavdeep Parhar {
48227951040fSNavdeep Parhar 	uint16_t hw_cidx, pidx;
48237951040fSNavdeep Parhar 
48247951040fSNavdeep Parhar 	hw_cidx = read_hw_cidx(eq);
48257951040fSNavdeep Parhar 	pidx = eq->pidx;
48267951040fSNavdeep Parhar 
48277951040fSNavdeep Parhar 	if (pidx == hw_cidx)
48287951040fSNavdeep Parhar 		return (eq->sidx - 1);
482954e4ee71SNavdeep Parhar 	else
48307951040fSNavdeep Parhar 		return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1);
48317951040fSNavdeep Parhar }
48327951040fSNavdeep Parhar 
48337951040fSNavdeep Parhar static inline uint16_t
48347951040fSNavdeep Parhar read_hw_cidx(struct sge_eq *eq)
48357951040fSNavdeep Parhar {
48367951040fSNavdeep Parhar 	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
48377951040fSNavdeep Parhar 	uint16_t cidx = spg->cidx;	/* stable snapshot */
48387951040fSNavdeep Parhar 
48397951040fSNavdeep Parhar 	return (be16toh(cidx));
4840e874ff7aSNavdeep Parhar }
484154e4ee71SNavdeep Parhar 
4842e874ff7aSNavdeep Parhar /*
48437951040fSNavdeep Parhar  * Reclaim 'n' descriptors approximately.
4844e874ff7aSNavdeep Parhar  */
48457951040fSNavdeep Parhar static u_int
48467951040fSNavdeep Parhar reclaim_tx_descs(struct sge_txq *txq, u_int n)
4847e874ff7aSNavdeep Parhar {
4848e874ff7aSNavdeep Parhar 	struct tx_sdesc *txsd;
4849f7dfe243SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
48507951040fSNavdeep Parhar 	u_int can_reclaim, reclaimed;
485154e4ee71SNavdeep Parhar 
4852733b9277SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
48537951040fSNavdeep Parhar 	MPASS(n > 0);
4854e874ff7aSNavdeep Parhar 
48557951040fSNavdeep Parhar 	reclaimed = 0;
48567951040fSNavdeep Parhar 	can_reclaim = reclaimable_tx_desc(eq);
48577951040fSNavdeep Parhar 	while (can_reclaim && reclaimed < n) {
485854e4ee71SNavdeep Parhar 		int ndesc;
48597951040fSNavdeep Parhar 		struct mbuf *m, *nextpkt;
486054e4ee71SNavdeep Parhar 
4861f7dfe243SNavdeep Parhar 		txsd = &txq->sdesc[eq->cidx];
486254e4ee71SNavdeep Parhar 		ndesc = txsd->desc_used;
486354e4ee71SNavdeep Parhar 
486454e4ee71SNavdeep Parhar 		/* Firmware doesn't return "partial" credits. */
486554e4ee71SNavdeep Parhar 		KASSERT(can_reclaim >= ndesc,
486654e4ee71SNavdeep Parhar 		    ("%s: unexpected number of credits: %d, %d",
486754e4ee71SNavdeep Parhar 		    __func__, can_reclaim, ndesc));
486854e4ee71SNavdeep Parhar 
48697951040fSNavdeep Parhar 		for (m = txsd->m; m != NULL; m = nextpkt) {
48707951040fSNavdeep Parhar 			nextpkt = m->m_nextpkt;
48717951040fSNavdeep Parhar 			m->m_nextpkt = NULL;
48727951040fSNavdeep Parhar 			m_freem(m);
48737951040fSNavdeep Parhar 		}
487454e4ee71SNavdeep Parhar 		reclaimed += ndesc;
487554e4ee71SNavdeep Parhar 		can_reclaim -= ndesc;
48767951040fSNavdeep Parhar 		IDXINCR(eq->cidx, ndesc, eq->sidx);
487754e4ee71SNavdeep Parhar 	}
487854e4ee71SNavdeep Parhar 
487954e4ee71SNavdeep Parhar 	return (reclaimed);
488054e4ee71SNavdeep Parhar }
488154e4ee71SNavdeep Parhar 
488254e4ee71SNavdeep Parhar static void
48837951040fSNavdeep Parhar tx_reclaim(void *arg, int n)
488454e4ee71SNavdeep Parhar {
48857951040fSNavdeep Parhar 	struct sge_txq *txq = arg;
48867951040fSNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
488754e4ee71SNavdeep Parhar 
48887951040fSNavdeep Parhar 	do {
48897951040fSNavdeep Parhar 		if (TXQ_TRYLOCK(txq) == 0)
48907951040fSNavdeep Parhar 			break;
48917951040fSNavdeep Parhar 		n = reclaim_tx_descs(txq, 32);
48927951040fSNavdeep Parhar 		if (eq->cidx == eq->pidx)
48937951040fSNavdeep Parhar 			eq->equeqidx = eq->pidx;
48947951040fSNavdeep Parhar 		TXQ_UNLOCK(txq);
48957951040fSNavdeep Parhar 	} while (n > 0);
489654e4ee71SNavdeep Parhar }
489754e4ee71SNavdeep Parhar 
489854e4ee71SNavdeep Parhar static __be64
48997951040fSNavdeep Parhar get_flit(struct sglist_seg *segs, int nsegs, int idx)
490054e4ee71SNavdeep Parhar {
490154e4ee71SNavdeep Parhar 	int i = (idx / 3) * 2;
490254e4ee71SNavdeep Parhar 
490354e4ee71SNavdeep Parhar 	switch (idx % 3) {
490454e4ee71SNavdeep Parhar 	case 0: {
490554e4ee71SNavdeep Parhar 		__be64 rc;
490654e4ee71SNavdeep Parhar 
49077951040fSNavdeep Parhar 		rc = htobe32(segs[i].ss_len);
490854e4ee71SNavdeep Parhar 		if (i + 1 < nsegs)
49097951040fSNavdeep Parhar 			rc |= (uint64_t)htobe32(segs[i + 1].ss_len) << 32;
491054e4ee71SNavdeep Parhar 
491154e4ee71SNavdeep Parhar 		return (rc);
491254e4ee71SNavdeep Parhar 	}
491354e4ee71SNavdeep Parhar 	case 1:
49147951040fSNavdeep Parhar 		return (htobe64(segs[i].ss_paddr));
491554e4ee71SNavdeep Parhar 	case 2:
49167951040fSNavdeep Parhar 		return (htobe64(segs[i + 1].ss_paddr));
491754e4ee71SNavdeep Parhar 	}
491854e4ee71SNavdeep Parhar 
491954e4ee71SNavdeep Parhar 	return (0);
492054e4ee71SNavdeep Parhar }
492154e4ee71SNavdeep Parhar 
492254e4ee71SNavdeep Parhar static void
492338035ed6SNavdeep Parhar find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp)
492454e4ee71SNavdeep Parhar {
492538035ed6SNavdeep Parhar 	int8_t zidx, hwidx, idx;
492638035ed6SNavdeep Parhar 	uint16_t region1, region3;
492738035ed6SNavdeep Parhar 	int spare, spare_needed, n;
492838035ed6SNavdeep Parhar 	struct sw_zone_info *swz;
492938035ed6SNavdeep Parhar 	struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0];
493054e4ee71SNavdeep Parhar 
493138035ed6SNavdeep Parhar 	/*
493238035ed6SNavdeep Parhar 	 * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize
493338035ed6SNavdeep Parhar 	 * large enough for the max payload and cluster metadata.  Otherwise
493438035ed6SNavdeep Parhar 	 * settle for the largest bufsize that leaves enough room in the cluster
493538035ed6SNavdeep Parhar 	 * for metadata.
493638035ed6SNavdeep Parhar 	 *
493738035ed6SNavdeep Parhar 	 * Without buffer packing: Look for the smallest zone which has a
493838035ed6SNavdeep Parhar 	 * bufsize large enough for the max payload.  Settle for the largest
493938035ed6SNavdeep Parhar 	 * bufsize available if there's nothing big enough for max payload.
494038035ed6SNavdeep Parhar 	 */
494138035ed6SNavdeep Parhar 	spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0;
494238035ed6SNavdeep Parhar 	swz = &sc->sge.sw_zone_info[0];
494338035ed6SNavdeep Parhar 	hwidx = -1;
494438035ed6SNavdeep Parhar 	for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) {
494538035ed6SNavdeep Parhar 		if (swz->size > largest_rx_cluster) {
494638035ed6SNavdeep Parhar 			if (__predict_true(hwidx != -1))
494738035ed6SNavdeep Parhar 				break;
494838035ed6SNavdeep Parhar 
494938035ed6SNavdeep Parhar 			/*
495038035ed6SNavdeep Parhar 			 * This is a misconfiguration.  largest_rx_cluster is
495138035ed6SNavdeep Parhar 			 * preventing us from finding a refill source.  See
495238035ed6SNavdeep Parhar 			 * dev.t5nex.<n>.buffer_sizes to figure out why.
495338035ed6SNavdeep Parhar 			 */
495438035ed6SNavdeep Parhar 			device_printf(sc->dev, "largest_rx_cluster=%u leaves no"
495538035ed6SNavdeep Parhar 			    " refill source for fl %p (dma %u).  Ignored.\n",
495638035ed6SNavdeep Parhar 			    largest_rx_cluster, fl, maxp);
495738035ed6SNavdeep Parhar 		}
495838035ed6SNavdeep Parhar 		for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) {
495938035ed6SNavdeep Parhar 			hwb = &hwb_list[idx];
496038035ed6SNavdeep Parhar 			spare = swz->size - hwb->size;
496138035ed6SNavdeep Parhar 			if (spare < spare_needed)
496238035ed6SNavdeep Parhar 				continue;
496338035ed6SNavdeep Parhar 
496438035ed6SNavdeep Parhar 			hwidx = idx;		/* best option so far */
496538035ed6SNavdeep Parhar 			if (hwb->size >= maxp) {
496638035ed6SNavdeep Parhar 
496738035ed6SNavdeep Parhar 				if ((fl->flags & FL_BUF_PACKING) == 0)
496838035ed6SNavdeep Parhar 					goto done; /* stop looking (not packing) */
496938035ed6SNavdeep Parhar 
497038035ed6SNavdeep Parhar 				if (swz->size >= safest_rx_cluster)
497138035ed6SNavdeep Parhar 					goto done; /* stop looking (packing) */
497238035ed6SNavdeep Parhar 			}
497338035ed6SNavdeep Parhar 			break;		/* keep looking, next zone */
497438035ed6SNavdeep Parhar 		}
497538035ed6SNavdeep Parhar 	}
497638035ed6SNavdeep Parhar done:
497738035ed6SNavdeep Parhar 	/* A usable hwidx has been located. */
497838035ed6SNavdeep Parhar 	MPASS(hwidx != -1);
497938035ed6SNavdeep Parhar 	hwb = &hwb_list[hwidx];
498038035ed6SNavdeep Parhar 	zidx = hwb->zidx;
498138035ed6SNavdeep Parhar 	swz = &sc->sge.sw_zone_info[zidx];
498238035ed6SNavdeep Parhar 	region1 = 0;
498338035ed6SNavdeep Parhar 	region3 = swz->size - hwb->size;
498438035ed6SNavdeep Parhar 
498538035ed6SNavdeep Parhar 	/*
498638035ed6SNavdeep Parhar 	 * Stay within this zone and see if there is a better match when mbuf
498738035ed6SNavdeep Parhar 	 * inlining is allowed.  Remember that the hwidx's are sorted in
498838035ed6SNavdeep Parhar 	 * decreasing order of size (so in increasing order of spare area).
498938035ed6SNavdeep Parhar 	 */
499038035ed6SNavdeep Parhar 	for (idx = hwidx; idx != -1; idx = hwb->next) {
499138035ed6SNavdeep Parhar 		hwb = &hwb_list[idx];
499238035ed6SNavdeep Parhar 		spare = swz->size - hwb->size;
499338035ed6SNavdeep Parhar 
499438035ed6SNavdeep Parhar 		if (allow_mbufs_in_cluster == 0 || hwb->size < maxp)
499538035ed6SNavdeep Parhar 			break;
4996e3207e19SNavdeep Parhar 
4997e3207e19SNavdeep Parhar 		/*
4998e3207e19SNavdeep Parhar 		 * Do not inline mbufs if doing so would violate the pad/pack
4999e3207e19SNavdeep Parhar 		 * boundary alignment requirement.
5000e3207e19SNavdeep Parhar 		 */
500190e7434aSNavdeep Parhar 		if (fl_pad && (MSIZE % sc->params.sge.pad_boundary) != 0)
5002e3207e19SNavdeep Parhar 			continue;
5003e3207e19SNavdeep Parhar 		if (fl->flags & FL_BUF_PACKING &&
500490e7434aSNavdeep Parhar 		    (MSIZE % sc->params.sge.pack_boundary) != 0)
5005e3207e19SNavdeep Parhar 			continue;
5006e3207e19SNavdeep Parhar 
500738035ed6SNavdeep Parhar 		if (spare < CL_METADATA_SIZE + MSIZE)
500838035ed6SNavdeep Parhar 			continue;
500938035ed6SNavdeep Parhar 		n = (spare - CL_METADATA_SIZE) / MSIZE;
501038035ed6SNavdeep Parhar 		if (n > howmany(hwb->size, maxp))
501138035ed6SNavdeep Parhar 			break;
501238035ed6SNavdeep Parhar 
501338035ed6SNavdeep Parhar 		hwidx = idx;
50141458bff9SNavdeep Parhar 		if (fl->flags & FL_BUF_PACKING) {
501538035ed6SNavdeep Parhar 			region1 = n * MSIZE;
501638035ed6SNavdeep Parhar 			region3 = spare - region1;
501738035ed6SNavdeep Parhar 		} else {
501838035ed6SNavdeep Parhar 			region1 = MSIZE;
501938035ed6SNavdeep Parhar 			region3 = spare - region1;
502038035ed6SNavdeep Parhar 			break;
502138035ed6SNavdeep Parhar 		}
502238035ed6SNavdeep Parhar 	}
502338035ed6SNavdeep Parhar 
502438035ed6SNavdeep Parhar 	KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES,
502538035ed6SNavdeep Parhar 	    ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp));
502638035ed6SNavdeep Parhar 	KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES,
502738035ed6SNavdeep Parhar 	    ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp));
502838035ed6SNavdeep Parhar 	KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 ==
502938035ed6SNavdeep Parhar 	    sc->sge.sw_zone_info[zidx].size,
503038035ed6SNavdeep Parhar 	    ("%s: bad buffer layout for fl %p, maxp %d. "
503138035ed6SNavdeep Parhar 		"cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
503238035ed6SNavdeep Parhar 		sc->sge.sw_zone_info[zidx].size, region1,
503338035ed6SNavdeep Parhar 		sc->sge.hw_buf_info[hwidx].size, region3));
503438035ed6SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING || region1 > 0) {
503538035ed6SNavdeep Parhar 		KASSERT(region3 >= CL_METADATA_SIZE,
503638035ed6SNavdeep Parhar 		    ("%s: no room for metadata.  fl %p, maxp %d; "
503738035ed6SNavdeep Parhar 		    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
503838035ed6SNavdeep Parhar 		    sc->sge.sw_zone_info[zidx].size, region1,
503938035ed6SNavdeep Parhar 		    sc->sge.hw_buf_info[hwidx].size, region3));
504038035ed6SNavdeep Parhar 		KASSERT(region1 % MSIZE == 0,
504138035ed6SNavdeep Parhar 		    ("%s: bad mbuf region for fl %p, maxp %d. "
504238035ed6SNavdeep Parhar 		    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
504338035ed6SNavdeep Parhar 		    sc->sge.sw_zone_info[zidx].size, region1,
504438035ed6SNavdeep Parhar 		    sc->sge.hw_buf_info[hwidx].size, region3));
504538035ed6SNavdeep Parhar 	}
504638035ed6SNavdeep Parhar 
504738035ed6SNavdeep Parhar 	fl->cll_def.zidx = zidx;
504838035ed6SNavdeep Parhar 	fl->cll_def.hwidx = hwidx;
504938035ed6SNavdeep Parhar 	fl->cll_def.region1 = region1;
505038035ed6SNavdeep Parhar 	fl->cll_def.region3 = region3;
505138035ed6SNavdeep Parhar }
505238035ed6SNavdeep Parhar 
505338035ed6SNavdeep Parhar static void
505438035ed6SNavdeep Parhar find_safe_refill_source(struct adapter *sc, struct sge_fl *fl)
505538035ed6SNavdeep Parhar {
505638035ed6SNavdeep Parhar 	struct sge *s = &sc->sge;
505738035ed6SNavdeep Parhar 	struct hw_buf_info *hwb;
505838035ed6SNavdeep Parhar 	struct sw_zone_info *swz;
505938035ed6SNavdeep Parhar 	int spare;
506038035ed6SNavdeep Parhar 	int8_t hwidx;
506138035ed6SNavdeep Parhar 
506238035ed6SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING)
506338035ed6SNavdeep Parhar 		hwidx = s->safe_hwidx2;	/* with room for metadata */
506438035ed6SNavdeep Parhar 	else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) {
506538035ed6SNavdeep Parhar 		hwidx = s->safe_hwidx2;
506638035ed6SNavdeep Parhar 		hwb = &s->hw_buf_info[hwidx];
506738035ed6SNavdeep Parhar 		swz = &s->sw_zone_info[hwb->zidx];
506838035ed6SNavdeep Parhar 		spare = swz->size - hwb->size;
506938035ed6SNavdeep Parhar 
507038035ed6SNavdeep Parhar 		/* no good if there isn't room for an mbuf as well */
507138035ed6SNavdeep Parhar 		if (spare < CL_METADATA_SIZE + MSIZE)
507238035ed6SNavdeep Parhar 			hwidx = s->safe_hwidx1;
507338035ed6SNavdeep Parhar 	} else
507438035ed6SNavdeep Parhar 		hwidx = s->safe_hwidx1;
507538035ed6SNavdeep Parhar 
507638035ed6SNavdeep Parhar 	if (hwidx == -1) {
507738035ed6SNavdeep Parhar 		/* No fallback source */
507838035ed6SNavdeep Parhar 		fl->cll_alt.hwidx = -1;
507938035ed6SNavdeep Parhar 		fl->cll_alt.zidx = -1;
508038035ed6SNavdeep Parhar 
50811458bff9SNavdeep Parhar 		return;
508254e4ee71SNavdeep Parhar 	}
508354e4ee71SNavdeep Parhar 
508438035ed6SNavdeep Parhar 	hwb = &s->hw_buf_info[hwidx];
508538035ed6SNavdeep Parhar 	swz = &s->sw_zone_info[hwb->zidx];
508638035ed6SNavdeep Parhar 	spare = swz->size - hwb->size;
508738035ed6SNavdeep Parhar 	fl->cll_alt.hwidx = hwidx;
508838035ed6SNavdeep Parhar 	fl->cll_alt.zidx = hwb->zidx;
5089e3207e19SNavdeep Parhar 	if (allow_mbufs_in_cluster &&
509090e7434aSNavdeep Parhar 	    (fl_pad == 0 || (MSIZE % sc->params.sge.pad_boundary) == 0))
509138035ed6SNavdeep Parhar 		fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE;
50921458bff9SNavdeep Parhar 	else
509338035ed6SNavdeep Parhar 		fl->cll_alt.region1 = 0;
509438035ed6SNavdeep Parhar 	fl->cll_alt.region3 = spare - fl->cll_alt.region1;
509554e4ee71SNavdeep Parhar }
5096ecb79ca4SNavdeep Parhar 
5097733b9277SNavdeep Parhar static void
5098733b9277SNavdeep Parhar add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
5099ecb79ca4SNavdeep Parhar {
5100733b9277SNavdeep Parhar 	mtx_lock(&sc->sfl_lock);
5101733b9277SNavdeep Parhar 	FL_LOCK(fl);
5102733b9277SNavdeep Parhar 	if ((fl->flags & FL_DOOMED) == 0) {
5103733b9277SNavdeep Parhar 		fl->flags |= FL_STARVING;
5104733b9277SNavdeep Parhar 		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
5105733b9277SNavdeep Parhar 		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
5106733b9277SNavdeep Parhar 	}
5107733b9277SNavdeep Parhar 	FL_UNLOCK(fl);
5108733b9277SNavdeep Parhar 	mtx_unlock(&sc->sfl_lock);
5109733b9277SNavdeep Parhar }
5110ecb79ca4SNavdeep Parhar 
51117951040fSNavdeep Parhar static void
51127951040fSNavdeep Parhar handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq)
51137951040fSNavdeep Parhar {
51147951040fSNavdeep Parhar 	struct sge_wrq *wrq = (void *)eq;
51157951040fSNavdeep Parhar 
51167951040fSNavdeep Parhar 	atomic_readandclear_int(&eq->equiq);
51177951040fSNavdeep Parhar 	taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task);
51187951040fSNavdeep Parhar }
51197951040fSNavdeep Parhar 
51207951040fSNavdeep Parhar static void
51217951040fSNavdeep Parhar handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq)
51227951040fSNavdeep Parhar {
51237951040fSNavdeep Parhar 	struct sge_txq *txq = (void *)eq;
51247951040fSNavdeep Parhar 
51257951040fSNavdeep Parhar 	MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH);
51267951040fSNavdeep Parhar 
51277951040fSNavdeep Parhar 	atomic_readandclear_int(&eq->equiq);
51287951040fSNavdeep Parhar 	mp_ring_check_drainage(txq->r, 0);
51297951040fSNavdeep Parhar 	taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task);
51307951040fSNavdeep Parhar }
51317951040fSNavdeep Parhar 
5132733b9277SNavdeep Parhar static int
5133733b9277SNavdeep Parhar handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
5134733b9277SNavdeep Parhar     struct mbuf *m)
5135733b9277SNavdeep Parhar {
5136733b9277SNavdeep Parhar 	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
5137733b9277SNavdeep Parhar 	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
5138733b9277SNavdeep Parhar 	struct adapter *sc = iq->adapter;
5139733b9277SNavdeep Parhar 	struct sge *s = &sc->sge;
5140733b9277SNavdeep Parhar 	struct sge_eq *eq;
51417951040fSNavdeep Parhar 	static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
51427951040fSNavdeep Parhar 		&handle_wrq_egr_update, &handle_eth_egr_update,
51437951040fSNavdeep Parhar 		&handle_wrq_egr_update};
5144733b9277SNavdeep Parhar 
5145733b9277SNavdeep Parhar 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
5146733b9277SNavdeep Parhar 	    rss->opcode));
5147733b9277SNavdeep Parhar 
5148ec55567cSJohn Baldwin 	eq = s->eqmap[qid - s->eq_start - s->eq_base];
51497951040fSNavdeep Parhar 	(*h[eq->flags & EQ_TYPEMASK])(sc, eq);
5150ecb79ca4SNavdeep Parhar 
5151ecb79ca4SNavdeep Parhar 	return (0);
5152ecb79ca4SNavdeep Parhar }
5153f7dfe243SNavdeep Parhar 
51540abd31e2SNavdeep Parhar /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */
51550abd31e2SNavdeep Parhar CTASSERT(offsetof(struct cpl_fw4_msg, data) == \
51560abd31e2SNavdeep Parhar     offsetof(struct cpl_fw6_msg, data));
51570abd31e2SNavdeep Parhar 
5158733b9277SNavdeep Parhar static int
51591b4cc91fSNavdeep Parhar handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
516056599263SNavdeep Parhar {
51611b4cc91fSNavdeep Parhar 	struct adapter *sc = iq->adapter;
516256599263SNavdeep Parhar 	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
516356599263SNavdeep Parhar 
5164733b9277SNavdeep Parhar 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
5165733b9277SNavdeep Parhar 	    rss->opcode));
5166733b9277SNavdeep Parhar 
51670abd31e2SNavdeep Parhar 	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
51680abd31e2SNavdeep Parhar 		const struct rss_header *rss2;
51690abd31e2SNavdeep Parhar 
51700abd31e2SNavdeep Parhar 		rss2 = (const struct rss_header *)&cpl->data[0];
5171671bf2b8SNavdeep Parhar 		return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
51720abd31e2SNavdeep Parhar 	}
51730abd31e2SNavdeep Parhar 
5174671bf2b8SNavdeep Parhar 	return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
5175f7dfe243SNavdeep Parhar }
5176af49c942SNavdeep Parhar 
5177069af0ebSJohn Baldwin /**
5178069af0ebSJohn Baldwin  *	t4_handle_wrerr_rpl - process a FW work request error message
5179069af0ebSJohn Baldwin  *	@adap: the adapter
5180069af0ebSJohn Baldwin  *	@rpl: start of the FW message
5181069af0ebSJohn Baldwin  */
5182069af0ebSJohn Baldwin static int
5183069af0ebSJohn Baldwin t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl)
5184069af0ebSJohn Baldwin {
5185069af0ebSJohn Baldwin 	u8 opcode = *(const u8 *)rpl;
5186069af0ebSJohn Baldwin 	const struct fw_error_cmd *e = (const void *)rpl;
5187069af0ebSJohn Baldwin 	unsigned int i;
5188069af0ebSJohn Baldwin 
5189069af0ebSJohn Baldwin 	if (opcode != FW_ERROR_CMD) {
5190069af0ebSJohn Baldwin 		log(LOG_ERR,
5191069af0ebSJohn Baldwin 		    "%s: Received WRERR_RPL message with opcode %#x\n",
5192069af0ebSJohn Baldwin 		    device_get_nameunit(adap->dev), opcode);
5193069af0ebSJohn Baldwin 		return (EINVAL);
5194069af0ebSJohn Baldwin 	}
5195069af0ebSJohn Baldwin 	log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev),
5196069af0ebSJohn Baldwin 	    G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" :
5197069af0ebSJohn Baldwin 	    "non-fatal");
5198069af0ebSJohn Baldwin 	switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) {
5199069af0ebSJohn Baldwin 	case FW_ERROR_TYPE_EXCEPTION:
5200069af0ebSJohn Baldwin 		log(LOG_ERR, "exception info:\n");
5201069af0ebSJohn Baldwin 		for (i = 0; i < nitems(e->u.exception.info); i++)
5202069af0ebSJohn Baldwin 			log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ",
5203069af0ebSJohn Baldwin 			    be32toh(e->u.exception.info[i]));
5204069af0ebSJohn Baldwin 		log(LOG_ERR, "\n");
5205069af0ebSJohn Baldwin 		break;
5206069af0ebSJohn Baldwin 	case FW_ERROR_TYPE_HWMODULE:
5207069af0ebSJohn Baldwin 		log(LOG_ERR, "HW module regaddr %08x regval %08x\n",
5208069af0ebSJohn Baldwin 		    be32toh(e->u.hwmodule.regaddr),
5209069af0ebSJohn Baldwin 		    be32toh(e->u.hwmodule.regval));
5210069af0ebSJohn Baldwin 		break;
5211069af0ebSJohn Baldwin 	case FW_ERROR_TYPE_WR:
5212069af0ebSJohn Baldwin 		log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n",
5213069af0ebSJohn Baldwin 		    be16toh(e->u.wr.cidx),
5214069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)),
5215069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)),
5216069af0ebSJohn Baldwin 		    be32toh(e->u.wr.eqid));
5217069af0ebSJohn Baldwin 		for (i = 0; i < nitems(e->u.wr.wrhdr); i++)
5218069af0ebSJohn Baldwin 			log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ",
5219069af0ebSJohn Baldwin 			    e->u.wr.wrhdr[i]);
5220069af0ebSJohn Baldwin 		log(LOG_ERR, "\n");
5221069af0ebSJohn Baldwin 		break;
5222069af0ebSJohn Baldwin 	case FW_ERROR_TYPE_ACL:
5223069af0ebSJohn Baldwin 		log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s",
5224069af0ebSJohn Baldwin 		    be16toh(e->u.acl.cidx),
5225069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)),
5226069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)),
5227069af0ebSJohn Baldwin 		    be32toh(e->u.acl.eqid),
5228069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" :
5229069af0ebSJohn Baldwin 		    "MAC");
5230069af0ebSJohn Baldwin 		for (i = 0; i < nitems(e->u.acl.val); i++)
5231069af0ebSJohn Baldwin 			log(LOG_ERR, " %02x", e->u.acl.val[i]);
5232069af0ebSJohn Baldwin 		log(LOG_ERR, "\n");
5233069af0ebSJohn Baldwin 		break;
5234069af0ebSJohn Baldwin 	default:
5235069af0ebSJohn Baldwin 		log(LOG_ERR, "type %#x\n",
5236069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type)));
5237069af0ebSJohn Baldwin 		return (EINVAL);
5238069af0ebSJohn Baldwin 	}
5239069af0ebSJohn Baldwin 	return (0);
5240069af0ebSJohn Baldwin }
5241069af0ebSJohn Baldwin 
5242af49c942SNavdeep Parhar static int
524356599263SNavdeep Parhar sysctl_uint16(SYSCTL_HANDLER_ARGS)
5244af49c942SNavdeep Parhar {
5245af49c942SNavdeep Parhar 	uint16_t *id = arg1;
5246af49c942SNavdeep Parhar 	int i = *id;
5247af49c942SNavdeep Parhar 
5248af49c942SNavdeep Parhar 	return sysctl_handle_int(oidp, &i, 0, req);
5249af49c942SNavdeep Parhar }
525038035ed6SNavdeep Parhar 
525138035ed6SNavdeep Parhar static int
525238035ed6SNavdeep Parhar sysctl_bufsizes(SYSCTL_HANDLER_ARGS)
525338035ed6SNavdeep Parhar {
525438035ed6SNavdeep Parhar 	struct sge *s = arg1;
525538035ed6SNavdeep Parhar 	struct hw_buf_info *hwb = &s->hw_buf_info[0];
525638035ed6SNavdeep Parhar 	struct sw_zone_info *swz = &s->sw_zone_info[0];
525738035ed6SNavdeep Parhar 	int i, rc;
525838035ed6SNavdeep Parhar 	struct sbuf sb;
525938035ed6SNavdeep Parhar 	char c;
526038035ed6SNavdeep Parhar 
526138035ed6SNavdeep Parhar 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
526238035ed6SNavdeep Parhar 	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
526338035ed6SNavdeep Parhar 		if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster)
526438035ed6SNavdeep Parhar 			c = '*';
526538035ed6SNavdeep Parhar 		else
526638035ed6SNavdeep Parhar 			c = '\0';
526738035ed6SNavdeep Parhar 
526838035ed6SNavdeep Parhar 		sbuf_printf(&sb, "%u%c ", hwb->size, c);
526938035ed6SNavdeep Parhar 	}
527038035ed6SNavdeep Parhar 	sbuf_trim(&sb);
527138035ed6SNavdeep Parhar 	sbuf_finish(&sb);
527238035ed6SNavdeep Parhar 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
527338035ed6SNavdeep Parhar 	sbuf_delete(&sb);
527438035ed6SNavdeep Parhar 	return (rc);
527538035ed6SNavdeep Parhar }
527602f972e8SNavdeep Parhar 
527702f972e8SNavdeep Parhar static int
527802f972e8SNavdeep Parhar sysctl_tc(SYSCTL_HANDLER_ARGS)
527902f972e8SNavdeep Parhar {
528002f972e8SNavdeep Parhar 	struct vi_info *vi = arg1;
528102f972e8SNavdeep Parhar 	struct port_info *pi;
528202f972e8SNavdeep Parhar 	struct adapter *sc;
528302f972e8SNavdeep Parhar 	struct sge_txq *txq;
52842204b427SNavdeep Parhar 	struct tx_cl_rl_params *tc;
528502f972e8SNavdeep Parhar 	int qidx = arg2, rc, tc_idx;
528602f972e8SNavdeep Parhar 	uint32_t fw_queue, fw_class;
528702f972e8SNavdeep Parhar 
528802f972e8SNavdeep Parhar 	MPASS(qidx >= 0 && qidx < vi->ntxq);
528902f972e8SNavdeep Parhar 	pi = vi->pi;
529002f972e8SNavdeep Parhar 	sc = pi->adapter;
529102f972e8SNavdeep Parhar 	txq = &sc->sge.txq[vi->first_txq + qidx];
529202f972e8SNavdeep Parhar 
529302f972e8SNavdeep Parhar 	tc_idx = txq->tc_idx;
529402f972e8SNavdeep Parhar 	rc = sysctl_handle_int(oidp, &tc_idx, 0, req);
529502f972e8SNavdeep Parhar 	if (rc != 0 || req->newptr == NULL)
529602f972e8SNavdeep Parhar 		return (rc);
529702f972e8SNavdeep Parhar 
52982204b427SNavdeep Parhar 	if (sc->flags & IS_VF)
52992204b427SNavdeep Parhar 		return (EPERM);
53002204b427SNavdeep Parhar 
530102f972e8SNavdeep Parhar 	/* Note that -1 is legitimate input (it means unbind). */
530202f972e8SNavdeep Parhar 	if (tc_idx < -1 || tc_idx >= sc->chip_params->nsched_cls)
530302f972e8SNavdeep Parhar 		return (EINVAL);
530402f972e8SNavdeep Parhar 
53052204b427SNavdeep Parhar 	mtx_lock(&sc->tc_lock);
530602f972e8SNavdeep Parhar 	if (tc_idx == txq->tc_idx) {
530702f972e8SNavdeep Parhar 		rc = 0;		/* No change, nothing to do. */
530802f972e8SNavdeep Parhar 		goto done;
530902f972e8SNavdeep Parhar 	}
531002f972e8SNavdeep Parhar 
531102f972e8SNavdeep Parhar 	fw_queue = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
531202f972e8SNavdeep Parhar 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) |
531302f972e8SNavdeep Parhar 	    V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id);
531402f972e8SNavdeep Parhar 
531502f972e8SNavdeep Parhar 	if (tc_idx == -1)
531602f972e8SNavdeep Parhar 		fw_class = 0xffffffff;	/* Unbind. */
531702f972e8SNavdeep Parhar 	else {
531802f972e8SNavdeep Parhar 		/*
53192204b427SNavdeep Parhar 		 * Bind to a different class.
532002f972e8SNavdeep Parhar 		 */
53212204b427SNavdeep Parhar 		tc = &pi->sched_params->cl_rl[tc_idx];
53222204b427SNavdeep Parhar 		if (tc->flags & TX_CLRL_ERROR) {
53232204b427SNavdeep Parhar 			/* Previous attempt to set the cl-rl params failed. */
53242204b427SNavdeep Parhar 			rc = EIO;
532502f972e8SNavdeep Parhar 			goto done;
53262204b427SNavdeep Parhar 		} else {
53272204b427SNavdeep Parhar 			/*
53282204b427SNavdeep Parhar 			 * Ok to proceed.  Place a reference on the new class
53292204b427SNavdeep Parhar 			 * while still holding on to the reference on the
53302204b427SNavdeep Parhar 			 * previous class, if any.
53312204b427SNavdeep Parhar 			 */
53322204b427SNavdeep Parhar 			fw_class = tc_idx;
53332204b427SNavdeep Parhar 			tc->refcount++;
533402f972e8SNavdeep Parhar 		}
533502f972e8SNavdeep Parhar 	}
53362204b427SNavdeep Parhar 	mtx_unlock(&sc->tc_lock);
533702f972e8SNavdeep Parhar 
53382204b427SNavdeep Parhar 	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4stc");
53392204b427SNavdeep Parhar 	if (rc)
53402204b427SNavdeep Parhar 		return (rc);
534102f972e8SNavdeep Parhar 	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, &fw_class);
53422204b427SNavdeep Parhar 	end_synchronized_op(sc, 0);
53432204b427SNavdeep Parhar 
53442204b427SNavdeep Parhar 	mtx_lock(&sc->tc_lock);
534502f972e8SNavdeep Parhar 	if (rc == 0) {
534602f972e8SNavdeep Parhar 		if (txq->tc_idx != -1) {
53472204b427SNavdeep Parhar 			tc = &pi->sched_params->cl_rl[txq->tc_idx];
534802f972e8SNavdeep Parhar 			MPASS(tc->refcount > 0);
534902f972e8SNavdeep Parhar 			tc->refcount--;
535002f972e8SNavdeep Parhar 		}
535102f972e8SNavdeep Parhar 		txq->tc_idx = tc_idx;
53523f1466a5SNavdeep Parhar 	} else if (tc_idx != -1) {
53532204b427SNavdeep Parhar 		tc = &pi->sched_params->cl_rl[tc_idx];
53542204b427SNavdeep Parhar 		MPASS(tc->refcount > 0);
53552204b427SNavdeep Parhar 		tc->refcount--;
535602f972e8SNavdeep Parhar 	}
535702f972e8SNavdeep Parhar done:
53582204b427SNavdeep Parhar 	mtx_unlock(&sc->tc_lock);
535902f972e8SNavdeep Parhar 	return (rc);
536002f972e8SNavdeep Parhar }
5361