xref: /freebsd/sys/dev/cxgbe/t4_sge.c (revision 7c228be30bd14fd8b683426bc91a608827426abf)
154e4ee71SNavdeep Parhar /*-
2718cf2ccSPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3718cf2ccSPedro F. Giffuni  *
454e4ee71SNavdeep Parhar  * Copyright (c) 2011 Chelsio Communications, Inc.
554e4ee71SNavdeep Parhar  * All rights reserved.
654e4ee71SNavdeep Parhar  * Written by: Navdeep Parhar <np@FreeBSD.org>
754e4ee71SNavdeep Parhar  *
854e4ee71SNavdeep Parhar  * Redistribution and use in source and binary forms, with or without
954e4ee71SNavdeep Parhar  * modification, are permitted provided that the following conditions
1054e4ee71SNavdeep Parhar  * are met:
1154e4ee71SNavdeep Parhar  * 1. Redistributions of source code must retain the above copyright
1254e4ee71SNavdeep Parhar  *    notice, this list of conditions and the following disclaimer.
1354e4ee71SNavdeep Parhar  * 2. Redistributions in binary form must reproduce the above copyright
1454e4ee71SNavdeep Parhar  *    notice, this list of conditions and the following disclaimer in the
1554e4ee71SNavdeep Parhar  *    documentation and/or other materials provided with the distribution.
1654e4ee71SNavdeep Parhar  *
1754e4ee71SNavdeep Parhar  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1854e4ee71SNavdeep Parhar  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1954e4ee71SNavdeep Parhar  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2054e4ee71SNavdeep Parhar  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
2154e4ee71SNavdeep Parhar  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2254e4ee71SNavdeep Parhar  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2354e4ee71SNavdeep Parhar  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2454e4ee71SNavdeep Parhar  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2554e4ee71SNavdeep Parhar  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2654e4ee71SNavdeep Parhar  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2754e4ee71SNavdeep Parhar  * SUCH DAMAGE.
2854e4ee71SNavdeep Parhar  */
2954e4ee71SNavdeep Parhar 
3054e4ee71SNavdeep Parhar #include <sys/cdefs.h>
3154e4ee71SNavdeep Parhar __FBSDID("$FreeBSD$");
3254e4ee71SNavdeep Parhar 
3354e4ee71SNavdeep Parhar #include "opt_inet.h"
34a1ea9a82SNavdeep Parhar #include "opt_inet6.h"
35bddf7343SJohn Baldwin #include "opt_kern_tls.h"
36eff62dbaSNavdeep Parhar #include "opt_ratelimit.h"
3754e4ee71SNavdeep Parhar 
3854e4ee71SNavdeep Parhar #include <sys/types.h>
39c3322cb9SGleb Smirnoff #include <sys/eventhandler.h>
4054e4ee71SNavdeep Parhar #include <sys/mbuf.h>
4154e4ee71SNavdeep Parhar #include <sys/socket.h>
4254e4ee71SNavdeep Parhar #include <sys/kernel.h>
43bddf7343SJohn Baldwin #include <sys/ktls.h>
44ecb79ca4SNavdeep Parhar #include <sys/malloc.h>
45ecb79ca4SNavdeep Parhar #include <sys/queue.h>
4638035ed6SNavdeep Parhar #include <sys/sbuf.h>
47ecb79ca4SNavdeep Parhar #include <sys/taskqueue.h>
48480e603cSNavdeep Parhar #include <sys/time.h>
497951040fSNavdeep Parhar #include <sys/sglist.h>
5054e4ee71SNavdeep Parhar #include <sys/sysctl.h>
51733b9277SNavdeep Parhar #include <sys/smp.h>
52bddf7343SJohn Baldwin #include <sys/socketvar.h>
5382eff304SNavdeep Parhar #include <sys/counter.h>
5454e4ee71SNavdeep Parhar #include <net/bpf.h>
5554e4ee71SNavdeep Parhar #include <net/ethernet.h>
5654e4ee71SNavdeep Parhar #include <net/if.h>
5754e4ee71SNavdeep Parhar #include <net/if_vlan_var.h>
5854e4ee71SNavdeep Parhar #include <netinet/in.h>
5954e4ee71SNavdeep Parhar #include <netinet/ip.h>
60a1ea9a82SNavdeep Parhar #include <netinet/ip6.h>
6154e4ee71SNavdeep Parhar #include <netinet/tcp.h>
62786099deSNavdeep Parhar #include <netinet/udp.h>
636af45170SJohn Baldwin #include <machine/in_cksum.h>
6464db8966SDimitry Andric #include <machine/md_var.h>
6538035ed6SNavdeep Parhar #include <vm/vm.h>
6638035ed6SNavdeep Parhar #include <vm/pmap.h>
67298d969cSNavdeep Parhar #ifdef DEV_NETMAP
68298d969cSNavdeep Parhar #include <machine/bus.h>
69298d969cSNavdeep Parhar #include <sys/selinfo.h>
70298d969cSNavdeep Parhar #include <net/if_var.h>
71298d969cSNavdeep Parhar #include <net/netmap.h>
72298d969cSNavdeep Parhar #include <dev/netmap/netmap_kern.h>
73298d969cSNavdeep Parhar #endif
7454e4ee71SNavdeep Parhar 
7554e4ee71SNavdeep Parhar #include "common/common.h"
7654e4ee71SNavdeep Parhar #include "common/t4_regs.h"
7754e4ee71SNavdeep Parhar #include "common/t4_regs_values.h"
7854e4ee71SNavdeep Parhar #include "common/t4_msg.h"
79671bf2b8SNavdeep Parhar #include "t4_l2t.h"
807951040fSNavdeep Parhar #include "t4_mp_ring.h"
8154e4ee71SNavdeep Parhar 
82d14b0ac1SNavdeep Parhar #ifdef T4_PKT_TIMESTAMP
83d14b0ac1SNavdeep Parhar #define RX_COPY_THRESHOLD (MINCLSIZE - 8)
84d14b0ac1SNavdeep Parhar #else
85d14b0ac1SNavdeep Parhar #define RX_COPY_THRESHOLD MINCLSIZE
86d14b0ac1SNavdeep Parhar #endif
87d14b0ac1SNavdeep Parhar 
885cdaef71SJohn Baldwin /* Internal mbuf flags stored in PH_loc.eight[1]. */
89d76bbe17SJohn Baldwin #define	MC_NOMAP		0x01
905cdaef71SJohn Baldwin #define	MC_RAW_WR		0x02
91bddf7343SJohn Baldwin #define	MC_TLS			0x04
925cdaef71SJohn Baldwin 
939fb8886bSNavdeep Parhar /*
949fb8886bSNavdeep Parhar  * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
959fb8886bSNavdeep Parhar  * 0-7 are valid values.
969fb8886bSNavdeep Parhar  */
97518bca2cSNavdeep Parhar static int fl_pktshift = 0;
982d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0,
992d714dbcSJohn Baldwin     "payload DMA offset in rx buffer (bytes)");
10054e4ee71SNavdeep Parhar 
1019fb8886bSNavdeep Parhar /*
1029fb8886bSNavdeep Parhar  * Pad ethernet payload up to this boundary.
1039fb8886bSNavdeep Parhar  * -1: driver should figure out a good value.
1041458bff9SNavdeep Parhar  *  0: disable padding.
1051458bff9SNavdeep Parhar  *  Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
1069fb8886bSNavdeep Parhar  */
107298d969cSNavdeep Parhar int fl_pad = -1;
1082d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0,
1092d714dbcSJohn Baldwin     "payload pad boundary (bytes)");
1109fb8886bSNavdeep Parhar 
1119fb8886bSNavdeep Parhar /*
1129fb8886bSNavdeep Parhar  * Status page length.
1139fb8886bSNavdeep Parhar  * -1: driver should figure out a good value.
1149fb8886bSNavdeep Parhar  *  64 or 128 are the only other valid values.
1159fb8886bSNavdeep Parhar  */
11629c229e9SJohn Baldwin static int spg_len = -1;
1172d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0,
1182d714dbcSJohn Baldwin     "status page size (bytes)");
1199fb8886bSNavdeep Parhar 
1209fb8886bSNavdeep Parhar /*
1219fb8886bSNavdeep Parhar  * Congestion drops.
1229fb8886bSNavdeep Parhar  * -1: no congestion feedback (not recommended).
1239fb8886bSNavdeep Parhar  *  0: backpressure the channel instead of dropping packets right away.
1249fb8886bSNavdeep Parhar  *  1: no backpressure, drop packets for the congested queue immediately.
1259fb8886bSNavdeep Parhar  */
1269fb8886bSNavdeep Parhar static int cong_drop = 0;
1272d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0,
1282d714dbcSJohn Baldwin     "Congestion control for RX queues (0 = backpressure, 1 = drop");
12954e4ee71SNavdeep Parhar 
1301458bff9SNavdeep Parhar /*
1311458bff9SNavdeep Parhar  * Deliver multiple frames in the same free list buffer if they fit.
1321458bff9SNavdeep Parhar  * -1: let the driver decide whether to enable buffer packing or not.
1331458bff9SNavdeep Parhar  *  0: disable buffer packing.
1341458bff9SNavdeep Parhar  *  1: enable buffer packing.
1351458bff9SNavdeep Parhar  */
1361458bff9SNavdeep Parhar static int buffer_packing = -1;
1372d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing,
1382d714dbcSJohn Baldwin     0, "Enable buffer packing");
1391458bff9SNavdeep Parhar 
1401458bff9SNavdeep Parhar /*
1411458bff9SNavdeep Parhar  * Start next frame in a packed buffer at this boundary.
1421458bff9SNavdeep Parhar  * -1: driver should figure out a good value.
143e3207e19SNavdeep Parhar  * T4: driver will ignore this and use the same value as fl_pad above.
144e3207e19SNavdeep Parhar  * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
1451458bff9SNavdeep Parhar  */
1461458bff9SNavdeep Parhar static int fl_pack = -1;
1472d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0,
1482d714dbcSJohn Baldwin     "payload pack boundary (bytes)");
1491458bff9SNavdeep Parhar 
15038035ed6SNavdeep Parhar /*
15138035ed6SNavdeep Parhar  * Largest rx cluster size that the driver is allowed to allocate.
15238035ed6SNavdeep Parhar  */
15338035ed6SNavdeep Parhar static int largest_rx_cluster = MJUM16BYTES;
1542d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN,
1552d714dbcSJohn Baldwin     &largest_rx_cluster, 0, "Largest rx cluster (bytes)");
15638035ed6SNavdeep Parhar 
15738035ed6SNavdeep Parhar /*
15838035ed6SNavdeep Parhar  * Size of cluster allocation that's most likely to succeed.  The driver will
15938035ed6SNavdeep Parhar  * fall back to this size if it fails to allocate clusters larger than this.
16038035ed6SNavdeep Parhar  */
16138035ed6SNavdeep Parhar static int safest_rx_cluster = PAGE_SIZE;
1622d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN,
1632d714dbcSJohn Baldwin     &safest_rx_cluster, 0, "Safe rx cluster (bytes)");
16438035ed6SNavdeep Parhar 
165786099deSNavdeep Parhar #ifdef RATELIMIT
166786099deSNavdeep Parhar /*
167786099deSNavdeep Parhar  * Knob to control TCP timestamp rewriting, and the granularity of the tick used
168786099deSNavdeep Parhar  * for rewriting.  -1 and 0-3 are all valid values.
169786099deSNavdeep Parhar  * -1: hardware should leave the TCP timestamps alone.
170786099deSNavdeep Parhar  * 0: 1ms
171786099deSNavdeep Parhar  * 1: 100us
172786099deSNavdeep Parhar  * 2: 10us
173786099deSNavdeep Parhar  * 3: 1us
174786099deSNavdeep Parhar  */
175786099deSNavdeep Parhar static int tsclk = -1;
1762d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, tsclk, CTLFLAG_RDTUN, &tsclk, 0,
1772d714dbcSJohn Baldwin     "Control TCP timestamp rewriting when using pacing");
178786099deSNavdeep Parhar 
179786099deSNavdeep Parhar static int eo_max_backlog = 1024 * 1024;
1802d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, eo_max_backlog, CTLFLAG_RDTUN, &eo_max_backlog,
1812d714dbcSJohn Baldwin     0, "Maximum backlog of ratelimited data per flow");
182786099deSNavdeep Parhar #endif
183786099deSNavdeep Parhar 
184d491f8caSNavdeep Parhar /*
185d491f8caSNavdeep Parhar  * The interrupt holdoff timers are multiplied by this value on T6+.
186d491f8caSNavdeep Parhar  * 1 and 3-17 (both inclusive) are legal values.
187d491f8caSNavdeep Parhar  */
188d491f8caSNavdeep Parhar static int tscale = 1;
1892d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0,
1902d714dbcSJohn Baldwin     "Interrupt holdoff timer scale on T6+");
191d491f8caSNavdeep Parhar 
19246f48ee5SNavdeep Parhar /*
19346f48ee5SNavdeep Parhar  * Number of LRO entries in the lro_ctrl structure per rx queue.
19446f48ee5SNavdeep Parhar  */
19546f48ee5SNavdeep Parhar static int lro_entries = TCP_LRO_ENTRIES;
1962d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0,
1972d714dbcSJohn Baldwin     "Number of LRO entries per RX queue");
19846f48ee5SNavdeep Parhar 
19946f48ee5SNavdeep Parhar /*
20046f48ee5SNavdeep Parhar  * This enables presorting of frames before they're fed into tcp_lro_rx.
20146f48ee5SNavdeep Parhar  */
20246f48ee5SNavdeep Parhar static int lro_mbufs = 0;
2032d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0,
2042d714dbcSJohn Baldwin     "Enable presorting of LRO frames");
20546f48ee5SNavdeep Parhar 
/*
 * Accumulator state used while coalescing several tx packets into one
 * FW_ETH_TX_PKTS work request.
 */
struct txpkts {
	u_int wr_type;		/* type 0 or type 1 */
	u_int npkt;		/* # of packets in this work request */
	u_int plen;		/* total payload (sum of all packets) */
	u_int len16;		/* # of 16B pieces used by this work request */
};
21254e4ee71SNavdeep Parhar 
/* A packet's SGL.  This + m_pkthdr has all info needed for tx */
struct sgl {
	struct sglist sg;	/* sglist header describing the segments */
	struct sglist_seg seg[TX_SGL_SEGS];	/* inline storage for the segments */
};
21854e4ee71SNavdeep Parhar 
219733b9277SNavdeep Parhar static int service_iq(struct sge_iq *, int);
2203098bcfcSNavdeep Parhar static int service_iq_fl(struct sge_iq *, int);
2214d6db4e0SNavdeep Parhar static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
2221486d2deSNavdeep Parhar static int eth_rx(struct adapter *, struct sge_rxq *, const struct iq_desc *,
2231486d2deSNavdeep Parhar     u_int);
224b2daa9a9SNavdeep Parhar static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
225e3207e19SNavdeep Parhar static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
22690e7434aSNavdeep Parhar static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
22790e7434aSNavdeep Parhar     uint16_t, char *);
22854e4ee71SNavdeep Parhar static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
22954e4ee71SNavdeep Parhar     bus_addr_t *, void **);
23054e4ee71SNavdeep Parhar static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
23154e4ee71SNavdeep Parhar     void *);
232fe2ebb76SJohn Baldwin static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
233bc14b14dSNavdeep Parhar     int, int);
234fe2ebb76SJohn Baldwin static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *);
235348694daSNavdeep Parhar static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
236348694daSNavdeep Parhar     struct sge_iq *);
237aa93b99aSNavdeep Parhar static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
238aa93b99aSNavdeep Parhar     struct sysctl_oid *, struct sge_fl *);
239733b9277SNavdeep Parhar static int alloc_fwq(struct adapter *);
240733b9277SNavdeep Parhar static int free_fwq(struct adapter *);
24137310a98SNavdeep Parhar static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int,
24237310a98SNavdeep Parhar     struct sysctl_oid *);
243fe2ebb76SJohn Baldwin static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
244733b9277SNavdeep Parhar     struct sysctl_oid *);
245fe2ebb76SJohn Baldwin static int free_rxq(struct vi_info *, struct sge_rxq *);
24609fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
247fe2ebb76SJohn Baldwin static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
248733b9277SNavdeep Parhar     struct sysctl_oid *);
249fe2ebb76SJohn Baldwin static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
250733b9277SNavdeep Parhar #endif
251298d969cSNavdeep Parhar #ifdef DEV_NETMAP
252fe2ebb76SJohn Baldwin static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int,
253298d969cSNavdeep Parhar     struct sysctl_oid *);
254fe2ebb76SJohn Baldwin static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
255fe2ebb76SJohn Baldwin static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int,
256298d969cSNavdeep Parhar     struct sysctl_oid *);
257fe2ebb76SJohn Baldwin static int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
258298d969cSNavdeep Parhar #endif
259733b9277SNavdeep Parhar static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
260fe2ebb76SJohn Baldwin static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
261eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
262fe2ebb76SJohn Baldwin static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
263733b9277SNavdeep Parhar #endif
264fe2ebb76SJohn Baldwin static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *);
265733b9277SNavdeep Parhar static int free_eq(struct adapter *, struct sge_eq *);
266fe2ebb76SJohn Baldwin static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
267733b9277SNavdeep Parhar     struct sysctl_oid *);
268733b9277SNavdeep Parhar static int free_wrq(struct adapter *, struct sge_wrq *);
269fe2ebb76SJohn Baldwin static int alloc_txq(struct vi_info *, struct sge_txq *, int,
270733b9277SNavdeep Parhar     struct sysctl_oid *);
271fe2ebb76SJohn Baldwin static int free_txq(struct vi_info *, struct sge_txq *);
27254e4ee71SNavdeep Parhar static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
27354e4ee71SNavdeep Parhar static inline void ring_fl_db(struct adapter *, struct sge_fl *);
274733b9277SNavdeep Parhar static int refill_fl(struct adapter *, struct sge_fl *, int);
275733b9277SNavdeep Parhar static void refill_sfl(void *);
27654e4ee71SNavdeep Parhar static int alloc_fl_sdesc(struct sge_fl *);
2771458bff9SNavdeep Parhar static void free_fl_sdesc(struct adapter *, struct sge_fl *);
27846e1e307SNavdeep Parhar static int find_refill_source(struct adapter *, int, bool);
279733b9277SNavdeep Parhar static void add_fl_to_sfl(struct adapter *, struct sge_fl *);
28054e4ee71SNavdeep Parhar 
2817951040fSNavdeep Parhar static inline void get_pkt_gl(struct mbuf *, struct sglist *);
2827951040fSNavdeep Parhar static inline u_int txpkt_len16(u_int, u_int);
2836af45170SJohn Baldwin static inline u_int txpkt_vm_len16(u_int, u_int);
2847951040fSNavdeep Parhar static inline u_int txpkts0_len16(u_int);
2857951040fSNavdeep Parhar static inline u_int txpkts1_len16(void);
2865cdaef71SJohn Baldwin static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int);
287c0236bd9SNavdeep Parhar static u_int write_txpkt_wr(struct adapter *, struct sge_txq *,
288c0236bd9SNavdeep Parhar     struct fw_eth_tx_pkt_wr *, struct mbuf *, u_int);
289472a6004SNavdeep Parhar static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
290472a6004SNavdeep Parhar     struct fw_eth_tx_pkt_vm_wr *, struct mbuf *, u_int);
2917951040fSNavdeep Parhar static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int);
2927951040fSNavdeep Parhar static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int);
293c0236bd9SNavdeep Parhar static u_int write_txpkts_wr(struct adapter *, struct sge_txq *,
294c0236bd9SNavdeep Parhar     struct fw_eth_tx_pkts_wr *, struct mbuf *, const struct txpkts *, u_int);
2957951040fSNavdeep Parhar static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
29654e4ee71SNavdeep Parhar static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
2977951040fSNavdeep Parhar static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
2987951040fSNavdeep Parhar static inline uint16_t read_hw_cidx(struct sge_eq *);
2997951040fSNavdeep Parhar static inline u_int reclaimable_tx_desc(struct sge_eq *);
3007951040fSNavdeep Parhar static inline u_int total_available_tx_desc(struct sge_eq *);
3017951040fSNavdeep Parhar static u_int reclaim_tx_descs(struct sge_txq *, u_int);
3027951040fSNavdeep Parhar static void tx_reclaim(void *, int);
3037951040fSNavdeep Parhar static __be64 get_flit(struct sglist_seg *, int, int);
304733b9277SNavdeep Parhar static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
305733b9277SNavdeep Parhar     struct mbuf *);
3061b4cc91fSNavdeep Parhar static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
307733b9277SNavdeep Parhar     struct mbuf *);
308069af0ebSJohn Baldwin static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
3097951040fSNavdeep Parhar static void wrq_tx_drain(void *, int);
3107951040fSNavdeep Parhar static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);
31154e4ee71SNavdeep Parhar 
31256599263SNavdeep Parhar static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
31338035ed6SNavdeep Parhar static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
314786099deSNavdeep Parhar #ifdef RATELIMIT
315786099deSNavdeep Parhar static inline u_int txpkt_eo_len16(u_int, u_int, u_int);
316786099deSNavdeep Parhar static int ethofld_fw4_ack(struct sge_iq *, const struct rss_header *,
317786099deSNavdeep Parhar     struct mbuf *);
318786099deSNavdeep Parhar #endif
319f7dfe243SNavdeep Parhar 
32082eff304SNavdeep Parhar static counter_u64_t extfree_refs;
32182eff304SNavdeep Parhar static counter_u64_t extfree_rels;
32282eff304SNavdeep Parhar 
323671bf2b8SNavdeep Parhar an_handler_t t4_an_handler;
324671bf2b8SNavdeep Parhar fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
325671bf2b8SNavdeep Parhar cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
3264535e804SNavdeep Parhar cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES];
3274535e804SNavdeep Parhar cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
328111638bfSNavdeep Parhar cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
32989f651e7SNavdeep Parhar cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
3309c707b32SNavdeep Parhar cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];
331671bf2b8SNavdeep Parhar 
3324535e804SNavdeep Parhar void
333671bf2b8SNavdeep Parhar t4_register_an_handler(an_handler_t h)
334671bf2b8SNavdeep Parhar {
3354535e804SNavdeep Parhar 	uintptr_t *loc;
336671bf2b8SNavdeep Parhar 
3374535e804SNavdeep Parhar 	MPASS(h == NULL || t4_an_handler == NULL);
3384535e804SNavdeep Parhar 
339671bf2b8SNavdeep Parhar 	loc = (uintptr_t *)&t4_an_handler;
3404535e804SNavdeep Parhar 	atomic_store_rel_ptr(loc, (uintptr_t)h);
341671bf2b8SNavdeep Parhar }
342671bf2b8SNavdeep Parhar 
3434535e804SNavdeep Parhar void
344671bf2b8SNavdeep Parhar t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
345671bf2b8SNavdeep Parhar {
3464535e804SNavdeep Parhar 	uintptr_t *loc;
347671bf2b8SNavdeep Parhar 
3484535e804SNavdeep Parhar 	MPASS(type < nitems(t4_fw_msg_handler));
3494535e804SNavdeep Parhar 	MPASS(h == NULL || t4_fw_msg_handler[type] == NULL);
350671bf2b8SNavdeep Parhar 	/*
351671bf2b8SNavdeep Parhar 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
352671bf2b8SNavdeep Parhar 	 * handler dispatch table.  Reject any attempt to install a handler for
353671bf2b8SNavdeep Parhar 	 * this subtype.
354671bf2b8SNavdeep Parhar 	 */
3554535e804SNavdeep Parhar 	MPASS(type != FW_TYPE_RSSCPL);
3564535e804SNavdeep Parhar 	MPASS(type != FW6_TYPE_RSSCPL);
357671bf2b8SNavdeep Parhar 
358671bf2b8SNavdeep Parhar 	loc = (uintptr_t *)&t4_fw_msg_handler[type];
3594535e804SNavdeep Parhar 	atomic_store_rel_ptr(loc, (uintptr_t)h);
3604535e804SNavdeep Parhar }
361671bf2b8SNavdeep Parhar 
3624535e804SNavdeep Parhar void
3634535e804SNavdeep Parhar t4_register_cpl_handler(int opcode, cpl_handler_t h)
3644535e804SNavdeep Parhar {
3654535e804SNavdeep Parhar 	uintptr_t *loc;
3664535e804SNavdeep Parhar 
3674535e804SNavdeep Parhar 	MPASS(opcode < nitems(t4_cpl_handler));
3684535e804SNavdeep Parhar 	MPASS(h == NULL || t4_cpl_handler[opcode] == NULL);
3694535e804SNavdeep Parhar 
3704535e804SNavdeep Parhar 	loc = (uintptr_t *)&t4_cpl_handler[opcode];
3714535e804SNavdeep Parhar 	atomic_store_rel_ptr(loc, (uintptr_t)h);
372671bf2b8SNavdeep Parhar }
373671bf2b8SNavdeep Parhar 
374671bf2b8SNavdeep Parhar static int
3754535e804SNavdeep Parhar set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
3764535e804SNavdeep Parhar     struct mbuf *m)
377671bf2b8SNavdeep Parhar {
3784535e804SNavdeep Parhar 	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
3794535e804SNavdeep Parhar 	u_int tid;
3804535e804SNavdeep Parhar 	int cookie;
381671bf2b8SNavdeep Parhar 
3824535e804SNavdeep Parhar 	MPASS(m == NULL);
3834535e804SNavdeep Parhar 
3844535e804SNavdeep Parhar 	tid = GET_TID(cpl);
3855fc0f72fSNavdeep Parhar 	if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) {
3864535e804SNavdeep Parhar 		/*
3874535e804SNavdeep Parhar 		 * The return code for filter-write is put in the CPL cookie so
3884535e804SNavdeep Parhar 		 * we have to rely on the hardware tid (is_ftid) to determine
3894535e804SNavdeep Parhar 		 * that this is a response to a filter.
3904535e804SNavdeep Parhar 		 */
3914535e804SNavdeep Parhar 		cookie = CPL_COOKIE_FILTER;
3924535e804SNavdeep Parhar 	} else {
3934535e804SNavdeep Parhar 		cookie = G_COOKIE(cpl->cookie);
3944535e804SNavdeep Parhar 	}
3954535e804SNavdeep Parhar 	MPASS(cookie > CPL_COOKIE_RESERVED);
3964535e804SNavdeep Parhar 	MPASS(cookie < nitems(set_tcb_rpl_handlers));
3974535e804SNavdeep Parhar 
3984535e804SNavdeep Parhar 	return (set_tcb_rpl_handlers[cookie](iq, rss, m));
399671bf2b8SNavdeep Parhar }
400671bf2b8SNavdeep Parhar 
4014535e804SNavdeep Parhar static int
4024535e804SNavdeep Parhar l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
4034535e804SNavdeep Parhar     struct mbuf *m)
404671bf2b8SNavdeep Parhar {
4054535e804SNavdeep Parhar 	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
4064535e804SNavdeep Parhar 	unsigned int cookie;
407671bf2b8SNavdeep Parhar 
4084535e804SNavdeep Parhar 	MPASS(m == NULL);
409671bf2b8SNavdeep Parhar 
4104535e804SNavdeep Parhar 	cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER;
4114535e804SNavdeep Parhar 	return (l2t_write_rpl_handlers[cookie](iq, rss, m));
4124535e804SNavdeep Parhar }
413671bf2b8SNavdeep Parhar 
414111638bfSNavdeep Parhar static int
415111638bfSNavdeep Parhar act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
416111638bfSNavdeep Parhar     struct mbuf *m)
417111638bfSNavdeep Parhar {
418111638bfSNavdeep Parhar 	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
419111638bfSNavdeep Parhar 	u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));
420111638bfSNavdeep Parhar 
421111638bfSNavdeep Parhar 	MPASS(m == NULL);
422111638bfSNavdeep Parhar 	MPASS(cookie != CPL_COOKIE_RESERVED);
423111638bfSNavdeep Parhar 
424111638bfSNavdeep Parhar 	return (act_open_rpl_handlers[cookie](iq, rss, m));
425111638bfSNavdeep Parhar }
426111638bfSNavdeep Parhar 
42789f651e7SNavdeep Parhar static int
42889f651e7SNavdeep Parhar abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss,
42989f651e7SNavdeep Parhar     struct mbuf *m)
43089f651e7SNavdeep Parhar {
43189f651e7SNavdeep Parhar 	struct adapter *sc = iq->adapter;
43289f651e7SNavdeep Parhar 	u_int cookie;
43389f651e7SNavdeep Parhar 
43489f651e7SNavdeep Parhar 	MPASS(m == NULL);
43589f651e7SNavdeep Parhar 	if (is_hashfilter(sc))
43689f651e7SNavdeep Parhar 		cookie = CPL_COOKIE_HASHFILTER;
43789f651e7SNavdeep Parhar 	else
43889f651e7SNavdeep Parhar 		cookie = CPL_COOKIE_TOM;
43989f651e7SNavdeep Parhar 
44089f651e7SNavdeep Parhar 	return (abort_rpl_rss_handlers[cookie](iq, rss, m));
44189f651e7SNavdeep Parhar }
44289f651e7SNavdeep Parhar 
4439c707b32SNavdeep Parhar static int
4449c707b32SNavdeep Parhar fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4459c707b32SNavdeep Parhar {
4469c707b32SNavdeep Parhar 	struct adapter *sc = iq->adapter;
4479c707b32SNavdeep Parhar 	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
4489c707b32SNavdeep Parhar 	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
4499c707b32SNavdeep Parhar 	u_int cookie;
4509c707b32SNavdeep Parhar 
4519c707b32SNavdeep Parhar 	MPASS(m == NULL);
4529c707b32SNavdeep Parhar 	if (is_etid(sc, tid))
4539c707b32SNavdeep Parhar 		cookie = CPL_COOKIE_ETHOFLD;
4549c707b32SNavdeep Parhar 	else
4559c707b32SNavdeep Parhar 		cookie = CPL_COOKIE_TOM;
4569c707b32SNavdeep Parhar 
4579c707b32SNavdeep Parhar 	return (fw4_ack_handlers[cookie](iq, rss, m));
4589c707b32SNavdeep Parhar }
4599c707b32SNavdeep Parhar 
4604535e804SNavdeep Parhar static void
4614535e804SNavdeep Parhar t4_init_shared_cpl_handlers(void)
4624535e804SNavdeep Parhar {
4634535e804SNavdeep Parhar 
4644535e804SNavdeep Parhar 	t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler);
4654535e804SNavdeep Parhar 	t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler);
466111638bfSNavdeep Parhar 	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
46789f651e7SNavdeep Parhar 	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
4689c707b32SNavdeep Parhar 	t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
4694535e804SNavdeep Parhar }
4704535e804SNavdeep Parhar 
/*
 * Register (or, with h == NULL, unregister) a per-cookie handler for a CPL
 * opcode whose replies are demultiplexed between multiple consumers.  The
 * shared demux handler for the opcode must already be installed.
 */
void
t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < NUM_CPL_COOKIES);
	MPASS(t4_cpl_handler[opcode] != NULL);

	/* Select the per-cookie dispatch table for this opcode. */
	switch (opcode) {
	case CPL_SET_TCB_RPL:
		loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie];
		break;
	case CPL_L2T_WRITE_RPL:
		loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie];
		break;
	case CPL_ACT_OPEN_RPL:
		loc = (uintptr_t *)&act_open_rpl_handlers[cookie];
		break;
	case CPL_ABORT_RPL_RSS:
		loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie];
		break;
	case CPL_FW4_ACK:
		loc = (uintptr_t *)&fw4_ack_handlers[cookie];
		break;
	default:
		MPASS(0);
		return;
	}
	/* Don't silently replace an existing handler; publish with release. */
	MPASS(h == NULL || *loc == (uintptr_t)NULL);
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}
504671bf2b8SNavdeep Parhar 
/*
 * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
 */
void
t4_sge_modload(void)
{

	/* Payload DMA offset: only 0-7 are representable in hardware. */
	if (fl_pktshift < 0 || fl_pktshift > 7) {
		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
		    " using 0 instead.\n", fl_pktshift);
		fl_pktshift = 0;
	}

	if (spg_len != 64 && spg_len != 128) {
		int len;

#if defined(__i386__) || defined(__amd64__)
		/* Prefer a status page that covers a full cache line. */
		len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
		len = 64;
#endif
		if (spg_len != -1) {
			printf("Invalid hw.cxgbe.spg_len value (%d),"
			    " using %d instead.\n", spg_len, len);
		}
		spg_len = len;
	}

	/* -1 (no feedback), 0 (backpressure), and 1 (drop) are valid. */
	if (cong_drop < -1 || cong_drop > 1) {
		printf("Invalid hw.cxgbe.cong_drop value (%d),"
		    " using 0 instead.\n", cong_drop);
		cong_drop = 0;
	}

	/* Holdoff timer scale (T6+): 1 or 3-17 only. */
	if (tscale != 1 && (tscale < 3 || tscale > 17)) {
		printf("Invalid hw.cxgbe.tscale value (%d),"
		    " using 1 instead.\n", tscale);
		tscale = 1;
	}

	/* Counters reported by t4_sge_extfree_refs(); freed on modunload. */
	extfree_refs = counter_u64_alloc(M_WAITOK);
	extfree_rels = counter_u64_alloc(M_WAITOK);
	counter_u64_zero(extfree_refs);
	counter_u64_zero(extfree_rels);

	/* Install the driver-wide CPL and firmware message handlers. */
	t4_init_shared_cpl_handlers();
	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
#ifdef RATELIMIT
	t4_register_shared_cpl_handler(CPL_FW4_ACK, ethofld_fw4_ack,
	    CPL_COOKIE_ETHOFLD);
#endif
	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
}
56182eff304SNavdeep Parhar 
56282eff304SNavdeep Parhar void
56382eff304SNavdeep Parhar t4_sge_modunload(void)
56482eff304SNavdeep Parhar {
56582eff304SNavdeep Parhar 
56682eff304SNavdeep Parhar 	counter_u64_free(extfree_refs);
56782eff304SNavdeep Parhar 	counter_u64_free(extfree_rels);
56882eff304SNavdeep Parhar }
56982eff304SNavdeep Parhar 
57082eff304SNavdeep Parhar uint64_t
57182eff304SNavdeep Parhar t4_sge_extfree_refs(void)
57282eff304SNavdeep Parhar {
57382eff304SNavdeep Parhar 	uint64_t refs, rels;
57482eff304SNavdeep Parhar 
57582eff304SNavdeep Parhar 	rels = counter_u64_fetch(extfree_rels);
57682eff304SNavdeep Parhar 	refs = counter_u64_fetch(extfree_refs);
57782eff304SNavdeep Parhar 
57882eff304SNavdeep Parhar 	return (refs - rels);
57994586193SNavdeep Parhar }
58094586193SNavdeep Parhar 
58144c6fea8SNavdeep Parhar /* max 4096 */
58244c6fea8SNavdeep Parhar #define MAX_PACK_BOUNDARY 512
58344c6fea8SNavdeep Parhar 
584e3207e19SNavdeep Parhar static inline void
585e3207e19SNavdeep Parhar setup_pad_and_pack_boundaries(struct adapter *sc)
586e3207e19SNavdeep Parhar {
587e3207e19SNavdeep Parhar 	uint32_t v, m;
5880dbc6cfdSNavdeep Parhar 	int pad, pack, pad_shift;
589e3207e19SNavdeep Parhar 
5900dbc6cfdSNavdeep Parhar 	pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
5910dbc6cfdSNavdeep Parhar 	    X_INGPADBOUNDARY_SHIFT;
592e3207e19SNavdeep Parhar 	pad = fl_pad;
5930dbc6cfdSNavdeep Parhar 	if (fl_pad < (1 << pad_shift) ||
5940dbc6cfdSNavdeep Parhar 	    fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
5950dbc6cfdSNavdeep Parhar 	    !powerof2(fl_pad)) {
596e3207e19SNavdeep Parhar 		/*
597e3207e19SNavdeep Parhar 		 * If there is any chance that we might use buffer packing and
598e3207e19SNavdeep Parhar 		 * the chip is a T4, then pick 64 as the pad/pack boundary.  Set
5990dbc6cfdSNavdeep Parhar 		 * it to the minimum allowed in all other cases.
600e3207e19SNavdeep Parhar 		 */
6010dbc6cfdSNavdeep Parhar 		pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;
602e3207e19SNavdeep Parhar 
603e3207e19SNavdeep Parhar 		/*
604e3207e19SNavdeep Parhar 		 * For fl_pad = 0 we'll still write a reasonable value to the
605e3207e19SNavdeep Parhar 		 * register but all the freelists will opt out of padding.
606e3207e19SNavdeep Parhar 		 * We'll complain here only if the user tried to set it to a
607e3207e19SNavdeep Parhar 		 * value greater than 0 that was invalid.
608e3207e19SNavdeep Parhar 		 */
609e3207e19SNavdeep Parhar 		if (fl_pad > 0) {
610e3207e19SNavdeep Parhar 			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
611e3207e19SNavdeep Parhar 			    " (%d), using %d instead.\n", fl_pad, pad);
612e3207e19SNavdeep Parhar 		}
613e3207e19SNavdeep Parhar 	}
614e3207e19SNavdeep Parhar 	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
6150dbc6cfdSNavdeep Parhar 	v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
616e3207e19SNavdeep Parhar 	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
617e3207e19SNavdeep Parhar 
618e3207e19SNavdeep Parhar 	if (is_t4(sc)) {
619e3207e19SNavdeep Parhar 		if (fl_pack != -1 && fl_pack != pad) {
620e3207e19SNavdeep Parhar 			/* Complain but carry on. */
621e3207e19SNavdeep Parhar 			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
622e3207e19SNavdeep Parhar 			    " using %d instead.\n", fl_pack, pad);
623e3207e19SNavdeep Parhar 		}
624e3207e19SNavdeep Parhar 		return;
625e3207e19SNavdeep Parhar 	}
626e3207e19SNavdeep Parhar 
627e3207e19SNavdeep Parhar 	pack = fl_pack;
628e3207e19SNavdeep Parhar 	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
629e3207e19SNavdeep Parhar 	    !powerof2(fl_pack)) {
63044c6fea8SNavdeep Parhar 		if (sc->params.pci.mps > MAX_PACK_BOUNDARY)
63144c6fea8SNavdeep Parhar 			pack = MAX_PACK_BOUNDARY;
63244c6fea8SNavdeep Parhar 		else
633e3207e19SNavdeep Parhar 			pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
634e3207e19SNavdeep Parhar 		MPASS(powerof2(pack));
635e3207e19SNavdeep Parhar 		if (pack < 16)
636e3207e19SNavdeep Parhar 			pack = 16;
637e3207e19SNavdeep Parhar 		if (pack == 32)
638e3207e19SNavdeep Parhar 			pack = 64;
639e3207e19SNavdeep Parhar 		if (pack > 4096)
640e3207e19SNavdeep Parhar 			pack = 4096;
641e3207e19SNavdeep Parhar 		if (fl_pack != -1) {
642e3207e19SNavdeep Parhar 			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
643e3207e19SNavdeep Parhar 			    " (%d), using %d instead.\n", fl_pack, pack);
644e3207e19SNavdeep Parhar 		}
645e3207e19SNavdeep Parhar 	}
646e3207e19SNavdeep Parhar 	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
647e3207e19SNavdeep Parhar 	if (pack == 16)
648e3207e19SNavdeep Parhar 		v = V_INGPACKBOUNDARY(0);
649e3207e19SNavdeep Parhar 	else
650e3207e19SNavdeep Parhar 		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);
651e3207e19SNavdeep Parhar 
652e3207e19SNavdeep Parhar 	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
653e3207e19SNavdeep Parhar 	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
654e3207e19SNavdeep Parhar }
655e3207e19SNavdeep Parhar 
656cf738022SNavdeep Parhar /*
657cf738022SNavdeep Parhar  * adap->params.vpd.cclk must be set up before this is called.
658cf738022SNavdeep Parhar  */
659d14b0ac1SNavdeep Parhar void
660d14b0ac1SNavdeep Parhar t4_tweak_chip_settings(struct adapter *sc)
661d14b0ac1SNavdeep Parhar {
66246e1e307SNavdeep Parhar 	int i, reg;
663d14b0ac1SNavdeep Parhar 	uint32_t v, m;
664d14b0ac1SNavdeep Parhar 	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
665cf738022SNavdeep Parhar 	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
666d14b0ac1SNavdeep Parhar 	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
667d14b0ac1SNavdeep Parhar 	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
66846e1e307SNavdeep Parhar 	static int sw_buf_sizes[] = {
6691458bff9SNavdeep Parhar 		MCLBYTES,
6701458bff9SNavdeep Parhar #if MJUMPAGESIZE != MCLBYTES
6711458bff9SNavdeep Parhar 		MJUMPAGESIZE,
6721458bff9SNavdeep Parhar #endif
6731458bff9SNavdeep Parhar 		MJUM9BYTES,
67446e1e307SNavdeep Parhar 		MJUM16BYTES
6751458bff9SNavdeep Parhar 	};
676d14b0ac1SNavdeep Parhar 
677d14b0ac1SNavdeep Parhar 	KASSERT(sc->flags & MASTER_PF,
678d14b0ac1SNavdeep Parhar 	    ("%s: trying to change chip settings when not master.", __func__));
679d14b0ac1SNavdeep Parhar 
6801458bff9SNavdeep Parhar 	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
681d14b0ac1SNavdeep Parhar 	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
6824defc81bSNavdeep Parhar 	    V_EGRSTATUSPAGESIZE(spg_len == 128);
683d14b0ac1SNavdeep Parhar 	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
68454e4ee71SNavdeep Parhar 
685e3207e19SNavdeep Parhar 	setup_pad_and_pack_boundaries(sc);
6861458bff9SNavdeep Parhar 
687d14b0ac1SNavdeep Parhar 	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
688733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
689733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
690733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
691733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
692733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
693733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
694733b9277SNavdeep Parhar 	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
695d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);
696733b9277SNavdeep Parhar 
6979b11a65dSNavdeep Parhar 	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096);
6989b11a65dSNavdeep Parhar 	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536);
69946e1e307SNavdeep Parhar 	reg = A_SGE_FL_BUFFER_SIZE2;
70046e1e307SNavdeep Parhar 	for (i = 0; i < nitems(sw_buf_sizes); i++) {
70146e1e307SNavdeep Parhar 		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
70246e1e307SNavdeep Parhar 		t4_write_reg(sc, reg, sw_buf_sizes[i]);
70346e1e307SNavdeep Parhar 		reg += 4;
70446e1e307SNavdeep Parhar 		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
70546e1e307SNavdeep Parhar 		t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE);
70646e1e307SNavdeep Parhar 		reg += 4;
70754e4ee71SNavdeep Parhar 	}
70854e4ee71SNavdeep Parhar 
709d14b0ac1SNavdeep Parhar 	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
710d14b0ac1SNavdeep Parhar 	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
711d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);
71254e4ee71SNavdeep Parhar 
713cf738022SNavdeep Parhar 	KASSERT(intr_timer[0] <= timer_max,
714cf738022SNavdeep Parhar 	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
715cf738022SNavdeep Parhar 	    timer_max));
716cf738022SNavdeep Parhar 	for (i = 1; i < nitems(intr_timer); i++) {
717cf738022SNavdeep Parhar 		KASSERT(intr_timer[i] >= intr_timer[i - 1],
718cf738022SNavdeep Parhar 		    ("%s: timers not listed in increasing order (%d)",
719cf738022SNavdeep Parhar 		    __func__, i));
720cf738022SNavdeep Parhar 
721cf738022SNavdeep Parhar 		while (intr_timer[i] > timer_max) {
722cf738022SNavdeep Parhar 			if (i == nitems(intr_timer) - 1) {
723cf738022SNavdeep Parhar 				intr_timer[i] = timer_max;
724cf738022SNavdeep Parhar 				break;
725cf738022SNavdeep Parhar 			}
726cf738022SNavdeep Parhar 			intr_timer[i] += intr_timer[i - 1];
727cf738022SNavdeep Parhar 			intr_timer[i] /= 2;
728cf738022SNavdeep Parhar 		}
729cf738022SNavdeep Parhar 	}
730cf738022SNavdeep Parhar 
731d14b0ac1SNavdeep Parhar 	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
732d14b0ac1SNavdeep Parhar 	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
733d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
734d14b0ac1SNavdeep Parhar 	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
735d14b0ac1SNavdeep Parhar 	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
736d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
737d14b0ac1SNavdeep Parhar 	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
738d14b0ac1SNavdeep Parhar 	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
739d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);
74086e02bf2SNavdeep Parhar 
741d491f8caSNavdeep Parhar 	if (chip_id(sc) >= CHELSIO_T6) {
742d491f8caSNavdeep Parhar 		m = V_TSCALE(M_TSCALE);
743d491f8caSNavdeep Parhar 		if (tscale == 1)
744d491f8caSNavdeep Parhar 			v = 0;
745d491f8caSNavdeep Parhar 		else
746d491f8caSNavdeep Parhar 			v = V_TSCALE(tscale - 2);
747d491f8caSNavdeep Parhar 		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);
7482f318252SNavdeep Parhar 
7492f318252SNavdeep Parhar 		if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
7502f318252SNavdeep Parhar 			m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
7512f318252SNavdeep Parhar 			    V_WRTHRTHRESH(M_WRTHRTHRESH);
7522f318252SNavdeep Parhar 			t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
7532f318252SNavdeep Parhar 			v &= ~m;
7542f318252SNavdeep Parhar 			v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
7552f318252SNavdeep Parhar 			    V_WRTHRTHRESH(16);
7562f318252SNavdeep Parhar 			t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
7572f318252SNavdeep Parhar 		}
758d491f8caSNavdeep Parhar 	}
759d491f8caSNavdeep Parhar 
7607cba15b1SNavdeep Parhar 	/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
761d14b0ac1SNavdeep Parhar 	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
762d14b0ac1SNavdeep Parhar 	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);
763d14b0ac1SNavdeep Parhar 
7647cba15b1SNavdeep Parhar 	/*
7657cba15b1SNavdeep Parhar 	 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP.  These have been
7667cba15b1SNavdeep Parhar 	 * chosen with MAXPHYS = 128K in mind.  The largest DDP buffer that we
7677cba15b1SNavdeep Parhar 	 * may have to deal with is MAXPHYS + 1 page.
7687cba15b1SNavdeep Parhar 	 */
7697cba15b1SNavdeep Parhar 	v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
7707cba15b1SNavdeep Parhar 	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);
7717cba15b1SNavdeep Parhar 
7727cba15b1SNavdeep Parhar 	/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
7737cba15b1SNavdeep Parhar 	m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
774d14b0ac1SNavdeep Parhar 	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);
775d14b0ac1SNavdeep Parhar 
776d14b0ac1SNavdeep Parhar 	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
777d14b0ac1SNavdeep Parhar 	    F_RESETDDPOFFSET;
778d14b0ac1SNavdeep Parhar 	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
779d14b0ac1SNavdeep Parhar 	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
780d14b0ac1SNavdeep Parhar }
781d14b0ac1SNavdeep Parhar 
782d14b0ac1SNavdeep Parhar /*
78346e1e307SNavdeep Parhar  * SGE wants the buffer to be at least 64B and then a multiple of 16.  Its
78446e1e307SNavdeep Parhar  * address mut be 16B aligned.  If padding is in use the buffer's start and end
78546e1e307SNavdeep Parhar  * need to be aligned to the pad boundary as well.  We'll just make sure that
78646e1e307SNavdeep Parhar  * the size is a multiple of the pad boundary here, it is up to the buffer
78746e1e307SNavdeep Parhar  * allocation code to make sure the start of the buffer is aligned.
78838035ed6SNavdeep Parhar  */
78938035ed6SNavdeep Parhar static inline int
790e3207e19SNavdeep Parhar hwsz_ok(struct adapter *sc, int hwsz)
79138035ed6SNavdeep Parhar {
79290e7434aSNavdeep Parhar 	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;
79338035ed6SNavdeep Parhar 
794b741402cSNavdeep Parhar 	return (hwsz >= 64 && (hwsz & mask) == 0);
79538035ed6SNavdeep Parhar }
79638035ed6SNavdeep Parhar 
79738035ed6SNavdeep Parhar /*
798d14b0ac1SNavdeep Parhar  * XXX: driver really should be able to deal with unexpected settings.
799d14b0ac1SNavdeep Parhar  */
800d14b0ac1SNavdeep Parhar int
801d14b0ac1SNavdeep Parhar t4_read_chip_settings(struct adapter *sc)
802d14b0ac1SNavdeep Parhar {
803d14b0ac1SNavdeep Parhar 	struct sge *s = &sc->sge;
80490e7434aSNavdeep Parhar 	struct sge_params *sp = &sc->params.sge;
8051458bff9SNavdeep Parhar 	int i, j, n, rc = 0;
806d14b0ac1SNavdeep Parhar 	uint32_t m, v, r;
807d14b0ac1SNavdeep Parhar 	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
80838035ed6SNavdeep Parhar 	static int sw_buf_sizes[] = {	/* Sorted by size */
8091458bff9SNavdeep Parhar 		MCLBYTES,
8101458bff9SNavdeep Parhar #if MJUMPAGESIZE != MCLBYTES
8111458bff9SNavdeep Parhar 		MJUMPAGESIZE,
8121458bff9SNavdeep Parhar #endif
8131458bff9SNavdeep Parhar 		MJUM9BYTES,
8141458bff9SNavdeep Parhar 		MJUM16BYTES
8151458bff9SNavdeep Parhar 	};
81646e1e307SNavdeep Parhar 	struct rx_buf_info *rxb;
817d14b0ac1SNavdeep Parhar 
81890e7434aSNavdeep Parhar 	m = F_RXPKTCPLMODE;
81990e7434aSNavdeep Parhar 	v = F_RXPKTCPLMODE;
82059c1e950SJohn Baldwin 	r = sc->params.sge.sge_control;
821d14b0ac1SNavdeep Parhar 	if ((r & m) != v) {
822d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
823733b9277SNavdeep Parhar 		rc = EINVAL;
824733b9277SNavdeep Parhar 	}
825733b9277SNavdeep Parhar 
82690e7434aSNavdeep Parhar 	/*
82790e7434aSNavdeep Parhar 	 * If this changes then every single use of PAGE_SHIFT in the driver
82890e7434aSNavdeep Parhar 	 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
82990e7434aSNavdeep Parhar 	 */
83090e7434aSNavdeep Parhar 	if (sp->page_shift != PAGE_SHIFT) {
831d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
832733b9277SNavdeep Parhar 		rc = EINVAL;
833733b9277SNavdeep Parhar 	}
834733b9277SNavdeep Parhar 
83546e1e307SNavdeep Parhar 	s->safe_zidx = -1;
83646e1e307SNavdeep Parhar 	rxb = &s->rx_buf_info[0];
83746e1e307SNavdeep Parhar 	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
83846e1e307SNavdeep Parhar 		rxb->size1 = sw_buf_sizes[i];
83946e1e307SNavdeep Parhar 		rxb->zone = m_getzone(rxb->size1);
84046e1e307SNavdeep Parhar 		rxb->type = m_gettype(rxb->size1);
84146e1e307SNavdeep Parhar 		rxb->size2 = 0;
84246e1e307SNavdeep Parhar 		rxb->hwidx1 = -1;
84346e1e307SNavdeep Parhar 		rxb->hwidx2 = -1;
84446e1e307SNavdeep Parhar 		for (j = 0; j < SGE_FLBUF_SIZES; j++) {
84546e1e307SNavdeep Parhar 			int hwsize = sp->sge_fl_buffer_size[j];
84638035ed6SNavdeep Parhar 
84746e1e307SNavdeep Parhar 			if (!hwsz_ok(sc, hwsize))
848e3207e19SNavdeep Parhar 				continue;
849e3207e19SNavdeep Parhar 
85046e1e307SNavdeep Parhar 			/* hwidx for size1 */
85146e1e307SNavdeep Parhar 			if (rxb->hwidx1 == -1 && rxb->size1 == hwsize)
85246e1e307SNavdeep Parhar 				rxb->hwidx1 = j;
85338035ed6SNavdeep Parhar 
85446e1e307SNavdeep Parhar 			/* hwidx for size2 (buffer packing) */
85546e1e307SNavdeep Parhar 			if (rxb->size1 - CL_METADATA_SIZE < hwsize)
8561458bff9SNavdeep Parhar 				continue;
85746e1e307SNavdeep Parhar 			n = rxb->size1 - hwsize - CL_METADATA_SIZE;
8581458bff9SNavdeep Parhar 			if (n == 0) {
85946e1e307SNavdeep Parhar 				rxb->hwidx2 = j;
86046e1e307SNavdeep Parhar 				rxb->size2 = hwsize;
86146e1e307SNavdeep Parhar 				break;	/* stop looking */
862733b9277SNavdeep Parhar 			}
86346e1e307SNavdeep Parhar 			if (rxb->hwidx2 != -1) {
86446e1e307SNavdeep Parhar 				if (n < sp->sge_fl_buffer_size[rxb->hwidx2] -
86546e1e307SNavdeep Parhar 				    hwsize - CL_METADATA_SIZE) {
86646e1e307SNavdeep Parhar 					rxb->hwidx2 = j;
86746e1e307SNavdeep Parhar 					rxb->size2 = hwsize;
86846e1e307SNavdeep Parhar 				}
86946e1e307SNavdeep Parhar 			} else if (n <= 2 * CL_METADATA_SIZE) {
87046e1e307SNavdeep Parhar 				rxb->hwidx2 = j;
87146e1e307SNavdeep Parhar 				rxb->size2 = hwsize;
87238035ed6SNavdeep Parhar 			}
87338035ed6SNavdeep Parhar 		}
87446e1e307SNavdeep Parhar 		if (rxb->hwidx2 != -1)
87546e1e307SNavdeep Parhar 			sc->flags |= BUF_PACKING_OK;
87646e1e307SNavdeep Parhar 		if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster)
87746e1e307SNavdeep Parhar 			s->safe_zidx = i;
878e3207e19SNavdeep Parhar 	}
879733b9277SNavdeep Parhar 
8806af45170SJohn Baldwin 	if (sc->flags & IS_VF)
8816af45170SJohn Baldwin 		return (0);
8826af45170SJohn Baldwin 
883d14b0ac1SNavdeep Parhar 	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
884d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
885d14b0ac1SNavdeep Parhar 	if (r != v) {
886d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
887d14b0ac1SNavdeep Parhar 		rc = EINVAL;
888d14b0ac1SNavdeep Parhar 	}
889733b9277SNavdeep Parhar 
890d14b0ac1SNavdeep Parhar 	m = v = F_TDDPTAGTCB;
891d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_ULP_RX_CTL);
892d14b0ac1SNavdeep Parhar 	if ((r & m) != v) {
893d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
894d14b0ac1SNavdeep Parhar 		rc = EINVAL;
895d14b0ac1SNavdeep Parhar 	}
896d14b0ac1SNavdeep Parhar 
897d14b0ac1SNavdeep Parhar 	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
898d14b0ac1SNavdeep Parhar 	    F_RESETDDPOFFSET;
899d14b0ac1SNavdeep Parhar 	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
900d14b0ac1SNavdeep Parhar 	r = t4_read_reg(sc, A_TP_PARA_REG5);
901d14b0ac1SNavdeep Parhar 	if ((r & m) != v) {
902d14b0ac1SNavdeep Parhar 		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
903d14b0ac1SNavdeep Parhar 		rc = EINVAL;
904d14b0ac1SNavdeep Parhar 	}
905d14b0ac1SNavdeep Parhar 
906c45b1868SNavdeep Parhar 	t4_init_tp_params(sc, 1);
907d14b0ac1SNavdeep Parhar 
908d14b0ac1SNavdeep Parhar 	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
909d14b0ac1SNavdeep Parhar 	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);
910d14b0ac1SNavdeep Parhar 
911733b9277SNavdeep Parhar 	return (rc);
91254e4ee71SNavdeep Parhar }
91354e4ee71SNavdeep Parhar 
91454e4ee71SNavdeep Parhar int
91554e4ee71SNavdeep Parhar t4_create_dma_tag(struct adapter *sc)
91654e4ee71SNavdeep Parhar {
91754e4ee71SNavdeep Parhar 	int rc;
91854e4ee71SNavdeep Parhar 
91954e4ee71SNavdeep Parhar 	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
92054e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
92154e4ee71SNavdeep Parhar 	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
92254e4ee71SNavdeep Parhar 	    NULL, &sc->dmat);
92354e4ee71SNavdeep Parhar 	if (rc != 0) {
92454e4ee71SNavdeep Parhar 		device_printf(sc->dev,
92554e4ee71SNavdeep Parhar 		    "failed to create main DMA tag: %d\n", rc);
92654e4ee71SNavdeep Parhar 	}
92754e4ee71SNavdeep Parhar 
92854e4ee71SNavdeep Parhar 	return (rc);
92954e4ee71SNavdeep Parhar }
93054e4ee71SNavdeep Parhar 
9316e22f9f3SNavdeep Parhar void
9326e22f9f3SNavdeep Parhar t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
9336e22f9f3SNavdeep Parhar     struct sysctl_oid_list *children)
9346e22f9f3SNavdeep Parhar {
93590e7434aSNavdeep Parhar 	struct sge_params *sp = &sc->params.sge;
9366e22f9f3SNavdeep Parhar 
93738035ed6SNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
9387029da5cSPawel Biernacki 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
9397029da5cSPawel Biernacki 	    sysctl_bufsizes, "A", "freelist buffer sizes");
94038035ed6SNavdeep Parhar 
9416e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
94290e7434aSNavdeep Parhar 	    NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");
9436e22f9f3SNavdeep Parhar 
9446e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
94590e7434aSNavdeep Parhar 	    NULL, sp->pad_boundary, "payload pad boundary (bytes)");
9466e22f9f3SNavdeep Parhar 
9476e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
94890e7434aSNavdeep Parhar 	    NULL, sp->spg_len, "status page size (bytes)");
9496e22f9f3SNavdeep Parhar 
9506e22f9f3SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
9516e22f9f3SNavdeep Parhar 	    NULL, cong_drop, "congestion drop setting");
9521458bff9SNavdeep Parhar 
9531458bff9SNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
95490e7434aSNavdeep Parhar 	    NULL, sp->pack_boundary, "payload pack boundary (bytes)");
9556e22f9f3SNavdeep Parhar }
9566e22f9f3SNavdeep Parhar 
95754e4ee71SNavdeep Parhar int
95854e4ee71SNavdeep Parhar t4_destroy_dma_tag(struct adapter *sc)
95954e4ee71SNavdeep Parhar {
96054e4ee71SNavdeep Parhar 	if (sc->dmat)
96154e4ee71SNavdeep Parhar 		bus_dma_tag_destroy(sc->dmat);
96254e4ee71SNavdeep Parhar 
96354e4ee71SNavdeep Parhar 	return (0);
96454e4ee71SNavdeep Parhar }
96554e4ee71SNavdeep Parhar 
96654e4ee71SNavdeep Parhar /*
96737310a98SNavdeep Parhar  * Allocate and initialize the firmware event queue, control queues, and special
96837310a98SNavdeep Parhar  * purpose rx queues owned by the adapter.
96954e4ee71SNavdeep Parhar  *
97054e4ee71SNavdeep Parhar  * Returns errno on failure.  Resources allocated up to that point may still be
97154e4ee71SNavdeep Parhar  * allocated.  Caller is responsible for cleanup in case this function fails.
97254e4ee71SNavdeep Parhar  */
97354e4ee71SNavdeep Parhar int
974f7dfe243SNavdeep Parhar t4_setup_adapter_queues(struct adapter *sc)
97554e4ee71SNavdeep Parhar {
97637310a98SNavdeep Parhar 	struct sysctl_oid *oid;
97737310a98SNavdeep Parhar 	struct sysctl_oid_list *children;
97837310a98SNavdeep Parhar 	int rc, i;
97954e4ee71SNavdeep Parhar 
98054e4ee71SNavdeep Parhar 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
98154e4ee71SNavdeep Parhar 
982733b9277SNavdeep Parhar 	sysctl_ctx_init(&sc->ctx);
983733b9277SNavdeep Parhar 	sc->flags |= ADAP_SYSCTL_CTX;
98454e4ee71SNavdeep Parhar 
98556599263SNavdeep Parhar 	/*
98656599263SNavdeep Parhar 	 * Firmware event queue
98756599263SNavdeep Parhar 	 */
988733b9277SNavdeep Parhar 	rc = alloc_fwq(sc);
989aa95b653SNavdeep Parhar 	if (rc != 0)
990f7dfe243SNavdeep Parhar 		return (rc);
991f7dfe243SNavdeep Parhar 
992f7dfe243SNavdeep Parhar 	/*
99337310a98SNavdeep Parhar 	 * That's all for the VF driver.
994f7dfe243SNavdeep Parhar 	 */
99537310a98SNavdeep Parhar 	if (sc->flags & IS_VF)
99637310a98SNavdeep Parhar 		return (rc);
99737310a98SNavdeep Parhar 
99837310a98SNavdeep Parhar 	oid = device_get_sysctl_tree(sc->dev);
99937310a98SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
100037310a98SNavdeep Parhar 
100137310a98SNavdeep Parhar 	/*
100237310a98SNavdeep Parhar 	 * XXX: General purpose rx queues, one per port.
100337310a98SNavdeep Parhar 	 */
100437310a98SNavdeep Parhar 
100537310a98SNavdeep Parhar 	/*
100637310a98SNavdeep Parhar 	 * Control queues, one per port.
100737310a98SNavdeep Parhar 	 */
100837310a98SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq",
10097029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues");
101037310a98SNavdeep Parhar 	for_each_port(sc, i) {
101137310a98SNavdeep Parhar 		struct sge_wrq *ctrlq = &sc->sge.ctrlq[i];
101237310a98SNavdeep Parhar 
101337310a98SNavdeep Parhar 		rc = alloc_ctrlq(sc, ctrlq, i, oid);
101437310a98SNavdeep Parhar 		if (rc != 0)
101537310a98SNavdeep Parhar 			return (rc);
101637310a98SNavdeep Parhar 	}
101754e4ee71SNavdeep Parhar 
101854e4ee71SNavdeep Parhar 	return (rc);
101954e4ee71SNavdeep Parhar }
102054e4ee71SNavdeep Parhar 
102154e4ee71SNavdeep Parhar /*
102254e4ee71SNavdeep Parhar  * Idempotent
102354e4ee71SNavdeep Parhar  */
102454e4ee71SNavdeep Parhar int
1025f7dfe243SNavdeep Parhar t4_teardown_adapter_queues(struct adapter *sc)
102654e4ee71SNavdeep Parhar {
102737310a98SNavdeep Parhar 	int i;
102854e4ee71SNavdeep Parhar 
102954e4ee71SNavdeep Parhar 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
103054e4ee71SNavdeep Parhar 
1031733b9277SNavdeep Parhar 	/* Do this before freeing the queue */
1032733b9277SNavdeep Parhar 	if (sc->flags & ADAP_SYSCTL_CTX) {
1033f7dfe243SNavdeep Parhar 		sysctl_ctx_free(&sc->ctx);
1034733b9277SNavdeep Parhar 		sc->flags &= ~ADAP_SYSCTL_CTX;
1035f7dfe243SNavdeep Parhar 	}
1036f7dfe243SNavdeep Parhar 
1037b8bfcb71SNavdeep Parhar 	if (!(sc->flags & IS_VF)) {
103837310a98SNavdeep Parhar 		for_each_port(sc, i)
103937310a98SNavdeep Parhar 			free_wrq(sc, &sc->sge.ctrlq[i]);
1040b8bfcb71SNavdeep Parhar 	}
1041733b9277SNavdeep Parhar 	free_fwq(sc);
104254e4ee71SNavdeep Parhar 
104354e4ee71SNavdeep Parhar 	return (0);
104454e4ee71SNavdeep Parhar }
104554e4ee71SNavdeep Parhar 
104638035ed6SNavdeep Parhar /* Maximum payload that can be delivered with a single iq descriptor */
10478340ece5SNavdeep Parhar static inline int
10488bf30903SNavdeep Parhar mtu_to_max_payload(struct adapter *sc, int mtu)
10498340ece5SNavdeep Parhar {
10508340ece5SNavdeep Parhar 
105138035ed6SNavdeep Parhar 	/* large enough even when hw VLAN extraction is disabled */
10528bf30903SNavdeep Parhar 	return (sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
10538bf30903SNavdeep Parhar 	    ETHER_VLAN_ENCAP_LEN + mtu);
105438035ed6SNavdeep Parhar }
10556eb3180fSNavdeep Parhar 
1056733b9277SNavdeep Parhar int
1057fe2ebb76SJohn Baldwin t4_setup_vi_queues(struct vi_info *vi)
1058733b9277SNavdeep Parhar {
1059f549e352SNavdeep Parhar 	int rc = 0, i, intr_idx, iqidx;
1060733b9277SNavdeep Parhar 	struct sge_rxq *rxq;
1061733b9277SNavdeep Parhar 	struct sge_txq *txq;
106209fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1063733b9277SNavdeep Parhar 	struct sge_ofld_rxq *ofld_rxq;
1064eff62dbaSNavdeep Parhar #endif
1065eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1066733b9277SNavdeep Parhar 	struct sge_wrq *ofld_txq;
1067298d969cSNavdeep Parhar #endif
1068298d969cSNavdeep Parhar #ifdef DEV_NETMAP
106962291463SNavdeep Parhar 	int saved_idx;
1070298d969cSNavdeep Parhar 	struct sge_nm_rxq *nm_rxq;
1071298d969cSNavdeep Parhar 	struct sge_nm_txq *nm_txq;
1072733b9277SNavdeep Parhar #endif
1073733b9277SNavdeep Parhar 	char name[16];
1074fe2ebb76SJohn Baldwin 	struct port_info *pi = vi->pi;
1075733b9277SNavdeep Parhar 	struct adapter *sc = pi->adapter;
1076fe2ebb76SJohn Baldwin 	struct ifnet *ifp = vi->ifp;
1077fe2ebb76SJohn Baldwin 	struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev);
1078733b9277SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
1079e3207e19SNavdeep Parhar 	int maxp, mtu = ifp->if_mtu;
1080733b9277SNavdeep Parhar 
1081733b9277SNavdeep Parhar 	/* Interrupt vector to start from (when using multiple vectors) */
1082f549e352SNavdeep Parhar 	intr_idx = vi->first_intr;
1083fe2ebb76SJohn Baldwin 
1084fe2ebb76SJohn Baldwin #ifdef DEV_NETMAP
108562291463SNavdeep Parhar 	saved_idx = intr_idx;
108662291463SNavdeep Parhar 	if (ifp->if_capabilities & IFCAP_NETMAP) {
108762291463SNavdeep Parhar 
108862291463SNavdeep Parhar 		/* netmap is supported with direct interrupts only. */
1089f549e352SNavdeep Parhar 		MPASS(!forwarding_intr_to_fwq(sc));
109062291463SNavdeep Parhar 
1091fe2ebb76SJohn Baldwin 		/*
1092fe2ebb76SJohn Baldwin 		 * We don't have buffers to back the netmap rx queues
1093fe2ebb76SJohn Baldwin 		 * right now so we create the queues in a way that
1094fe2ebb76SJohn Baldwin 		 * doesn't set off any congestion signal in the chip.
1095fe2ebb76SJohn Baldwin 		 */
109662291463SNavdeep Parhar 		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq",
10977029da5cSPawel Biernacki 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues");
1098fe2ebb76SJohn Baldwin 		for_each_nm_rxq(vi, i, nm_rxq) {
1099fe2ebb76SJohn Baldwin 			rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid);
1100fe2ebb76SJohn Baldwin 			if (rc != 0)
1101fe2ebb76SJohn Baldwin 				goto done;
1102fe2ebb76SJohn Baldwin 			intr_idx++;
1103fe2ebb76SJohn Baldwin 		}
1104fe2ebb76SJohn Baldwin 
110562291463SNavdeep Parhar 		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
11067029da5cSPawel Biernacki 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues");
1107fe2ebb76SJohn Baldwin 		for_each_nm_txq(vi, i, nm_txq) {
1108f549e352SNavdeep Parhar 			iqidx = vi->first_nm_rxq + (i % vi->nnmrxq);
1109f549e352SNavdeep Parhar 			rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid);
1110fe2ebb76SJohn Baldwin 			if (rc != 0)
1111fe2ebb76SJohn Baldwin 				goto done;
1112fe2ebb76SJohn Baldwin 		}
1113fe2ebb76SJohn Baldwin 	}
111462291463SNavdeep Parhar 
111562291463SNavdeep Parhar 	/* Normal rx queues and netmap rx queues share the same interrupts. */
111662291463SNavdeep Parhar 	intr_idx = saved_idx;
1117fe2ebb76SJohn Baldwin #endif
1118733b9277SNavdeep Parhar 
1119733b9277SNavdeep Parhar 	/*
1120f549e352SNavdeep Parhar 	 * Allocate rx queues first because a default iqid is required when
1121f549e352SNavdeep Parhar 	 * creating a tx queue.
1122733b9277SNavdeep Parhar 	 */
11238bf30903SNavdeep Parhar 	maxp = mtu_to_max_payload(sc, mtu);
1124fe2ebb76SJohn Baldwin 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
11257029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues");
1126fe2ebb76SJohn Baldwin 	for_each_rxq(vi, i, rxq) {
112754e4ee71SNavdeep Parhar 
1128fe2ebb76SJohn Baldwin 		init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);
112954e4ee71SNavdeep Parhar 
113054e4ee71SNavdeep Parhar 		snprintf(name, sizeof(name), "%s rxq%d-fl",
1131fe2ebb76SJohn Baldwin 		    device_get_nameunit(vi->dev), i);
1132fe2ebb76SJohn Baldwin 		init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);
113354e4ee71SNavdeep Parhar 
1134f549e352SNavdeep Parhar 		rc = alloc_rxq(vi, rxq,
1135f549e352SNavdeep Parhar 		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
113654e4ee71SNavdeep Parhar 		if (rc != 0)
113754e4ee71SNavdeep Parhar 			goto done;
1138733b9277SNavdeep Parhar 		intr_idx++;
1139733b9277SNavdeep Parhar 	}
114062291463SNavdeep Parhar #ifdef DEV_NETMAP
114162291463SNavdeep Parhar 	if (ifp->if_capabilities & IFCAP_NETMAP)
114262291463SNavdeep Parhar 		intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
114362291463SNavdeep Parhar #endif
114409fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1145fe2ebb76SJohn Baldwin 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
11467029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues for offloaded TCP connections");
1147fe2ebb76SJohn Baldwin 	for_each_ofld_rxq(vi, i, ofld_rxq) {
1148733b9277SNavdeep Parhar 
114908cd1f11SNavdeep Parhar 		init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx,
1150fe2ebb76SJohn Baldwin 		    vi->qsize_rxq);
1151733b9277SNavdeep Parhar 
1152733b9277SNavdeep Parhar 		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
1153fe2ebb76SJohn Baldwin 		    device_get_nameunit(vi->dev), i);
1154fe2ebb76SJohn Baldwin 		init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);
1155733b9277SNavdeep Parhar 
1156f549e352SNavdeep Parhar 		rc = alloc_ofld_rxq(vi, ofld_rxq,
1157f549e352SNavdeep Parhar 		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
1158733b9277SNavdeep Parhar 		if (rc != 0)
1159733b9277SNavdeep Parhar 			goto done;
1160733b9277SNavdeep Parhar 		intr_idx++;
1161733b9277SNavdeep Parhar 	}
1162733b9277SNavdeep Parhar #endif
1163733b9277SNavdeep Parhar 
1164733b9277SNavdeep Parhar 	/*
1165f549e352SNavdeep Parhar 	 * Now the tx queues.
1166733b9277SNavdeep Parhar 	 */
11677029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq",
11687029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues");
1169fe2ebb76SJohn Baldwin 	for_each_txq(vi, i, txq) {
1170f549e352SNavdeep Parhar 		iqidx = vi->first_rxq + (i % vi->nrxq);
117154e4ee71SNavdeep Parhar 		snprintf(name, sizeof(name), "%s txq%d",
1172fe2ebb76SJohn Baldwin 		    device_get_nameunit(vi->dev), i);
1173f549e352SNavdeep Parhar 		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan,
1174f549e352SNavdeep Parhar 		    sc->sge.rxq[iqidx].iq.cntxt_id, name);
117554e4ee71SNavdeep Parhar 
1176fe2ebb76SJohn Baldwin 		rc = alloc_txq(vi, txq, i, oid);
117754e4ee71SNavdeep Parhar 		if (rc != 0)
117854e4ee71SNavdeep Parhar 			goto done;
117954e4ee71SNavdeep Parhar 	}
1180eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1181fe2ebb76SJohn Baldwin 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
11827029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues for TOE/ETHOFLD");
1183fe2ebb76SJohn Baldwin 	for_each_ofld_txq(vi, i, ofld_txq) {
1184298d969cSNavdeep Parhar 		struct sysctl_oid *oid2;
1185733b9277SNavdeep Parhar 
1186733b9277SNavdeep Parhar 		snprintf(name, sizeof(name), "%s ofld_txq%d",
1187fe2ebb76SJohn Baldwin 		    device_get_nameunit(vi->dev), i);
1188c3a88be4SNavdeep Parhar 		if (vi->nofldrxq > 0) {
1189eff62dbaSNavdeep Parhar 			iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq);
1190c3a88be4SNavdeep Parhar 			init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
1191c3a88be4SNavdeep Parhar 			    pi->tx_chan, sc->sge.ofld_rxq[iqidx].iq.cntxt_id,
1192c3a88be4SNavdeep Parhar 			    name);
1193c3a88be4SNavdeep Parhar 		} else {
1194eff62dbaSNavdeep Parhar 			iqidx = vi->first_rxq + (i % vi->nrxq);
1195c3a88be4SNavdeep Parhar 			init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
1196c3a88be4SNavdeep Parhar 			    pi->tx_chan, sc->sge.rxq[iqidx].iq.cntxt_id, name);
1197c3a88be4SNavdeep Parhar 		}
1198733b9277SNavdeep Parhar 
1199733b9277SNavdeep Parhar 		snprintf(name, sizeof(name), "%d", i);
1200fe2ebb76SJohn Baldwin 		oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
12017029da5cSPawel Biernacki 		    name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload tx queue");
1202733b9277SNavdeep Parhar 
1203fe2ebb76SJohn Baldwin 		rc = alloc_wrq(sc, vi, ofld_txq, oid2);
1204298d969cSNavdeep Parhar 		if (rc != 0)
1205298d969cSNavdeep Parhar 			goto done;
1206298d969cSNavdeep Parhar 	}
1207298d969cSNavdeep Parhar #endif
120854e4ee71SNavdeep Parhar done:
120954e4ee71SNavdeep Parhar 	if (rc)
1210fe2ebb76SJohn Baldwin 		t4_teardown_vi_queues(vi);
121154e4ee71SNavdeep Parhar 
121254e4ee71SNavdeep Parhar 	return (rc);
121354e4ee71SNavdeep Parhar }
121454e4ee71SNavdeep Parhar 
121554e4ee71SNavdeep Parhar /*
121654e4ee71SNavdeep Parhar  * Idempotent
121754e4ee71SNavdeep Parhar  */
121854e4ee71SNavdeep Parhar int
1219fe2ebb76SJohn Baldwin t4_teardown_vi_queues(struct vi_info *vi)
122054e4ee71SNavdeep Parhar {
122154e4ee71SNavdeep Parhar 	int i;
122254e4ee71SNavdeep Parhar 	struct sge_rxq *rxq;
122354e4ee71SNavdeep Parhar 	struct sge_txq *txq;
122437310a98SNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
122537310a98SNavdeep Parhar 	struct port_info *pi = vi->pi;
122637310a98SNavdeep Parhar 	struct adapter *sc = pi->adapter;
122737310a98SNavdeep Parhar 	struct sge_wrq *ofld_txq;
122837310a98SNavdeep Parhar #endif
122909fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1230733b9277SNavdeep Parhar 	struct sge_ofld_rxq *ofld_rxq;
1231eff62dbaSNavdeep Parhar #endif
1232298d969cSNavdeep Parhar #ifdef DEV_NETMAP
1233298d969cSNavdeep Parhar 	struct sge_nm_rxq *nm_rxq;
1234298d969cSNavdeep Parhar 	struct sge_nm_txq *nm_txq;
1235298d969cSNavdeep Parhar #endif
123654e4ee71SNavdeep Parhar 
123754e4ee71SNavdeep Parhar 	/* Do this before freeing the queues */
1238fe2ebb76SJohn Baldwin 	if (vi->flags & VI_SYSCTL_CTX) {
1239fe2ebb76SJohn Baldwin 		sysctl_ctx_free(&vi->ctx);
1240fe2ebb76SJohn Baldwin 		vi->flags &= ~VI_SYSCTL_CTX;
124154e4ee71SNavdeep Parhar 	}
124254e4ee71SNavdeep Parhar 
1243fe2ebb76SJohn Baldwin #ifdef DEV_NETMAP
124462291463SNavdeep Parhar 	if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
1245fe2ebb76SJohn Baldwin 		for_each_nm_txq(vi, i, nm_txq) {
1246fe2ebb76SJohn Baldwin 			free_nm_txq(vi, nm_txq);
1247fe2ebb76SJohn Baldwin 		}
1248fe2ebb76SJohn Baldwin 
1249fe2ebb76SJohn Baldwin 		for_each_nm_rxq(vi, i, nm_rxq) {
1250fe2ebb76SJohn Baldwin 			free_nm_rxq(vi, nm_rxq);
1251fe2ebb76SJohn Baldwin 		}
1252fe2ebb76SJohn Baldwin 	}
1253fe2ebb76SJohn Baldwin #endif
1254fe2ebb76SJohn Baldwin 
1255733b9277SNavdeep Parhar 	/*
1256733b9277SNavdeep Parhar 	 * Take down all the tx queues first, as they reference the rx queues
1257733b9277SNavdeep Parhar 	 * (for egress updates, etc.).
1258733b9277SNavdeep Parhar 	 */
1259733b9277SNavdeep Parhar 
1260fe2ebb76SJohn Baldwin 	for_each_txq(vi, i, txq) {
1261fe2ebb76SJohn Baldwin 		free_txq(vi, txq);
126254e4ee71SNavdeep Parhar 	}
1263eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1264fe2ebb76SJohn Baldwin 	for_each_ofld_txq(vi, i, ofld_txq) {
1265733b9277SNavdeep Parhar 		free_wrq(sc, ofld_txq);
1266733b9277SNavdeep Parhar 	}
1267733b9277SNavdeep Parhar #endif
1268733b9277SNavdeep Parhar 
1269733b9277SNavdeep Parhar 	/*
1270f549e352SNavdeep Parhar 	 * Then take down the rx queues.
1271733b9277SNavdeep Parhar 	 */
1272733b9277SNavdeep Parhar 
1273fe2ebb76SJohn Baldwin 	for_each_rxq(vi, i, rxq) {
1274fe2ebb76SJohn Baldwin 		free_rxq(vi, rxq);
127554e4ee71SNavdeep Parhar 	}
127609fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
1277fe2ebb76SJohn Baldwin 	for_each_ofld_rxq(vi, i, ofld_rxq) {
1278fe2ebb76SJohn Baldwin 		free_ofld_rxq(vi, ofld_rxq);
1279733b9277SNavdeep Parhar 	}
1280733b9277SNavdeep Parhar #endif
1281733b9277SNavdeep Parhar 
128254e4ee71SNavdeep Parhar 	return (0);
128354e4ee71SNavdeep Parhar }
128454e4ee71SNavdeep Parhar 
1285733b9277SNavdeep Parhar /*
12863098bcfcSNavdeep Parhar  * Interrupt handler when the driver is using only 1 interrupt.  This is a very
12873098bcfcSNavdeep Parhar  * unusual scenario.
12883098bcfcSNavdeep Parhar  *
12893098bcfcSNavdeep Parhar  * a) Deals with errors, if any.
12903098bcfcSNavdeep Parhar  * b) Services firmware event queue, which is taking interrupts for all other
12913098bcfcSNavdeep Parhar  *    queues.
1292733b9277SNavdeep Parhar  */
129354e4ee71SNavdeep Parhar void
129454e4ee71SNavdeep Parhar t4_intr_all(void *arg)
129554e4ee71SNavdeep Parhar {
129654e4ee71SNavdeep Parhar 	struct adapter *sc = arg;
1297733b9277SNavdeep Parhar 	struct sge_iq *fwq = &sc->sge.fwq;
129854e4ee71SNavdeep Parhar 
12993098bcfcSNavdeep Parhar 	MPASS(sc->intr_count == 1);
13003098bcfcSNavdeep Parhar 
13011dca7005SNavdeep Parhar 	if (sc->intr_type == INTR_INTX)
13021dca7005SNavdeep Parhar 		t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
13031dca7005SNavdeep Parhar 
130454e4ee71SNavdeep Parhar 	t4_intr_err(arg);
13053098bcfcSNavdeep Parhar 	t4_intr_evt(fwq);
130654e4ee71SNavdeep Parhar }
130754e4ee71SNavdeep Parhar 
13083098bcfcSNavdeep Parhar /*
13093098bcfcSNavdeep Parhar  * Interrupt handler for errors (installed directly when multiple interrupts are
13103098bcfcSNavdeep Parhar  * being used, or called by t4_intr_all).
13113098bcfcSNavdeep Parhar  */
131254e4ee71SNavdeep Parhar void
131354e4ee71SNavdeep Parhar t4_intr_err(void *arg)
131454e4ee71SNavdeep Parhar {
131554e4ee71SNavdeep Parhar 	struct adapter *sc = arg;
1316dd3b96ecSNavdeep Parhar 	uint32_t v;
1317cb7c3f12SNavdeep Parhar 	const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;
131854e4ee71SNavdeep Parhar 
1319cb7c3f12SNavdeep Parhar 	if (sc->flags & ADAP_ERR)
1320cb7c3f12SNavdeep Parhar 		return;
1321cb7c3f12SNavdeep Parhar 
1322dd3b96ecSNavdeep Parhar 	v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE));
1323dd3b96ecSNavdeep Parhar 	if (v & F_PFSW) {
1324dd3b96ecSNavdeep Parhar 		sc->swintr++;
1325dd3b96ecSNavdeep Parhar 		t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v);
1326dd3b96ecSNavdeep Parhar 	}
1327dd3b96ecSNavdeep Parhar 
1328cb7c3f12SNavdeep Parhar 	t4_slow_intr_handler(sc, verbose);
132954e4ee71SNavdeep Parhar }
133054e4ee71SNavdeep Parhar 
13313098bcfcSNavdeep Parhar /*
13323098bcfcSNavdeep Parhar  * Interrupt handler for iq-only queues.  The firmware event queue is the only
13333098bcfcSNavdeep Parhar  * such queue right now.
13343098bcfcSNavdeep Parhar  */
133554e4ee71SNavdeep Parhar void
133654e4ee71SNavdeep Parhar t4_intr_evt(void *arg)
133754e4ee71SNavdeep Parhar {
133854e4ee71SNavdeep Parhar 	struct sge_iq *iq = arg;
13392be67d29SNavdeep Parhar 
1340733b9277SNavdeep Parhar 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
1341733b9277SNavdeep Parhar 		service_iq(iq, 0);
1342da6e3387SNavdeep Parhar 		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
13432be67d29SNavdeep Parhar 	}
13442be67d29SNavdeep Parhar }
13452be67d29SNavdeep Parhar 
13463098bcfcSNavdeep Parhar /*
13473098bcfcSNavdeep Parhar  * Interrupt handler for iq+fl queues.
13483098bcfcSNavdeep Parhar  */
1349733b9277SNavdeep Parhar void
1350733b9277SNavdeep Parhar t4_intr(void *arg)
13512be67d29SNavdeep Parhar {
13522be67d29SNavdeep Parhar 	struct sge_iq *iq = arg;
1353733b9277SNavdeep Parhar 
1354733b9277SNavdeep Parhar 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
13553098bcfcSNavdeep Parhar 		service_iq_fl(iq, 0);
1356da6e3387SNavdeep Parhar 		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
1357733b9277SNavdeep Parhar 	}
1358733b9277SNavdeep Parhar }
1359733b9277SNavdeep Parhar 
13603098bcfcSNavdeep Parhar #ifdef DEV_NETMAP
13613098bcfcSNavdeep Parhar /*
13623098bcfcSNavdeep Parhar  * Interrupt handler for netmap rx queues.
13633098bcfcSNavdeep Parhar  */
13643098bcfcSNavdeep Parhar void
13653098bcfcSNavdeep Parhar t4_nm_intr(void *arg)
13663098bcfcSNavdeep Parhar {
13673098bcfcSNavdeep Parhar 	struct sge_nm_rxq *nm_rxq = arg;
13683098bcfcSNavdeep Parhar 
13693098bcfcSNavdeep Parhar 	if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) {
13703098bcfcSNavdeep Parhar 		service_nm_rxq(nm_rxq);
1371da6e3387SNavdeep Parhar 		(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON);
13723098bcfcSNavdeep Parhar 	}
13733098bcfcSNavdeep Parhar }
13743098bcfcSNavdeep Parhar 
13753098bcfcSNavdeep Parhar /*
13763098bcfcSNavdeep Parhar  * Interrupt handler for vectors shared between NIC and netmap rx queues.
13773098bcfcSNavdeep Parhar  */
137862291463SNavdeep Parhar void
137962291463SNavdeep Parhar t4_vi_intr(void *arg)
138062291463SNavdeep Parhar {
138162291463SNavdeep Parhar 	struct irq *irq = arg;
138262291463SNavdeep Parhar 
13833098bcfcSNavdeep Parhar 	MPASS(irq->nm_rxq != NULL);
138462291463SNavdeep Parhar 	t4_nm_intr(irq->nm_rxq);
13853098bcfcSNavdeep Parhar 
13863098bcfcSNavdeep Parhar 	MPASS(irq->rxq != NULL);
138762291463SNavdeep Parhar 	t4_intr(irq->rxq);
138862291463SNavdeep Parhar }
13893098bcfcSNavdeep Parhar #endif
139046f48ee5SNavdeep Parhar 
1391733b9277SNavdeep Parhar /*
13923098bcfcSNavdeep Parhar  * Deals with interrupts on an iq-only (no freelist) queue.
1393733b9277SNavdeep Parhar  */
1394733b9277SNavdeep Parhar static int
1395733b9277SNavdeep Parhar service_iq(struct sge_iq *iq, int budget)
1396733b9277SNavdeep Parhar {
1397733b9277SNavdeep Parhar 	struct sge_iq *q;
139854e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
1399b2daa9a9SNavdeep Parhar 	struct iq_desc *d = &iq->desc[iq->cidx];
14004d6db4e0SNavdeep Parhar 	int ndescs = 0, limit;
14013098bcfcSNavdeep Parhar 	int rsp_type;
1402733b9277SNavdeep Parhar 	uint32_t lq;
1403733b9277SNavdeep Parhar 	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
1404733b9277SNavdeep Parhar 
1405733b9277SNavdeep Parhar 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
14063098bcfcSNavdeep Parhar 	KASSERT((iq->flags & IQ_HAS_FL) == 0,
14073098bcfcSNavdeep Parhar 	    ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq,
14083098bcfcSNavdeep Parhar 	    iq->flags));
14093098bcfcSNavdeep Parhar 	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
14103098bcfcSNavdeep Parhar 	MPASS((iq->flags & IQ_LRO_ENABLED) == 0);
1411733b9277SNavdeep Parhar 
14124d6db4e0SNavdeep Parhar 	limit = budget ? budget : iq->qsize / 16;
14134d6db4e0SNavdeep Parhar 
1414733b9277SNavdeep Parhar 	/*
1415733b9277SNavdeep Parhar 	 * We always come back and check the descriptor ring for new indirect
1416733b9277SNavdeep Parhar 	 * interrupts and other responses after running a single handler.
1417733b9277SNavdeep Parhar 	 */
1418733b9277SNavdeep Parhar 	for (;;) {
1419b2daa9a9SNavdeep Parhar 		while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {
142054e4ee71SNavdeep Parhar 
142154e4ee71SNavdeep Parhar 			rmb();
142254e4ee71SNavdeep Parhar 
1423b2daa9a9SNavdeep Parhar 			rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
1424b2daa9a9SNavdeep Parhar 			lq = be32toh(d->rsp.pldbuflen_qid);
142554e4ee71SNavdeep Parhar 
1426733b9277SNavdeep Parhar 			switch (rsp_type) {
1427733b9277SNavdeep Parhar 			case X_RSPD_TYPE_FLBUF:
14283098bcfcSNavdeep Parhar 				panic("%s: data for an iq (%p) with no freelist",
14293098bcfcSNavdeep Parhar 				    __func__, iq);
143054e4ee71SNavdeep Parhar 
14313098bcfcSNavdeep Parhar 				/* NOTREACHED */
1432733b9277SNavdeep Parhar 
1433733b9277SNavdeep Parhar 			case X_RSPD_TYPE_CPL:
1434b2daa9a9SNavdeep Parhar 				KASSERT(d->rss.opcode < NUM_CPL_CMDS,
1435733b9277SNavdeep Parhar 				    ("%s: bad opcode %02x.", __func__,
1436b2daa9a9SNavdeep Parhar 				    d->rss.opcode));
14373098bcfcSNavdeep Parhar 				t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL);
1438733b9277SNavdeep Parhar 				break;
1439733b9277SNavdeep Parhar 
1440733b9277SNavdeep Parhar 			case X_RSPD_TYPE_INTR:
144198005176SNavdeep Parhar 				/*
144298005176SNavdeep Parhar 				 * There are 1K interrupt-capable queues (qids 0
144398005176SNavdeep Parhar 				 * through 1023).  A response type indicating a
144498005176SNavdeep Parhar 				 * forwarded interrupt with a qid >= 1K is an
144598005176SNavdeep Parhar 				 * iWARP async notification.
144698005176SNavdeep Parhar 				 */
14473098bcfcSNavdeep Parhar 				if (__predict_true(lq >= 1024)) {
1448671bf2b8SNavdeep Parhar 					t4_an_handler(iq, &d->rsp);
144998005176SNavdeep Parhar 					break;
145098005176SNavdeep Parhar 				}
145198005176SNavdeep Parhar 
1452ec55567cSJohn Baldwin 				q = sc->sge.iqmap[lq - sc->sge.iq_start -
1453ec55567cSJohn Baldwin 				    sc->sge.iq_base];
1454733b9277SNavdeep Parhar 				if (atomic_cmpset_int(&q->state, IQS_IDLE,
1455733b9277SNavdeep Parhar 				    IQS_BUSY)) {
14563098bcfcSNavdeep Parhar 					if (service_iq_fl(q, q->qsize / 16) == 0) {
1457da6e3387SNavdeep Parhar 						(void) atomic_cmpset_int(&q->state,
1458733b9277SNavdeep Parhar 						    IQS_BUSY, IQS_IDLE);
1459733b9277SNavdeep Parhar 					} else {
1460733b9277SNavdeep Parhar 						STAILQ_INSERT_TAIL(&iql, q,
1461733b9277SNavdeep Parhar 						    link);
1462733b9277SNavdeep Parhar 					}
1463733b9277SNavdeep Parhar 				}
1464733b9277SNavdeep Parhar 				break;
1465733b9277SNavdeep Parhar 
1466733b9277SNavdeep Parhar 			default:
146798005176SNavdeep Parhar 				KASSERT(0,
146898005176SNavdeep Parhar 				    ("%s: illegal response type %d on iq %p",
146998005176SNavdeep Parhar 				    __func__, rsp_type, iq));
147098005176SNavdeep Parhar 				log(LOG_ERR,
147198005176SNavdeep Parhar 				    "%s: illegal response type %d on iq %p",
147298005176SNavdeep Parhar 				    device_get_nameunit(sc->dev), rsp_type, iq);
147309fe6320SNavdeep Parhar 				break;
147454e4ee71SNavdeep Parhar 			}
147556599263SNavdeep Parhar 
1476b2daa9a9SNavdeep Parhar 			d++;
1477b2daa9a9SNavdeep Parhar 			if (__predict_false(++iq->cidx == iq->sidx)) {
1478b2daa9a9SNavdeep Parhar 				iq->cidx = 0;
1479b2daa9a9SNavdeep Parhar 				iq->gen ^= F_RSPD_GEN;
1480b2daa9a9SNavdeep Parhar 				d = &iq->desc[0];
1481b2daa9a9SNavdeep Parhar 			}
1482b2daa9a9SNavdeep Parhar 			if (__predict_false(++ndescs == limit)) {
1483315048f2SJohn Baldwin 				t4_write_reg(sc, sc->sge_gts_reg,
1484733b9277SNavdeep Parhar 				    V_CIDXINC(ndescs) |
1485733b9277SNavdeep Parhar 				    V_INGRESSQID(iq->cntxt_id) |
1486733b9277SNavdeep Parhar 				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
1487733b9277SNavdeep Parhar 				ndescs = 0;
1488733b9277SNavdeep Parhar 
14893098bcfcSNavdeep Parhar 				if (budget) {
14903098bcfcSNavdeep Parhar 					return (EINPROGRESS);
14913098bcfcSNavdeep Parhar 				}
14923098bcfcSNavdeep Parhar 			}
14933098bcfcSNavdeep Parhar 		}
14943098bcfcSNavdeep Parhar 
14953098bcfcSNavdeep Parhar 		if (STAILQ_EMPTY(&iql))
14963098bcfcSNavdeep Parhar 			break;
14973098bcfcSNavdeep Parhar 
14983098bcfcSNavdeep Parhar 		/*
14993098bcfcSNavdeep Parhar 		 * Process the head only, and send it to the back of the list if
15003098bcfcSNavdeep Parhar 		 * it's still not done.
15013098bcfcSNavdeep Parhar 		 */
15023098bcfcSNavdeep Parhar 		q = STAILQ_FIRST(&iql);
15033098bcfcSNavdeep Parhar 		STAILQ_REMOVE_HEAD(&iql, link);
15043098bcfcSNavdeep Parhar 		if (service_iq_fl(q, q->qsize / 8) == 0)
1505da6e3387SNavdeep Parhar 			(void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
15063098bcfcSNavdeep Parhar 		else
15073098bcfcSNavdeep Parhar 			STAILQ_INSERT_TAIL(&iql, q, link);
15083098bcfcSNavdeep Parhar 	}
15093098bcfcSNavdeep Parhar 
15103098bcfcSNavdeep Parhar 	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
15113098bcfcSNavdeep Parhar 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
15123098bcfcSNavdeep Parhar 
15133098bcfcSNavdeep Parhar 	return (0);
15143098bcfcSNavdeep Parhar }
15153098bcfcSNavdeep Parhar 
15163098bcfcSNavdeep Parhar static inline int
15173098bcfcSNavdeep Parhar sort_before_lro(struct lro_ctrl *lro)
15183098bcfcSNavdeep Parhar {
15193098bcfcSNavdeep Parhar 
15203098bcfcSNavdeep Parhar 	return (lro->lro_mbuf_max != 0);
15213098bcfcSNavdeep Parhar }
15223098bcfcSNavdeep Parhar 
1523e7e08444SNavdeep Parhar static inline uint64_t
1524e7e08444SNavdeep Parhar last_flit_to_ns(struct adapter *sc, uint64_t lf)
1525e7e08444SNavdeep Parhar {
1526e7e08444SNavdeep Parhar 	uint64_t n = be64toh(lf) & 0xfffffffffffffff;	/* 60b, not 64b. */
1527e7e08444SNavdeep Parhar 
1528e7e08444SNavdeep Parhar 	if (n > UINT64_MAX / 1000000)
1529e7e08444SNavdeep Parhar 		return (n / sc->params.vpd.cclk * 1000000);
1530e7e08444SNavdeep Parhar 	else
1531e7e08444SNavdeep Parhar 		return (n * 1000000 / sc->params.vpd.cclk);
1532e7e08444SNavdeep Parhar }
1533e7e08444SNavdeep Parhar 
153446e1e307SNavdeep Parhar static inline void
153546e1e307SNavdeep Parhar move_to_next_rxbuf(struct sge_fl *fl)
153646e1e307SNavdeep Parhar {
153746e1e307SNavdeep Parhar 
153846e1e307SNavdeep Parhar 	fl->rx_offset = 0;
153946e1e307SNavdeep Parhar 	if (__predict_false((++fl->cidx & 7) == 0)) {
154046e1e307SNavdeep Parhar 		uint16_t cidx = fl->cidx >> 3;
154146e1e307SNavdeep Parhar 
154246e1e307SNavdeep Parhar 		if (__predict_false(cidx == fl->sidx))
154346e1e307SNavdeep Parhar 			fl->cidx = cidx = 0;
154446e1e307SNavdeep Parhar 		fl->hw_cidx = cidx;
154546e1e307SNavdeep Parhar 	}
154646e1e307SNavdeep Parhar }
154746e1e307SNavdeep Parhar 
15483098bcfcSNavdeep Parhar /*
15493098bcfcSNavdeep Parhar  * Deals with interrupts on an iq+fl queue.
15503098bcfcSNavdeep Parhar  */
15513098bcfcSNavdeep Parhar static int
15523098bcfcSNavdeep Parhar service_iq_fl(struct sge_iq *iq, int budget)
15533098bcfcSNavdeep Parhar {
15543098bcfcSNavdeep Parhar 	struct sge_rxq *rxq = iq_to_rxq(iq);
15553098bcfcSNavdeep Parhar 	struct sge_fl *fl;
15563098bcfcSNavdeep Parhar 	struct adapter *sc = iq->adapter;
15573098bcfcSNavdeep Parhar 	struct iq_desc *d = &iq->desc[iq->cidx];
155846e1e307SNavdeep Parhar 	int ndescs, limit;
155946e1e307SNavdeep Parhar 	int rsp_type, starved;
15603098bcfcSNavdeep Parhar 	uint32_t lq;
15613098bcfcSNavdeep Parhar 	uint16_t fl_hw_cidx;
15623098bcfcSNavdeep Parhar 	struct mbuf *m0;
15633098bcfcSNavdeep Parhar #if defined(INET) || defined(INET6)
15643098bcfcSNavdeep Parhar 	const struct timeval lro_timeout = {0, sc->lro_timeout};
15653098bcfcSNavdeep Parhar 	struct lro_ctrl *lro = &rxq->lro;
15663098bcfcSNavdeep Parhar #endif
15673098bcfcSNavdeep Parhar 
15683098bcfcSNavdeep Parhar 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
15693098bcfcSNavdeep Parhar 	MPASS(iq->flags & IQ_HAS_FL);
15703098bcfcSNavdeep Parhar 
157146e1e307SNavdeep Parhar 	ndescs = 0;
15723098bcfcSNavdeep Parhar #if defined(INET) || defined(INET6)
15733098bcfcSNavdeep Parhar 	if (iq->flags & IQ_ADJ_CREDIT) {
15743098bcfcSNavdeep Parhar 		MPASS(sort_before_lro(lro));
15753098bcfcSNavdeep Parhar 		iq->flags &= ~IQ_ADJ_CREDIT;
15763098bcfcSNavdeep Parhar 		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
15773098bcfcSNavdeep Parhar 			tcp_lro_flush_all(lro);
15783098bcfcSNavdeep Parhar 			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
15793098bcfcSNavdeep Parhar 			    V_INGRESSQID((u32)iq->cntxt_id) |
15803098bcfcSNavdeep Parhar 			    V_SEINTARM(iq->intr_params));
15813098bcfcSNavdeep Parhar 			return (0);
15823098bcfcSNavdeep Parhar 		}
15833098bcfcSNavdeep Parhar 		ndescs = 1;
15843098bcfcSNavdeep Parhar 	}
15853098bcfcSNavdeep Parhar #else
15863098bcfcSNavdeep Parhar 	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
15873098bcfcSNavdeep Parhar #endif
15883098bcfcSNavdeep Parhar 
158946e1e307SNavdeep Parhar 	limit = budget ? budget : iq->qsize / 16;
159046e1e307SNavdeep Parhar 	fl = &rxq->fl;
159146e1e307SNavdeep Parhar 	fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
15923098bcfcSNavdeep Parhar 	while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {
15933098bcfcSNavdeep Parhar 
15943098bcfcSNavdeep Parhar 		rmb();
15953098bcfcSNavdeep Parhar 
15963098bcfcSNavdeep Parhar 		m0 = NULL;
15973098bcfcSNavdeep Parhar 		rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
15983098bcfcSNavdeep Parhar 		lq = be32toh(d->rsp.pldbuflen_qid);
15993098bcfcSNavdeep Parhar 
16003098bcfcSNavdeep Parhar 		switch (rsp_type) {
16013098bcfcSNavdeep Parhar 		case X_RSPD_TYPE_FLBUF:
160246e1e307SNavdeep Parhar 			if (lq & F_RSPD_NEWBUF) {
160346e1e307SNavdeep Parhar 				if (fl->rx_offset > 0)
160446e1e307SNavdeep Parhar 					move_to_next_rxbuf(fl);
160546e1e307SNavdeep Parhar 				lq = G_RSPD_LEN(lq);
160646e1e307SNavdeep Parhar 			}
160746e1e307SNavdeep Parhar 			if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) {
160846e1e307SNavdeep Parhar 				FL_LOCK(fl);
160946e1e307SNavdeep Parhar 				refill_fl(sc, fl, 64);
161046e1e307SNavdeep Parhar 				FL_UNLOCK(fl);
161146e1e307SNavdeep Parhar 				fl_hw_cidx = fl->hw_cidx;
161246e1e307SNavdeep Parhar 			}
16133098bcfcSNavdeep Parhar 
16141486d2deSNavdeep Parhar 			if (d->rss.opcode == CPL_RX_PKT) {
16151486d2deSNavdeep Parhar 				if (__predict_true(eth_rx(sc, rxq, d, lq) == 0))
16161486d2deSNavdeep Parhar 					break;
16171486d2deSNavdeep Parhar 				goto out;
16181486d2deSNavdeep Parhar 			}
16193098bcfcSNavdeep Parhar 			m0 = get_fl_payload(sc, fl, lq);
16203098bcfcSNavdeep Parhar 			if (__predict_false(m0 == NULL))
16213098bcfcSNavdeep Parhar 				goto out;
1622e7e08444SNavdeep Parhar 
16233098bcfcSNavdeep Parhar 			/* fall through */
16243098bcfcSNavdeep Parhar 
16253098bcfcSNavdeep Parhar 		case X_RSPD_TYPE_CPL:
16263098bcfcSNavdeep Parhar 			KASSERT(d->rss.opcode < NUM_CPL_CMDS,
16273098bcfcSNavdeep Parhar 			    ("%s: bad opcode %02x.", __func__, d->rss.opcode));
16283098bcfcSNavdeep Parhar 			t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
16293098bcfcSNavdeep Parhar 			break;
16303098bcfcSNavdeep Parhar 
16313098bcfcSNavdeep Parhar 		case X_RSPD_TYPE_INTR:
16323098bcfcSNavdeep Parhar 
16333098bcfcSNavdeep Parhar 			/*
16343098bcfcSNavdeep Parhar 			 * There are 1K interrupt-capable queues (qids 0
16353098bcfcSNavdeep Parhar 			 * through 1023).  A response type indicating a
16363098bcfcSNavdeep Parhar 			 * forwarded interrupt with a qid >= 1K is an
16373098bcfcSNavdeep Parhar 			 * iWARP async notification.  That is the only
16383098bcfcSNavdeep Parhar 			 * acceptable indirect interrupt on this queue.
16393098bcfcSNavdeep Parhar 			 */
16403098bcfcSNavdeep Parhar 			if (__predict_false(lq < 1024)) {
16413098bcfcSNavdeep Parhar 				panic("%s: indirect interrupt on iq_fl %p "
16423098bcfcSNavdeep Parhar 				    "with qid %u", __func__, iq, lq);
16433098bcfcSNavdeep Parhar 			}
16443098bcfcSNavdeep Parhar 
16453098bcfcSNavdeep Parhar 			t4_an_handler(iq, &d->rsp);
16463098bcfcSNavdeep Parhar 			break;
16473098bcfcSNavdeep Parhar 
16483098bcfcSNavdeep Parhar 		default:
16493098bcfcSNavdeep Parhar 			KASSERT(0, ("%s: illegal response type %d on iq %p",
16503098bcfcSNavdeep Parhar 			    __func__, rsp_type, iq));
16513098bcfcSNavdeep Parhar 			log(LOG_ERR, "%s: illegal response type %d on iq %p",
16523098bcfcSNavdeep Parhar 			    device_get_nameunit(sc->dev), rsp_type, iq);
16533098bcfcSNavdeep Parhar 			break;
16543098bcfcSNavdeep Parhar 		}
16553098bcfcSNavdeep Parhar 
16563098bcfcSNavdeep Parhar 		d++;
16573098bcfcSNavdeep Parhar 		if (__predict_false(++iq->cidx == iq->sidx)) {
16583098bcfcSNavdeep Parhar 			iq->cidx = 0;
16593098bcfcSNavdeep Parhar 			iq->gen ^= F_RSPD_GEN;
16603098bcfcSNavdeep Parhar 			d = &iq->desc[0];
16613098bcfcSNavdeep Parhar 		}
16623098bcfcSNavdeep Parhar 		if (__predict_false(++ndescs == limit)) {
16633098bcfcSNavdeep Parhar 			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
16643098bcfcSNavdeep Parhar 			    V_INGRESSQID(iq->cntxt_id) |
16653098bcfcSNavdeep Parhar 			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
16663098bcfcSNavdeep Parhar 
1667480e603cSNavdeep Parhar #if defined(INET) || defined(INET6)
1668480e603cSNavdeep Parhar 			if (iq->flags & IQ_LRO_ENABLED &&
166946f48ee5SNavdeep Parhar 			    !sort_before_lro(lro) &&
1670480e603cSNavdeep Parhar 			    sc->lro_timeout != 0) {
16713098bcfcSNavdeep Parhar 				tcp_lro_flush_inactive(lro, &lro_timeout);
1672480e603cSNavdeep Parhar 			}
1673480e603cSNavdeep Parhar #endif
167446e1e307SNavdeep Parhar 			if (budget)
1675733b9277SNavdeep Parhar 				return (EINPROGRESS);
167646e1e307SNavdeep Parhar 			ndescs = 0;
16774d6db4e0SNavdeep Parhar 		}
1678861e42b2SNavdeep Parhar 	}
16793098bcfcSNavdeep Parhar out:
1680a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
1681733b9277SNavdeep Parhar 	if (iq->flags & IQ_LRO_ENABLED) {
168246f48ee5SNavdeep Parhar 		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
168346f48ee5SNavdeep Parhar 			MPASS(sort_before_lro(lro));
168446f48ee5SNavdeep Parhar 			/* hold back one credit and don't flush LRO state */
168546f48ee5SNavdeep Parhar 			iq->flags |= IQ_ADJ_CREDIT;
168646f48ee5SNavdeep Parhar 			ndescs--;
168746f48ee5SNavdeep Parhar 		} else {
16886dd38b87SSepherosa Ziehau 			tcp_lro_flush_all(lro);
1689733b9277SNavdeep Parhar 		}
169046f48ee5SNavdeep Parhar 	}
1691733b9277SNavdeep Parhar #endif
1692733b9277SNavdeep Parhar 
1693315048f2SJohn Baldwin 	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
1694733b9277SNavdeep Parhar 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
1695733b9277SNavdeep Parhar 
1696733b9277SNavdeep Parhar 	FL_LOCK(fl);
169738035ed6SNavdeep Parhar 	starved = refill_fl(sc, fl, 64);
1698733b9277SNavdeep Parhar 	FL_UNLOCK(fl);
1699733b9277SNavdeep Parhar 	if (__predict_false(starved != 0))
1700733b9277SNavdeep Parhar 		add_fl_to_sfl(sc, fl);
1701733b9277SNavdeep Parhar 
1702733b9277SNavdeep Parhar 	return (0);
1703733b9277SNavdeep Parhar }
1704733b9277SNavdeep Parhar 
170538035ed6SNavdeep Parhar static inline struct cluster_metadata *
170646e1e307SNavdeep Parhar cl_metadata(struct fl_sdesc *sd)
17071458bff9SNavdeep Parhar {
17081458bff9SNavdeep Parhar 
170946e1e307SNavdeep Parhar 	return ((void *)(sd->cl + sd->moff));
17101458bff9SNavdeep Parhar }
17111458bff9SNavdeep Parhar 
171215c28f87SGleb Smirnoff static void
1713e8fd18f3SGleb Smirnoff rxb_free(struct mbuf *m)
17141458bff9SNavdeep Parhar {
1715d6f79b27SNavdeep Parhar 	struct cluster_metadata *clm = m->m_ext.ext_arg1;
17161458bff9SNavdeep Parhar 
1717d6f79b27SNavdeep Parhar 	uma_zfree(clm->zone, clm->cl);
171882eff304SNavdeep Parhar 	counter_u64_add(extfree_rels, 1);
17191458bff9SNavdeep Parhar }
17201458bff9SNavdeep Parhar 
172138035ed6SNavdeep Parhar /*
172246e1e307SNavdeep Parhar  * The mbuf returned comes from zone_muf and carries the payload in one of these
172346e1e307SNavdeep Parhar  * ways
172446e1e307SNavdeep Parhar  * a) complete frame inside the mbuf
172546e1e307SNavdeep Parhar  * b) m_cljset (for clusters without metadata)
172646e1e307SNavdeep Parhar  * d) m_extaddref (cluster with metadata)
172738035ed6SNavdeep Parhar  */
17281458bff9SNavdeep Parhar static struct mbuf *
1729b741402cSNavdeep Parhar get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
1730b741402cSNavdeep Parhar     int remaining)
173138035ed6SNavdeep Parhar {
173238035ed6SNavdeep Parhar 	struct mbuf *m;
173338035ed6SNavdeep Parhar 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
173446e1e307SNavdeep Parhar 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
173546e1e307SNavdeep Parhar 	struct cluster_metadata *clm;
1736b741402cSNavdeep Parhar 	int len, blen;
173738035ed6SNavdeep Parhar 	caddr_t payload;
173838035ed6SNavdeep Parhar 
1739e3207e19SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING) {
174046e1e307SNavdeep Parhar 		u_int l, pad;
1741b741402cSNavdeep Parhar 
174246e1e307SNavdeep Parhar 		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
174346e1e307SNavdeep Parhar 		len = min(remaining, blen);
174446e1e307SNavdeep Parhar 		payload = sd->cl + fl->rx_offset;
174546e1e307SNavdeep Parhar 
174646e1e307SNavdeep Parhar 		l = fr_offset + len;
174746e1e307SNavdeep Parhar 		pad = roundup2(l, fl->buf_boundary) - l;
174846e1e307SNavdeep Parhar 		if (fl->rx_offset + len + pad < rxb->size2)
1749b741402cSNavdeep Parhar 			blen = len + pad;
175046e1e307SNavdeep Parhar 		MPASS(fl->rx_offset + blen <= rxb->size2);
1751e3207e19SNavdeep Parhar 	} else {
1752e3207e19SNavdeep Parhar 		MPASS(fl->rx_offset == 0);	/* not packing */
175346e1e307SNavdeep Parhar 		blen = rxb->size1;
175446e1e307SNavdeep Parhar 		len = min(remaining, blen);
175546e1e307SNavdeep Parhar 		payload = sd->cl;
1756e3207e19SNavdeep Parhar 	}
175738035ed6SNavdeep Parhar 
175846e1e307SNavdeep Parhar 	if (fr_offset == 0) {
175946e1e307SNavdeep Parhar 		m = m_gethdr(M_NOWAIT, MT_DATA);
176046e1e307SNavdeep Parhar 		if (__predict_false(m == NULL))
176146e1e307SNavdeep Parhar 			return (NULL);
176246e1e307SNavdeep Parhar 		m->m_pkthdr.len = remaining;
176346e1e307SNavdeep Parhar 	} else {
176446e1e307SNavdeep Parhar 		m = m_get(M_NOWAIT, MT_DATA);
176546e1e307SNavdeep Parhar 		if (__predict_false(m == NULL))
176646e1e307SNavdeep Parhar 			return (NULL);
176746e1e307SNavdeep Parhar 	}
176846e1e307SNavdeep Parhar 	m->m_len = len;
1769b741402cSNavdeep Parhar 
177038035ed6SNavdeep Parhar 	if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {
177138035ed6SNavdeep Parhar 		/* copy data to mbuf */
177238035ed6SNavdeep Parhar 		bcopy(payload, mtod(m, caddr_t), len);
177346e1e307SNavdeep Parhar 		if (fl->flags & FL_BUF_PACKING) {
177446e1e307SNavdeep Parhar 			fl->rx_offset += blen;
177546e1e307SNavdeep Parhar 			MPASS(fl->rx_offset <= rxb->size2);
177646e1e307SNavdeep Parhar 			if (fl->rx_offset < rxb->size2)
177746e1e307SNavdeep Parhar 				return (m);	/* without advancing the cidx */
177846e1e307SNavdeep Parhar 		}
177946e1e307SNavdeep Parhar 	} else if (fl->flags & FL_BUF_PACKING) {
178046e1e307SNavdeep Parhar 		clm = cl_metadata(sd);
1781a9c4062aSNavdeep Parhar 		if (sd->nmbuf++ == 0) {
1782a9c4062aSNavdeep Parhar 			clm->refcount = 1;
178346e1e307SNavdeep Parhar 			clm->zone = rxb->zone;
1784d6f79b27SNavdeep Parhar 			clm->cl = sd->cl;
1785a9c4062aSNavdeep Parhar 			counter_u64_add(extfree_refs, 1);
1786a9c4062aSNavdeep Parhar 		}
1787d6f79b27SNavdeep Parhar 		m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm,
1788d6f79b27SNavdeep Parhar 		    NULL);
178938035ed6SNavdeep Parhar 
179046e1e307SNavdeep Parhar 		fl->rx_offset += blen;
179146e1e307SNavdeep Parhar 		MPASS(fl->rx_offset <= rxb->size2);
179246e1e307SNavdeep Parhar 		if (fl->rx_offset < rxb->size2)
179346e1e307SNavdeep Parhar 			return (m);	/* without advancing the cidx */
1794ccc69b2fSNavdeep Parhar 	} else {
179546e1e307SNavdeep Parhar 		m_cljset(m, sd->cl, rxb->type);
179638035ed6SNavdeep Parhar 		sd->cl = NULL;	/* consumed, not a recycle candidate */
179738035ed6SNavdeep Parhar 	}
179838035ed6SNavdeep Parhar 
179946e1e307SNavdeep Parhar 	move_to_next_rxbuf(fl);
180038035ed6SNavdeep Parhar 
180138035ed6SNavdeep Parhar 	return (m);
180238035ed6SNavdeep Parhar }
180338035ed6SNavdeep Parhar 
180438035ed6SNavdeep Parhar static struct mbuf *
180546e1e307SNavdeep Parhar get_fl_payload(struct adapter *sc, struct sge_fl *fl, const u_int plen)
18061458bff9SNavdeep Parhar {
180738035ed6SNavdeep Parhar 	struct mbuf *m0, *m, **pnext;
1808b741402cSNavdeep Parhar 	u_int remaining;
18091458bff9SNavdeep Parhar 
18104d6db4e0SNavdeep Parhar 	if (__predict_false(fl->flags & FL_BUF_RESUME)) {
1811368541baSNavdeep Parhar 		M_ASSERTPKTHDR(fl->m0);
181246e1e307SNavdeep Parhar 		MPASS(fl->m0->m_pkthdr.len == plen);
181346e1e307SNavdeep Parhar 		MPASS(fl->remaining < plen);
18141458bff9SNavdeep Parhar 
181538035ed6SNavdeep Parhar 		m0 = fl->m0;
181638035ed6SNavdeep Parhar 		pnext = fl->pnext;
1817b741402cSNavdeep Parhar 		remaining = fl->remaining;
18184d6db4e0SNavdeep Parhar 		fl->flags &= ~FL_BUF_RESUME;
181938035ed6SNavdeep Parhar 		goto get_segment;
18201458bff9SNavdeep Parhar 	}
18211458bff9SNavdeep Parhar 
18221458bff9SNavdeep Parhar 	/*
182338035ed6SNavdeep Parhar 	 * Payload starts at rx_offset in the current hw buffer.  Its length is
182438035ed6SNavdeep Parhar 	 * 'len' and it may span multiple hw buffers.
18251458bff9SNavdeep Parhar 	 */
18261458bff9SNavdeep Parhar 
182746e1e307SNavdeep Parhar 	m0 = get_scatter_segment(sc, fl, 0, plen);
1828368541baSNavdeep Parhar 	if (m0 == NULL)
18294d6db4e0SNavdeep Parhar 		return (NULL);
183046e1e307SNavdeep Parhar 	remaining = plen - m0->m_len;
183138035ed6SNavdeep Parhar 	pnext = &m0->m_next;
1832b741402cSNavdeep Parhar 	while (remaining > 0) {
183338035ed6SNavdeep Parhar get_segment:
183438035ed6SNavdeep Parhar 		MPASS(fl->rx_offset == 0);
183546e1e307SNavdeep Parhar 		m = get_scatter_segment(sc, fl, plen - remaining, remaining);
18364d6db4e0SNavdeep Parhar 		if (__predict_false(m == NULL)) {
183738035ed6SNavdeep Parhar 			fl->m0 = m0;
183838035ed6SNavdeep Parhar 			fl->pnext = pnext;
1839b741402cSNavdeep Parhar 			fl->remaining = remaining;
18404d6db4e0SNavdeep Parhar 			fl->flags |= FL_BUF_RESUME;
18414d6db4e0SNavdeep Parhar 			return (NULL);
18421458bff9SNavdeep Parhar 		}
184338035ed6SNavdeep Parhar 		*pnext = m;
184438035ed6SNavdeep Parhar 		pnext = &m->m_next;
1845b741402cSNavdeep Parhar 		remaining -= m->m_len;
1846733b9277SNavdeep Parhar 	}
184738035ed6SNavdeep Parhar 	*pnext = NULL;
18484d6db4e0SNavdeep Parhar 
1849dbbf46c4SNavdeep Parhar 	M_ASSERTPKTHDR(m0);
1850733b9277SNavdeep Parhar 	return (m0);
1851733b9277SNavdeep Parhar }
1852733b9277SNavdeep Parhar 
1853733b9277SNavdeep Parhar static int
185487bbb333SNavdeep Parhar skip_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
185587bbb333SNavdeep Parhar     int remaining)
185687bbb333SNavdeep Parhar {
185787bbb333SNavdeep Parhar 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
185887bbb333SNavdeep Parhar 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
185987bbb333SNavdeep Parhar 	int len, blen;
186087bbb333SNavdeep Parhar 
186187bbb333SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING) {
186287bbb333SNavdeep Parhar 		u_int l, pad;
186387bbb333SNavdeep Parhar 
186487bbb333SNavdeep Parhar 		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
186587bbb333SNavdeep Parhar 		len = min(remaining, blen);
186687bbb333SNavdeep Parhar 
186787bbb333SNavdeep Parhar 		l = fr_offset + len;
186887bbb333SNavdeep Parhar 		pad = roundup2(l, fl->buf_boundary) - l;
186987bbb333SNavdeep Parhar 		if (fl->rx_offset + len + pad < rxb->size2)
187087bbb333SNavdeep Parhar 			blen = len + pad;
187187bbb333SNavdeep Parhar 		fl->rx_offset += blen;
187287bbb333SNavdeep Parhar 		MPASS(fl->rx_offset <= rxb->size2);
187387bbb333SNavdeep Parhar 		if (fl->rx_offset < rxb->size2)
187487bbb333SNavdeep Parhar 			return (len);	/* without advancing the cidx */
187587bbb333SNavdeep Parhar 	} else {
187687bbb333SNavdeep Parhar 		MPASS(fl->rx_offset == 0);	/* not packing */
187787bbb333SNavdeep Parhar 		blen = rxb->size1;
187887bbb333SNavdeep Parhar 		len = min(remaining, blen);
187987bbb333SNavdeep Parhar 	}
188087bbb333SNavdeep Parhar 	move_to_next_rxbuf(fl);
188187bbb333SNavdeep Parhar 	return (len);
188287bbb333SNavdeep Parhar }
188387bbb333SNavdeep Parhar 
188487bbb333SNavdeep Parhar static inline void
188587bbb333SNavdeep Parhar skip_fl_payload(struct adapter *sc, struct sge_fl *fl, int plen)
188687bbb333SNavdeep Parhar {
188787bbb333SNavdeep Parhar 	int remaining, fr_offset, len;
188887bbb333SNavdeep Parhar 
188987bbb333SNavdeep Parhar 	fr_offset = 0;
189087bbb333SNavdeep Parhar 	remaining = plen;
189187bbb333SNavdeep Parhar 	while (remaining > 0) {
189287bbb333SNavdeep Parhar 		len = skip_scatter_segment(sc, fl, fr_offset, remaining);
189387bbb333SNavdeep Parhar 		fr_offset += len;
189487bbb333SNavdeep Parhar 		remaining -= len;
189587bbb333SNavdeep Parhar 	}
189687bbb333SNavdeep Parhar }
189787bbb333SNavdeep Parhar 
189887bbb333SNavdeep Parhar static inline int
189987bbb333SNavdeep Parhar get_segment_len(struct adapter *sc, struct sge_fl *fl, int plen)
190087bbb333SNavdeep Parhar {
190187bbb333SNavdeep Parhar 	int len;
190287bbb333SNavdeep Parhar 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
190387bbb333SNavdeep Parhar 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
190487bbb333SNavdeep Parhar 
190587bbb333SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING)
190687bbb333SNavdeep Parhar 		len = rxb->size2 - fl->rx_offset;
190787bbb333SNavdeep Parhar 	else
190887bbb333SNavdeep Parhar 		len = rxb->size1;
190987bbb333SNavdeep Parhar 
191087bbb333SNavdeep Parhar 	return (min(plen, len));
191187bbb333SNavdeep Parhar }
191287bbb333SNavdeep Parhar 
191387bbb333SNavdeep Parhar static int
19141486d2deSNavdeep Parhar eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d,
19151486d2deSNavdeep Parhar     u_int plen)
1916733b9277SNavdeep Parhar {
19171486d2deSNavdeep Parhar 	struct mbuf *m0;
1918733b9277SNavdeep Parhar 	struct ifnet *ifp = rxq->ifp;
19191486d2deSNavdeep Parhar 	struct sge_fl *fl = &rxq->fl;
192087bbb333SNavdeep Parhar 	struct vi_info *vi = ifp->if_softc;
19211486d2deSNavdeep Parhar 	const struct cpl_rx_pkt *cpl;
1922a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
1923733b9277SNavdeep Parhar 	struct lro_ctrl *lro = &rxq->lro;
1924733b9277SNavdeep Parhar #endif
192570ca6229SNavdeep Parhar 	static const int sw_hashtype[4][2] = {
192670ca6229SNavdeep Parhar 		{M_HASHTYPE_NONE, M_HASHTYPE_NONE},
192770ca6229SNavdeep Parhar 		{M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6},
192870ca6229SNavdeep Parhar 		{M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6},
192970ca6229SNavdeep Parhar 		{M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6},
193070ca6229SNavdeep Parhar 	};
1931733b9277SNavdeep Parhar 
19321486d2deSNavdeep Parhar 	MPASS(plen > sc->params.sge.fl_pktshift);
193387bbb333SNavdeep Parhar 	if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) &&
193487bbb333SNavdeep Parhar 	    __predict_true((fl->flags & FL_BUF_RESUME) == 0)) {
193587bbb333SNavdeep Parhar 		struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
193687bbb333SNavdeep Parhar 		caddr_t frame;
193787bbb333SNavdeep Parhar 		int rc, slen;
193887bbb333SNavdeep Parhar 
193987bbb333SNavdeep Parhar 		slen = get_segment_len(sc, fl, plen) -
194087bbb333SNavdeep Parhar 		    sc->params.sge.fl_pktshift;
194187bbb333SNavdeep Parhar 		frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift;
194287bbb333SNavdeep Parhar 		CURVNET_SET_QUIET(ifp->if_vnet);
194387bbb333SNavdeep Parhar 		rc = pfil_run_hooks(vi->pfil, frame, ifp,
194487bbb333SNavdeep Parhar 		    slen | PFIL_MEMPTR | PFIL_IN, NULL);
194587bbb333SNavdeep Parhar 		CURVNET_RESTORE();
194687bbb333SNavdeep Parhar 		if (rc == PFIL_DROPPED || rc == PFIL_CONSUMED) {
194787bbb333SNavdeep Parhar 			skip_fl_payload(sc, fl, plen);
194887bbb333SNavdeep Parhar 			return (0);
194987bbb333SNavdeep Parhar 		}
195087bbb333SNavdeep Parhar 		if (rc == PFIL_REALLOCED) {
195187bbb333SNavdeep Parhar 			skip_fl_payload(sc, fl, plen);
195287bbb333SNavdeep Parhar 			m0 = pfil_mem2mbuf(frame);
195387bbb333SNavdeep Parhar 			goto have_mbuf;
195487bbb333SNavdeep Parhar 		}
195587bbb333SNavdeep Parhar 	}
195687bbb333SNavdeep Parhar 
19571486d2deSNavdeep Parhar 	m0 = get_fl_payload(sc, fl, plen);
19581486d2deSNavdeep Parhar 	if (__predict_false(m0 == NULL))
19591486d2deSNavdeep Parhar 		return (ENOMEM);
1960733b9277SNavdeep Parhar 
196190e7434aSNavdeep Parhar 	m0->m_pkthdr.len -= sc->params.sge.fl_pktshift;
196290e7434aSNavdeep Parhar 	m0->m_len -= sc->params.sge.fl_pktshift;
196390e7434aSNavdeep Parhar 	m0->m_data += sc->params.sge.fl_pktshift;
196454e4ee71SNavdeep Parhar 
196587bbb333SNavdeep Parhar have_mbuf:
196654e4ee71SNavdeep Parhar 	m0->m_pkthdr.rcvif = ifp;
19671486d2deSNavdeep Parhar 	M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]);
19681486d2deSNavdeep Parhar 	m0->m_pkthdr.flowid = be32toh(d->rss.hash_val);
196954e4ee71SNavdeep Parhar 
19701486d2deSNavdeep Parhar 	cpl = (const void *)(&d->rss + 1);
19711de8c69dSNavdeep Parhar 	if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) {
19729600bf00SNavdeep Parhar 		if (ifp->if_capenable & IFCAP_RXCSUM &&
19739600bf00SNavdeep Parhar 		    cpl->l2info & htobe32(F_RXF_IP)) {
1974932b1a5fSNavdeep Parhar 			m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
197554e4ee71SNavdeep Parhar 			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
19769600bf00SNavdeep Parhar 			rxq->rxcsum++;
19779600bf00SNavdeep Parhar 		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
19789600bf00SNavdeep Parhar 		    cpl->l2info & htobe32(F_RXF_IP6)) {
1979932b1a5fSNavdeep Parhar 			m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
19809600bf00SNavdeep Parhar 			    CSUM_PSEUDO_HDR);
19819600bf00SNavdeep Parhar 			rxq->rxcsum++;
19829600bf00SNavdeep Parhar 		}
19839600bf00SNavdeep Parhar 
19849600bf00SNavdeep Parhar 		if (__predict_false(cpl->ip_frag))
198554e4ee71SNavdeep Parhar 			m0->m_pkthdr.csum_data = be16toh(cpl->csum);
198654e4ee71SNavdeep Parhar 		else
198754e4ee71SNavdeep Parhar 			m0->m_pkthdr.csum_data = 0xffff;
198854e4ee71SNavdeep Parhar 	}
198954e4ee71SNavdeep Parhar 
199054e4ee71SNavdeep Parhar 	if (cpl->vlan_ex) {
199154e4ee71SNavdeep Parhar 		m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
199254e4ee71SNavdeep Parhar 		m0->m_flags |= M_VLANTAG;
199354e4ee71SNavdeep Parhar 		rxq->vlan_extraction++;
199454e4ee71SNavdeep Parhar 	}
199554e4ee71SNavdeep Parhar 
19961486d2deSNavdeep Parhar 	if (rxq->iq.flags & IQ_RX_TIMESTAMP) {
19971486d2deSNavdeep Parhar 		/*
19981486d2deSNavdeep Parhar 		 * Fill up rcv_tstmp but do not set M_TSTMP.
19991486d2deSNavdeep Parhar 		 * rcv_tstmp is not in the format that the
20001486d2deSNavdeep Parhar 		 * kernel expects and we don't want to mislead
20011486d2deSNavdeep Parhar 		 * it.  For now this is only for custom code
20021486d2deSNavdeep Parhar 		 * that knows how to interpret cxgbe's stamp.
20031486d2deSNavdeep Parhar 		 */
20041486d2deSNavdeep Parhar 		m0->m_pkthdr.rcv_tstmp =
20051486d2deSNavdeep Parhar 		    last_flit_to_ns(sc, d->rsp.u.last_flit);
20061486d2deSNavdeep Parhar #ifdef notyet
20071486d2deSNavdeep Parhar 		m0->m_flags |= M_TSTMP;
20081486d2deSNavdeep Parhar #endif
20091486d2deSNavdeep Parhar 	}
20101486d2deSNavdeep Parhar 
201150575ce1SAndrew Gallatin #ifdef NUMA
201250575ce1SAndrew Gallatin 	m0->m_pkthdr.numa_domain = ifp->if_numa_domain;
201350575ce1SAndrew Gallatin #endif
2014a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
20151486d2deSNavdeep Parhar 	if (rxq->iq.flags & IQ_LRO_ENABLED &&
20169087a3dfSNavdeep Parhar 	    (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 ||
20179087a3dfSNavdeep Parhar 	    M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) {
201846f48ee5SNavdeep Parhar 		if (sort_before_lro(lro)) {
201946f48ee5SNavdeep Parhar 			tcp_lro_queue_mbuf(lro, m0);
202046f48ee5SNavdeep Parhar 			return (0); /* queued for sort, then LRO */
202146f48ee5SNavdeep Parhar 		}
202246f48ee5SNavdeep Parhar 		if (tcp_lro_rx(lro, m0, 0) == 0)
202346f48ee5SNavdeep Parhar 			return (0); /* queued for LRO */
202446f48ee5SNavdeep Parhar 	}
202554e4ee71SNavdeep Parhar #endif
20267d29df59SNavdeep Parhar 	ifp->if_input(ifp, m0);
202754e4ee71SNavdeep Parhar 
2028733b9277SNavdeep Parhar 	return (0);
202954e4ee71SNavdeep Parhar }
203054e4ee71SNavdeep Parhar 
2031733b9277SNavdeep Parhar /*
20327951040fSNavdeep Parhar  * Must drain the wrq or make sure that someone else will.
20337951040fSNavdeep Parhar  */
20347951040fSNavdeep Parhar static void
20357951040fSNavdeep Parhar wrq_tx_drain(void *arg, int n)
20367951040fSNavdeep Parhar {
20377951040fSNavdeep Parhar 	struct sge_wrq *wrq = arg;
20387951040fSNavdeep Parhar 	struct sge_eq *eq = &wrq->eq;
20397951040fSNavdeep Parhar 
20407951040fSNavdeep Parhar 	EQ_LOCK(eq);
20417951040fSNavdeep Parhar 	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
20427951040fSNavdeep Parhar 		drain_wrq_wr_list(wrq->adapter, wrq);
20437951040fSNavdeep Parhar 	EQ_UNLOCK(eq);
20447951040fSNavdeep Parhar }
20457951040fSNavdeep Parhar 
20467951040fSNavdeep Parhar static void
20477951040fSNavdeep Parhar drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq)
20487951040fSNavdeep Parhar {
20497951040fSNavdeep Parhar 	struct sge_eq *eq = &wrq->eq;
20507951040fSNavdeep Parhar 	u_int available, dbdiff;	/* # of hardware descriptors */
20517951040fSNavdeep Parhar 	u_int n;
20527951040fSNavdeep Parhar 	struct wrqe *wr;
20537951040fSNavdeep Parhar 	struct fw_eth_tx_pkt_wr *dst;	/* any fw WR struct will do */
20547951040fSNavdeep Parhar 
20557951040fSNavdeep Parhar 	EQ_LOCK_ASSERT_OWNED(eq);
20567951040fSNavdeep Parhar 	MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
20577951040fSNavdeep Parhar 	wr = STAILQ_FIRST(&wrq->wr_list);
20587951040fSNavdeep Parhar 	MPASS(wr != NULL);	/* Must be called with something useful to do */
2059cda2ab0eSNavdeep Parhar 	MPASS(eq->pidx == eq->dbidx);
2060cda2ab0eSNavdeep Parhar 	dbdiff = 0;
20617951040fSNavdeep Parhar 
20627951040fSNavdeep Parhar 	do {
20637951040fSNavdeep Parhar 		eq->cidx = read_hw_cidx(eq);
20647951040fSNavdeep Parhar 		if (eq->pidx == eq->cidx)
20657951040fSNavdeep Parhar 			available = eq->sidx - 1;
20667951040fSNavdeep Parhar 		else
20677951040fSNavdeep Parhar 			available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
20687951040fSNavdeep Parhar 
20697951040fSNavdeep Parhar 		MPASS(wr->wrq == wrq);
20707951040fSNavdeep Parhar 		n = howmany(wr->wr_len, EQ_ESIZE);
20717951040fSNavdeep Parhar 		if (available < n)
2072cda2ab0eSNavdeep Parhar 			break;
20737951040fSNavdeep Parhar 
20747951040fSNavdeep Parhar 		dst = (void *)&eq->desc[eq->pidx];
20757951040fSNavdeep Parhar 		if (__predict_true(eq->sidx - eq->pidx > n)) {
20767951040fSNavdeep Parhar 			/* Won't wrap, won't end exactly at the status page. */
20777951040fSNavdeep Parhar 			bcopy(&wr->wr[0], dst, wr->wr_len);
20787951040fSNavdeep Parhar 			eq->pidx += n;
20797951040fSNavdeep Parhar 		} else {
20807951040fSNavdeep Parhar 			int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE;
20817951040fSNavdeep Parhar 
20827951040fSNavdeep Parhar 			bcopy(&wr->wr[0], dst, first_portion);
20837951040fSNavdeep Parhar 			if (wr->wr_len > first_portion) {
20847951040fSNavdeep Parhar 				bcopy(&wr->wr[first_portion], &eq->desc[0],
20857951040fSNavdeep Parhar 				    wr->wr_len - first_portion);
20867951040fSNavdeep Parhar 			}
20877951040fSNavdeep Parhar 			eq->pidx = n - (eq->sidx - eq->pidx);
20887951040fSNavdeep Parhar 		}
20890459a175SNavdeep Parhar 		wrq->tx_wrs_copied++;
20907951040fSNavdeep Parhar 
20917951040fSNavdeep Parhar 		if (available < eq->sidx / 4 &&
20927951040fSNavdeep Parhar 		    atomic_cmpset_int(&eq->equiq, 0, 1)) {
2093ddf09ad6SNavdeep Parhar 				/*
2094ddf09ad6SNavdeep Parhar 				 * XXX: This is not 100% reliable with some
2095ddf09ad6SNavdeep Parhar 				 * types of WRs.  But this is a very unusual
2096ddf09ad6SNavdeep Parhar 				 * situation for an ofld/ctrl queue anyway.
2097ddf09ad6SNavdeep Parhar 				 */
20987951040fSNavdeep Parhar 			dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
20997951040fSNavdeep Parhar 			    F_FW_WR_EQUEQ);
21007951040fSNavdeep Parhar 		}
21017951040fSNavdeep Parhar 
21027951040fSNavdeep Parhar 		dbdiff += n;
21037951040fSNavdeep Parhar 		if (dbdiff >= 16) {
21047951040fSNavdeep Parhar 			ring_eq_db(sc, eq, dbdiff);
21057951040fSNavdeep Parhar 			dbdiff = 0;
21067951040fSNavdeep Parhar 		}
21077951040fSNavdeep Parhar 
21087951040fSNavdeep Parhar 		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
21097951040fSNavdeep Parhar 		free_wrqe(wr);
21107951040fSNavdeep Parhar 		MPASS(wrq->nwr_pending > 0);
21117951040fSNavdeep Parhar 		wrq->nwr_pending--;
21127951040fSNavdeep Parhar 		MPASS(wrq->ndesc_needed >= n);
21137951040fSNavdeep Parhar 		wrq->ndesc_needed -= n;
21147951040fSNavdeep Parhar 	} while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL);
21157951040fSNavdeep Parhar 
21167951040fSNavdeep Parhar 	if (dbdiff)
21177951040fSNavdeep Parhar 		ring_eq_db(sc, eq, dbdiff);
21187951040fSNavdeep Parhar }
21197951040fSNavdeep Parhar 
21207951040fSNavdeep Parhar /*
2121733b9277SNavdeep Parhar  * Doesn't fail.  Holds on to work requests it can't send right away.
2122733b9277SNavdeep Parhar  */
212309fe6320SNavdeep Parhar void
212409fe6320SNavdeep Parhar t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
2125733b9277SNavdeep Parhar {
2126733b9277SNavdeep Parhar #ifdef INVARIANTS
21277951040fSNavdeep Parhar 	struct sge_eq *eq = &wrq->eq;
2128733b9277SNavdeep Parhar #endif
2129733b9277SNavdeep Parhar 
21307951040fSNavdeep Parhar 	EQ_LOCK_ASSERT_OWNED(eq);
21317951040fSNavdeep Parhar 	MPASS(wr != NULL);
21327951040fSNavdeep Parhar 	MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN);
21337951040fSNavdeep Parhar 	MPASS((wr->wr_len & 0x7) == 0);
2134733b9277SNavdeep Parhar 
21357951040fSNavdeep Parhar 	STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);
21367951040fSNavdeep Parhar 	wrq->nwr_pending++;
21377951040fSNavdeep Parhar 	wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE);
2138733b9277SNavdeep Parhar 
21397951040fSNavdeep Parhar 	if (!TAILQ_EMPTY(&wrq->incomplete_wrs))
21407951040fSNavdeep Parhar 		return;	/* commit_wrq_wr will drain wr_list as well. */
2141733b9277SNavdeep Parhar 
21427951040fSNavdeep Parhar 	drain_wrq_wr_list(sc, wrq);
2143733b9277SNavdeep Parhar 
21447951040fSNavdeep Parhar 	/* Doorbell must have caught up to the pidx. */
21457951040fSNavdeep Parhar 	MPASS(eq->pidx == eq->dbidx);
214654e4ee71SNavdeep Parhar }
214754e4ee71SNavdeep Parhar 
214854e4ee71SNavdeep Parhar void
214954e4ee71SNavdeep Parhar t4_update_fl_bufsize(struct ifnet *ifp)
215054e4ee71SNavdeep Parhar {
2151fe2ebb76SJohn Baldwin 	struct vi_info *vi = ifp->if_softc;
2152*7c228be3SNavdeep Parhar 	struct adapter *sc = vi->adapter;
215354e4ee71SNavdeep Parhar 	struct sge_rxq *rxq;
21546eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD
21556eb3180fSNavdeep Parhar 	struct sge_ofld_rxq *ofld_rxq;
21566eb3180fSNavdeep Parhar #endif
215754e4ee71SNavdeep Parhar 	struct sge_fl *fl;
215838035ed6SNavdeep Parhar 	int i, maxp, mtu = ifp->if_mtu;
215954e4ee71SNavdeep Parhar 
21608bf30903SNavdeep Parhar 	maxp = mtu_to_max_payload(sc, mtu);
2161fe2ebb76SJohn Baldwin 	for_each_rxq(vi, i, rxq) {
216254e4ee71SNavdeep Parhar 		fl = &rxq->fl;
216354e4ee71SNavdeep Parhar 
216454e4ee71SNavdeep Parhar 		FL_LOCK(fl);
216546e1e307SNavdeep Parhar 		fl->zidx = find_refill_source(sc, maxp,
216646e1e307SNavdeep Parhar 		    fl->flags & FL_BUF_PACKING);
216754e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
216854e4ee71SNavdeep Parhar 	}
21696eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD
2170fe2ebb76SJohn Baldwin 	for_each_ofld_rxq(vi, i, ofld_rxq) {
21716eb3180fSNavdeep Parhar 		fl = &ofld_rxq->fl;
21726eb3180fSNavdeep Parhar 
21736eb3180fSNavdeep Parhar 		FL_LOCK(fl);
217446e1e307SNavdeep Parhar 		fl->zidx = find_refill_source(sc, maxp,
217546e1e307SNavdeep Parhar 		    fl->flags & FL_BUF_PACKING);
21766eb3180fSNavdeep Parhar 		FL_UNLOCK(fl);
21776eb3180fSNavdeep Parhar 	}
21786eb3180fSNavdeep Parhar #endif
217954e4ee71SNavdeep Parhar }
218054e4ee71SNavdeep Parhar 
21817951040fSNavdeep Parhar static inline int
21827951040fSNavdeep Parhar mbuf_nsegs(struct mbuf *m)
2183733b9277SNavdeep Parhar {
21840835ddc7SNavdeep Parhar 
21857951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
21867951040fSNavdeep Parhar 	KASSERT(m->m_pkthdr.l5hlen > 0,
21877951040fSNavdeep Parhar 	    ("%s: mbuf %p missing information on # of segments.", __func__, m));
21887951040fSNavdeep Parhar 
21897951040fSNavdeep Parhar 	return (m->m_pkthdr.l5hlen);
21907951040fSNavdeep Parhar }
21917951040fSNavdeep Parhar 
21927951040fSNavdeep Parhar static inline void
21937951040fSNavdeep Parhar set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs)
21947951040fSNavdeep Parhar {
21957951040fSNavdeep Parhar 
21967951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
21977951040fSNavdeep Parhar 	m->m_pkthdr.l5hlen = nsegs;
21987951040fSNavdeep Parhar }
21997951040fSNavdeep Parhar 
22007951040fSNavdeep Parhar static inline int
22015cdaef71SJohn Baldwin mbuf_cflags(struct mbuf *m)
22025cdaef71SJohn Baldwin {
22035cdaef71SJohn Baldwin 
22045cdaef71SJohn Baldwin 	M_ASSERTPKTHDR(m);
22055cdaef71SJohn Baldwin 	return (m->m_pkthdr.PH_loc.eight[4]);
22065cdaef71SJohn Baldwin }
22075cdaef71SJohn Baldwin 
22085cdaef71SJohn Baldwin static inline void
22095cdaef71SJohn Baldwin set_mbuf_cflags(struct mbuf *m, uint8_t flags)
22105cdaef71SJohn Baldwin {
22115cdaef71SJohn Baldwin 
22125cdaef71SJohn Baldwin 	M_ASSERTPKTHDR(m);
22135cdaef71SJohn Baldwin 	m->m_pkthdr.PH_loc.eight[4] = flags;
22145cdaef71SJohn Baldwin }
22155cdaef71SJohn Baldwin 
22165cdaef71SJohn Baldwin static inline int
22177951040fSNavdeep Parhar mbuf_len16(struct mbuf *m)
22187951040fSNavdeep Parhar {
22197951040fSNavdeep Parhar 	int n;
22207951040fSNavdeep Parhar 
22217951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
22227951040fSNavdeep Parhar 	n = m->m_pkthdr.PH_loc.eight[0];
2223bddf7343SJohn Baldwin 	if (!(mbuf_cflags(m) & MC_TLS))
22247951040fSNavdeep Parhar 		MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);
22257951040fSNavdeep Parhar 
22267951040fSNavdeep Parhar 	return (n);
22277951040fSNavdeep Parhar }
22287951040fSNavdeep Parhar 
22297951040fSNavdeep Parhar static inline void
22307951040fSNavdeep Parhar set_mbuf_len16(struct mbuf *m, uint8_t len16)
22317951040fSNavdeep Parhar {
22327951040fSNavdeep Parhar 
22337951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
22347951040fSNavdeep Parhar 	m->m_pkthdr.PH_loc.eight[0] = len16;
22357951040fSNavdeep Parhar }
22367951040fSNavdeep Parhar 
2237786099deSNavdeep Parhar #ifdef RATELIMIT
2238786099deSNavdeep Parhar static inline int
2239786099deSNavdeep Parhar mbuf_eo_nsegs(struct mbuf *m)
2240786099deSNavdeep Parhar {
2241786099deSNavdeep Parhar 
2242786099deSNavdeep Parhar 	M_ASSERTPKTHDR(m);
2243786099deSNavdeep Parhar 	return (m->m_pkthdr.PH_loc.eight[1]);
2244786099deSNavdeep Parhar }
2245786099deSNavdeep Parhar 
2246786099deSNavdeep Parhar static inline void
2247786099deSNavdeep Parhar set_mbuf_eo_nsegs(struct mbuf *m, uint8_t nsegs)
2248786099deSNavdeep Parhar {
2249786099deSNavdeep Parhar 
2250786099deSNavdeep Parhar 	M_ASSERTPKTHDR(m);
2251786099deSNavdeep Parhar 	m->m_pkthdr.PH_loc.eight[1] = nsegs;
2252786099deSNavdeep Parhar }
2253786099deSNavdeep Parhar 
2254786099deSNavdeep Parhar static inline int
2255786099deSNavdeep Parhar mbuf_eo_len16(struct mbuf *m)
2256786099deSNavdeep Parhar {
2257786099deSNavdeep Parhar 	int n;
2258786099deSNavdeep Parhar 
2259786099deSNavdeep Parhar 	M_ASSERTPKTHDR(m);
2260786099deSNavdeep Parhar 	n = m->m_pkthdr.PH_loc.eight[2];
2261786099deSNavdeep Parhar 	MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);
2262786099deSNavdeep Parhar 
2263786099deSNavdeep Parhar 	return (n);
2264786099deSNavdeep Parhar }
2265786099deSNavdeep Parhar 
2266786099deSNavdeep Parhar static inline void
2267786099deSNavdeep Parhar set_mbuf_eo_len16(struct mbuf *m, uint8_t len16)
2268786099deSNavdeep Parhar {
2269786099deSNavdeep Parhar 
2270786099deSNavdeep Parhar 	M_ASSERTPKTHDR(m);
2271786099deSNavdeep Parhar 	m->m_pkthdr.PH_loc.eight[2] = len16;
2272786099deSNavdeep Parhar }
2273786099deSNavdeep Parhar 
2274786099deSNavdeep Parhar static inline int
2275786099deSNavdeep Parhar mbuf_eo_tsclk_tsoff(struct mbuf *m)
2276786099deSNavdeep Parhar {
2277786099deSNavdeep Parhar 
2278786099deSNavdeep Parhar 	M_ASSERTPKTHDR(m);
2279786099deSNavdeep Parhar 	return (m->m_pkthdr.PH_loc.eight[3]);
2280786099deSNavdeep Parhar }
2281786099deSNavdeep Parhar 
2282786099deSNavdeep Parhar static inline void
2283786099deSNavdeep Parhar set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_tsoff)
2284786099deSNavdeep Parhar {
2285786099deSNavdeep Parhar 
2286786099deSNavdeep Parhar 	M_ASSERTPKTHDR(m);
2287786099deSNavdeep Parhar 	m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff;
2288786099deSNavdeep Parhar }
2289786099deSNavdeep Parhar 
2290786099deSNavdeep Parhar static inline int
2291e38a50e8SJohn Baldwin needs_eo(struct cxgbe_snd_tag *cst)
2292786099deSNavdeep Parhar {
2293786099deSNavdeep Parhar 
2294e38a50e8SJohn Baldwin 	return (cst != NULL && cst->type == IF_SND_TAG_TYPE_RATE_LIMIT);
2295786099deSNavdeep Parhar }
2296786099deSNavdeep Parhar #endif
2297786099deSNavdeep Parhar 
22985cdaef71SJohn Baldwin /*
22995cdaef71SJohn Baldwin  * Try to allocate an mbuf to contain a raw work request.  To make it
23005cdaef71SJohn Baldwin  * easy to construct the work request, don't allocate a chain but a
23015cdaef71SJohn Baldwin  * single mbuf.
23025cdaef71SJohn Baldwin  */
23035cdaef71SJohn Baldwin struct mbuf *
23045cdaef71SJohn Baldwin alloc_wr_mbuf(int len, int how)
23055cdaef71SJohn Baldwin {
23065cdaef71SJohn Baldwin 	struct mbuf *m;
23075cdaef71SJohn Baldwin 
23085cdaef71SJohn Baldwin 	if (len <= MHLEN)
23095cdaef71SJohn Baldwin 		m = m_gethdr(how, MT_DATA);
23105cdaef71SJohn Baldwin 	else if (len <= MCLBYTES)
23115cdaef71SJohn Baldwin 		m = m_getcl(how, MT_DATA, M_PKTHDR);
23125cdaef71SJohn Baldwin 	else
23135cdaef71SJohn Baldwin 		m = NULL;
23145cdaef71SJohn Baldwin 	if (m == NULL)
23155cdaef71SJohn Baldwin 		return (NULL);
23165cdaef71SJohn Baldwin 	m->m_pkthdr.len = len;
23175cdaef71SJohn Baldwin 	m->m_len = len;
23185cdaef71SJohn Baldwin 	set_mbuf_cflags(m, MC_RAW_WR);
23195cdaef71SJohn Baldwin 	set_mbuf_len16(m, howmany(len, 16));
23205cdaef71SJohn Baldwin 	return (m);
23215cdaef71SJohn Baldwin }
23225cdaef71SJohn Baldwin 
23237951040fSNavdeep Parhar static inline int
2324c0236bd9SNavdeep Parhar needs_hwcsum(struct mbuf *m)
2325c0236bd9SNavdeep Parhar {
2326c0236bd9SNavdeep Parhar 
2327c0236bd9SNavdeep Parhar 	M_ASSERTPKTHDR(m);
2328c0236bd9SNavdeep Parhar 
2329c0236bd9SNavdeep Parhar 	return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_IP |
2330c0236bd9SNavdeep Parhar 	    CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6));
2331c0236bd9SNavdeep Parhar }
2332c0236bd9SNavdeep Parhar 
2333c0236bd9SNavdeep Parhar static inline int
23347951040fSNavdeep Parhar needs_tso(struct mbuf *m)
23357951040fSNavdeep Parhar {
23367951040fSNavdeep Parhar 
23377951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
23387951040fSNavdeep Parhar 
2339a6a8ff35SNavdeep Parhar 	return (m->m_pkthdr.csum_flags & CSUM_TSO);
23407951040fSNavdeep Parhar }
23417951040fSNavdeep Parhar 
23427951040fSNavdeep Parhar static inline int
23437951040fSNavdeep Parhar needs_l3_csum(struct mbuf *m)
23447951040fSNavdeep Parhar {
23457951040fSNavdeep Parhar 
23467951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
23477951040fSNavdeep Parhar 
2348a6a8ff35SNavdeep Parhar 	return (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO));
23497951040fSNavdeep Parhar }
23507951040fSNavdeep Parhar 
23517951040fSNavdeep Parhar static inline int
2352c0236bd9SNavdeep Parhar needs_tcp_csum(struct mbuf *m)
2353c0236bd9SNavdeep Parhar {
2354c0236bd9SNavdeep Parhar 
2355c0236bd9SNavdeep Parhar 	M_ASSERTPKTHDR(m);
2356c0236bd9SNavdeep Parhar 	return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TCP_IPV6 | CSUM_TSO));
2357c0236bd9SNavdeep Parhar }
2358c0236bd9SNavdeep Parhar 
2359c0236bd9SNavdeep Parhar #ifdef RATELIMIT
2360c0236bd9SNavdeep Parhar static inline int
23617951040fSNavdeep Parhar needs_l4_csum(struct mbuf *m)
23627951040fSNavdeep Parhar {
23637951040fSNavdeep Parhar 
23647951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
23657951040fSNavdeep Parhar 
2366a6a8ff35SNavdeep Parhar 	return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
2367a6a8ff35SNavdeep Parhar 	    CSUM_TCP_IPV6 | CSUM_TSO));
23687951040fSNavdeep Parhar }
23697951040fSNavdeep Parhar 
23707951040fSNavdeep Parhar static inline int
2371786099deSNavdeep Parhar needs_udp_csum(struct mbuf *m)
2372786099deSNavdeep Parhar {
2373786099deSNavdeep Parhar 
2374786099deSNavdeep Parhar 	M_ASSERTPKTHDR(m);
2375786099deSNavdeep Parhar 	return (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6));
2376786099deSNavdeep Parhar }
2377c3fce948SNavdeep Parhar #endif
2378786099deSNavdeep Parhar 
2379786099deSNavdeep Parhar static inline int
23807951040fSNavdeep Parhar needs_vlan_insertion(struct mbuf *m)
23817951040fSNavdeep Parhar {
23827951040fSNavdeep Parhar 
23837951040fSNavdeep Parhar 	M_ASSERTPKTHDR(m);
23847951040fSNavdeep Parhar 
2385a6a8ff35SNavdeep Parhar 	return (m->m_flags & M_VLANTAG);
23867951040fSNavdeep Parhar }
23877951040fSNavdeep Parhar 
23887951040fSNavdeep Parhar static void *
23897951040fSNavdeep Parhar m_advance(struct mbuf **pm, int *poffset, int len)
23907951040fSNavdeep Parhar {
23917951040fSNavdeep Parhar 	struct mbuf *m = *pm;
23927951040fSNavdeep Parhar 	int offset = *poffset;
23937951040fSNavdeep Parhar 	uintptr_t p = 0;
23947951040fSNavdeep Parhar 
23957951040fSNavdeep Parhar 	MPASS(len > 0);
23967951040fSNavdeep Parhar 
2397e06ab612SJohn Baldwin 	for (;;) {
23987951040fSNavdeep Parhar 		if (offset + len < m->m_len) {
23997951040fSNavdeep Parhar 			offset += len;
24007951040fSNavdeep Parhar 			p = mtod(m, uintptr_t) + offset;
24017951040fSNavdeep Parhar 			break;
24027951040fSNavdeep Parhar 		}
24037951040fSNavdeep Parhar 		len -= m->m_len - offset;
24047951040fSNavdeep Parhar 		m = m->m_next;
24057951040fSNavdeep Parhar 		offset = 0;
24067951040fSNavdeep Parhar 		MPASS(m != NULL);
24077951040fSNavdeep Parhar 	}
24087951040fSNavdeep Parhar 	*poffset = offset;
24097951040fSNavdeep Parhar 	*pm = m;
24107951040fSNavdeep Parhar 	return ((void *)p);
24117951040fSNavdeep Parhar }
24127951040fSNavdeep Parhar 
/*
 * Count the number of physical segments needed to transmit the data of an
 * unmapped (M_EXTPG) mbuf, ignoring the first 'skip' bytes.  The data may
 * come from three places: the ext_pgs header, the pages themselves, and the
 * trailer.  *nextaddr carries the physical address expected to follow the
 * previous segment (0 if none); a piece that starts exactly there is
 * coalesced with it and not counted again.  On return *nextaddr holds the
 * physical address one past the last byte examined.
 */
static inline int
count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr)
{
	vm_paddr_t paddr;
	int i, len, off, pglen, pgoff, seglen, segoff;
	int nsegs = 0;

	M_ASSERTEXTPG(m);
	off = mtod(m, vm_offset_t);
	len = m->m_len;
	off += skip;
	len -= skip;

	/* Header region (e.g. a TLS record header), if present. */
	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			/* Entire header is skipped over. */
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			/* Header lives in the mbuf itself; KVA -> physical. */
			paddr = pmap_kextract(
			    (vm_offset_t)&m->m_epg_hdr[segoff]);
			if (*nextaddr != paddr)
				nsegs++;
			*nextaddr = paddr + seglen;
		}
	}
	/* The body pages; the first page may start at a non-zero offset. */
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			/* This page is entirely within the skipped prefix. */
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		/* Pages already carry their physical addresses. */
		paddr = m->m_epg_pa[i] + segoff;
		if (*nextaddr != paddr)
			nsegs++;
		*nextaddr = paddr + seglen;
		pgoff = 0;
	};
	/* Trailer region, if any data remains beyond the pages. */
	if (len != 0) {
		seglen = min(len, m->m_epg_trllen - off);
		len -= seglen;
		paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]);
		if (*nextaddr != paddr)
			nsegs++;
		*nextaddr = paddr + seglen;
	}

	return (nsegs);
}
2472d76bbe17SJohn Baldwin 
2473d76bbe17SJohn Baldwin 
/*
 * Count the number of DMA segments (physically discontiguous pieces) needed
 * to transmit the mbuf chain, ignoring the first 'skip' bytes.  Physically
 * adjacent pieces are coalesced into one segment.  If any mbuf in the chain
 * is unmapped (M_EXTPG), MC_NOMAP is OR-ed into *cflags.
 *
 * Can deal with empty mbufs in the chain that have m_len = 0, but the chain
 * must have at least one mbuf that's not empty.  It is possible for this
 * routine to return 0 if skip accounts for all the contents of the mbuf chain.
 */
static inline int
count_mbuf_nsegs(struct mbuf *m, int skip, uint8_t *cflags)
{
	vm_paddr_t nextaddr, paddr;
	vm_offset_t va;
	int len, nsegs;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_pkthdr.len > 0);
	MPASS(m->m_pkthdr.len >= skip);

	nsegs = 0;
	nextaddr = 0;	/* physical address expected to continue the last seg */
	for (; m; m = m->m_next) {
		len = m->m_len;
		if (__predict_false(len == 0))
			continue;
		if (skip >= len) {
			/* This mbuf is entirely within the skipped prefix. */
			skip -= len;
			continue;
		}
		if ((m->m_flags & M_EXTPG) != 0) {
			*cflags |= MC_NOMAP;
			nsegs += count_mbuf_ext_pgs(m, skip, &nextaddr);
			skip = 0;
			continue;
		}
		va = mtod(m, vm_offset_t) + skip;
		len -= skip;
		skip = 0;
		paddr = pmap_kextract(va);
		nsegs += sglist_count((void *)(uintptr_t)va, len);
		/* Coalesce with the previous segment if physically adjacent. */
		if (paddr == nextaddr)
			nsegs--;
		nextaddr = pmap_kextract(va + len - 1) + 1;
	}

	return (nsegs);
}
25187951040fSNavdeep Parhar 
25197951040fSNavdeep Parhar /*
25207951040fSNavdeep Parhar  * Analyze the mbuf to determine its tx needs.  The mbuf passed in may change:
25217951040fSNavdeep Parhar  * a) caller can assume it's been freed if this function returns with an error.
25227951040fSNavdeep Parhar  * b) it may get defragged up if the gather list is too long for the hardware.
25237951040fSNavdeep Parhar  */
int
parse_pkt(struct adapter *sc, struct mbuf **mp)
{
	struct mbuf *m0 = *mp, *m;
	int rc, nsegs, defragged = 0, offset;
	struct ether_header *eh;
	void *l3hdr;
#if defined(INET) || defined(INET6)
	struct tcphdr *tcp;
#endif
#if defined(KERN_TLS) || defined(RATELIMIT)
	struct cxgbe_snd_tag *cst;
#endif
	uint16_t eh_type;
	uint8_t cflags;

	cflags = 0;
	M_ASSERTPKTHDR(m0);
	if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) {
		rc = EINVAL;
fail:
		/* Common exit for all errors: the mbuf is always freed. */
		m_freem(m0);
		*mp = NULL;
		return (rc);
	}
restart:
	/*
	 * First count the number of gather list segments in the payload.
	 * Defrag the mbuf if nsegs exceeds the hardware limit.
	 */
	M_ASSERTPKTHDR(m0);
	MPASS(m0->m_pkthdr.len > 0);
	nsegs = count_mbuf_nsegs(m0, 0, &cflags);
#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG)
		cst = mst_to_cst(m0->m_pkthdr.snd_tag);
	else
		cst = NULL;
#endif
#ifdef KERN_TLS
	/* NIC TLS packets take a separate parsing path entirely. */
	if (cst != NULL && cst->type == IF_SND_TAG_TYPE_TLS) {
		int len16;

		cflags |= MC_TLS;
		set_mbuf_cflags(m0, cflags);
		rc = t6_ktls_parse_pkt(m0, &nsegs, &len16);
		if (rc != 0)
			goto fail;
		set_mbuf_nsegs(m0, nsegs);
		set_mbuf_len16(m0, len16);
		return (0);
	}
#endif
	if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) {
		/* Only one defrag attempt; a second overflow is fatal. */
		if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) {
			rc = EFBIG;
			goto fail;
		}
		*mp = m0 = m;	/* update caller's copy after defrag */
		goto restart;
	}

	/*
	 * Small packets spread over >2 segments: cheaper to collapse into a
	 * single mbuf (not possible for unmapped data, hence !MC_NOMAP).
	 */
	if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN &&
	    !(cflags & MC_NOMAP))) {
		m0 = m_pullup(m0, m0->m_pkthdr.len);
		if (m0 == NULL) {
			/* Should have left well enough alone. */
			rc = EFBIG;
			goto fail;
		}
		*mp = m0;	/* update caller's copy after pullup */
		goto restart;
	}
	set_mbuf_nsegs(m0, nsegs);
	set_mbuf_cflags(m0, cflags);
	if (sc->flags & IS_VF)
		set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0)));
	else
		set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0)));

#ifdef RATELIMIT
	/*
	 * Ethofld is limited to TCP and UDP for now, and only when L4 hw
	 * checksumming is enabled.  needs_l4_csum happens to check for all the
	 * right things.
	 */
	if (__predict_false(needs_eo(cst) && !needs_l4_csum(m0))) {
		/* Drop the send tag and fall back to the regular tx path. */
		m_snd_tag_rele(m0->m_pkthdr.snd_tag);
		m0->m_pkthdr.snd_tag = NULL;
		m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
		cst = NULL;
	}
#endif

	/* No offloads requested: header parsing below is unnecessary. */
	if (!needs_hwcsum(m0)
#ifdef RATELIMIT
   		 && !needs_eo(cst)
#endif
	)
		return (0);

	/* Parse L2/L3/L4 headers and record their lengths in the pkthdr. */
	m = m0;
	eh = mtod(m, struct ether_header *);
	eh_type = ntohs(eh->ether_type);
	if (eh_type == ETHERTYPE_VLAN) {
		struct ether_vlan_header *evh = (void *)eh;

		eh_type = ntohs(evh->evl_proto);
		m0->m_pkthdr.l2hlen = sizeof(*evh);
	} else
		m0->m_pkthdr.l2hlen = sizeof(*eh);

	offset = 0;
	l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen);

	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6 = l3hdr;

		/* TSO with IPv6 extension headers is not supported here. */
		MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP);

		m0->m_pkthdr.l3hlen = sizeof(*ip6);
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct ip *ip = l3hdr;

		m0->m_pkthdr.l3hlen = ip->ip_hl * 4;
		break;
	}
#endif
	default:
		panic("%s: ethertype 0x%04x unknown.  if_cxgbe must be compiled"
		    " with the same INET/INET6 options as the kernel.",
		    __func__, eh_type);
	}

#if defined(INET) || defined(INET6)
	if (needs_tcp_csum(m0)) {
		tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen);
		m0->m_pkthdr.l4hlen = tcp->th_off * 4;
#ifdef RATELIMIT
		/* 0x0101080a = NOP,NOP,TIMESTAMP option prefix. */
		if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) {
			set_mbuf_eo_tsclk_tsoff(m0,
			    V_FW_ETH_TX_EO_WR_TSCLK(tsclk) |
			    V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1));
		} else
			set_mbuf_eo_tsclk_tsoff(m0, 0);
	} else if (needs_udp_csum(m0)) {
		m0->m_pkthdr.l4hlen = sizeof(struct udphdr);
#endif
	}
#ifdef RATELIMIT
	if (needs_eo(cst)) {
		u_int immhdrs;

		/* EO WRs have the headers in the WR and not the GL. */
		immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen +
		    m0->m_pkthdr.l4hlen;
		cflags = 0;
		nsegs = count_mbuf_nsegs(m0, immhdrs, &cflags);
		MPASS(cflags == mbuf_cflags(m0));
		set_mbuf_eo_nsegs(m0, nsegs);
		set_mbuf_eo_len16(m0,
		    txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0)));
	}
#endif
#endif
	MPASS(m0 == *mp);
	return (0);
}
27007951040fSNavdeep Parhar 
/*
 * Reserve len16 * 16 bytes of space for a work request directly in the
 * descriptor ring (fast path) or in a software-allocated wrqe (slow path,
 * indicated by cookie->pidx == -1).  The caller fills in the WR and then
 * calls commit_wrq_wr with the same cookie.  Returns NULL only if the
 * slow-path allocation fails.
 */
void *
start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie)
{
	struct sge_eq *eq = &wrq->eq;
	struct adapter *sc = wrq->adapter;
	int ndesc, available;
	struct wrqe *wr;
	void *w;

	MPASS(len16 > 0);
	ndesc = tx_len16_to_desc(len16);
	MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC);

	EQ_LOCK(eq);

	/* Try to flush out any queued WRs first to preserve ordering. */
	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
		drain_wrq_wr_list(sc, wrq);

	if (!STAILQ_EMPTY(&wrq->wr_list)) {
slowpath:
		/* WRs still queued (or no room): hand out a software wrqe. */
		EQ_UNLOCK(eq);
		wr = alloc_wrqe(len16 * 16, wrq);
		if (__predict_false(wr == NULL))
			return (NULL);
		cookie->pidx = -1;	/* marks slow path for commit_wrq_wr */
		cookie->ndesc = ndesc;
		return (&wr->wr);
	}

	eq->cidx = read_hw_cidx(eq);
	if (eq->pidx == eq->cidx)
		available = eq->sidx - 1;
	else
		available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
	if (available < ndesc)
		goto slowpath;

	cookie->pidx = eq->pidx;
	cookie->ndesc = ndesc;
	TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link);

	w = &eq->desc[eq->pidx];
	IDXINCR(eq->pidx, ndesc, eq->sidx);
	if (__predict_false(cookie->pidx + ndesc > eq->sidx)) {
		/*
		 * WR would wrap around the end of the ring; stage it in the
		 * contiguous scratch space and copy it in at commit time.
		 */
		w = &wrq->ss[0];
		wrq->ss_pidx = cookie->pidx;
		wrq->ss_len = len16 * 16;
	}

	EQ_UNLOCK(eq);

	return (w);
}
27547951040fSNavdeep Parhar 
/*
 * Submit a work request previously reserved with start_wrq_wr.  Handles the
 * slow path (software wrqe), copying a wrapped WR out of the scratch space,
 * and merging this WR's descriptors into neighbouring still-incomplete WRs
 * so that the doorbell is only rung for a contiguous completed prefix.
 */
void
commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie)
{
	struct sge_eq *eq = &wrq->eq;
	struct adapter *sc = wrq->adapter;
	int ndesc, pidx;
	struct wrq_cookie *prev, *next;

	if (cookie->pidx == -1) {
		/* Slow path: WR was staged in a software wrqe. */
		struct wrqe *wr = __containerof(w, struct wrqe, wr);

		t4_wrq_tx(sc, wr);
		return;
	}

	if (__predict_false(w == &wrq->ss[0])) {
		/* WR was built in scratch space; copy it in, in two pieces. */
		int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE;

		MPASS(wrq->ss_len > n);	/* WR had better wrap around. */
		bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n);
		bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n);
		wrq->tx_wrs_ss++;
	} else
		wrq->tx_wrs_direct++;

	EQ_LOCK(eq);
	ndesc = cookie->ndesc;	/* Can be more than SGE_MAX_WR_NDESC here. */
	pidx = cookie->pidx;
	MPASS(pidx >= 0 && pidx < eq->sidx);
	prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link);
	next = TAILQ_NEXT(cookie, link);
	if (prev == NULL) {
		/* This is the oldest incomplete WR; it starts at dbidx. */
		MPASS(pidx == eq->dbidx);
		if (next == NULL || ndesc >= 16) {
			int available;
			struct fw_eth_tx_pkt_wr *dst;	/* any fw WR struct will do */

			/*
			 * Note that the WR via which we'll request tx updates
			 * is at pidx and not eq->pidx, which has moved on
			 * already.
			 */
			dst = (void *)&eq->desc[pidx];
			available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
			if (available < eq->sidx / 4 &&
			    atomic_cmpset_int(&eq->equiq, 0, 1)) {
				/*
				 * XXX: This is not 100% reliable with some
				 * types of WRs.  But this is a very unusual
				 * situation for an ofld/ctrl queue anyway.
				 */
				dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
				    F_FW_WR_EQUEQ);
			}

			ring_eq_db(wrq->adapter, eq, ndesc);
		} else {
			/* Defer the doorbell: fold our descs into 'next'. */
			MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc);
			next->pidx = pidx;
			next->ndesc += ndesc;
		}
	} else {
		/* An older WR is still incomplete; 'prev' absorbs our descs. */
		MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc);
		prev->ndesc += ndesc;
	}
	TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link);

	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
		drain_wrq_wr_list(sc, wrq);

#ifdef INVARIANTS
	if (TAILQ_EMPTY(&wrq->incomplete_wrs)) {
		/* Doorbell must have caught up to the pidx. */
		MPASS(wrq->eq.pidx == wrq->eq.dbidx);
	}
#endif
	EQ_UNLOCK(eq);
}
28337951040fSNavdeep Parhar 
28347951040fSNavdeep Parhar static u_int
28357951040fSNavdeep Parhar can_resume_eth_tx(struct mp_ring *r)
28367951040fSNavdeep Parhar {
28377951040fSNavdeep Parhar 	struct sge_eq *eq = r->cookie;
28387951040fSNavdeep Parhar 
28397951040fSNavdeep Parhar 	return (total_available_tx_desc(eq) > eq->sidx / 8);
28407951040fSNavdeep Parhar }
28417951040fSNavdeep Parhar 
28427951040fSNavdeep Parhar static inline int
28437951040fSNavdeep Parhar cannot_use_txpkts(struct mbuf *m)
28447951040fSNavdeep Parhar {
28457951040fSNavdeep Parhar 	/* maybe put a GL limit too, to avoid silliness? */
28467951040fSNavdeep Parhar 
2847bddf7343SJohn Baldwin 	return (needs_tso(m) || (mbuf_cflags(m) & (MC_RAW_WR | MC_TLS)) != 0);
28487951040fSNavdeep Parhar }
28497951040fSNavdeep Parhar 
28501404daa7SNavdeep Parhar static inline int
28511404daa7SNavdeep Parhar discard_tx(struct sge_eq *eq)
28521404daa7SNavdeep Parhar {
28531404daa7SNavdeep Parhar 
28541404daa7SNavdeep Parhar 	return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED);
28551404daa7SNavdeep Parhar }
28561404daa7SNavdeep Parhar 
28575cdaef71SJohn Baldwin static inline int
28585cdaef71SJohn Baldwin wr_can_update_eq(struct fw_eth_tx_pkts_wr *wr)
28595cdaef71SJohn Baldwin {
28605cdaef71SJohn Baldwin 
28615cdaef71SJohn Baldwin 	switch (G_FW_WR_OP(be32toh(wr->op_pkd))) {
28625cdaef71SJohn Baldwin 	case FW_ULPTX_WR:
28635cdaef71SJohn Baldwin 	case FW_ETH_TX_PKT_WR:
28645cdaef71SJohn Baldwin 	case FW_ETH_TX_PKTS_WR:
2865693a9dfcSNavdeep Parhar 	case FW_ETH_TX_PKTS2_WR:
28665cdaef71SJohn Baldwin 	case FW_ETH_TX_PKT_VM_WR:
28675cdaef71SJohn Baldwin 		return (1);
28685cdaef71SJohn Baldwin 	default:
28695cdaef71SJohn Baldwin 		return (0);
28705cdaef71SJohn Baldwin 	}
28715cdaef71SJohn Baldwin }
28725cdaef71SJohn Baldwin 
28737951040fSNavdeep Parhar /*
28747951040fSNavdeep Parhar  * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
28757951040fSNavdeep Parhar  * be consumed.  Return the actual number consumed.  0 indicates a stall.
28767951040fSNavdeep Parhar  */
static u_int
eth_tx(struct mp_ring *r, u_int cidx, u_int pidx)
{
	struct sge_txq *txq = r->cookie;
	struct sge_eq *eq = &txq->eq;
	struct ifnet *ifp = txq->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->adapter;
	u_int total, remaining;		/* # of packets */
	u_int available, dbdiff;	/* # of hardware descriptors */
	u_int n, next_cidx;
	struct mbuf *m0, *tail;
	struct txpkts txp;
	struct fw_eth_tx_pkts_wr *wr;	/* any fw WR struct will do */

	remaining = IDXDIFF(pidx, cidx, r->size);
	MPASS(remaining > 0);	/* Must not be called without work to do. */
	total = 0;

	TXQ_LOCK(txq);
	if (__predict_false(discard_tx(eq))) {
		/* Queue is disabled/flushing: drop everything on the ring. */
		while (cidx != pidx) {
			m0 = r->items[cidx];
			m_freem(m0);
			if (++cidx == r->size)
				cidx = 0;
		}
		reclaim_tx_descs(txq, 2048);
		total = remaining;
		goto done;
	}

	/* How many hardware descriptors do we have readily available. */
	if (eq->pidx == eq->cidx)
		available = eq->sidx - 1;
	else
		available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
	dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx);

	while (remaining > 0) {

		m0 = r->items[cidx];
		M_ASSERTPKTHDR(m0);
		MPASS(m0->m_nextpkt == NULL);

		if (available < tx_len16_to_desc(mbuf_len16(m0))) {
			available += reclaim_tx_descs(txq, 64);
			if (available < tx_len16_to_desc(mbuf_len16(m0)))
				break;	/* out of descriptors */
		}

		next_cidx = cidx + 1;
		if (__predict_false(next_cidx == r->size))
			next_cidx = 0;

		/* Pick the right WR type for this frame. */
		wr = (void *)&eq->desc[eq->pidx];
		if (mbuf_cflags(m0) & MC_RAW_WR) {
			total++;
			remaining--;
			n = write_raw_wr(txq, (void *)wr, m0, available);
#ifdef KERN_TLS
		} else if (mbuf_cflags(m0) & MC_TLS) {
			total++;
			remaining--;
			ETHER_BPF_MTAP(ifp, m0);
			n = t6_ktls_write_wr(txq,(void *)wr, m0,
			    mbuf_nsegs(m0), available);
#endif
		} else if (sc->flags & IS_VF) {
			total++;
			remaining--;
			ETHER_BPF_MTAP(ifp, m0);
			n = write_txpkt_vm_wr(sc, txq, (void *)wr, m0,
			    available);
		} else if (remaining > 1 &&
		    try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) {
			/*
			 * Coalesce as many consecutive small frames as will
			 * fit into a single txpkts work request.
			 */

			/* pkts at cidx, next_cidx should both be in txp. */
			MPASS(txp.npkt == 2);
			tail = r->items[next_cidx];
			MPASS(tail->m_nextpkt == NULL);
			ETHER_BPF_MTAP(ifp, m0);
			ETHER_BPF_MTAP(ifp, tail);
			m0->m_nextpkt = tail;

			if (__predict_false(++next_cidx == r->size))
				next_cidx = 0;

			while (next_cidx != pidx) {
				if (add_to_txpkts(r->items[next_cidx], &txp,
				    available) != 0)
					break;
				tail->m_nextpkt = r->items[next_cidx];
				tail = tail->m_nextpkt;
				ETHER_BPF_MTAP(ifp, tail);
				if (__predict_false(++next_cidx == r->size))
					next_cidx = 0;
			}

			n = write_txpkts_wr(sc, txq, wr, m0, &txp, available);
			total += txp.npkt;
			remaining -= txp.npkt;
		} else {
			total++;
			remaining--;
			ETHER_BPF_MTAP(ifp, m0);
			n = write_txpkt_wr(sc, txq, (void *)wr, m0, available);
		}
		MPASS(n >= 1 && n <= available);
		if (!(mbuf_cflags(m0) & MC_TLS))
			MPASS(n <= SGE_MAX_WR_NDESC);

		available -= n;
		dbdiff += n;
		IDXINCR(eq->pidx, n, eq->sidx);

		/* Request an egress update if the ring is filling up. */
		if (wr_can_update_eq(wr)) {
			if (total_available_tx_desc(eq) < eq->sidx / 4 &&
			    atomic_cmpset_int(&eq->equiq, 0, 1)) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
				    F_FW_WR_EQUEQ);
				eq->equeqidx = eq->pidx;
			} else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >=
			    32) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
				eq->equeqidx = eq->pidx;
			}
		}

		/* Ring the doorbell in batches rather than per packet. */
		if (dbdiff >= 16 && remaining >= 4) {
			ring_eq_db(sc, eq, dbdiff);
			available += reclaim_tx_descs(txq, 4 * dbdiff);
			dbdiff = 0;
		}

		cidx = next_cidx;
	}
	if (dbdiff != 0) {
		ring_eq_db(sc, eq, dbdiff);
		reclaim_tx_descs(txq, 32);
	}
done:
	TXQ_UNLOCK(txq);

	return (total);
}
3023733b9277SNavdeep Parhar 
302454e4ee71SNavdeep Parhar static inline void
302554e4ee71SNavdeep Parhar init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
3026b2daa9a9SNavdeep Parhar     int qsize)
302754e4ee71SNavdeep Parhar {
3028b2daa9a9SNavdeep Parhar 
302954e4ee71SNavdeep Parhar 	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
303054e4ee71SNavdeep Parhar 	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
303154e4ee71SNavdeep Parhar 	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
303254e4ee71SNavdeep Parhar 	    ("%s: bad pktc_idx %d", __func__, pktc_idx));
303354e4ee71SNavdeep Parhar 
303454e4ee71SNavdeep Parhar 	iq->flags = 0;
303554e4ee71SNavdeep Parhar 	iq->adapter = sc;
30367a32954cSNavdeep Parhar 	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
30377a32954cSNavdeep Parhar 	iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
30387a32954cSNavdeep Parhar 	if (pktc_idx >= 0) {
30397a32954cSNavdeep Parhar 		iq->intr_params |= F_QINTR_CNT_EN;
304054e4ee71SNavdeep Parhar 		iq->intr_pktc_idx = pktc_idx;
30417a32954cSNavdeep Parhar 	}
3042d14b0ac1SNavdeep Parhar 	iq->qsize = roundup2(qsize, 16);	/* See FW_IQ_CMD/iqsize */
304390e7434aSNavdeep Parhar 	iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE;
304454e4ee71SNavdeep Parhar }
304554e4ee71SNavdeep Parhar 
304654e4ee71SNavdeep Parhar static inline void
3047e3207e19SNavdeep Parhar init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name)
304854e4ee71SNavdeep Parhar {
30491458bff9SNavdeep Parhar 
305054e4ee71SNavdeep Parhar 	fl->qsize = qsize;
305190e7434aSNavdeep Parhar 	fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
305254e4ee71SNavdeep Parhar 	strlcpy(fl->lockname, name, sizeof(fl->lockname));
3053e3207e19SNavdeep Parhar 	if (sc->flags & BUF_PACKING_OK &&
3054e3207e19SNavdeep Parhar 	    ((!is_t4(sc) && buffer_packing) ||	/* T5+: enabled unless 0 */
3055e3207e19SNavdeep Parhar 	    (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */
30561458bff9SNavdeep Parhar 		fl->flags |= FL_BUF_PACKING;
305746e1e307SNavdeep Parhar 	fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING);
305846e1e307SNavdeep Parhar 	fl->safe_zidx = sc->sge.safe_zidx;
305954e4ee71SNavdeep Parhar }
306054e4ee71SNavdeep Parhar 
306154e4ee71SNavdeep Parhar static inline void
306290e7434aSNavdeep Parhar init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
306390e7434aSNavdeep Parhar     uint8_t tx_chan, uint16_t iqid, char *name)
306454e4ee71SNavdeep Parhar {
3065733b9277SNavdeep Parhar 	KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype));
3066733b9277SNavdeep Parhar 
3067733b9277SNavdeep Parhar 	eq->flags = eqtype & EQ_TYPEMASK;
3068733b9277SNavdeep Parhar 	eq->tx_chan = tx_chan;
3069733b9277SNavdeep Parhar 	eq->iqid = iqid;
307090e7434aSNavdeep Parhar 	eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
3071f7dfe243SNavdeep Parhar 	strlcpy(eq->lockname, name, sizeof(eq->lockname));
307254e4ee71SNavdeep Parhar }
307354e4ee71SNavdeep Parhar 
307454e4ee71SNavdeep Parhar static int
307554e4ee71SNavdeep Parhar alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
307654e4ee71SNavdeep Parhar     bus_dmamap_t *map, bus_addr_t *pa, void **va)
307754e4ee71SNavdeep Parhar {
307854e4ee71SNavdeep Parhar 	int rc;
307954e4ee71SNavdeep Parhar 
308054e4ee71SNavdeep Parhar 	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
308154e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
308254e4ee71SNavdeep Parhar 	if (rc != 0) {
308354e4ee71SNavdeep Parhar 		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
308454e4ee71SNavdeep Parhar 		goto done;
308554e4ee71SNavdeep Parhar 	}
308654e4ee71SNavdeep Parhar 
308754e4ee71SNavdeep Parhar 	rc = bus_dmamem_alloc(*tag, va,
308854e4ee71SNavdeep Parhar 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
308954e4ee71SNavdeep Parhar 	if (rc != 0) {
309054e4ee71SNavdeep Parhar 		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
309154e4ee71SNavdeep Parhar 		goto done;
309254e4ee71SNavdeep Parhar 	}
309354e4ee71SNavdeep Parhar 
309454e4ee71SNavdeep Parhar 	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
309554e4ee71SNavdeep Parhar 	if (rc != 0) {
309654e4ee71SNavdeep Parhar 		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
309754e4ee71SNavdeep Parhar 		goto done;
309854e4ee71SNavdeep Parhar 	}
309954e4ee71SNavdeep Parhar done:
310054e4ee71SNavdeep Parhar 	if (rc)
310154e4ee71SNavdeep Parhar 		free_ring(sc, *tag, *map, *pa, *va);
310254e4ee71SNavdeep Parhar 
310354e4ee71SNavdeep Parhar 	return (rc);
310454e4ee71SNavdeep Parhar }
310554e4ee71SNavdeep Parhar 
310654e4ee71SNavdeep Parhar static int
310754e4ee71SNavdeep Parhar free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
310854e4ee71SNavdeep Parhar     bus_addr_t pa, void *va)
310954e4ee71SNavdeep Parhar {
311054e4ee71SNavdeep Parhar 	if (pa)
311154e4ee71SNavdeep Parhar 		bus_dmamap_unload(tag, map);
311254e4ee71SNavdeep Parhar 	if (va)
311354e4ee71SNavdeep Parhar 		bus_dmamem_free(tag, va, map);
311454e4ee71SNavdeep Parhar 	if (tag)
311554e4ee71SNavdeep Parhar 		bus_dma_tag_destroy(tag);
311654e4ee71SNavdeep Parhar 
311754e4ee71SNavdeep Parhar 	return (0);
311854e4ee71SNavdeep Parhar }
311954e4ee71SNavdeep Parhar 
312054e4ee71SNavdeep Parhar /*
312154e4ee71SNavdeep Parhar  * Allocates the ring for an ingress queue and an optional freelist.  If the
312254e4ee71SNavdeep Parhar  * freelist is specified it will be allocated and then associated with the
312354e4ee71SNavdeep Parhar  * ingress queue.
312454e4ee71SNavdeep Parhar  *
312554e4ee71SNavdeep Parhar  * Returns errno on failure.  Resources allocated up to that point may still be
312654e4ee71SNavdeep Parhar  * allocated.  Caller is responsible for cleanup in case this function fails.
312754e4ee71SNavdeep Parhar  *
3128f549e352SNavdeep Parhar  * If the ingress queue will take interrupts directly then the intr_idx
3129f549e352SNavdeep Parhar  * specifies the vector, starting from 0.  -1 means the interrupts for this
3130f549e352SNavdeep Parhar  * queue should be forwarded to the fwq.
313154e4ee71SNavdeep Parhar  */
313254e4ee71SNavdeep Parhar static int
3133fe2ebb76SJohn Baldwin alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
3134bc14b14dSNavdeep Parhar     int intr_idx, int cong)
313554e4ee71SNavdeep Parhar {
313654e4ee71SNavdeep Parhar 	int rc, i, cntxt_id;
313754e4ee71SNavdeep Parhar 	size_t len;
313854e4ee71SNavdeep Parhar 	struct fw_iq_cmd c;
3139fe2ebb76SJohn Baldwin 	struct port_info *pi = vi->pi;
314054e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
314190e7434aSNavdeep Parhar 	struct sge_params *sp = &sc->params.sge;
314254e4ee71SNavdeep Parhar 	__be32 v = 0;
314354e4ee71SNavdeep Parhar 
3144b2daa9a9SNavdeep Parhar 	len = iq->qsize * IQ_ESIZE;
314554e4ee71SNavdeep Parhar 	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
314654e4ee71SNavdeep Parhar 	    (void **)&iq->desc);
314754e4ee71SNavdeep Parhar 	if (rc != 0)
314854e4ee71SNavdeep Parhar 		return (rc);
314954e4ee71SNavdeep Parhar 
315054e4ee71SNavdeep Parhar 	bzero(&c, sizeof(c));
315154e4ee71SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
315254e4ee71SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
315354e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_VFN(0));
315454e4ee71SNavdeep Parhar 
315554e4ee71SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
315654e4ee71SNavdeep Parhar 	    FW_LEN16(c));
315754e4ee71SNavdeep Parhar 
315854e4ee71SNavdeep Parhar 	/* Special handling for firmware event queue */
315954e4ee71SNavdeep Parhar 	if (iq == &sc->sge.fwq)
316054e4ee71SNavdeep Parhar 		v |= F_FW_IQ_CMD_IQASYNCH;
316154e4ee71SNavdeep Parhar 
3162f549e352SNavdeep Parhar 	if (intr_idx < 0) {
3163f549e352SNavdeep Parhar 		/* Forwarded interrupts, all headed to fwq */
3164f549e352SNavdeep Parhar 		v |= F_FW_IQ_CMD_IQANDST;
3165f549e352SNavdeep Parhar 		v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id);
3166f549e352SNavdeep Parhar 	} else {
316754e4ee71SNavdeep Parhar 		KASSERT(intr_idx < sc->intr_count,
316854e4ee71SNavdeep Parhar 		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
316954e4ee71SNavdeep Parhar 		v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
3170f549e352SNavdeep Parhar 	}
317154e4ee71SNavdeep Parhar 
317254e4ee71SNavdeep Parhar 	c.type_to_iqandstindex = htobe32(v |
317354e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
3174fe2ebb76SJohn Baldwin 	    V_FW_IQ_CMD_VIID(vi->viid) |
317554e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
317654e4ee71SNavdeep Parhar 	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
317754e4ee71SNavdeep Parhar 	    F_FW_IQ_CMD_IQGTSMODE |
317854e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
3179b2daa9a9SNavdeep Parhar 	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
318054e4ee71SNavdeep Parhar 	c.iqsize = htobe16(iq->qsize);
318154e4ee71SNavdeep Parhar 	c.iqaddr = htobe64(iq->ba);
3182bc14b14dSNavdeep Parhar 	if (cong >= 0)
3183bc14b14dSNavdeep Parhar 		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
318454e4ee71SNavdeep Parhar 
318554e4ee71SNavdeep Parhar 	if (fl) {
318654e4ee71SNavdeep Parhar 		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
318754e4ee71SNavdeep Parhar 
3188b2daa9a9SNavdeep Parhar 		len = fl->qsize * EQ_ESIZE;
318954e4ee71SNavdeep Parhar 		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
319054e4ee71SNavdeep Parhar 		    &fl->ba, (void **)&fl->desc);
319154e4ee71SNavdeep Parhar 		if (rc)
319254e4ee71SNavdeep Parhar 			return (rc);
319354e4ee71SNavdeep Parhar 
319454e4ee71SNavdeep Parhar 		/* Allocate space for one software descriptor per buffer. */
319554e4ee71SNavdeep Parhar 		rc = alloc_fl_sdesc(fl);
319654e4ee71SNavdeep Parhar 		if (rc != 0) {
319754e4ee71SNavdeep Parhar 			device_printf(sc->dev,
319854e4ee71SNavdeep Parhar 			    "failed to setup fl software descriptors: %d\n",
319954e4ee71SNavdeep Parhar 			    rc);
320054e4ee71SNavdeep Parhar 			return (rc);
320154e4ee71SNavdeep Parhar 		}
32024d6db4e0SNavdeep Parhar 
32034d6db4e0SNavdeep Parhar 		if (fl->flags & FL_BUF_PACKING) {
320490e7434aSNavdeep Parhar 			fl->lowat = roundup2(sp->fl_starve_threshold2, 8);
320590e7434aSNavdeep Parhar 			fl->buf_boundary = sp->pack_boundary;
32064d6db4e0SNavdeep Parhar 		} else {
320790e7434aSNavdeep Parhar 			fl->lowat = roundup2(sp->fl_starve_threshold, 8);
3208e3207e19SNavdeep Parhar 			fl->buf_boundary = 16;
32094d6db4e0SNavdeep Parhar 		}
321090e7434aSNavdeep Parhar 		if (fl_pad && fl->buf_boundary < sp->pad_boundary)
321190e7434aSNavdeep Parhar 			fl->buf_boundary = sp->pad_boundary;
321254e4ee71SNavdeep Parhar 
3213214c3582SNavdeep Parhar 		c.iqns_to_fl0congen |=
3214bc14b14dSNavdeep Parhar 		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
3215bc14b14dSNavdeep Parhar 			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
32161458bff9SNavdeep Parhar 			(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
32171458bff9SNavdeep Parhar 			(fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN :
32181458bff9SNavdeep Parhar 			    0));
3219bc14b14dSNavdeep Parhar 		if (cong >= 0) {
3220bc14b14dSNavdeep Parhar 			c.iqns_to_fl0congen |=
3221bc14b14dSNavdeep Parhar 				htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
3222bc14b14dSNavdeep Parhar 				    F_FW_IQ_CMD_FL0CONGCIF |
3223bc14b14dSNavdeep Parhar 				    F_FW_IQ_CMD_FL0CONGEN);
3224bc14b14dSNavdeep Parhar 		}
322554e4ee71SNavdeep Parhar 		c.fl0dcaen_to_fl0cidxfthresh =
3226ed7e5640SNavdeep Parhar 		    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
3227adb0cd84SNavdeep Parhar 			X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) |
3228ed7e5640SNavdeep Parhar 			V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
3229ed7e5640SNavdeep Parhar 			X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
323054e4ee71SNavdeep Parhar 		c.fl0size = htobe16(fl->qsize);
323154e4ee71SNavdeep Parhar 		c.fl0addr = htobe64(fl->ba);
323254e4ee71SNavdeep Parhar 	}
323354e4ee71SNavdeep Parhar 
323454e4ee71SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
323554e4ee71SNavdeep Parhar 	if (rc != 0) {
323654e4ee71SNavdeep Parhar 		device_printf(sc->dev,
323754e4ee71SNavdeep Parhar 		    "failed to create ingress queue: %d\n", rc);
323854e4ee71SNavdeep Parhar 		return (rc);
323954e4ee71SNavdeep Parhar 	}
324054e4ee71SNavdeep Parhar 
324154e4ee71SNavdeep Parhar 	iq->cidx = 0;
3242b2daa9a9SNavdeep Parhar 	iq->gen = F_RSPD_GEN;
324354e4ee71SNavdeep Parhar 	iq->intr_next = iq->intr_params;
324454e4ee71SNavdeep Parhar 	iq->cntxt_id = be16toh(c.iqid);
324554e4ee71SNavdeep Parhar 	iq->abs_id = be16toh(c.physiqid);
3246733b9277SNavdeep Parhar 	iq->flags |= IQ_ALLOCATED;
324754e4ee71SNavdeep Parhar 
324854e4ee71SNavdeep Parhar 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
3249733b9277SNavdeep Parhar 	if (cntxt_id >= sc->sge.niq) {
3250733b9277SNavdeep Parhar 		panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
3251733b9277SNavdeep Parhar 		    cntxt_id, sc->sge.niq - 1);
3252733b9277SNavdeep Parhar 	}
325354e4ee71SNavdeep Parhar 	sc->sge.iqmap[cntxt_id] = iq;
325454e4ee71SNavdeep Parhar 
325554e4ee71SNavdeep Parhar 	if (fl) {
32564d6db4e0SNavdeep Parhar 		u_int qid;
32574d6db4e0SNavdeep Parhar 
32584d6db4e0SNavdeep Parhar 		iq->flags |= IQ_HAS_FL;
325954e4ee71SNavdeep Parhar 		fl->cntxt_id = be16toh(c.fl0id);
326054e4ee71SNavdeep Parhar 		fl->pidx = fl->cidx = 0;
326154e4ee71SNavdeep Parhar 
32629f1f7ec9SNavdeep Parhar 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
3263733b9277SNavdeep Parhar 		if (cntxt_id >= sc->sge.neq) {
3264733b9277SNavdeep Parhar 			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
3265733b9277SNavdeep Parhar 			    __func__, cntxt_id, sc->sge.neq - 1);
3266733b9277SNavdeep Parhar 		}
326754e4ee71SNavdeep Parhar 		sc->sge.eqmap[cntxt_id] = (void *)fl;
326854e4ee71SNavdeep Parhar 
32694d6db4e0SNavdeep Parhar 		qid = fl->cntxt_id;
32704d6db4e0SNavdeep Parhar 		if (isset(&sc->doorbells, DOORBELL_UDB)) {
327190e7434aSNavdeep Parhar 			uint32_t s_qpp = sc->params.sge.eq_s_qpp;
32724d6db4e0SNavdeep Parhar 			uint32_t mask = (1 << s_qpp) - 1;
32734d6db4e0SNavdeep Parhar 			volatile uint8_t *udb;
32744d6db4e0SNavdeep Parhar 
32754d6db4e0SNavdeep Parhar 			udb = sc->udbs_base + UDBS_DB_OFFSET;
32764d6db4e0SNavdeep Parhar 			udb += (qid >> s_qpp) << PAGE_SHIFT;
32774d6db4e0SNavdeep Parhar 			qid &= mask;
32784d6db4e0SNavdeep Parhar 			if (qid < PAGE_SIZE / UDBS_SEG_SIZE) {
32794d6db4e0SNavdeep Parhar 				udb += qid << UDBS_SEG_SHIFT;
32804d6db4e0SNavdeep Parhar 				qid = 0;
32814d6db4e0SNavdeep Parhar 			}
32824d6db4e0SNavdeep Parhar 			fl->udb = (volatile void *)udb;
32834d6db4e0SNavdeep Parhar 		}
3284d1205d09SNavdeep Parhar 		fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db;
32854d6db4e0SNavdeep Parhar 
328654e4ee71SNavdeep Parhar 		FL_LOCK(fl);
3287733b9277SNavdeep Parhar 		/* Enough to make sure the SGE doesn't think it's starved */
3288733b9277SNavdeep Parhar 		refill_fl(sc, fl, fl->lowat);
328954e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
329054e4ee71SNavdeep Parhar 	}
329154e4ee71SNavdeep Parhar 
32928c0ca00bSNavdeep Parhar 	if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) {
3293ba41ec48SNavdeep Parhar 		uint32_t param, val;
3294ba41ec48SNavdeep Parhar 
3295ba41ec48SNavdeep Parhar 		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
3296ba41ec48SNavdeep Parhar 		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
3297ba41ec48SNavdeep Parhar 		    V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
329873cd9220SNavdeep Parhar 		if (cong == 0)
329973cd9220SNavdeep Parhar 			val = 1 << 19;
330073cd9220SNavdeep Parhar 		else {
330173cd9220SNavdeep Parhar 			val = 2 << 19;
330273cd9220SNavdeep Parhar 			for (i = 0; i < 4; i++) {
330373cd9220SNavdeep Parhar 				if (cong & (1 << i))
330473cd9220SNavdeep Parhar 					val |= 1 << (i << 2);
330573cd9220SNavdeep Parhar 			}
330673cd9220SNavdeep Parhar 		}
330773cd9220SNavdeep Parhar 
3308ba41ec48SNavdeep Parhar 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3309ba41ec48SNavdeep Parhar 		if (rc != 0) {
3310ba41ec48SNavdeep Parhar 			/* report error but carry on */
3311ba41ec48SNavdeep Parhar 			device_printf(sc->dev,
3312ba41ec48SNavdeep Parhar 			    "failed to set congestion manager context for "
3313ba41ec48SNavdeep Parhar 			    "ingress queue %d: %d\n", iq->cntxt_id, rc);
3314ba41ec48SNavdeep Parhar 		}
3315ba41ec48SNavdeep Parhar 	}
3316ba41ec48SNavdeep Parhar 
331754e4ee71SNavdeep Parhar 	/* Enable IQ interrupts */
3318733b9277SNavdeep Parhar 	atomic_store_rel_int(&iq->state, IQS_IDLE);
3319315048f2SJohn Baldwin 	t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
332054e4ee71SNavdeep Parhar 	    V_INGRESSQID(iq->cntxt_id));
332154e4ee71SNavdeep Parhar 
332254e4ee71SNavdeep Parhar 	return (0);
332354e4ee71SNavdeep Parhar }
332454e4ee71SNavdeep Parhar 
332554e4ee71SNavdeep Parhar static int
3326fe2ebb76SJohn Baldwin free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
332754e4ee71SNavdeep Parhar {
332838035ed6SNavdeep Parhar 	int rc;
332954e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
333054e4ee71SNavdeep Parhar 	device_t dev;
333154e4ee71SNavdeep Parhar 
333254e4ee71SNavdeep Parhar 	if (sc == NULL)
333354e4ee71SNavdeep Parhar 		return (0);	/* nothing to do */
333454e4ee71SNavdeep Parhar 
3335fe2ebb76SJohn Baldwin 	dev = vi ? vi->dev : sc->dev;
333654e4ee71SNavdeep Parhar 
333754e4ee71SNavdeep Parhar 	if (iq->flags & IQ_ALLOCATED) {
333854e4ee71SNavdeep Parhar 		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
333954e4ee71SNavdeep Parhar 		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
334054e4ee71SNavdeep Parhar 		    fl ? fl->cntxt_id : 0xffff, 0xffff);
334154e4ee71SNavdeep Parhar 		if (rc != 0) {
334254e4ee71SNavdeep Parhar 			device_printf(dev,
334354e4ee71SNavdeep Parhar 			    "failed to free queue %p: %d\n", iq, rc);
334454e4ee71SNavdeep Parhar 			return (rc);
334554e4ee71SNavdeep Parhar 		}
334654e4ee71SNavdeep Parhar 		iq->flags &= ~IQ_ALLOCATED;
334754e4ee71SNavdeep Parhar 	}
334854e4ee71SNavdeep Parhar 
334954e4ee71SNavdeep Parhar 	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
335054e4ee71SNavdeep Parhar 
335154e4ee71SNavdeep Parhar 	bzero(iq, sizeof(*iq));
335254e4ee71SNavdeep Parhar 
335354e4ee71SNavdeep Parhar 	if (fl) {
335454e4ee71SNavdeep Parhar 		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
335554e4ee71SNavdeep Parhar 		    fl->desc);
335654e4ee71SNavdeep Parhar 
3357aa9a5cc0SNavdeep Parhar 		if (fl->sdesc)
33581458bff9SNavdeep Parhar 			free_fl_sdesc(sc, fl);
33591458bff9SNavdeep Parhar 
336054e4ee71SNavdeep Parhar 		if (mtx_initialized(&fl->fl_lock))
336154e4ee71SNavdeep Parhar 			mtx_destroy(&fl->fl_lock);
336254e4ee71SNavdeep Parhar 
336354e4ee71SNavdeep Parhar 		bzero(fl, sizeof(*fl));
336454e4ee71SNavdeep Parhar 	}
336554e4ee71SNavdeep Parhar 
336654e4ee71SNavdeep Parhar 	return (0);
336754e4ee71SNavdeep Parhar }
336854e4ee71SNavdeep Parhar 
336938035ed6SNavdeep Parhar static void
3370348694daSNavdeep Parhar add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
3371348694daSNavdeep Parhar     struct sge_iq *iq)
3372348694daSNavdeep Parhar {
3373348694daSNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
3374348694daSNavdeep Parhar 
3375348694daSNavdeep Parhar 	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba,
3376348694daSNavdeep Parhar 	    "bus address of descriptor ring");
3377348694daSNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
3378348694daSNavdeep Parhar 	    iq->qsize * IQ_ESIZE, "descriptor ring size in bytes");
3379348694daSNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
33807029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &iq->abs_id, 0,
33817029da5cSPawel Biernacki 	    sysctl_uint16, "I", "absolute id of the queue");
3382348694daSNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
33837029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &iq->cntxt_id, 0,
33847029da5cSPawel Biernacki 	    sysctl_uint16, "I", "SGE context id of the queue");
3385348694daSNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
33867029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &iq->cidx, 0,
33877029da5cSPawel Biernacki 	    sysctl_uint16, "I", "consumer index");
3388348694daSNavdeep Parhar }
3389348694daSNavdeep Parhar 
3390348694daSNavdeep Parhar static void
3391aa93b99aSNavdeep Parhar add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
3392aa93b99aSNavdeep Parhar     struct sysctl_oid *oid, struct sge_fl *fl)
339338035ed6SNavdeep Parhar {
339438035ed6SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
339538035ed6SNavdeep Parhar 
33967029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl",
33977029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist");
339838035ed6SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
339938035ed6SNavdeep Parhar 
3400aa93b99aSNavdeep Parhar 	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
3401aa93b99aSNavdeep Parhar 	    &fl->ba, "bus address of descriptor ring");
3402aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
3403aa93b99aSNavdeep Parhar 	    fl->sidx * EQ_ESIZE + sc->params.sge.spg_len,
3404aa93b99aSNavdeep Parhar 	    "desc ring size in bytes");
340538035ed6SNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
34067029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &fl->cntxt_id, 0,
34077029da5cSPawel Biernacki 	    sysctl_uint16, "I", "SGE context id of the freelist");
3408e3207e19SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL,
3409e3207e19SNavdeep Parhar 	    fl_pad ? 1 : 0, "padding enabled");
3410e3207e19SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL,
3411e3207e19SNavdeep Parhar 	    fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled");
341238035ed6SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx,
341338035ed6SNavdeep Parhar 	    0, "consumer index");
341438035ed6SNavdeep Parhar 	if (fl->flags & FL_BUF_PACKING) {
341538035ed6SNavdeep Parhar 		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset",
341638035ed6SNavdeep Parhar 		    CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset");
341738035ed6SNavdeep Parhar 	}
341838035ed6SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx,
341938035ed6SNavdeep Parhar 	    0, "producer index");
342038035ed6SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated",
342138035ed6SNavdeep Parhar 	    CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated");
342238035ed6SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled",
342338035ed6SNavdeep Parhar 	    CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled");
342438035ed6SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled",
342538035ed6SNavdeep Parhar 	    CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)");
342638035ed6SNavdeep Parhar }
342738035ed6SNavdeep Parhar 
342854e4ee71SNavdeep Parhar static int
3429733b9277SNavdeep Parhar alloc_fwq(struct adapter *sc)
343054e4ee71SNavdeep Parhar {
3431733b9277SNavdeep Parhar 	int rc, intr_idx;
343256599263SNavdeep Parhar 	struct sge_iq *fwq = &sc->sge.fwq;
3433733b9277SNavdeep Parhar 	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
3434733b9277SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
343556599263SNavdeep Parhar 
3436b2daa9a9SNavdeep Parhar 	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
34376af45170SJohn Baldwin 	if (sc->flags & IS_VF)
34386af45170SJohn Baldwin 		intr_idx = 0;
34394535e804SNavdeep Parhar 	else
3440733b9277SNavdeep Parhar 		intr_idx = sc->intr_count > 1 ? 1 : 0;
3441fe2ebb76SJohn Baldwin 	rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1);
3442733b9277SNavdeep Parhar 	if (rc != 0) {
3443733b9277SNavdeep Parhar 		device_printf(sc->dev,
3444733b9277SNavdeep Parhar 		    "failed to create firmware event queue: %d\n", rc);
344556599263SNavdeep Parhar 		return (rc);
3446733b9277SNavdeep Parhar 	}
344756599263SNavdeep Parhar 
34487029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq",
34497029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue");
3450348694daSNavdeep Parhar 	add_iq_sysctls(&sc->ctx, oid, fwq);
345156599263SNavdeep Parhar 
3452733b9277SNavdeep Parhar 	return (0);
3453733b9277SNavdeep Parhar }
3454733b9277SNavdeep Parhar 
3455733b9277SNavdeep Parhar static int
3456733b9277SNavdeep Parhar free_fwq(struct adapter *sc)
3457733b9277SNavdeep Parhar {
3458733b9277SNavdeep Parhar 	return free_iq_fl(NULL, &sc->sge.fwq, NULL);
3459733b9277SNavdeep Parhar }
3460733b9277SNavdeep Parhar 
3461733b9277SNavdeep Parhar static int
346237310a98SNavdeep Parhar alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx,
346337310a98SNavdeep Parhar     struct sysctl_oid *oid)
3464733b9277SNavdeep Parhar {
3465733b9277SNavdeep Parhar 	int rc;
3466733b9277SNavdeep Parhar 	char name[16];
346737310a98SNavdeep Parhar 	struct sysctl_oid_list *children;
3468733b9277SNavdeep Parhar 
346937310a98SNavdeep Parhar 	snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev),
347037310a98SNavdeep Parhar 	    idx);
347137310a98SNavdeep Parhar 	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan,
3472733b9277SNavdeep Parhar 	    sc->sge.fwq.cntxt_id, name);
347337310a98SNavdeep Parhar 
347437310a98SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
347537310a98SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
34767029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name,
34777029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ctrl queue");
347837310a98SNavdeep Parhar 	rc = alloc_wrq(sc, NULL, ctrlq, oid);
347937310a98SNavdeep Parhar 
348056599263SNavdeep Parhar 	return (rc);
348156599263SNavdeep Parhar }
348256599263SNavdeep Parhar 
34831605bac6SNavdeep Parhar int
34849af71ab3SNavdeep Parhar tnl_cong(struct port_info *pi, int drop)
34859fb8886bSNavdeep Parhar {
34869fb8886bSNavdeep Parhar 
34879af71ab3SNavdeep Parhar 	if (drop == -1)
34889fb8886bSNavdeep Parhar 		return (-1);
34899af71ab3SNavdeep Parhar 	else if (drop == 1)
34909fb8886bSNavdeep Parhar 		return (0);
34919fb8886bSNavdeep Parhar 	else
34925bcae8ddSNavdeep Parhar 		return (pi->rx_e_chan_map);
34939fb8886bSNavdeep Parhar }
34949fb8886bSNavdeep Parhar 
3495733b9277SNavdeep Parhar static int
3496fe2ebb76SJohn Baldwin alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
3497733b9277SNavdeep Parhar     struct sysctl_oid *oid)
349854e4ee71SNavdeep Parhar {
349954e4ee71SNavdeep Parhar 	int rc;
3500*7c228be3SNavdeep Parhar 	struct adapter *sc = vi->adapter;
350154e4ee71SNavdeep Parhar 	struct sysctl_oid_list *children;
350254e4ee71SNavdeep Parhar 	char name[16];
350354e4ee71SNavdeep Parhar 
3504fe2ebb76SJohn Baldwin 	rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx,
3505fe2ebb76SJohn Baldwin 	    tnl_cong(vi->pi, cong_drop));
350654e4ee71SNavdeep Parhar 	if (rc != 0)
350754e4ee71SNavdeep Parhar 		return (rc);
350854e4ee71SNavdeep Parhar 
3509ec55567cSJohn Baldwin 	if (idx == 0)
3510ec55567cSJohn Baldwin 		sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id;
3511ec55567cSJohn Baldwin 	else
3512ec55567cSJohn Baldwin 		KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id,
3513ec55567cSJohn Baldwin 		    ("iq_base mismatch"));
3514ec55567cSJohn Baldwin 	KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF,
3515ec55567cSJohn Baldwin 	    ("PF with non-zero iq_base"));
3516ec55567cSJohn Baldwin 
35174d6db4e0SNavdeep Parhar 	/*
35184d6db4e0SNavdeep Parhar 	 * The freelist is just barely above the starvation threshold right now,
35194d6db4e0SNavdeep Parhar 	 * fill it up a bit more.
35204d6db4e0SNavdeep Parhar 	 */
35219b4d7b4eSNavdeep Parhar 	FL_LOCK(&rxq->fl);
3522ec55567cSJohn Baldwin 	refill_fl(sc, &rxq->fl, 128);
35239b4d7b4eSNavdeep Parhar 	FL_UNLOCK(&rxq->fl);
35249b4d7b4eSNavdeep Parhar 
3525a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
352646f48ee5SNavdeep Parhar 	rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs);
352754e4ee71SNavdeep Parhar 	if (rc != 0)
352854e4ee71SNavdeep Parhar 		return (rc);
352946f48ee5SNavdeep Parhar 	MPASS(rxq->lro.ifp == vi->ifp);	/* also indicates LRO init'ed */
353054e4ee71SNavdeep Parhar 
3531fe2ebb76SJohn Baldwin 	if (vi->ifp->if_capenable & IFCAP_LRO)
3532733b9277SNavdeep Parhar 		rxq->iq.flags |= IQ_LRO_ENABLED;
353354e4ee71SNavdeep Parhar #endif
35349877f735SNavdeep Parhar 	if (vi->ifp->if_capenable & IFCAP_HWRXTSTMP)
35359877f735SNavdeep Parhar 		rxq->iq.flags |= IQ_RX_TIMESTAMP;
3536fe2ebb76SJohn Baldwin 	rxq->ifp = vi->ifp;
353754e4ee71SNavdeep Parhar 
3538733b9277SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
353954e4ee71SNavdeep Parhar 
354054e4ee71SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
35417029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
35427029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue");
354354e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
354454e4ee71SNavdeep Parhar 
3545348694daSNavdeep Parhar 	add_iq_sysctls(&vi->ctx, oid, &rxq->iq);
3546a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
3547e936121dSHans Petter Selasky 	SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
354854e4ee71SNavdeep Parhar 	    &rxq->lro.lro_queued, 0, NULL);
3549e936121dSHans Petter Selasky 	SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
355054e4ee71SNavdeep Parhar 	    &rxq->lro.lro_flushed, 0, NULL);
35517d29df59SNavdeep Parhar #endif
3552fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
355354e4ee71SNavdeep Parhar 	    &rxq->rxcsum, "# of times hardware assisted with checksum");
3554fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction",
355554e4ee71SNavdeep Parhar 	    CTLFLAG_RD, &rxq->vlan_extraction,
355654e4ee71SNavdeep Parhar 	    "# of times hardware extracted 802.1Q tag");
355754e4ee71SNavdeep Parhar 
3558aa93b99aSNavdeep Parhar 	add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl);
355959bc8ce0SNavdeep Parhar 
356054e4ee71SNavdeep Parhar 	return (rc);
356154e4ee71SNavdeep Parhar }
356254e4ee71SNavdeep Parhar 
356354e4ee71SNavdeep Parhar static int
3564fe2ebb76SJohn Baldwin free_rxq(struct vi_info *vi, struct sge_rxq *rxq)
356554e4ee71SNavdeep Parhar {
356654e4ee71SNavdeep Parhar 	int rc;
356754e4ee71SNavdeep Parhar 
3568a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6)
356954e4ee71SNavdeep Parhar 	if (rxq->lro.ifp) {
357054e4ee71SNavdeep Parhar 		tcp_lro_free(&rxq->lro);
357154e4ee71SNavdeep Parhar 		rxq->lro.ifp = NULL;
357254e4ee71SNavdeep Parhar 	}
357354e4ee71SNavdeep Parhar #endif
357454e4ee71SNavdeep Parhar 
3575fe2ebb76SJohn Baldwin 	rc = free_iq_fl(vi, &rxq->iq, &rxq->fl);
357654e4ee71SNavdeep Parhar 	if (rc == 0)
357754e4ee71SNavdeep Parhar 		bzero(rxq, sizeof(*rxq));
357854e4ee71SNavdeep Parhar 
357954e4ee71SNavdeep Parhar 	return (rc);
358054e4ee71SNavdeep Parhar }
358154e4ee71SNavdeep Parhar 
358209fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD
358354e4ee71SNavdeep Parhar static int
3584fe2ebb76SJohn Baldwin alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq,
3585733b9277SNavdeep Parhar     int intr_idx, int idx, struct sysctl_oid *oid)
3586f7dfe243SNavdeep Parhar {
3587aa93b99aSNavdeep Parhar 	struct port_info *pi = vi->pi;
3588733b9277SNavdeep Parhar 	int rc;
3589f7dfe243SNavdeep Parhar 	struct sysctl_oid_list *children;
3590733b9277SNavdeep Parhar 	char name[16];
3591f7dfe243SNavdeep Parhar 
35925bcae8ddSNavdeep Parhar 	rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 0);
3593733b9277SNavdeep Parhar 	if (rc != 0)
3594f7dfe243SNavdeep Parhar 		return (rc);
3595f7dfe243SNavdeep Parhar 
3596733b9277SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
3597733b9277SNavdeep Parhar 
3598733b9277SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
35997029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
36007029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue");
3601348694daSNavdeep Parhar 	add_iq_sysctls(&vi->ctx, oid, &ofld_rxq->iq);
3602aa93b99aSNavdeep Parhar 	add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl);
3603733b9277SNavdeep Parhar 
3604733b9277SNavdeep Parhar 	return (rc);
3605733b9277SNavdeep Parhar }
3606733b9277SNavdeep Parhar 
3607733b9277SNavdeep Parhar static int
3608fe2ebb76SJohn Baldwin free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
3609733b9277SNavdeep Parhar {
3610733b9277SNavdeep Parhar 	int rc;
3611733b9277SNavdeep Parhar 
3612fe2ebb76SJohn Baldwin 	rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl);
3613733b9277SNavdeep Parhar 	if (rc == 0)
3614733b9277SNavdeep Parhar 		bzero(ofld_rxq, sizeof(*ofld_rxq));
3615733b9277SNavdeep Parhar 
3616733b9277SNavdeep Parhar 	return (rc);
3617733b9277SNavdeep Parhar }
3618733b9277SNavdeep Parhar #endif
3619733b9277SNavdeep Parhar 
3620298d969cSNavdeep Parhar #ifdef DEV_NETMAP
/*
 * alloc_nm_rxq: set up the software side of a netmap rx queue - the
 * DMA rings for the ingress queue and the freelist, the initial
 * software indices, and the sysctl nodes under the parent "oid".  No
 * hardware context is created here (iq_cntxt_id is parked at
 * INVALID_NM_RXQ_CNTXT_ID); that happens later, when netmap mode is
 * enabled.
 *
 * Returns 0 on success or the error from alloc_ring.  NOTE(review): if
 * the second alloc_ring fails the first ring is not freed here -
 * presumably the caller unwinds via free_nm_rxq; confirm against the
 * caller.
 */
3621298d969cSNavdeep Parhar static int
3622fe2ebb76SJohn Baldwin alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
3623298d969cSNavdeep Parhar     int idx, struct sysctl_oid *oid)
3624298d969cSNavdeep Parhar {
3625298d969cSNavdeep Parhar 	int rc;
3626298d969cSNavdeep Parhar 	struct sysctl_oid_list *children;
3627298d969cSNavdeep Parhar 	struct sysctl_ctx_list *ctx;
3628298d969cSNavdeep Parhar 	char name[16];
3629298d969cSNavdeep Parhar 	size_t len;
3630*7c228be3SNavdeep Parhar 	struct adapter *sc = vi->adapter;
3631fe2ebb76SJohn Baldwin 	struct netmap_adapter *na = NA(vi->ifp);
3632298d969cSNavdeep Parhar 
3633298d969cSNavdeep Parhar 	MPASS(na != NULL);
3634298d969cSNavdeep Parhar 
	/* Descriptor ring for the ingress (rx event) queue. */
3635fe2ebb76SJohn Baldwin 	len = vi->qsize_rxq * IQ_ESIZE;
3636298d969cSNavdeep Parhar 	rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map,
3637298d969cSNavdeep Parhar 	    &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc);
3638298d969cSNavdeep Parhar 	if (rc != 0)
3639298d969cSNavdeep Parhar 		return (rc);
3640298d969cSNavdeep Parhar 
	/* Freelist ring: one slot per netmap rx desc, plus the status page. */
364190e7434aSNavdeep Parhar 	len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len;
3642298d969cSNavdeep Parhar 	rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map,
3643298d969cSNavdeep Parhar 	    &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc);
3644298d969cSNavdeep Parhar 	if (rc != 0)
3645298d969cSNavdeep Parhar 		return (rc);
3646298d969cSNavdeep Parhar 
3647fe2ebb76SJohn Baldwin 	nm_rxq->vi = vi;
3648298d969cSNavdeep Parhar 	nm_rxq->nid = idx;
3649298d969cSNavdeep Parhar 	nm_rxq->iq_cidx = 0;
	/* Usable iq slots exclude the status page at the end of the ring. */
365090e7434aSNavdeep Parhar 	nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE;
3651298d969cSNavdeep Parhar 	nm_rxq->iq_gen = F_RSPD_GEN;
3652298d969cSNavdeep Parhar 	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
3653298d969cSNavdeep Parhar 	nm_rxq->fl_sidx = na->num_rx_desc;
3654aa301e5fSNavdeep Parhar 	nm_rxq->fl_sidx2 = nm_rxq->fl_sidx;	/* copy for rxsync cacheline */
3655298d969cSNavdeep Parhar 	nm_rxq->intr_idx = intr_idx;
	/* Hardware context is created only when netmap mode is turned on. */
3656a8c4fcb9SNavdeep Parhar 	nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;
3657298d969cSNavdeep Parhar 
3658fe2ebb76SJohn Baldwin 	ctx = &vi->ctx;
3659298d969cSNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
3660298d969cSNavdeep Parhar 
	/* Per-queue sysctl node named after the queue index. */
3661298d969cSNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
36627029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name,
36637029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue");
3664298d969cSNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
3665298d969cSNavdeep Parhar 
3666298d969cSNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
36677029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->iq_abs_id,
36687029da5cSPawel Biernacki 	    0, sysctl_uint16, "I", "absolute id of the queue");
3669298d969cSNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
36707029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->iq_cntxt_id,
36717029da5cSPawel Biernacki 	    0, sysctl_uint16, "I", "SGE context id of the queue");
3672298d969cSNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
36737029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->iq_cidx, 0,
36747029da5cSPawel Biernacki 	    sysctl_uint16, "I", "consumer index");
3675298d969cSNavdeep Parhar 
	/* "fl" child node for the freelist's own counters. */
3676298d969cSNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
36777029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl",
36787029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist");
3679298d969cSNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
3680298d969cSNavdeep Parhar 
3681298d969cSNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
36827029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->fl_cntxt_id,
36837029da5cSPawel Biernacki 	    0, sysctl_uint16, "I", "SGE context id of the freelist");
3684298d969cSNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
3685298d969cSNavdeep Parhar 	    &nm_rxq->fl_cidx, 0, "consumer index");
3686298d969cSNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
3687298d969cSNavdeep Parhar 	    &nm_rxq->fl_pidx, 0, "producer index");
3688298d969cSNavdeep Parhar 
3689298d969cSNavdeep Parhar 	return (rc);
3690298d969cSNavdeep Parhar }
3691298d969cSNavdeep Parhar 
3692298d969cSNavdeep Parhar 
3693298d969cSNavdeep Parhar static int
3694fe2ebb76SJohn Baldwin free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
3695298d969cSNavdeep Parhar {
3696*7c228be3SNavdeep Parhar 	struct adapter *sc = vi->adapter;
3697298d969cSNavdeep Parhar 
36980fa7560dSNavdeep Parhar 	if (vi->flags & VI_INIT_DONE)
3699a8c4fcb9SNavdeep Parhar 		MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID);
37000fa7560dSNavdeep Parhar 	else
37010fa7560dSNavdeep Parhar 		MPASS(nm_rxq->iq_cntxt_id == 0);
3702a8c4fcb9SNavdeep Parhar 
3703298d969cSNavdeep Parhar 	free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba,
3704298d969cSNavdeep Parhar 	    nm_rxq->iq_desc);
3705298d969cSNavdeep Parhar 	free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba,
3706298d969cSNavdeep Parhar 	    nm_rxq->fl_desc);
3707298d969cSNavdeep Parhar 
3708298d969cSNavdeep Parhar 	return (0);
3709298d969cSNavdeep Parhar }
3710298d969cSNavdeep Parhar 
/*
 * alloc_nm_txq: set up the software side of a netmap tx queue - the
 * DMA descriptor ring, the prebuilt CPL control word, the work-request
 * opcode, and the sysctl nodes.  The hardware context is not created
 * here (cntxt_id starts at INVALID_NM_TXQ_CNTXT_ID).
 *
 * Returns 0 on success or the error from alloc_ring.
 */
3711298d969cSNavdeep Parhar static int
3712fe2ebb76SJohn Baldwin alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
3713298d969cSNavdeep Parhar     struct sysctl_oid *oid)
3714298d969cSNavdeep Parhar {
3715298d969cSNavdeep Parhar 	int rc;
3716298d969cSNavdeep Parhar 	size_t len;
3717fe2ebb76SJohn Baldwin 	struct port_info *pi = vi->pi;
3718298d969cSNavdeep Parhar 	struct adapter *sc = pi->adapter;
3719fe2ebb76SJohn Baldwin 	struct netmap_adapter *na = NA(vi->ifp);
3720298d969cSNavdeep Parhar 	char name[16];
3721298d969cSNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
3722298d969cSNavdeep Parhar 
	/* Ring sized by netmap's tx ring, plus room for the status page. */
372390e7434aSNavdeep Parhar 	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
3724298d969cSNavdeep Parhar 	rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
3725298d969cSNavdeep Parhar 	    &nm_txq->ba, (void **)&nm_txq->desc);
3726298d969cSNavdeep Parhar 	if (rc)
3727298d969cSNavdeep Parhar 		return (rc);
3728298d969cSNavdeep Parhar 
3729298d969cSNavdeep Parhar 	nm_txq->pidx = nm_txq->cidx = 0;
3730298d969cSNavdeep Parhar 	nm_txq->sidx = na->num_tx_desc;
3731298d969cSNavdeep Parhar 	nm_txq->nid = idx;
3732298d969cSNavdeep Parhar 	nm_txq->iqidx = iqidx;
	/* CPL_TX_PKT control word is precomputed once for the tx fast path. */
3733298d969cSNavdeep Parhar 	nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
3734edb518f4SNavdeep Parhar 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
3735edb518f4SNavdeep Parhar 	    V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
	/* Firmware 1.24.11.0 and later takes the PKTS2 work request. */
3736aa7bdbc0SNavdeep Parhar 	if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0))
3737aa7bdbc0SNavdeep Parhar 		nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
3738aa7bdbc0SNavdeep Parhar 	else
3739aa7bdbc0SNavdeep Parhar 		nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
	/* Hardware context is created only when netmap mode is turned on. */
3740a8c4fcb9SNavdeep Parhar 	nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
3741298d969cSNavdeep Parhar 
3742298d969cSNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
37437029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
37447029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queue");
3745298d969cSNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
3746298d969cSNavdeep Parhar 
3747fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
3748298d969cSNavdeep Parhar 	    &nm_txq->cntxt_id, 0, "SGE context id of the queue");
3749fe2ebb76SJohn Baldwin 	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
37507029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_txq->cidx, 0,
37517029da5cSPawel Biernacki 	    sysctl_uint16, "I", "consumer index");
3752fe2ebb76SJohn Baldwin 	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
37537029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_txq->pidx, 0,
37547029da5cSPawel Biernacki 	    sysctl_uint16, "I", "producer index");
3755298d969cSNavdeep Parhar 
3756298d969cSNavdeep Parhar 	return (rc);
3757298d969cSNavdeep Parhar }
3758298d969cSNavdeep Parhar 
3759298d969cSNavdeep Parhar static int
3760fe2ebb76SJohn Baldwin free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
3761298d969cSNavdeep Parhar {
3762*7c228be3SNavdeep Parhar 	struct adapter *sc = vi->adapter;
3763298d969cSNavdeep Parhar 
37640fa7560dSNavdeep Parhar 	if (vi->flags & VI_INIT_DONE)
3765a8c4fcb9SNavdeep Parhar 		MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID);
37660fa7560dSNavdeep Parhar 	else
37670fa7560dSNavdeep Parhar 		MPASS(nm_txq->cntxt_id == 0);
3768a8c4fcb9SNavdeep Parhar 
3769298d969cSNavdeep Parhar 	free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
3770298d969cSNavdeep Parhar 	    nm_txq->desc);
3771298d969cSNavdeep Parhar 
3772298d969cSNavdeep Parhar 	return (0);
3773298d969cSNavdeep Parhar }
3774298d969cSNavdeep Parhar #endif
3775298d969cSNavdeep Parhar 
3776ddf09ad6SNavdeep Parhar /*
3777ddf09ad6SNavdeep Parhar  * Returns a reasonable automatic cidx flush threshold for a given queue size.
3778ddf09ad6SNavdeep Parhar  */
3779ddf09ad6SNavdeep Parhar static u_int
3780ddf09ad6SNavdeep Parhar qsize_to_fthresh(int qsize)
3781ddf09ad6SNavdeep Parhar {
3782ddf09ad6SNavdeep Parhar 	u_int fthresh;
3783ddf09ad6SNavdeep Parhar 
3784ddf09ad6SNavdeep Parhar 	while (!powerof2(qsize))
3785ddf09ad6SNavdeep Parhar 		qsize++;
3786ddf09ad6SNavdeep Parhar 	fthresh = ilog2(qsize);
3787ddf09ad6SNavdeep Parhar 	if (fthresh > X_CIDXFLUSHTHRESH_128)
3788ddf09ad6SNavdeep Parhar 		fthresh = X_CIDXFLUSHTHRESH_128;
3789ddf09ad6SNavdeep Parhar 
3790ddf09ad6SNavdeep Parhar 	return (fthresh);
3791ddf09ad6SNavdeep Parhar }
3792ddf09ad6SNavdeep Parhar 
/*
 * ctrl_eq_alloc: ask the firmware (FW_EQ_CTRL_CMD) to create and start
 * a control egress queue backed by the ring already allocated at
 * eq->ba.  On success the eq is marked EQ_ALLOCATED and registered in
 * sc->sge.eqmap so events can be routed back to it.
 *
 * Returns 0 on success or the (positive) mailbox error.
 */
3793733b9277SNavdeep Parhar static int
3794733b9277SNavdeep Parhar ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
3795733b9277SNavdeep Parhar {
3796733b9277SNavdeep Parhar 	int rc, cntxt_id;
3797733b9277SNavdeep Parhar 	struct fw_eq_ctrl_cmd c;
	/* Size advertised to the firmware includes the status page. */
379890e7434aSNavdeep Parhar 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
3799f7dfe243SNavdeep Parhar 
3800f7dfe243SNavdeep Parhar 	bzero(&c, sizeof(c));
3801f7dfe243SNavdeep Parhar 
	/* Allocate + start on behalf of PF sc->pf, VF 0. */
3802f7dfe243SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
3803f7dfe243SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
3804f7dfe243SNavdeep Parhar 	    V_FW_EQ_CTRL_CMD_VFN(0));
3805f7dfe243SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
3806f7dfe243SNavdeep Parhar 	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
38077951040fSNavdeep Parhar 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
3808f7dfe243SNavdeep Parhar 	c.physeqid_pkd = htobe32(0);
	/* Flow control via the status page; completions go to eq->iqid. */
3809f7dfe243SNavdeep Parhar 	c.fetchszm_to_iqid =
381087b027baSNavdeep Parhar 	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
3811733b9277SNavdeep Parhar 		V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
381256599263SNavdeep Parhar 		F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
	/* FBMIN differs by chip; flush threshold is derived from qsize. */
3813f7dfe243SNavdeep Parhar 	c.dcaen_to_eqsize =
3814adb0cd84SNavdeep Parhar 	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
3815adb0cd84SNavdeep Parhar 		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
3816f7dfe243SNavdeep Parhar 		V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
3817ddf09ad6SNavdeep Parhar 		V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) |
38187951040fSNavdeep Parhar 		V_FW_EQ_CTRL_CMD_EQSIZE(qsize));
3819f7dfe243SNavdeep Parhar 	c.eqaddr = htobe64(eq->ba);
3820f7dfe243SNavdeep Parhar 
3821f7dfe243SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
3822f7dfe243SNavdeep Parhar 	if (rc != 0) {
3823f7dfe243SNavdeep Parhar 		device_printf(sc->dev,
3824733b9277SNavdeep Parhar 		    "failed to create control queue %d: %d\n", eq->tx_chan, rc);
3825f7dfe243SNavdeep Parhar 		return (rc);
3826f7dfe243SNavdeep Parhar 	}
3827733b9277SNavdeep Parhar 	eq->flags |= EQ_ALLOCATED;
3828f7dfe243SNavdeep Parhar 
	/* Record the fw-assigned id and index the eq in the adapter's map. */
3829f7dfe243SNavdeep Parhar 	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
3830f7dfe243SNavdeep Parhar 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
3831733b9277SNavdeep Parhar 	if (cntxt_id >= sc->sge.neq)
3832733b9277SNavdeep Parhar 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
3833733b9277SNavdeep Parhar 		cntxt_id, sc->sge.neq - 1);
3834f7dfe243SNavdeep Parhar 	sc->sge.eqmap[cntxt_id] = eq;
3835f7dfe243SNavdeep Parhar 
3836f7dfe243SNavdeep Parhar 	return (rc);
3837f7dfe243SNavdeep Parhar }
3838f7dfe243SNavdeep Parhar 
/*
 * eth_eq_alloc: create an Ethernet (NIC tx) egress queue via
 * FW_EQ_ETH_CMD.  Unlike the control/offload variants this one uses no
 * host flow control (HOSTFCMODE_NONE) and enables automatic equipment
 * updates (AUTOEQUIQE/AUTOEQUEQE); it also records the queue's absolute
 * (physical) id in eq->abs_id.
 *
 * Returns 0 on success or the (positive) mailbox error.
 */
3839f7dfe243SNavdeep Parhar static int
3840fe2ebb76SJohn Baldwin eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
384154e4ee71SNavdeep Parhar {
384254e4ee71SNavdeep Parhar 	int rc, cntxt_id;
384354e4ee71SNavdeep Parhar 	struct fw_eq_eth_cmd c;
	/* Size advertised to the firmware includes the status page. */
384490e7434aSNavdeep Parhar 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
384554e4ee71SNavdeep Parhar 
384654e4ee71SNavdeep Parhar 	bzero(&c, sizeof(c));
384754e4ee71SNavdeep Parhar 
384854e4ee71SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
384954e4ee71SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
385054e4ee71SNavdeep Parhar 	    V_FW_EQ_ETH_CMD_VFN(0));
385154e4ee71SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
385254e4ee71SNavdeep Parhar 	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
38537951040fSNavdeep Parhar 	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
3854fe2ebb76SJohn Baldwin 	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
385554e4ee71SNavdeep Parhar 	c.fetchszm_to_iqid =
38567951040fSNavdeep Parhar 	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
3857733b9277SNavdeep Parhar 		V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
3858aa2457e1SNavdeep Parhar 		V_FW_EQ_ETH_CMD_IQID(eq->iqid));
	/* FBMIN differs by chip (T4/T5 vs T6). */
3859adb0cd84SNavdeep Parhar 	c.dcaen_to_eqsize =
3860adb0cd84SNavdeep Parhar 	    htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
3861adb0cd84SNavdeep Parhar 		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
386254e4ee71SNavdeep Parhar 		V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
38637951040fSNavdeep Parhar 		V_FW_EQ_ETH_CMD_EQSIZE(qsize));
386454e4ee71SNavdeep Parhar 	c.eqaddr = htobe64(eq->ba);
386554e4ee71SNavdeep Parhar 
386654e4ee71SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
386754e4ee71SNavdeep Parhar 	if (rc != 0) {
3868fe2ebb76SJohn Baldwin 		device_printf(vi->dev,
3869733b9277SNavdeep Parhar 		    "failed to create Ethernet egress queue: %d\n", rc);
3870733b9277SNavdeep Parhar 		return (rc);
3871733b9277SNavdeep Parhar 	}
3872733b9277SNavdeep Parhar 	eq->flags |= EQ_ALLOCATED;
3873733b9277SNavdeep Parhar 
	/* Record fw-assigned ids and index the eq in the adapter's map. */
3874733b9277SNavdeep Parhar 	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
3875ec55567cSJohn Baldwin 	eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
3876733b9277SNavdeep Parhar 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
3877733b9277SNavdeep Parhar 	if (cntxt_id >= sc->sge.neq)
3878733b9277SNavdeep Parhar 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
3879733b9277SNavdeep Parhar 		cntxt_id, sc->sge.neq - 1);
3880733b9277SNavdeep Parhar 	sc->sge.eqmap[cntxt_id] = eq;
3881733b9277SNavdeep Parhar 
388254e4ee71SNavdeep Parhar 	return (rc);
388354e4ee71SNavdeep Parhar }
388454e4ee71SNavdeep Parhar 
3885eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
/*
 * ofld_eq_alloc: create an offload egress queue via FW_EQ_OFLD_CMD.
 * Like the control queue it uses status-page host flow control and a
 * cidx flush threshold derived from the queue size.  On success the eq
 * is marked EQ_ALLOCATED and registered in sc->sge.eqmap.
 *
 * Returns 0 on success or the (positive) mailbox error.
 */
3886733b9277SNavdeep Parhar static int
3887fe2ebb76SJohn Baldwin ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
3888733b9277SNavdeep Parhar {
3889733b9277SNavdeep Parhar 	int rc, cntxt_id;
3890733b9277SNavdeep Parhar 	struct fw_eq_ofld_cmd c;
	/* Size advertised to the firmware includes the status page. */
389190e7434aSNavdeep Parhar 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
389254e4ee71SNavdeep Parhar 
3893733b9277SNavdeep Parhar 	bzero(&c, sizeof(c));
3894733b9277SNavdeep Parhar 
3895733b9277SNavdeep Parhar 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
3896733b9277SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
3897733b9277SNavdeep Parhar 	    V_FW_EQ_OFLD_CMD_VFN(0));
3898733b9277SNavdeep Parhar 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
3899733b9277SNavdeep Parhar 	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
3900733b9277SNavdeep Parhar 	c.fetchszm_to_iqid =
3901ddf09ad6SNavdeep Parhar 		htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
3902733b9277SNavdeep Parhar 		    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
3903733b9277SNavdeep Parhar 		    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
	/* FBMIN differs by chip; flush threshold is derived from qsize. */
3904733b9277SNavdeep Parhar 	c.dcaen_to_eqsize =
3905adb0cd84SNavdeep Parhar 	    htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
3906adb0cd84SNavdeep Parhar 		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
3907733b9277SNavdeep Parhar 		V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
3908ddf09ad6SNavdeep Parhar 		V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) |
39097951040fSNavdeep Parhar 		V_FW_EQ_OFLD_CMD_EQSIZE(qsize));
3910733b9277SNavdeep Parhar 	c.eqaddr = htobe64(eq->ba);
3911733b9277SNavdeep Parhar 
3912733b9277SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
3913733b9277SNavdeep Parhar 	if (rc != 0) {
3914fe2ebb76SJohn Baldwin 		device_printf(vi->dev,
3915733b9277SNavdeep Parhar 		    "failed to create egress queue for TCP offload: %d\n", rc);
3916733b9277SNavdeep Parhar 		return (rc);
3917733b9277SNavdeep Parhar 	}
3918733b9277SNavdeep Parhar 	eq->flags |= EQ_ALLOCATED;
3919733b9277SNavdeep Parhar 
	/* Record the fw-assigned id and index the eq in the adapter's map. */
3920733b9277SNavdeep Parhar 	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
392154e4ee71SNavdeep Parhar 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
3922733b9277SNavdeep Parhar 	if (cntxt_id >= sc->sge.neq)
3923733b9277SNavdeep Parhar 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
3924733b9277SNavdeep Parhar 		cntxt_id, sc->sge.neq - 1);
392554e4ee71SNavdeep Parhar 	sc->sge.eqmap[cntxt_id] = eq;
392654e4ee71SNavdeep Parhar 
3927733b9277SNavdeep Parhar 	return (rc);
3928733b9277SNavdeep Parhar }
3929733b9277SNavdeep Parhar #endif
3930733b9277SNavdeep Parhar 
/*
 * alloc_eq: common egress queue setup.  Initializes the eq lock,
 * allocates the descriptor ring (queue slots + status page), resets the
 * software indices, and dispatches to the type-specific firmware
 * allocation routine selected by EQ_TYPEMASK.  Finally, if a user
 * doorbell mechanism is enabled, computes the UDB address for this
 * queue; write-combined doorbell writes (DOORBELL_WCWR) are disabled
 * when the queue's doorbell segment falls outside its mapped page.
 *
 * "vi" may be NULL for adapter-wide (non-Ethernet) queues; it is only
 * passed through to the eth/ofld allocation routines.
 */
3931733b9277SNavdeep Parhar static int
3932fe2ebb76SJohn Baldwin alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
3933733b9277SNavdeep Parhar {
39347951040fSNavdeep Parhar 	int rc, qsize;
3935733b9277SNavdeep Parhar 	size_t len;
3936733b9277SNavdeep Parhar 
3937733b9277SNavdeep Parhar 	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
3938733b9277SNavdeep Parhar 
	/* The ring holds the usable slots plus the trailing status page. */
393990e7434aSNavdeep Parhar 	qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
39407951040fSNavdeep Parhar 	len = qsize * EQ_ESIZE;
3941733b9277SNavdeep Parhar 	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
3942733b9277SNavdeep Parhar 	    &eq->ba, (void **)&eq->desc);
3943733b9277SNavdeep Parhar 	if (rc)
3944733b9277SNavdeep Parhar 		return (rc);
3945733b9277SNavdeep Parhar 
3946ddf09ad6SNavdeep Parhar 	eq->pidx = eq->cidx = eq->dbidx = 0;
3947ddf09ad6SNavdeep Parhar 	/* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. */
3948ddf09ad6SNavdeep Parhar 	eq->equeqidx = 0;
3949d14b0ac1SNavdeep Parhar 	eq->doorbells = sc->doorbells;
3950733b9277SNavdeep Parhar 
	/* Type-specific firmware allocation. */
3951733b9277SNavdeep Parhar 	switch (eq->flags & EQ_TYPEMASK) {
3952733b9277SNavdeep Parhar 	case EQ_CTRL:
3953733b9277SNavdeep Parhar 		rc = ctrl_eq_alloc(sc, eq);
3954733b9277SNavdeep Parhar 		break;
3955733b9277SNavdeep Parhar 
3956733b9277SNavdeep Parhar 	case EQ_ETH:
3957fe2ebb76SJohn Baldwin 		rc = eth_eq_alloc(sc, vi, eq);
3958733b9277SNavdeep Parhar 		break;
3959733b9277SNavdeep Parhar 
3960eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
3961733b9277SNavdeep Parhar 	case EQ_OFLD:
3962fe2ebb76SJohn Baldwin 		rc = ofld_eq_alloc(sc, vi, eq);
3963733b9277SNavdeep Parhar 		break;
3964733b9277SNavdeep Parhar #endif
3965733b9277SNavdeep Parhar 
3966733b9277SNavdeep Parhar 	default:
3967733b9277SNavdeep Parhar 		panic("%s: invalid eq type %d.", __func__,
3968733b9277SNavdeep Parhar 		    eq->flags & EQ_TYPEMASK);
3969733b9277SNavdeep Parhar 	}
3970733b9277SNavdeep Parhar 	if (rc != 0) {
3971733b9277SNavdeep Parhar 		device_printf(sc->dev,
3972c086e3d1SNavdeep Parhar 		    "failed to allocate egress queue(%d): %d\n",
3973733b9277SNavdeep Parhar 		    eq->flags & EQ_TYPEMASK, rc);
3974733b9277SNavdeep Parhar 	}
3975733b9277SNavdeep Parhar 
	/* Locate this queue's user doorbell, if user doorbells are in use. */
3976d14b0ac1SNavdeep Parhar 	if (isset(&eq->doorbells, DOORBELL_UDB) ||
3977d14b0ac1SNavdeep Parhar 	    isset(&eq->doorbells, DOORBELL_UDBWC) ||
397877ad3c41SNavdeep Parhar 	    isset(&eq->doorbells, DOORBELL_WCWR)) {
397990e7434aSNavdeep Parhar 		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
3980d14b0ac1SNavdeep Parhar 		uint32_t mask = (1 << s_qpp) - 1;
3981d14b0ac1SNavdeep Parhar 		volatile uint8_t *udb;
3982d14b0ac1SNavdeep Parhar 
3983d14b0ac1SNavdeep Parhar 		udb = sc->udbs_base + UDBS_DB_OFFSET;
3984d14b0ac1SNavdeep Parhar 		udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT;	/* pg offset */
3985d14b0ac1SNavdeep Parhar 		eq->udb_qid = eq->cntxt_id & mask;		/* id in page */
		/* WCWR needs the segment to lie within the mapped page. */
3986f10405b3SNavdeep Parhar 		if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
398777ad3c41SNavdeep Parhar 	    		clrbit(&eq->doorbells, DOORBELL_WCWR);
3988d14b0ac1SNavdeep Parhar 		else {
3989d14b0ac1SNavdeep Parhar 			udb += eq->udb_qid << UDBS_SEG_SHIFT;	/* seg offset */
3990d14b0ac1SNavdeep Parhar 			eq->udb_qid = 0;
3991d14b0ac1SNavdeep Parhar 		}
3992d14b0ac1SNavdeep Parhar 		eq->udb = (volatile void *)udb;
3993d14b0ac1SNavdeep Parhar 	}
3994d14b0ac1SNavdeep Parhar 
3995733b9277SNavdeep Parhar 	return (rc);
3996733b9277SNavdeep Parhar }
3997733b9277SNavdeep Parhar 
/*
 * free_eq: release an egress queue.  If the firmware context exists
 * (EQ_ALLOCATED) it is freed with the type-specific mailbox call first;
 * a failure there is returned immediately and the queue is left intact
 * so the call can be retried.  Otherwise the DMA ring, the lock, and
 * the software state are all torn down and the structure is zeroed.
 */
3998733b9277SNavdeep Parhar static int
3999733b9277SNavdeep Parhar free_eq(struct adapter *sc, struct sge_eq *eq)
4000733b9277SNavdeep Parhar {
4001733b9277SNavdeep Parhar 	int rc;
4002733b9277SNavdeep Parhar 
4003733b9277SNavdeep Parhar 	if (eq->flags & EQ_ALLOCATED) {
		/* Free the firmware context with the matching command. */
4004733b9277SNavdeep Parhar 		switch (eq->flags & EQ_TYPEMASK) {
4005733b9277SNavdeep Parhar 		case EQ_CTRL:
4006733b9277SNavdeep Parhar 			rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0,
4007733b9277SNavdeep Parhar 			    eq->cntxt_id);
4008733b9277SNavdeep Parhar 			break;
4009733b9277SNavdeep Parhar 
4010733b9277SNavdeep Parhar 		case EQ_ETH:
4011733b9277SNavdeep Parhar 			rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0,
4012733b9277SNavdeep Parhar 			    eq->cntxt_id);
4013733b9277SNavdeep Parhar 			break;
4014733b9277SNavdeep Parhar 
4015eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4016733b9277SNavdeep Parhar 		case EQ_OFLD:
4017733b9277SNavdeep Parhar 			rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0,
4018733b9277SNavdeep Parhar 			    eq->cntxt_id);
4019733b9277SNavdeep Parhar 			break;
4020733b9277SNavdeep Parhar #endif
4021733b9277SNavdeep Parhar 
4022733b9277SNavdeep Parhar 		default:
4023733b9277SNavdeep Parhar 			panic("%s: invalid eq type %d.", __func__,
4024733b9277SNavdeep Parhar 			    eq->flags & EQ_TYPEMASK);
4025733b9277SNavdeep Parhar 		}
4026733b9277SNavdeep Parhar 		if (rc != 0) {
4027733b9277SNavdeep Parhar 			device_printf(sc->dev,
4028733b9277SNavdeep Parhar 			    "failed to free egress queue (%d): %d\n",
4029733b9277SNavdeep Parhar 			    eq->flags & EQ_TYPEMASK, rc);
4030733b9277SNavdeep Parhar 			return (rc);
4031733b9277SNavdeep Parhar 		}
4032733b9277SNavdeep Parhar 		eq->flags &= ~EQ_ALLOCATED;
4033733b9277SNavdeep Parhar 	}
4034733b9277SNavdeep Parhar 
4035733b9277SNavdeep Parhar 	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
4036733b9277SNavdeep Parhar 
	/* The lock may never have been initialized if alloc_eq bailed early. */
4037733b9277SNavdeep Parhar 	if (mtx_initialized(&eq->eq_lock))
4038733b9277SNavdeep Parhar 		mtx_destroy(&eq->eq_lock);
4039733b9277SNavdeep Parhar 
4040733b9277SNavdeep Parhar 	bzero(eq, sizeof(*eq));
4041733b9277SNavdeep Parhar 	return (0);
4042733b9277SNavdeep Parhar }
4043733b9277SNavdeep Parhar 
/*
 * alloc_wrq: build a work-request queue on top of a generic egress
 * queue.  "vi" may be NULL for adapter-wide queues, in which case the
 * sysctls hang off sc->ctx instead of the vi's context.  Also
 * initializes the deferred-tx task, the pending work-request lists, and
 * exports the queue's state and statistics via sysctl.
 *
 * Returns 0 on success or the error from alloc_eq.
 */
4044733b9277SNavdeep Parhar static int
4045fe2ebb76SJohn Baldwin alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq,
4046733b9277SNavdeep Parhar     struct sysctl_oid *oid)
4047733b9277SNavdeep Parhar {
4048733b9277SNavdeep Parhar 	int rc;
4049fe2ebb76SJohn Baldwin 	struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx;
4050733b9277SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
4051733b9277SNavdeep Parhar 
4052fe2ebb76SJohn Baldwin 	rc = alloc_eq(sc, vi, &wrq->eq);
4053733b9277SNavdeep Parhar 	if (rc)
4054733b9277SNavdeep Parhar 		return (rc);
4055733b9277SNavdeep Parhar 
	/* Software state for queuing and draining deferred work requests. */
4056733b9277SNavdeep Parhar 	wrq->adapter = sc;
40577951040fSNavdeep Parhar 	TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq);
40587951040fSNavdeep Parhar 	TAILQ_INIT(&wrq->incomplete_wrs);
405909fe6320SNavdeep Parhar 	STAILQ_INIT(&wrq->wr_list);
40607951040fSNavdeep Parhar 	wrq->nwr_pending = 0;
40617951040fSNavdeep Parhar 	wrq->ndesc_needed = 0;
4062733b9277SNavdeep Parhar 
4063aa93b99aSNavdeep Parhar 	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
4064aa93b99aSNavdeep Parhar 	    &wrq->eq.ba, "bus address of descriptor ring");
4065aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
4066aa93b99aSNavdeep Parhar 	    wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len,
4067aa93b99aSNavdeep Parhar 	    "desc ring size in bytes");
4068733b9277SNavdeep Parhar 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
4069733b9277SNavdeep Parhar 	    &wrq->eq.cntxt_id, 0, "SGE context id of the queue");
4070733b9277SNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
40717029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &wrq->eq.cidx, 0,
40727029da5cSPawel Biernacki 	    sysctl_uint16, "I", "consumer index");
4073733b9277SNavdeep Parhar 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx",
40747029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &wrq->eq.pidx, 0,
40757029da5cSPawel Biernacki 	    sysctl_uint16, "I", "producer index");
4076aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
4077aa93b99aSNavdeep Parhar 	    wrq->eq.sidx, "status page index");
40787951040fSNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD,
40797951040fSNavdeep Parhar 	    &wrq->tx_wrs_direct, "# of work requests (direct)");
40807951040fSNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD,
40817951040fSNavdeep Parhar 	    &wrq->tx_wrs_copied, "# of work requests (copied)");
40820459a175SNavdeep Parhar 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD,
40830459a175SNavdeep Parhar 	    &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)");
4084733b9277SNavdeep Parhar 
4085733b9277SNavdeep Parhar 	return (rc);
4086733b9277SNavdeep Parhar }
4087733b9277SNavdeep Parhar 
4088733b9277SNavdeep Parhar static int
4089733b9277SNavdeep Parhar free_wrq(struct adapter *sc, struct sge_wrq *wrq)
4090733b9277SNavdeep Parhar {
4091733b9277SNavdeep Parhar 	int rc;
4092733b9277SNavdeep Parhar 
4093733b9277SNavdeep Parhar 	rc = free_eq(sc, &wrq->eq);
4094733b9277SNavdeep Parhar 	if (rc)
4095733b9277SNavdeep Parhar 		return (rc);
4096733b9277SNavdeep Parhar 
4097733b9277SNavdeep Parhar 	bzero(wrq, sizeof(*wrq));
4098733b9277SNavdeep Parhar 	return (0);
4099733b9277SNavdeep Parhar }
4100733b9277SNavdeep Parhar 
4101733b9277SNavdeep Parhar static int
4102fe2ebb76SJohn Baldwin alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
4103733b9277SNavdeep Parhar     struct sysctl_oid *oid)
4104733b9277SNavdeep Parhar {
4105733b9277SNavdeep Parhar 	int rc;
4106fe2ebb76SJohn Baldwin 	struct port_info *pi = vi->pi;
4107733b9277SNavdeep Parhar 	struct adapter *sc = pi->adapter;
4108733b9277SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
4109733b9277SNavdeep Parhar 	char name[16];
4110733b9277SNavdeep Parhar 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
4111733b9277SNavdeep Parhar 
41127951040fSNavdeep Parhar 	rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx,
41137951040fSNavdeep Parhar 	    M_CXGBE, M_WAITOK);
41147951040fSNavdeep Parhar 	if (rc != 0) {
41157951040fSNavdeep Parhar 		device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc);
41167951040fSNavdeep Parhar 		return (rc);
41177951040fSNavdeep Parhar 	}
41187951040fSNavdeep Parhar 
4119fe2ebb76SJohn Baldwin 	rc = alloc_eq(sc, vi, eq);
41207951040fSNavdeep Parhar 	if (rc != 0) {
41217951040fSNavdeep Parhar 		mp_ring_free(txq->r);
41227951040fSNavdeep Parhar 		txq->r = NULL;
4123733b9277SNavdeep Parhar 		return (rc);
41247951040fSNavdeep Parhar 	}
4125733b9277SNavdeep Parhar 
41267951040fSNavdeep Parhar 	/* Can't fail after this point. */
41277951040fSNavdeep Parhar 
4128ec55567cSJohn Baldwin 	if (idx == 0)
4129ec55567cSJohn Baldwin 		sc->sge.eq_base = eq->abs_id - eq->cntxt_id;
4130ec55567cSJohn Baldwin 	else
4131ec55567cSJohn Baldwin 		KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id,
4132ec55567cSJohn Baldwin 		    ("eq_base mismatch"));
4133ec55567cSJohn Baldwin 	KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF,
4134ec55567cSJohn Baldwin 	    ("PF with non-zero eq_base"));
4135ec55567cSJohn Baldwin 
41367951040fSNavdeep Parhar 	TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq);
4137fe2ebb76SJohn Baldwin 	txq->ifp = vi->ifp;
41387951040fSNavdeep Parhar 	txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
41396af45170SJohn Baldwin 	if (sc->flags & IS_VF)
41406af45170SJohn Baldwin 		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
41416af45170SJohn Baldwin 		    V_TXPKT_INTF(pi->tx_chan));
41426af45170SJohn Baldwin 	else
4143c0236bd9SNavdeep Parhar 		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
4144edb518f4SNavdeep Parhar 		    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
4145edb518f4SNavdeep Parhar 		    V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
414602f972e8SNavdeep Parhar 	txq->tc_idx = -1;
41477951040fSNavdeep Parhar 	txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE,
4148733b9277SNavdeep Parhar 	    M_ZERO | M_WAITOK);
414954e4ee71SNavdeep Parhar 
415054e4ee71SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
41517029da5cSPawel Biernacki 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
41527029da5cSPawel Biernacki 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queue");
415354e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
415454e4ee71SNavdeep Parhar 
4155aa93b99aSNavdeep Parhar 	SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
4156aa93b99aSNavdeep Parhar 	    &eq->ba, "bus address of descriptor ring");
4157aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
4158aa93b99aSNavdeep Parhar 	    eq->sidx * EQ_ESIZE + sc->params.sge.spg_len,
4159aa93b99aSNavdeep Parhar 	    "desc ring size in bytes");
4160ec55567cSJohn Baldwin 	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
4161ec55567cSJohn Baldwin 	    &eq->abs_id, 0, "absolute id of the queue");
4162fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
416359bc8ce0SNavdeep Parhar 	    &eq->cntxt_id, 0, "SGE context id of the queue");
4164fe2ebb76SJohn Baldwin 	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
41657029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &eq->cidx, 0,
41667029da5cSPawel Biernacki 	    sysctl_uint16, "I", "consumer index");
4167fe2ebb76SJohn Baldwin 	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
41687029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &eq->pidx, 0,
41697029da5cSPawel Biernacki 	    sysctl_uint16, "I", "producer index");
4170aa93b99aSNavdeep Parhar 	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
4171aa93b99aSNavdeep Parhar 	    eq->sidx, "status page index");
417259bc8ce0SNavdeep Parhar 
417302f972e8SNavdeep Parhar 	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc",
41747029da5cSPawel Biernacki 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, idx, sysctl_tc,
41757029da5cSPawel Biernacki 	    "I", "traffic class (-1 means none)");
417602f972e8SNavdeep Parhar 
4177fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
417854e4ee71SNavdeep Parhar 	    &txq->txcsum, "# of times hardware assisted with checksum");
4179fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion",
418054e4ee71SNavdeep Parhar 	    CTLFLAG_RD, &txq->vlan_insertion,
418154e4ee71SNavdeep Parhar 	    "# of times hardware inserted 802.1Q tag");
4182fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
4183a1ea9a82SNavdeep Parhar 	    &txq->tso_wrs, "# of TSO work requests");
4184fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
418554e4ee71SNavdeep Parhar 	    &txq->imm_wrs, "# of work requests with immediate data");
4186fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
418754e4ee71SNavdeep Parhar 	    &txq->sgl_wrs, "# of work requests with direct SGL");
4188fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
418954e4ee71SNavdeep Parhar 	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
4190fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs",
41917951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->txpkts0_wrs,
41927951040fSNavdeep Parhar 	    "# of txpkts (type 0) work requests");
4193fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs",
41947951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->txpkts1_wrs,
41957951040fSNavdeep Parhar 	    "# of txpkts (type 1) work requests");
4196fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts",
41977951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->txpkts0_pkts,
41987951040fSNavdeep Parhar 	    "# of frames tx'd using type0 txpkts work requests");
4199fe2ebb76SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts",
42007951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->txpkts1_pkts,
42017951040fSNavdeep Parhar 	    "# of frames tx'd using type1 txpkts work requests");
42025cdaef71SJohn Baldwin 	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD,
42035cdaef71SJohn Baldwin 	    &txq->raw_wrs, "# of raw work requests (non-packets)");
4204bddf7343SJohn Baldwin 
4205bddf7343SJohn Baldwin #ifdef KERN_TLS
4206bddf7343SJohn Baldwin 	if (sc->flags & KERN_TLS_OK) {
4207bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4208bddf7343SJohn Baldwin 		    "kern_tls_records", CTLFLAG_RD, &txq->kern_tls_records,
4209bddf7343SJohn Baldwin 		    "# of NIC TLS records transmitted");
4210bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4211bddf7343SJohn Baldwin 		    "kern_tls_short", CTLFLAG_RD, &txq->kern_tls_short,
4212bddf7343SJohn Baldwin 		    "# of short NIC TLS records transmitted");
4213bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4214bddf7343SJohn Baldwin 		    "kern_tls_partial", CTLFLAG_RD, &txq->kern_tls_partial,
4215bddf7343SJohn Baldwin 		    "# of partial NIC TLS records transmitted");
4216bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4217bddf7343SJohn Baldwin 		    "kern_tls_full", CTLFLAG_RD, &txq->kern_tls_full,
4218bddf7343SJohn Baldwin 		    "# of full NIC TLS records transmitted");
4219bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4220bddf7343SJohn Baldwin 		    "kern_tls_octets", CTLFLAG_RD, &txq->kern_tls_octets,
4221bddf7343SJohn Baldwin 		    "# of payload octets in transmitted NIC TLS records");
4222bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4223bddf7343SJohn Baldwin 		    "kern_tls_waste", CTLFLAG_RD, &txq->kern_tls_waste,
4224bddf7343SJohn Baldwin 		    "# of octets DMAd but not transmitted in NIC TLS records");
4225bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4226bddf7343SJohn Baldwin 		    "kern_tls_options", CTLFLAG_RD, &txq->kern_tls_options,
4227bddf7343SJohn Baldwin 		    "# of NIC TLS options-only packets transmitted");
4228bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4229bddf7343SJohn Baldwin 		    "kern_tls_header", CTLFLAG_RD, &txq->kern_tls_header,
4230bddf7343SJohn Baldwin 		    "# of NIC TLS header-only packets transmitted");
4231bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4232bddf7343SJohn Baldwin 		    "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin,
4233bddf7343SJohn Baldwin 		    "# of NIC TLS FIN-only packets transmitted");
4234bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4235bddf7343SJohn Baldwin 		    "kern_tls_fin_short", CTLFLAG_RD, &txq->kern_tls_fin_short,
4236bddf7343SJohn Baldwin 		    "# of NIC TLS padded FIN packets on short TLS records");
4237bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4238bddf7343SJohn Baldwin 		    "kern_tls_cbc", CTLFLAG_RD, &txq->kern_tls_cbc,
4239bddf7343SJohn Baldwin 		    "# of NIC TLS sessions using AES-CBC");
4240bddf7343SJohn Baldwin 		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
4241bddf7343SJohn Baldwin 		    "kern_tls_gcm", CTLFLAG_RD, &txq->kern_tls_gcm,
4242bddf7343SJohn Baldwin 		    "# of NIC TLS sessions using AES-GCM");
4243bddf7343SJohn Baldwin 	}
4244bddf7343SJohn Baldwin #endif
424554e4ee71SNavdeep Parhar 
4246fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_enqueues",
42477951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->enqueues,
42487951040fSNavdeep Parhar 	    "# of enqueues to the mp_ring for this queue");
4249fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_drops",
42507951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->drops,
42517951040fSNavdeep Parhar 	    "# of drops in the mp_ring for this queue");
4252fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_starts",
42537951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->starts,
42547951040fSNavdeep Parhar 	    "# of normal consumer starts in the mp_ring for this queue");
4255fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_stalls",
42567951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->stalls,
42577951040fSNavdeep Parhar 	    "# of consumer stalls in the mp_ring for this queue");
4258fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_restarts",
42597951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->restarts,
42607951040fSNavdeep Parhar 	    "# of consumer restarts in the mp_ring for this queue");
4261fe2ebb76SJohn Baldwin 	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_abdications",
42627951040fSNavdeep Parhar 	    CTLFLAG_RD, &txq->r->abdications,
42637951040fSNavdeep Parhar 	    "# of consumer abdications in the mp_ring for this queue");
426454e4ee71SNavdeep Parhar 
42657951040fSNavdeep Parhar 	return (0);
426654e4ee71SNavdeep Parhar }
426754e4ee71SNavdeep Parhar 
426854e4ee71SNavdeep Parhar static int
4269fe2ebb76SJohn Baldwin free_txq(struct vi_info *vi, struct sge_txq *txq)
427054e4ee71SNavdeep Parhar {
427154e4ee71SNavdeep Parhar 	int rc;
4272*7c228be3SNavdeep Parhar 	struct adapter *sc = vi->adapter;
427354e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
427454e4ee71SNavdeep Parhar 
4275733b9277SNavdeep Parhar 	rc = free_eq(sc, eq);
4276733b9277SNavdeep Parhar 	if (rc)
427754e4ee71SNavdeep Parhar 		return (rc);
427854e4ee71SNavdeep Parhar 
42797951040fSNavdeep Parhar 	sglist_free(txq->gl);
4280f7dfe243SNavdeep Parhar 	free(txq->sdesc, M_CXGBE);
42817951040fSNavdeep Parhar 	mp_ring_free(txq->r);
428254e4ee71SNavdeep Parhar 
428354e4ee71SNavdeep Parhar 	bzero(txq, sizeof(*txq));
428454e4ee71SNavdeep Parhar 	return (0);
428554e4ee71SNavdeep Parhar }
428654e4ee71SNavdeep Parhar 
428754e4ee71SNavdeep Parhar static void
428854e4ee71SNavdeep Parhar oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
428954e4ee71SNavdeep Parhar {
429054e4ee71SNavdeep Parhar 	bus_addr_t *ba = arg;
429154e4ee71SNavdeep Parhar 
429254e4ee71SNavdeep Parhar 	KASSERT(nseg == 1,
429354e4ee71SNavdeep Parhar 	    ("%s meant for single segment mappings only.", __func__));
429454e4ee71SNavdeep Parhar 
429554e4ee71SNavdeep Parhar 	*ba = error ? 0 : segs->ds_addr;
429654e4ee71SNavdeep Parhar }
429754e4ee71SNavdeep Parhar 
/*
 * Ring the doorbell for a freelist: tell the hardware about the whole
 * hardware descriptors filled since the last ring (fl->dbidx up to, but
 * not including, fl->pidx / 8).  Writes the mapped user doorbell when one
 * is available, else the kernel doorbell register.
 */
static inline void
ring_fl_db(struct adapter *sc, struct sge_fl *fl)
{
	uint32_t n, v;

	/* # of complete hardware descriptors (8 buffers each) to post. */
	n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx);
	MPASS(n > 0);

	/* Make sure the descriptor updates are visible before the doorbell. */
	wmb();
	v = fl->dbval | V_PIDX(n);
	if (fl->udb)
		*fl->udb = htole32(v);
	else
		t4_write_reg(sc, sc->sge_kdoorbell_reg, v);
	/* Advance the software notion of what the hardware has been told. */
	IDXINCR(fl->dbidx, n, fl->sidx);
}
431454e4ee71SNavdeep Parhar 
/*
 * Fills up the freelist by allocating up to 'n' buffers.  Buffers that are
 * recycled do not count towards this allocation budget.
 *
 * The freelist lock must be held (asserted below).
 *
 * Returns non-zero to indicate that this freelist should be added to the list
 * of starving freelists.
 */
static int
refill_fl(struct adapter *sc, struct sge_fl *fl, int n)
{
	__be64 *d;
	struct fl_sdesc *sd;
	uintptr_t pa;
	caddr_t cl;
	struct rx_buf_info *rxb;
	struct cluster_metadata *clm;
	uint16_t max_pidx;
	uint16_t hw_cidx = fl->hw_cidx;		/* stable snapshot */

	FL_LOCK_ASSERT_OWNED(fl);

	/*
	 * We always stop at the beginning of the hardware descriptor that's just
	 * before the one with the hw cidx.  This is to avoid hw pidx = hw cidx,
	 * which would mean an empty freelist to the chip.
	 */
	max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1;
	if (fl->pidx == max_pidx * 8)
		return (0);

	/* fl->pidx counts buffer slots; 8 slots per hardware descriptor. */
	d = &fl->desc[fl->pidx];
	sd = &fl->sdesc[fl->pidx];

	while (n > 0) {

		if (sd->cl != NULL) {

			if (sd->nmbuf == 0) {
				/*
				 * Fast recycle without involving any atomics on
				 * the cluster's metadata (if the cluster has
				 * metadata).  This happens when all frames
				 * received in the cluster were small enough to
				 * fit within a single mbuf each.
				 */
				fl->cl_fast_recycled++;
				goto recycled;
			}

			/*
			 * Cluster is guaranteed to have metadata.  Clusters
			 * without metadata always take the fast recycle path
			 * when they're recycled.
			 */
			clm = cl_metadata(sd);
			MPASS(clm != NULL);

			/* Drop our reference; recycle if it was the last one. */
			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
				fl->cl_recycled++;
				counter_u64_add(extfree_rels, 1);
				goto recycled;
			}
			sd->cl = NULL;	/* gave up my reference */
		}
		MPASS(sd->cl == NULL);
		/* Allocate a fresh cluster from the freelist's preferred zone. */
		rxb = &sc->sge.rx_buf_info[fl->zidx];
		cl = uma_zalloc(rxb->zone, M_NOWAIT);
		if (__predict_false(cl == NULL)) {
			/* Preferred zone is empty; try the safe zone once. */
			if (fl->zidx != fl->safe_zidx) {
				rxb = &sc->sge.rx_buf_info[fl->safe_zidx];
				cl = uma_zalloc(rxb->zone, M_NOWAIT);
			}
			if (cl == NULL)
				break;
		}
		fl->cl_allocated++;
		n--;	/* only fresh allocations count against the budget */

		/* Hand the cluster's physical address to the hardware. */
		pa = pmap_kextract((vm_offset_t)cl);
		sd->cl = cl;
		sd->zidx = fl->zidx;

		if (fl->flags & FL_BUF_PACKING) {
			*d = htobe64(pa | rxb->hwidx2);
			sd->moff = rxb->size2;
		} else {
			*d = htobe64(pa | rxb->hwidx1);
			sd->moff = 0;
		}
recycled:
		sd->nmbuf = 0;
		d++;
		sd++;
		/* Completed a full hardware descriptor (8 buffer slots)? */
		if (__predict_false((++fl->pidx & 7) == 0)) {
			uint16_t pidx = fl->pidx >> 3;

			if (__predict_false(pidx == fl->sidx)) {
				/* Wrap around to the start of the ring. */
				fl->pidx = 0;
				pidx = 0;
				sd = fl->sdesc;
				d = fl->desc;
			}
			if (n < 8 || pidx == max_pidx)
				break;

			/* Ring the doorbell every 4 completed descriptors. */
			if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4)
				ring_fl_db(sc, fl);
		}
	}

	/* Post any remaining completed descriptors to the chip. */
	if ((fl->pidx >> 3) != fl->dbidx)
		ring_fl_db(sc, fl);

	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
}
4430733b9277SNavdeep Parhar 
4431733b9277SNavdeep Parhar /*
4432733b9277SNavdeep Parhar  * Attempt to refill all starving freelists.
4433733b9277SNavdeep Parhar  */
4434733b9277SNavdeep Parhar static void
4435733b9277SNavdeep Parhar refill_sfl(void *arg)
4436733b9277SNavdeep Parhar {
4437733b9277SNavdeep Parhar 	struct adapter *sc = arg;
4438733b9277SNavdeep Parhar 	struct sge_fl *fl, *fl_temp;
4439733b9277SNavdeep Parhar 
4440fe2ebb76SJohn Baldwin 	mtx_assert(&sc->sfl_lock, MA_OWNED);
4441733b9277SNavdeep Parhar 	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
4442733b9277SNavdeep Parhar 		FL_LOCK(fl);
4443733b9277SNavdeep Parhar 		refill_fl(sc, fl, 64);
4444733b9277SNavdeep Parhar 		if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
4445733b9277SNavdeep Parhar 			TAILQ_REMOVE(&sc->sfl, fl, link);
4446733b9277SNavdeep Parhar 			fl->flags &= ~FL_STARVING;
4447733b9277SNavdeep Parhar 		}
4448733b9277SNavdeep Parhar 		FL_UNLOCK(fl);
4449733b9277SNavdeep Parhar 	}
4450733b9277SNavdeep Parhar 
4451733b9277SNavdeep Parhar 	if (!TAILQ_EMPTY(&sc->sfl))
4452733b9277SNavdeep Parhar 		callout_schedule(&sc->sfl_callout, hz / 5);
445354e4ee71SNavdeep Parhar }
445454e4ee71SNavdeep Parhar 
445554e4ee71SNavdeep Parhar static int
445654e4ee71SNavdeep Parhar alloc_fl_sdesc(struct sge_fl *fl)
445754e4ee71SNavdeep Parhar {
445854e4ee71SNavdeep Parhar 
44594d6db4e0SNavdeep Parhar 	fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE,
446054e4ee71SNavdeep Parhar 	    M_ZERO | M_WAITOK);
446154e4ee71SNavdeep Parhar 
446254e4ee71SNavdeep Parhar 	return (0);
446354e4ee71SNavdeep Parhar }
446454e4ee71SNavdeep Parhar 
/*
 * Free the software descriptor array of a freelist along with any clusters
 * still referenced by it.  A cluster with no mbufs carved out of it
 * (nmbuf == 0) is freed directly; a packing-mode cluster with outstanding
 * mbufs has its metadata refcount dropped and is freed only when this was
 * the last reference.
 */
static void
free_fl_sdesc(struct adapter *sc, struct sge_fl *fl)
{
	struct fl_sdesc *sd;
	struct cluster_metadata *clm;
	int i;

	sd = fl->sdesc;
	for (i = 0; i < fl->sidx * 8; i++, sd++) {
		if (sd->cl == NULL)
			continue;

		if (sd->nmbuf == 0)
			uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl);
		else if (fl->flags & FL_BUF_PACKING) {
			clm = cl_metadata(sd);
			/* Drop our reference; free on the last one. */
			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
				uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone,
				    sd->cl);
				counter_u64_add(extfree_rels, 1);
			}
		}
		sd->cl = NULL;
	}

	free(fl->sdesc, M_CXGBE);
	fl->sdesc = NULL;
}
449354e4ee71SNavdeep Parhar 
/*
 * Build the scatter/gather list for mbuf chain 'm' in 'gl'.  The mbuf is
 * expected to have been vetted earlier (its segment count is cached in the
 * pkthdr and retrieved via mbuf_nsegs), so a failure to append here is a
 * programming error and panics.  The KASSERTs cross-check the resulting
 * segment count against the cached value and the per-WR SGL limits.
 */
static inline void
get_pkt_gl(struct mbuf *m, struct sglist *gl)
{
	int rc;

	M_ASSERTPKTHDR(m);

	sglist_reset(gl);
	rc = sglist_append_mbuf(gl, m);
	if (__predict_false(rc != 0)) {
		panic("%s: mbuf %p (%d segs) was vetted earlier but now fails "
		    "with %d.", __func__, m, mbuf_nsegs(m), rc);
	}

	KASSERT(gl->sg_nseg == mbuf_nsegs(m),
	    ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m,
	    mbuf_nsegs(m), gl->sg_nseg));
	KASSERT(gl->sg_nseg > 0 &&
	    gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS),
	    ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__,
		gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS));
}
451654e4ee71SNavdeep Parhar 
451754e4ee71SNavdeep Parhar /*
45187951040fSNavdeep Parhar  * len16 for a txpkt WR with a GL.  Includes the firmware work request header.
451954e4ee71SNavdeep Parhar  */
45207951040fSNavdeep Parhar static inline u_int
45217951040fSNavdeep Parhar txpkt_len16(u_int nsegs, u_int tso)
45227951040fSNavdeep Parhar {
45237951040fSNavdeep Parhar 	u_int n;
45247951040fSNavdeep Parhar 
45257951040fSNavdeep Parhar 	MPASS(nsegs > 0);
45267951040fSNavdeep Parhar 
45277951040fSNavdeep Parhar 	nsegs--; /* first segment is part of ulptx_sgl */
45287951040fSNavdeep Parhar 	n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) +
45297951040fSNavdeep Parhar 	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
45307951040fSNavdeep Parhar 	if (tso)
45317951040fSNavdeep Parhar 		n += sizeof(struct cpl_tx_pkt_lso_core);
45327951040fSNavdeep Parhar 
45337951040fSNavdeep Parhar 	return (howmany(n, 16));
45347951040fSNavdeep Parhar }
453554e4ee71SNavdeep Parhar 
453654e4ee71SNavdeep Parhar /*
45376af45170SJohn Baldwin  * len16 for a txpkt_vm WR with a GL.  Includes the firmware work
45386af45170SJohn Baldwin  * request header.
45396af45170SJohn Baldwin  */
45406af45170SJohn Baldwin static inline u_int
45416af45170SJohn Baldwin txpkt_vm_len16(u_int nsegs, u_int tso)
45426af45170SJohn Baldwin {
45436af45170SJohn Baldwin 	u_int n;
45446af45170SJohn Baldwin 
45456af45170SJohn Baldwin 	MPASS(nsegs > 0);
45466af45170SJohn Baldwin 
45476af45170SJohn Baldwin 	nsegs--; /* first segment is part of ulptx_sgl */
45486af45170SJohn Baldwin 	n = sizeof(struct fw_eth_tx_pkt_vm_wr) +
45496af45170SJohn Baldwin 	    sizeof(struct cpl_tx_pkt_core) +
45506af45170SJohn Baldwin 	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
45516af45170SJohn Baldwin 	if (tso)
45526af45170SJohn Baldwin 		n += sizeof(struct cpl_tx_pkt_lso_core);
45536af45170SJohn Baldwin 
45546af45170SJohn Baldwin 	return (howmany(n, 16));
45556af45170SJohn Baldwin }
45566af45170SJohn Baldwin 
45576af45170SJohn Baldwin /*
45587951040fSNavdeep Parhar  * len16 for a txpkts type 0 WR with a GL.  Does not include the firmware work
45597951040fSNavdeep Parhar  * request header.
45607951040fSNavdeep Parhar  */
45617951040fSNavdeep Parhar static inline u_int
45627951040fSNavdeep Parhar txpkts0_len16(u_int nsegs)
45637951040fSNavdeep Parhar {
45647951040fSNavdeep Parhar 	u_int n;
45657951040fSNavdeep Parhar 
45667951040fSNavdeep Parhar 	MPASS(nsegs > 0);
45677951040fSNavdeep Parhar 
45687951040fSNavdeep Parhar 	nsegs--; /* first segment is part of ulptx_sgl */
45697951040fSNavdeep Parhar 	n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) +
45707951040fSNavdeep Parhar 	    sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) +
45717951040fSNavdeep Parhar 	    8 * ((3 * nsegs) / 2 + (nsegs & 1));
45727951040fSNavdeep Parhar 
45737951040fSNavdeep Parhar 	return (howmany(n, 16));
45747951040fSNavdeep Parhar }
45757951040fSNavdeep Parhar 
45767951040fSNavdeep Parhar /*
45777951040fSNavdeep Parhar  * len16 for a txpkts type 1 WR with a GL.  Does not include the firmware work
45787951040fSNavdeep Parhar  * request header.
45797951040fSNavdeep Parhar  */
45807951040fSNavdeep Parhar static inline u_int
45817951040fSNavdeep Parhar txpkts1_len16(void)
45827951040fSNavdeep Parhar {
45837951040fSNavdeep Parhar 	u_int n;
45847951040fSNavdeep Parhar 
45857951040fSNavdeep Parhar 	n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl);
45867951040fSNavdeep Parhar 
45877951040fSNavdeep Parhar 	return (howmany(n, 16));
45887951040fSNavdeep Parhar }
45897951040fSNavdeep Parhar 
45907951040fSNavdeep Parhar static inline u_int
45917951040fSNavdeep Parhar imm_payload(u_int ndesc)
45927951040fSNavdeep Parhar {
45937951040fSNavdeep Parhar 	u_int n;
45947951040fSNavdeep Parhar 
45957951040fSNavdeep Parhar 	n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) -
45967951040fSNavdeep Parhar 	    sizeof(struct cpl_tx_pkt_core);
45977951040fSNavdeep Parhar 
45987951040fSNavdeep Parhar 	return (n);
45997951040fSNavdeep Parhar }
46007951040fSNavdeep Parhar 
/*
 * Compute the checksum-offload portion of the cpl_tx_pkt_core ctrl1 word
 * for mbuf 'm'.  If no hardware checksumming is needed, both the IP and L4
 * checksum engines are disabled.  Otherwise the csum type and the L2/L3
 * header lengths are encoded; the L2 length field format differs between
 * T5-and-earlier and T6+ chips.
 */
static inline uint64_t
csum_to_ctrl(struct adapter *sc, struct mbuf *m)
{
	uint64_t ctrl;
	int csum_type;

	M_ASSERTPKTHDR(m);

	if (needs_hwcsum(m) == 0)
		return (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

	ctrl = 0;
	if (needs_l3_csum(m) == 0)
		ctrl |= F_TXPKT_IPCSUM_DIS;
	/* Map the mbuf's csum flags to the hardware checksum type. */
	switch (m->m_pkthdr.csum_flags &
	    (CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)) {
	case CSUM_IP_TCP:
		csum_type = TX_CSUM_TCPIP;
		break;
	case CSUM_IP_UDP:
		csum_type = TX_CSUM_UDPIP;
		break;
	case CSUM_IP6_TCP:
		csum_type = TX_CSUM_TCPIP6;
		break;
	case CSUM_IP6_UDP:
		csum_type = TX_CSUM_UDPIP6;
		break;
	default:
		/* needs_hwcsum told us that at least some hwcsum is needed. */
		MPASS(ctrl == 0);
		MPASS(m->m_pkthdr.csum_flags & CSUM_IP);
		/* IP header checksum only; disable the L4 engine. */
		ctrl |= F_TXPKT_L4CSUM_DIS;
		csum_type = TX_CSUM_IP;
		break;
	}

	/* Header lengths must have been parsed out of the frame already. */
	MPASS(m->m_pkthdr.l2hlen > 0);
	MPASS(m->m_pkthdr.l3hlen > 0);
	ctrl |= V_TXPKT_CSUM_TYPE(csum_type) |
	    V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
	/* T6 changed the encoding of the ethernet header length field. */
	if (chip_id(sc) <= CHELSIO_T5)
		ctrl |= V_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN);
	else
		ctrl |= V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN);

	return (ctrl);
}
4649c0236bd9SNavdeep Parhar 
46507951040fSNavdeep Parhar /*
46516af45170SJohn Baldwin  * Write a VM txpkt WR for this packet to the hardware descriptors, update the
46526af45170SJohn Baldwin  * software descriptor, and advance the pidx.  It is guaranteed that enough
46536af45170SJohn Baldwin  * descriptors are available.
46546af45170SJohn Baldwin  *
46556af45170SJohn Baldwin  * The return value is the # of hardware descriptors used.
46566af45170SJohn Baldwin  */
46576af45170SJohn Baldwin static u_int
4658472a6004SNavdeep Parhar write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq,
4659472a6004SNavdeep Parhar     struct fw_eth_tx_pkt_vm_wr *wr, struct mbuf *m0, u_int available)
46606af45170SJohn Baldwin {
46616af45170SJohn Baldwin 	struct sge_eq *eq = &txq->eq;
46626af45170SJohn Baldwin 	struct tx_sdesc *txsd;
46636af45170SJohn Baldwin 	struct cpl_tx_pkt_core *cpl;
46646af45170SJohn Baldwin 	uint32_t ctrl;	/* used in many unrelated places */
46656af45170SJohn Baldwin 	uint64_t ctrl1;
4666c0236bd9SNavdeep Parhar 	int len16, ndesc, pktlen, nsegs;
46676af45170SJohn Baldwin 	caddr_t dst;
46686af45170SJohn Baldwin 
46696af45170SJohn Baldwin 	TXQ_LOCK_ASSERT_OWNED(txq);
46706af45170SJohn Baldwin 	M_ASSERTPKTHDR(m0);
46716af45170SJohn Baldwin 	MPASS(available > 0 && available < eq->sidx);
46726af45170SJohn Baldwin 
46736af45170SJohn Baldwin 	len16 = mbuf_len16(m0);
46746af45170SJohn Baldwin 	nsegs = mbuf_nsegs(m0);
46756af45170SJohn Baldwin 	pktlen = m0->m_pkthdr.len;
46766af45170SJohn Baldwin 	ctrl = sizeof(struct cpl_tx_pkt_core);
46776af45170SJohn Baldwin 	if (needs_tso(m0))
46786af45170SJohn Baldwin 		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
46790cadedfcSNavdeep Parhar 	ndesc = tx_len16_to_desc(len16);
46806af45170SJohn Baldwin 	MPASS(ndesc <= available);
46816af45170SJohn Baldwin 
46826af45170SJohn Baldwin 	/* Firmware work request header */
46836af45170SJohn Baldwin 	MPASS(wr == (void *)&eq->desc[eq->pidx]);
46846af45170SJohn Baldwin 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
46856af45170SJohn Baldwin 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
46866af45170SJohn Baldwin 
46876af45170SJohn Baldwin 	ctrl = V_FW_WR_LEN16(len16);
46886af45170SJohn Baldwin 	wr->equiq_to_len16 = htobe32(ctrl);
46896af45170SJohn Baldwin 	wr->r3[0] = 0;
46906af45170SJohn Baldwin 	wr->r3[1] = 0;
46916af45170SJohn Baldwin 
46926af45170SJohn Baldwin 	/*
46936af45170SJohn Baldwin 	 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci.
46946af45170SJohn Baldwin 	 * vlantci is ignored unless the ethtype is 0x8100, so it's
46956af45170SJohn Baldwin 	 * simpler to always copy it rather than making it
46966af45170SJohn Baldwin 	 * conditional.  Also, it seems that we do not have to set
46976af45170SJohn Baldwin 	 * vlantci or fake the ethtype when doing VLAN tag insertion.
46986af45170SJohn Baldwin 	 */
46996af45170SJohn Baldwin 	m_copydata(m0, 0, sizeof(struct ether_header) + 2, wr->ethmacdst);
47006af45170SJohn Baldwin 
47016af45170SJohn Baldwin 	if (needs_tso(m0)) {
47026af45170SJohn Baldwin 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
47036af45170SJohn Baldwin 
47046af45170SJohn Baldwin 		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
47056af45170SJohn Baldwin 		    m0->m_pkthdr.l4hlen > 0,
47066af45170SJohn Baldwin 		    ("%s: mbuf %p needs TSO but missing header lengths",
47076af45170SJohn Baldwin 			__func__, m0));
47086af45170SJohn Baldwin 
47096af45170SJohn Baldwin 		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
4710c0236bd9SNavdeep Parhar 		    F_LSO_LAST_SLICE | V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
4711c0236bd9SNavdeep Parhar 			ETHER_HDR_LEN) >> 2) |
4712c0236bd9SNavdeep Parhar 		    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
4713c0236bd9SNavdeep Parhar 		    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
47146af45170SJohn Baldwin 		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
47156af45170SJohn Baldwin 			ctrl |= F_LSO_IPV6;
47166af45170SJohn Baldwin 
47176af45170SJohn Baldwin 		lso->lso_ctrl = htobe32(ctrl);
47186af45170SJohn Baldwin 		lso->ipid_ofst = htobe16(0);
47196af45170SJohn Baldwin 		lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
47206af45170SJohn Baldwin 		lso->seqno_offset = htobe32(0);
47216af45170SJohn Baldwin 		lso->len = htobe32(pktlen);
47226af45170SJohn Baldwin 
47236af45170SJohn Baldwin 		cpl = (void *)(lso + 1);
47246af45170SJohn Baldwin 
47256af45170SJohn Baldwin 		txq->tso_wrs++;
4726c0236bd9SNavdeep Parhar 	} else
47276af45170SJohn Baldwin 		cpl = (void *)(wr + 1);
47286af45170SJohn Baldwin 
47296af45170SJohn Baldwin 	/* Checksum offload */
4730c0236bd9SNavdeep Parhar 	ctrl1 = csum_to_ctrl(sc, m0);
4731c0236bd9SNavdeep Parhar 	if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS))
47326af45170SJohn Baldwin 		txq->txcsum++;	/* some hardware assistance provided */
47336af45170SJohn Baldwin 
47346af45170SJohn Baldwin 	/* VLAN tag insertion */
47356af45170SJohn Baldwin 	if (needs_vlan_insertion(m0)) {
47366af45170SJohn Baldwin 		ctrl1 |= F_TXPKT_VLAN_VLD |
47376af45170SJohn Baldwin 		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
47386af45170SJohn Baldwin 		txq->vlan_insertion++;
47396af45170SJohn Baldwin 	}
47406af45170SJohn Baldwin 
47416af45170SJohn Baldwin 	/* CPL header */
47426af45170SJohn Baldwin 	cpl->ctrl0 = txq->cpl_ctrl0;
47436af45170SJohn Baldwin 	cpl->pack = 0;
47446af45170SJohn Baldwin 	cpl->len = htobe16(pktlen);
47456af45170SJohn Baldwin 	cpl->ctrl1 = htobe64(ctrl1);
47466af45170SJohn Baldwin 
47476af45170SJohn Baldwin 	/* SGL */
47486af45170SJohn Baldwin 	dst = (void *)(cpl + 1);
47496af45170SJohn Baldwin 
47506af45170SJohn Baldwin 	/*
47516af45170SJohn Baldwin 	 * A packet using TSO will use up an entire descriptor for the
47526af45170SJohn Baldwin 	 * firmware work request header, LSO CPL, and TX_PKT_XT CPL.
47536af45170SJohn Baldwin 	 * If this descriptor is the last descriptor in the ring, wrap
47546af45170SJohn Baldwin 	 * around to the front of the ring explicitly for the start of
47556af45170SJohn Baldwin 	 * the sgl.
47566af45170SJohn Baldwin 	 */
47576af45170SJohn Baldwin 	if (dst == (void *)&eq->desc[eq->sidx]) {
47586af45170SJohn Baldwin 		dst = (void *)&eq->desc[0];
47596af45170SJohn Baldwin 		write_gl_to_txd(txq, m0, &dst, 0);
47606af45170SJohn Baldwin 	} else
47616af45170SJohn Baldwin 		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
47626af45170SJohn Baldwin 	txq->sgl_wrs++;
47636af45170SJohn Baldwin 
47646af45170SJohn Baldwin 	txq->txpkt_wrs++;
47656af45170SJohn Baldwin 
47666af45170SJohn Baldwin 	txsd = &txq->sdesc[eq->pidx];
47676af45170SJohn Baldwin 	txsd->m = m0;
47686af45170SJohn Baldwin 	txsd->desc_used = ndesc;
47696af45170SJohn Baldwin 
47706af45170SJohn Baldwin 	return (ndesc);
47716af45170SJohn Baldwin }
47726af45170SJohn Baldwin 
47736af45170SJohn Baldwin /*
47745cdaef71SJohn Baldwin  * Write a raw WR to the hardware descriptors, update the software
47755cdaef71SJohn Baldwin  * descriptor, and advance the pidx.  It is guaranteed that enough
47765cdaef71SJohn Baldwin  * descriptors are available.
47775cdaef71SJohn Baldwin  *
47785cdaef71SJohn Baldwin  * The return value is the # of hardware descriptors used.
47795cdaef71SJohn Baldwin  */
47805cdaef71SJohn Baldwin static u_int
47815cdaef71SJohn Baldwin write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available)
47825cdaef71SJohn Baldwin {
47835cdaef71SJohn Baldwin 	struct sge_eq *eq = &txq->eq;
47845cdaef71SJohn Baldwin 	struct tx_sdesc *txsd;
47855cdaef71SJohn Baldwin 	struct mbuf *m;
47865cdaef71SJohn Baldwin 	caddr_t dst;
47875cdaef71SJohn Baldwin 	int len16, ndesc;
47885cdaef71SJohn Baldwin 
47895cdaef71SJohn Baldwin 	len16 = mbuf_len16(m0);
47900cadedfcSNavdeep Parhar 	ndesc = tx_len16_to_desc(len16);
47915cdaef71SJohn Baldwin 	MPASS(ndesc <= available);
47925cdaef71SJohn Baldwin 
47935cdaef71SJohn Baldwin 	dst = wr;
47945cdaef71SJohn Baldwin 	for (m = m0; m != NULL; m = m->m_next)
47955cdaef71SJohn Baldwin 		copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
47965cdaef71SJohn Baldwin 
47975cdaef71SJohn Baldwin 	txq->raw_wrs++;
47985cdaef71SJohn Baldwin 
47995cdaef71SJohn Baldwin 	txsd = &txq->sdesc[eq->pidx];
48005cdaef71SJohn Baldwin 	txsd->m = m0;
48015cdaef71SJohn Baldwin 	txsd->desc_used = ndesc;
48025cdaef71SJohn Baldwin 
48035cdaef71SJohn Baldwin 	return (ndesc);
48045cdaef71SJohn Baldwin }
48055cdaef71SJohn Baldwin 
/*
 * Write a txpkt WR for this packet to the hardware descriptors, update the
 * software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * The WR layout is: fw_eth_tx_pkt_wr header, optional LSO CPL (for TSO),
 * cpl_tx_pkt_core, then either an SGL or the packet bytes themselves
 * (immediate data) when the packet is small enough.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkt_wr(struct adapter *sc, struct sge_txq *txq,
    struct fw_eth_tx_pkt_wr *wr, struct mbuf *m0, u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;	/* used in many unrelated places */
	uint64_t ctrl1;
	int len16, ndesc, pktlen, nsegs;
	caddr_t dst;

	TXQ_LOCK_ASSERT_OWNED(txq);
	M_ASSERTPKTHDR(m0);
	MPASS(available > 0 && available < eq->sidx);

	/* len16/nsegs were computed when the mbuf was parsed for tx. */
	len16 = mbuf_len16(m0);
	nsegs = mbuf_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	/* ctrl is the immediate-data length for the WR header here. */
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) &&
	    available >= 2) {
		/* Immediate data.  Recalculate len16 and set nsegs to 0. */
		ctrl += pktlen;
		len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) +
		    sizeof(struct cpl_tx_pkt_core) + pktlen, 16);
		nsegs = 0;
	}
	ndesc = tx_len16_to_desc(len16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	MPASS(wr == (void *)&eq->desc[eq->pidx]);
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3 = 0;

	if (needs_tso(m0)) {
		/* LSO CPL immediately follows the WR header. */
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
		    m0->m_pkthdr.l4hlen > 0,
		    ("%s: mbuf %p needs TSO but missing header lengths",
			__func__, m0));

		/* Header lengths are encoded in 4-byte (>> 2) units. */
		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
		    F_LSO_LAST_SLICE | V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
			ETHER_HDR_LEN) >> 2) |
		    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
		    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
			ctrl |= F_LSO_IPV6;

		lso->lso_ctrl = htobe32(ctrl);
		lso->ipid_ofst = htobe16(0);
		lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
		lso->seqno_offset = htobe32(0);
		lso->len = htobe32(pktlen);

		cpl = (void *)(lso + 1);

		txq->tso_wrs++;
	} else
		cpl = (void *)(wr + 1);

	/* Checksum offload */
	ctrl1 = csum_to_ctrl(sc, m0);
	if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS))
		txq->txcsum++;	/* some hardware assistance provided */

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
		txq->vlan_insertion++;
	}

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* SGL (nsegs > 0) or immediate data (nsegs == 0, set above). */
	dst = (void *)(cpl + 1);
	if (nsegs > 0) {

		/* Last arg tells write_gl_to_txd whether the WR may wrap. */
		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
		txq->sgl_wrs++;
	} else {
		struct mbuf *m;

		for (m = m0; m != NULL; m = m->m_next) {
			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
#ifdef INVARIANTS
			pktlen -= m->m_len;
#endif
		}
#ifdef INVARIANTS
		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
#endif
		txq->imm_wrs++;
	}

	txq->txpkt_wrs++;

	/* Record the mbuf and descriptor count for later reclamation. */
	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}
492954e4ee71SNavdeep Parhar 
49307951040fSNavdeep Parhar static int
49317951040fSNavdeep Parhar try_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available)
493254e4ee71SNavdeep Parhar {
49337951040fSNavdeep Parhar 	u_int needed, nsegs1, nsegs2, l1, l2;
49347951040fSNavdeep Parhar 
49357951040fSNavdeep Parhar 	if (cannot_use_txpkts(m) || cannot_use_txpkts(n))
49367951040fSNavdeep Parhar 		return (1);
49377951040fSNavdeep Parhar 
49387951040fSNavdeep Parhar 	nsegs1 = mbuf_nsegs(m);
49397951040fSNavdeep Parhar 	nsegs2 = mbuf_nsegs(n);
49407951040fSNavdeep Parhar 	if (nsegs1 + nsegs2 == 2) {
49417951040fSNavdeep Parhar 		txp->wr_type = 1;
49427951040fSNavdeep Parhar 		l1 = l2 = txpkts1_len16();
49437951040fSNavdeep Parhar 	} else {
49447951040fSNavdeep Parhar 		txp->wr_type = 0;
49457951040fSNavdeep Parhar 		l1 = txpkts0_len16(nsegs1);
49467951040fSNavdeep Parhar 		l2 = txpkts0_len16(nsegs2);
49477951040fSNavdeep Parhar 	}
49487951040fSNavdeep Parhar 	txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2;
49490cadedfcSNavdeep Parhar 	needed = tx_len16_to_desc(txp->len16);
49507951040fSNavdeep Parhar 	if (needed > SGE_MAX_WR_NDESC || needed > available)
49517951040fSNavdeep Parhar 		return (1);
49527951040fSNavdeep Parhar 
49537951040fSNavdeep Parhar 	txp->plen = m->m_pkthdr.len + n->m_pkthdr.len;
49547951040fSNavdeep Parhar 	if (txp->plen > 65535)
49557951040fSNavdeep Parhar 		return (1);
49567951040fSNavdeep Parhar 
49577951040fSNavdeep Parhar 	txp->npkt = 2;
49587951040fSNavdeep Parhar 	set_mbuf_len16(m, l1);
49597951040fSNavdeep Parhar 	set_mbuf_len16(n, l2);
49607951040fSNavdeep Parhar 
49617951040fSNavdeep Parhar 	return (0);
49627951040fSNavdeep Parhar }
49637951040fSNavdeep Parhar 
49647951040fSNavdeep Parhar static int
49657951040fSNavdeep Parhar add_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available)
49667951040fSNavdeep Parhar {
49677951040fSNavdeep Parhar 	u_int plen, len16, needed, nsegs;
49687951040fSNavdeep Parhar 
49697951040fSNavdeep Parhar 	MPASS(txp->wr_type == 0 || txp->wr_type == 1);
49707951040fSNavdeep Parhar 
49717890b5c1SJohn Baldwin 	if (cannot_use_txpkts(m))
49727890b5c1SJohn Baldwin 		return (1);
49737890b5c1SJohn Baldwin 
49747951040fSNavdeep Parhar 	nsegs = mbuf_nsegs(m);
49757890b5c1SJohn Baldwin 	if (txp->wr_type == 1 && nsegs != 1)
49767951040fSNavdeep Parhar 		return (1);
49777951040fSNavdeep Parhar 
49787951040fSNavdeep Parhar 	plen = txp->plen + m->m_pkthdr.len;
49797951040fSNavdeep Parhar 	if (plen > 65535)
49807951040fSNavdeep Parhar 		return (1);
49817951040fSNavdeep Parhar 
49827951040fSNavdeep Parhar 	if (txp->wr_type == 0)
49837951040fSNavdeep Parhar 		len16 = txpkts0_len16(nsegs);
49847951040fSNavdeep Parhar 	else
49857951040fSNavdeep Parhar 		len16 = txpkts1_len16();
49860cadedfcSNavdeep Parhar 	needed = tx_len16_to_desc(txp->len16 + len16);
49877951040fSNavdeep Parhar 	if (needed > SGE_MAX_WR_NDESC || needed > available)
49887951040fSNavdeep Parhar 		return (1);
49897951040fSNavdeep Parhar 
49907951040fSNavdeep Parhar 	txp->npkt++;
49917951040fSNavdeep Parhar 	txp->plen = plen;
49927951040fSNavdeep Parhar 	txp->len16 += len16;
49937951040fSNavdeep Parhar 	set_mbuf_len16(m, len16);
49947951040fSNavdeep Parhar 
49957951040fSNavdeep Parhar 	return (0);
49967951040fSNavdeep Parhar }
49977951040fSNavdeep Parhar 
/*
 * Write a txpkts WR for the packets in txp to the hardware descriptors, update
 * the software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * m0 is the head of a list of packets linked via m_nextpkt that were
 * accumulated by try_txpkts/add_to_txpkts.  For a type 0 WR each packet is
 * preceded by a ULP_TX_PKT master command and a ULP_TX_SC_IMM subcommand;
 * for a type 1 WR the CPLs follow each other directly.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkts_wr(struct adapter *sc, struct sge_txq *txq,
    struct fw_eth_tx_pkts_wr *wr, struct mbuf *m0, const struct txpkts *txp,
    u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;
	uint64_t ctrl1;
	int ndesc, checkwrap;
	struct mbuf *m;
	void *flitp;

	TXQ_LOCK_ASSERT_OWNED(txq);
	MPASS(txp->npkt > 0);
	MPASS(txp->plen < 65536);
	MPASS(m0 != NULL);
	MPASS(m0->m_nextpkt != NULL);
	MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));
	MPASS(available > 0 && available < eq->sidx);

	ndesc = tx_len16_to_desc(txp->len16);
	MPASS(ndesc <= available);

	/* Firmware work request header. */
	MPASS(wr == (void *)&eq->desc[eq->pidx]);
	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
	ctrl = V_FW_WR_LEN16(txp->len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->plen = htobe16(txp->plen);
	wr->npkt = txp->npkt;
	wr->r3 = 0;
	wr->type = txp->wr_type;
	flitp = wr + 1;

	/*
	 * At this point we are 16B into a hardware descriptor.  If checkwrap is
	 * set then we know the WR is going to wrap around somewhere.  We'll
	 * check for that at appropriate points.
	 */
	checkwrap = eq->sidx - ndesc < eq->pidx;
	for (m = m0; m != NULL; m = m->m_nextpkt) {
		if (txp->wr_type == 0) {
			struct ulp_txpkt *ulpmc;
			struct ulptx_idata *ulpsc;

			/* ULP master command */
			ulpmc = flitp;
			ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
			    V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid));
			ulpmc->len = htobe32(mbuf_len16(m));

			/* ULP subcommand */
			ulpsc = (void *)(ulpmc + 1);
			ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
			    F_ULP_TX_SC_MORE);
			ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));

			/* The CPL may land exactly on the end of the ring. */
			cpl = (void *)(ulpsc + 1);
			if (checkwrap &&
			    (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx])
				cpl = (void *)&eq->desc[0];
		} else {
			cpl = flitp;
		}

		/* Checksum offload */
		ctrl1 = csum_to_ctrl(sc, m);
		if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS))
			txq->txcsum++;	/* some hardware assistance provided */

		/* VLAN tag insertion */
		if (needs_vlan_insertion(m)) {
			ctrl1 |= F_TXPKT_VLAN_VLD |
			    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
			txq->vlan_insertion++;
		}

		/* CPL header */
		cpl->ctrl0 = txq->cpl_ctrl0;
		cpl->pack = 0;
		cpl->len = htobe16(m->m_pkthdr.len);
		cpl->ctrl1 = htobe64(ctrl1);

		/* Advance past the CPL, again checking for a ring wrap. */
		flitp = cpl + 1;
		if (checkwrap &&
		    (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
			flitp = (void *)&eq->desc[0];

		write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap);

	}

	if (txp->wr_type == 0) {
		txq->txpkts0_pkts += txp->npkt;
		txq->txpkts0_wrs++;
	} else {
		txq->txpkts1_pkts += txp->npkt;
		txq->txpkts1_wrs++;
	}

	/* The whole packet list hangs off one sdesc (freed as a chain). */
	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}
511254e4ee71SNavdeep Parhar 
/*
 * Write the ULPTX scatter/gather list for mbuf 'm' into the descriptor ring
 * at *to, advancing *to past it.  'checkwrap' must be nonzero if the WR being
 * built may cross the end of the ring; the SGL is then written flit by flit
 * with explicit wrap checks.
 *
 * If the SGL ends on an address that is not 16 byte aligned, this function will
 * add a 0 filled flit at the end.
 */
static void
write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap)
{
	struct sge_eq *eq = &txq->eq;
	struct sglist *gl = txq->gl;
	struct sglist_seg *seg;
	__be64 *flitp, *wrap;
	struct ulptx_sgl *usgl;
	int i, nflits, nsegs;

	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);

	/* Gather the mbuf's buffers into the txq's scratch sglist. */
	get_pkt_gl(m, gl);
	nsegs = gl->sg_nseg;
	MPASS(nsegs > 0);

	/*
	 * Flit count: 2 for the ulptx_sgl header (which embeds seg 0), then
	 * 3 flits per pair of remaining segs, rounded up for an odd seg.
	 */
	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
	flitp = (__be64 *)(*to);
	wrap = (__be64 *)(&eq->desc[eq->sidx]);
	seg = &gl->sg_segs[0];
	usgl = (void *)flitp;

	/*
	 * We start at a 16 byte boundary somewhere inside the tx descriptor
	 * ring, so we're at least 16 bytes away from the status page.  There is
	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
	 */

	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(seg->ss_len);
	usgl->addr0 = htobe64(seg->ss_paddr);
	seg++;

	if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) {

		/* Won't wrap around at all */

		for (i = 0; i < nsegs - 1; i++, seg++) {
			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
		}
		/* Zero the unused half of a trailing odd len/addr pair. */
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
		flitp += nflits;
	} else {

		/* Will wrap somewhere in the rest of the SGL */

		/* 2 flits already written, write the rest flit by flit */
		flitp = (void *)(usgl + 1);
		for (i = 0; i < nflits - 2; i++) {
			if (flitp == wrap)
				flitp = (void *)eq->desc;
			*flitp++ = get_flit(seg, nsegs - 1, i);
		}
	}

	/* Pad to a 16 byte boundary with a zero flit if necessary. */
	if (nflits & 1) {
		MPASS(((uintptr_t)flitp) & 0xf);
		*flitp++ = 0;
	}

	/* Hand back the (possibly wrapped) position after the SGL. */
	MPASS((((uintptr_t)flitp) & 0xf) == 0);
	if (__predict_false(flitp == wrap))
		*to = (void *)eq->desc;
	else
		*to = (void *)flitp;
}
518954e4ee71SNavdeep Parhar 
519054e4ee71SNavdeep Parhar static inline void
519154e4ee71SNavdeep Parhar copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
519254e4ee71SNavdeep Parhar {
51937951040fSNavdeep Parhar 
51947951040fSNavdeep Parhar 	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
51957951040fSNavdeep Parhar 	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
51967951040fSNavdeep Parhar 
51977951040fSNavdeep Parhar 	if (__predict_true((uintptr_t)(*to) + len <=
51987951040fSNavdeep Parhar 	    (uintptr_t)&eq->desc[eq->sidx])) {
519954e4ee71SNavdeep Parhar 		bcopy(from, *to, len);
520054e4ee71SNavdeep Parhar 		(*to) += len;
520154e4ee71SNavdeep Parhar 	} else {
52027951040fSNavdeep Parhar 		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
520354e4ee71SNavdeep Parhar 
520454e4ee71SNavdeep Parhar 		bcopy(from, *to, portion);
520554e4ee71SNavdeep Parhar 		from += portion;
520654e4ee71SNavdeep Parhar 		portion = len - portion;	/* remaining */
520754e4ee71SNavdeep Parhar 		bcopy(from, (void *)eq->desc, portion);
520854e4ee71SNavdeep Parhar 		(*to) = (caddr_t)eq->desc + portion;
520954e4ee71SNavdeep Parhar 	}
521054e4ee71SNavdeep Parhar }
521154e4ee71SNavdeep Parhar 
/*
 * Ring the doorbell for 'n' newly written descriptors on eq, using the best
 * mechanism the queue supports, and advance eq->dbidx accordingly.
 */
static inline void
ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n)
{
	u_int db;

	MPASS(n > 0);

	db = eq->doorbells;
	/* WCWR pushes a single descriptor; disqualify it for n > 1. */
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	/* Descriptor writes must be visible before the doorbell. */
	wmb();

	/* Use the lowest-numbered doorbell mechanism still set in db. */
	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;
		int i;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(eq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p",
		    __func__, eq->doorbells, n, eq->dbidx, eq));

		/* Copy descriptor dbidx into the write-combined WR region. */
		dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET -
		    UDBS_DB_OFFSET);
		i = eq->dbidx;
		src = (void *)&eq->desc[i];
		while (src != (void *)&eq->desc[i + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
		wmb();	/* flush the write-combining buffer */
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(eq->cntxt_id) | V_PIDX(n));
		break;
	}

	IDXINCR(eq->dbidx, n, eq->sidx);
}
52657951040fSNavdeep Parhar 
52667951040fSNavdeep Parhar static inline u_int
52677951040fSNavdeep Parhar reclaimable_tx_desc(struct sge_eq *eq)
526854e4ee71SNavdeep Parhar {
52697951040fSNavdeep Parhar 	uint16_t hw_cidx;
527054e4ee71SNavdeep Parhar 
52717951040fSNavdeep Parhar 	hw_cidx = read_hw_cidx(eq);
52727951040fSNavdeep Parhar 	return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx));
52737951040fSNavdeep Parhar }
527454e4ee71SNavdeep Parhar 
52757951040fSNavdeep Parhar static inline u_int
52767951040fSNavdeep Parhar total_available_tx_desc(struct sge_eq *eq)
52777951040fSNavdeep Parhar {
52787951040fSNavdeep Parhar 	uint16_t hw_cidx, pidx;
52797951040fSNavdeep Parhar 
52807951040fSNavdeep Parhar 	hw_cidx = read_hw_cidx(eq);
52817951040fSNavdeep Parhar 	pidx = eq->pidx;
52827951040fSNavdeep Parhar 
52837951040fSNavdeep Parhar 	if (pidx == hw_cidx)
52847951040fSNavdeep Parhar 		return (eq->sidx - 1);
528554e4ee71SNavdeep Parhar 	else
52867951040fSNavdeep Parhar 		return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1);
52877951040fSNavdeep Parhar }
52887951040fSNavdeep Parhar 
52897951040fSNavdeep Parhar static inline uint16_t
52907951040fSNavdeep Parhar read_hw_cidx(struct sge_eq *eq)
52917951040fSNavdeep Parhar {
52927951040fSNavdeep Parhar 	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
52937951040fSNavdeep Parhar 	uint16_t cidx = spg->cidx;	/* stable snapshot */
52947951040fSNavdeep Parhar 
52957951040fSNavdeep Parhar 	return (be16toh(cidx));
5296e874ff7aSNavdeep Parhar }
529754e4ee71SNavdeep Parhar 
/*
 * Reclaim 'n' descriptors approximately.
 *
 * Walks completed software descriptors (per the hardware cidx), frees the
 * mbuf lists recorded in them, and advances eq->cidx.  May reclaim somewhat
 * more than 'n' because it always consumes whole sdesc entries.  Returns the
 * number of hardware descriptors actually reclaimed.
 */
static u_int
reclaim_tx_descs(struct sge_txq *txq, u_int n)
{
	struct tx_sdesc *txsd;
	struct sge_eq *eq = &txq->eq;
	u_int can_reclaim, reclaimed;

	TXQ_LOCK_ASSERT_OWNED(txq);
	MPASS(n > 0);

	reclaimed = 0;
	can_reclaim = reclaimable_tx_desc(eq);
	while (can_reclaim && reclaimed < n) {
		int ndesc;
		struct mbuf *m, *nextpkt;

		txsd = &txq->sdesc[eq->cidx];
		ndesc = txsd->desc_used;

		/* Firmware doesn't return "partial" credits. */
		KASSERT(can_reclaim >= ndesc,
		    ("%s: unexpected number of credits: %d, %d",
		    __func__, can_reclaim, ndesc));
		KASSERT(ndesc != 0,
		    ("%s: descriptor with no credits: cidx %d",
		    __func__, eq->cidx));

		/* Free every packet recorded in this sdesc (m_nextpkt list). */
		for (m = txsd->m; m != NULL; m = nextpkt) {
			nextpkt = m->m_nextpkt;
			m->m_nextpkt = NULL;
			m_freem(m);
		}
		reclaimed += ndesc;
		can_reclaim -= ndesc;
		IDXINCR(eq->cidx, ndesc, eq->sidx);
	}

	return (reclaimed);
}
534054e4ee71SNavdeep Parhar 
534154e4ee71SNavdeep Parhar static void
53427951040fSNavdeep Parhar tx_reclaim(void *arg, int n)
534354e4ee71SNavdeep Parhar {
53447951040fSNavdeep Parhar 	struct sge_txq *txq = arg;
53457951040fSNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
534654e4ee71SNavdeep Parhar 
53477951040fSNavdeep Parhar 	do {
53487951040fSNavdeep Parhar 		if (TXQ_TRYLOCK(txq) == 0)
53497951040fSNavdeep Parhar 			break;
53507951040fSNavdeep Parhar 		n = reclaim_tx_descs(txq, 32);
53517951040fSNavdeep Parhar 		if (eq->cidx == eq->pidx)
53527951040fSNavdeep Parhar 			eq->equeqidx = eq->pidx;
53537951040fSNavdeep Parhar 		TXQ_UNLOCK(txq);
53547951040fSNavdeep Parhar 	} while (n > 0);
535554e4ee71SNavdeep Parhar }
535654e4ee71SNavdeep Parhar 
535754e4ee71SNavdeep Parhar static __be64
53587951040fSNavdeep Parhar get_flit(struct sglist_seg *segs, int nsegs, int idx)
535954e4ee71SNavdeep Parhar {
536054e4ee71SNavdeep Parhar 	int i = (idx / 3) * 2;
536154e4ee71SNavdeep Parhar 
536254e4ee71SNavdeep Parhar 	switch (idx % 3) {
536354e4ee71SNavdeep Parhar 	case 0: {
5364f078ecf6SWojciech Macek 		uint64_t rc;
536554e4ee71SNavdeep Parhar 
5366f078ecf6SWojciech Macek 		rc = (uint64_t)segs[i].ss_len << 32;
536754e4ee71SNavdeep Parhar 		if (i + 1 < nsegs)
5368f078ecf6SWojciech Macek 			rc |= (uint64_t)(segs[i + 1].ss_len);
536954e4ee71SNavdeep Parhar 
5370f078ecf6SWojciech Macek 		return (htobe64(rc));
537154e4ee71SNavdeep Parhar 	}
537254e4ee71SNavdeep Parhar 	case 1:
53737951040fSNavdeep Parhar 		return (htobe64(segs[i].ss_paddr));
537454e4ee71SNavdeep Parhar 	case 2:
53757951040fSNavdeep Parhar 		return (htobe64(segs[i + 1].ss_paddr));
537654e4ee71SNavdeep Parhar 	}
537754e4ee71SNavdeep Parhar 
537854e4ee71SNavdeep Parhar 	return (0);
537954e4ee71SNavdeep Parhar }
538054e4ee71SNavdeep Parhar 
538146e1e307SNavdeep Parhar static int
538246e1e307SNavdeep Parhar find_refill_source(struct adapter *sc, int maxp, bool packing)
538354e4ee71SNavdeep Parhar {
538446e1e307SNavdeep Parhar 	int i, zidx = -1;
538546e1e307SNavdeep Parhar 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0];
538654e4ee71SNavdeep Parhar 
538746e1e307SNavdeep Parhar 	if (packing) {
538846e1e307SNavdeep Parhar 		for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
538946e1e307SNavdeep Parhar 			if (rxb->hwidx2 == -1)
539046e1e307SNavdeep Parhar 				continue;
539146e1e307SNavdeep Parhar 			if (rxb->size1 < PAGE_SIZE &&
539246e1e307SNavdeep Parhar 			    rxb->size1 < largest_rx_cluster)
539346e1e307SNavdeep Parhar 				continue;
539446e1e307SNavdeep Parhar 			if (rxb->size1 > largest_rx_cluster)
539538035ed6SNavdeep Parhar 				break;
539646e1e307SNavdeep Parhar 			MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE);
539746e1e307SNavdeep Parhar 			if (rxb->size2 >= maxp)
539846e1e307SNavdeep Parhar 				return (i);
539946e1e307SNavdeep Parhar 			zidx = i;
540038035ed6SNavdeep Parhar 		}
540138035ed6SNavdeep Parhar 	} else {
540246e1e307SNavdeep Parhar 		for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
540346e1e307SNavdeep Parhar 			if (rxb->hwidx1 == -1)
540446e1e307SNavdeep Parhar 				continue;
540546e1e307SNavdeep Parhar 			if (rxb->size1 > largest_rx_cluster)
540638035ed6SNavdeep Parhar 				break;
540746e1e307SNavdeep Parhar 			if (rxb->size1 >= maxp)
540846e1e307SNavdeep Parhar 				return (i);
540946e1e307SNavdeep Parhar 			zidx = i;
541038035ed6SNavdeep Parhar 		}
541138035ed6SNavdeep Parhar 	}
541238035ed6SNavdeep Parhar 
541346e1e307SNavdeep Parhar 	return (zidx);
541454e4ee71SNavdeep Parhar }
5415ecb79ca4SNavdeep Parhar 
5416733b9277SNavdeep Parhar static void
5417733b9277SNavdeep Parhar add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
5418ecb79ca4SNavdeep Parhar {
5419733b9277SNavdeep Parhar 	mtx_lock(&sc->sfl_lock);
5420733b9277SNavdeep Parhar 	FL_LOCK(fl);
5421733b9277SNavdeep Parhar 	if ((fl->flags & FL_DOOMED) == 0) {
5422733b9277SNavdeep Parhar 		fl->flags |= FL_STARVING;
5423733b9277SNavdeep Parhar 		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
5424733b9277SNavdeep Parhar 		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
5425733b9277SNavdeep Parhar 	}
5426733b9277SNavdeep Parhar 	FL_UNLOCK(fl);
5427733b9277SNavdeep Parhar 	mtx_unlock(&sc->sfl_lock);
5428733b9277SNavdeep Parhar }
5429ecb79ca4SNavdeep Parhar 
54307951040fSNavdeep Parhar static void
54317951040fSNavdeep Parhar handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq)
54327951040fSNavdeep Parhar {
54337951040fSNavdeep Parhar 	struct sge_wrq *wrq = (void *)eq;
54347951040fSNavdeep Parhar 
54357951040fSNavdeep Parhar 	atomic_readandclear_int(&eq->equiq);
54367951040fSNavdeep Parhar 	taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task);
54377951040fSNavdeep Parhar }
54387951040fSNavdeep Parhar 
54397951040fSNavdeep Parhar static void
54407951040fSNavdeep Parhar handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq)
54417951040fSNavdeep Parhar {
54427951040fSNavdeep Parhar 	struct sge_txq *txq = (void *)eq;
54437951040fSNavdeep Parhar 
54447951040fSNavdeep Parhar 	MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH);
54457951040fSNavdeep Parhar 
54467951040fSNavdeep Parhar 	atomic_readandclear_int(&eq->equiq);
54477951040fSNavdeep Parhar 	mp_ring_check_drainage(txq->r, 0);
54487951040fSNavdeep Parhar 	taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task);
54497951040fSNavdeep Parhar }
54507951040fSNavdeep Parhar 
5451733b9277SNavdeep Parhar static int
5452733b9277SNavdeep Parhar handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
5453733b9277SNavdeep Parhar     struct mbuf *m)
5454733b9277SNavdeep Parhar {
5455733b9277SNavdeep Parhar 	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
5456733b9277SNavdeep Parhar 	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
5457733b9277SNavdeep Parhar 	struct adapter *sc = iq->adapter;
5458733b9277SNavdeep Parhar 	struct sge *s = &sc->sge;
5459733b9277SNavdeep Parhar 	struct sge_eq *eq;
54607951040fSNavdeep Parhar 	static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
54617951040fSNavdeep Parhar 		&handle_wrq_egr_update, &handle_eth_egr_update,
54627951040fSNavdeep Parhar 		&handle_wrq_egr_update};
5463733b9277SNavdeep Parhar 
5464733b9277SNavdeep Parhar 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
5465733b9277SNavdeep Parhar 	    rss->opcode));
5466733b9277SNavdeep Parhar 
5467ec55567cSJohn Baldwin 	eq = s->eqmap[qid - s->eq_start - s->eq_base];
54687951040fSNavdeep Parhar 	(*h[eq->flags & EQ_TYPEMASK])(sc, eq);
5469ecb79ca4SNavdeep Parhar 
5470ecb79ca4SNavdeep Parhar 	return (0);
5471ecb79ca4SNavdeep Parhar }
5472f7dfe243SNavdeep Parhar 
54730abd31e2SNavdeep Parhar /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */
54740abd31e2SNavdeep Parhar CTASSERT(offsetof(struct cpl_fw4_msg, data) == \
54750abd31e2SNavdeep Parhar     offsetof(struct cpl_fw6_msg, data));
54760abd31e2SNavdeep Parhar 
5477733b9277SNavdeep Parhar static int
54781b4cc91fSNavdeep Parhar handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
547956599263SNavdeep Parhar {
54801b4cc91fSNavdeep Parhar 	struct adapter *sc = iq->adapter;
548156599263SNavdeep Parhar 	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
548256599263SNavdeep Parhar 
5483733b9277SNavdeep Parhar 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
5484733b9277SNavdeep Parhar 	    rss->opcode));
5485733b9277SNavdeep Parhar 
54860abd31e2SNavdeep Parhar 	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
54870abd31e2SNavdeep Parhar 		const struct rss_header *rss2;
54880abd31e2SNavdeep Parhar 
54890abd31e2SNavdeep Parhar 		rss2 = (const struct rss_header *)&cpl->data[0];
5490671bf2b8SNavdeep Parhar 		return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
54910abd31e2SNavdeep Parhar 	}
54920abd31e2SNavdeep Parhar 
5493671bf2b8SNavdeep Parhar 	return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
5494f7dfe243SNavdeep Parhar }
5495af49c942SNavdeep Parhar 
5496069af0ebSJohn Baldwin /**
5497069af0ebSJohn Baldwin  *	t4_handle_wrerr_rpl - process a FW work request error message
5498069af0ebSJohn Baldwin  *	@adap: the adapter
5499069af0ebSJohn Baldwin  *	@rpl: start of the FW message
5500069af0ebSJohn Baldwin  */
5501069af0ebSJohn Baldwin static int
5502069af0ebSJohn Baldwin t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl)
5503069af0ebSJohn Baldwin {
5504069af0ebSJohn Baldwin 	u8 opcode = *(const u8 *)rpl;
5505069af0ebSJohn Baldwin 	const struct fw_error_cmd *e = (const void *)rpl;
5506069af0ebSJohn Baldwin 	unsigned int i;
5507069af0ebSJohn Baldwin 
5508069af0ebSJohn Baldwin 	if (opcode != FW_ERROR_CMD) {
5509069af0ebSJohn Baldwin 		log(LOG_ERR,
5510069af0ebSJohn Baldwin 		    "%s: Received WRERR_RPL message with opcode %#x\n",
5511069af0ebSJohn Baldwin 		    device_get_nameunit(adap->dev), opcode);
5512069af0ebSJohn Baldwin 		return (EINVAL);
5513069af0ebSJohn Baldwin 	}
5514069af0ebSJohn Baldwin 	log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev),
5515069af0ebSJohn Baldwin 	    G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" :
5516069af0ebSJohn Baldwin 	    "non-fatal");
5517069af0ebSJohn Baldwin 	switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) {
5518069af0ebSJohn Baldwin 	case FW_ERROR_TYPE_EXCEPTION:
5519069af0ebSJohn Baldwin 		log(LOG_ERR, "exception info:\n");
5520069af0ebSJohn Baldwin 		for (i = 0; i < nitems(e->u.exception.info); i++)
5521069af0ebSJohn Baldwin 			log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ",
5522069af0ebSJohn Baldwin 			    be32toh(e->u.exception.info[i]));
5523069af0ebSJohn Baldwin 		log(LOG_ERR, "\n");
5524069af0ebSJohn Baldwin 		break;
5525069af0ebSJohn Baldwin 	case FW_ERROR_TYPE_HWMODULE:
5526069af0ebSJohn Baldwin 		log(LOG_ERR, "HW module regaddr %08x regval %08x\n",
5527069af0ebSJohn Baldwin 		    be32toh(e->u.hwmodule.regaddr),
5528069af0ebSJohn Baldwin 		    be32toh(e->u.hwmodule.regval));
5529069af0ebSJohn Baldwin 		break;
5530069af0ebSJohn Baldwin 	case FW_ERROR_TYPE_WR:
5531069af0ebSJohn Baldwin 		log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n",
5532069af0ebSJohn Baldwin 		    be16toh(e->u.wr.cidx),
5533069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)),
5534069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)),
5535069af0ebSJohn Baldwin 		    be32toh(e->u.wr.eqid));
5536069af0ebSJohn Baldwin 		for (i = 0; i < nitems(e->u.wr.wrhdr); i++)
5537069af0ebSJohn Baldwin 			log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ",
5538069af0ebSJohn Baldwin 			    e->u.wr.wrhdr[i]);
5539069af0ebSJohn Baldwin 		log(LOG_ERR, "\n");
5540069af0ebSJohn Baldwin 		break;
5541069af0ebSJohn Baldwin 	case FW_ERROR_TYPE_ACL:
5542069af0ebSJohn Baldwin 		log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s",
5543069af0ebSJohn Baldwin 		    be16toh(e->u.acl.cidx),
5544069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)),
5545069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)),
5546069af0ebSJohn Baldwin 		    be32toh(e->u.acl.eqid),
5547069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" :
5548069af0ebSJohn Baldwin 		    "MAC");
5549069af0ebSJohn Baldwin 		for (i = 0; i < nitems(e->u.acl.val); i++)
5550069af0ebSJohn Baldwin 			log(LOG_ERR, " %02x", e->u.acl.val[i]);
5551069af0ebSJohn Baldwin 		log(LOG_ERR, "\n");
5552069af0ebSJohn Baldwin 		break;
5553069af0ebSJohn Baldwin 	default:
5554069af0ebSJohn Baldwin 		log(LOG_ERR, "type %#x\n",
5555069af0ebSJohn Baldwin 		    G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type)));
5556069af0ebSJohn Baldwin 		return (EINVAL);
5557069af0ebSJohn Baldwin 	}
5558069af0ebSJohn Baldwin 	return (0);
5559069af0ebSJohn Baldwin }
5560069af0ebSJohn Baldwin 
5561af49c942SNavdeep Parhar static int
556256599263SNavdeep Parhar sysctl_uint16(SYSCTL_HANDLER_ARGS)
5563af49c942SNavdeep Parhar {
5564af49c942SNavdeep Parhar 	uint16_t *id = arg1;
5565af49c942SNavdeep Parhar 	int i = *id;
5566af49c942SNavdeep Parhar 
5567af49c942SNavdeep Parhar 	return sysctl_handle_int(oidp, &i, 0, req);
5568af49c942SNavdeep Parhar }
556938035ed6SNavdeep Parhar 
557046e1e307SNavdeep Parhar static inline bool
557146e1e307SNavdeep Parhar bufidx_used(struct adapter *sc, int idx)
557246e1e307SNavdeep Parhar {
557346e1e307SNavdeep Parhar 	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0];
557446e1e307SNavdeep Parhar 	int i;
557546e1e307SNavdeep Parhar 
557646e1e307SNavdeep Parhar 	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
557746e1e307SNavdeep Parhar 		if (rxb->size1 > largest_rx_cluster)
557846e1e307SNavdeep Parhar 			continue;
557946e1e307SNavdeep Parhar 		if (rxb->hwidx1 == idx || rxb->hwidx2 == idx)
558046e1e307SNavdeep Parhar 			return (true);
558146e1e307SNavdeep Parhar 	}
558246e1e307SNavdeep Parhar 
558346e1e307SNavdeep Parhar 	return (false);
558446e1e307SNavdeep Parhar }
558546e1e307SNavdeep Parhar 
558638035ed6SNavdeep Parhar static int
558738035ed6SNavdeep Parhar sysctl_bufsizes(SYSCTL_HANDLER_ARGS)
558838035ed6SNavdeep Parhar {
558946e1e307SNavdeep Parhar 	struct adapter *sc = arg1;
559046e1e307SNavdeep Parhar 	struct sge_params *sp = &sc->params.sge;
559138035ed6SNavdeep Parhar 	int i, rc;
559238035ed6SNavdeep Parhar 	struct sbuf sb;
559338035ed6SNavdeep Parhar 	char c;
559438035ed6SNavdeep Parhar 
559546e1e307SNavdeep Parhar 	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);
559646e1e307SNavdeep Parhar 	for (i = 0; i < SGE_FLBUF_SIZES; i++) {
559746e1e307SNavdeep Parhar 		if (bufidx_used(sc, i))
559838035ed6SNavdeep Parhar 			c = '*';
559938035ed6SNavdeep Parhar 		else
560038035ed6SNavdeep Parhar 			c = '\0';
560138035ed6SNavdeep Parhar 
560246e1e307SNavdeep Parhar 		sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c);
560338035ed6SNavdeep Parhar 	}
560438035ed6SNavdeep Parhar 	sbuf_trim(&sb);
560538035ed6SNavdeep Parhar 	sbuf_finish(&sb);
560638035ed6SNavdeep Parhar 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
560738035ed6SNavdeep Parhar 	sbuf_delete(&sb);
560838035ed6SNavdeep Parhar 	return (rc);
560938035ed6SNavdeep Parhar }
561002f972e8SNavdeep Parhar 
5611786099deSNavdeep Parhar #ifdef RATELIMIT
5612786099deSNavdeep Parhar /*
5613786099deSNavdeep Parhar  * len16 for a txpkt WR with a GL.  Includes the firmware work request header.
5614786099deSNavdeep Parhar  */
5615786099deSNavdeep Parhar static inline u_int
5616786099deSNavdeep Parhar txpkt_eo_len16(u_int nsegs, u_int immhdrs, u_int tso)
5617786099deSNavdeep Parhar {
5618786099deSNavdeep Parhar 	u_int n;
5619786099deSNavdeep Parhar 
5620786099deSNavdeep Parhar 	MPASS(immhdrs > 0);
5621786099deSNavdeep Parhar 
5622786099deSNavdeep Parhar 	n = roundup2(sizeof(struct fw_eth_tx_eo_wr) +
5623786099deSNavdeep Parhar 	    sizeof(struct cpl_tx_pkt_core) + immhdrs, 16);
5624786099deSNavdeep Parhar 	if (__predict_false(nsegs == 0))
5625786099deSNavdeep Parhar 		goto done;
5626786099deSNavdeep Parhar 
5627786099deSNavdeep Parhar 	nsegs--; /* first segment is part of ulptx_sgl */
5628786099deSNavdeep Parhar 	n += sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
5629786099deSNavdeep Parhar 	if (tso)
5630786099deSNavdeep Parhar 		n += sizeof(struct cpl_tx_pkt_lso_core);
5631786099deSNavdeep Parhar 
5632786099deSNavdeep Parhar done:
5633786099deSNavdeep Parhar 	return (howmany(n, 16));
5634786099deSNavdeep Parhar }
5635786099deSNavdeep Parhar 
5636786099deSNavdeep Parhar #define ETID_FLOWC_NPARAMS 6
5637786099deSNavdeep Parhar #define ETID_FLOWC_LEN (roundup2((sizeof(struct fw_flowc_wr) + \
5638786099deSNavdeep Parhar     ETID_FLOWC_NPARAMS * sizeof(struct fw_flowc_mnemval)), 16))
5639786099deSNavdeep Parhar #define ETID_FLOWC_LEN16 (howmany(ETID_FLOWC_LEN, 16))
5640786099deSNavdeep Parhar 
5641786099deSNavdeep Parhar static int
5642e38a50e8SJohn Baldwin send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
5643786099deSNavdeep Parhar     struct vi_info *vi)
5644786099deSNavdeep Parhar {
5645786099deSNavdeep Parhar 	struct wrq_cookie cookie;
5646edb518f4SNavdeep Parhar 	u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN;
5647786099deSNavdeep Parhar 	struct fw_flowc_wr *flowc;
5648786099deSNavdeep Parhar 
5649786099deSNavdeep Parhar 	mtx_assert(&cst->lock, MA_OWNED);
5650786099deSNavdeep Parhar 	MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) ==
5651786099deSNavdeep Parhar 	    EO_FLOWC_PENDING);
5652786099deSNavdeep Parhar 
5653786099deSNavdeep Parhar 	flowc = start_wrq_wr(cst->eo_txq, ETID_FLOWC_LEN16, &cookie);
5654786099deSNavdeep Parhar 	if (__predict_false(flowc == NULL))
5655786099deSNavdeep Parhar 		return (ENOMEM);
5656786099deSNavdeep Parhar 
5657786099deSNavdeep Parhar 	bzero(flowc, ETID_FLOWC_LEN);
5658786099deSNavdeep Parhar 	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
5659786099deSNavdeep Parhar 	    V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0));
5660786099deSNavdeep Parhar 	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) |
5661786099deSNavdeep Parhar 	    V_FW_WR_FLOWID(cst->etid));
5662786099deSNavdeep Parhar 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
5663786099deSNavdeep Parhar 	flowc->mnemval[0].val = htobe32(pfvf);
5664786099deSNavdeep Parhar 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
5665786099deSNavdeep Parhar 	flowc->mnemval[1].val = htobe32(pi->tx_chan);
5666786099deSNavdeep Parhar 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
5667786099deSNavdeep Parhar 	flowc->mnemval[2].val = htobe32(pi->tx_chan);
5668786099deSNavdeep Parhar 	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
5669786099deSNavdeep Parhar 	flowc->mnemval[3].val = htobe32(cst->iqid);
5670786099deSNavdeep Parhar 	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
5671786099deSNavdeep Parhar 	flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
5672786099deSNavdeep Parhar 	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
5673786099deSNavdeep Parhar 	flowc->mnemval[5].val = htobe32(cst->schedcl);
5674786099deSNavdeep Parhar 
5675786099deSNavdeep Parhar 	commit_wrq_wr(cst->eo_txq, flowc, &cookie);
5676786099deSNavdeep Parhar 
5677786099deSNavdeep Parhar 	cst->flags &= ~EO_FLOWC_PENDING;
5678786099deSNavdeep Parhar 	cst->flags |= EO_FLOWC_RPL_PENDING;
5679786099deSNavdeep Parhar 	MPASS(cst->tx_credits >= ETID_FLOWC_LEN16);	/* flowc is first WR. */
5680786099deSNavdeep Parhar 	cst->tx_credits -= ETID_FLOWC_LEN16;
5681786099deSNavdeep Parhar 
5682786099deSNavdeep Parhar 	return (0);
5683786099deSNavdeep Parhar }
5684786099deSNavdeep Parhar 
5685786099deSNavdeep Parhar #define ETID_FLUSH_LEN16 (howmany(sizeof (struct fw_flowc_wr), 16))
5686786099deSNavdeep Parhar 
5687786099deSNavdeep Parhar void
5688e38a50e8SJohn Baldwin send_etid_flush_wr(struct cxgbe_rate_tag *cst)
5689786099deSNavdeep Parhar {
5690786099deSNavdeep Parhar 	struct fw_flowc_wr *flowc;
5691786099deSNavdeep Parhar 	struct wrq_cookie cookie;
5692786099deSNavdeep Parhar 
5693786099deSNavdeep Parhar 	mtx_assert(&cst->lock, MA_OWNED);
5694786099deSNavdeep Parhar 
5695786099deSNavdeep Parhar 	flowc = start_wrq_wr(cst->eo_txq, ETID_FLUSH_LEN16, &cookie);
5696786099deSNavdeep Parhar 	if (__predict_false(flowc == NULL))
5697786099deSNavdeep Parhar 		CXGBE_UNIMPLEMENTED(__func__);
5698786099deSNavdeep Parhar 
5699786099deSNavdeep Parhar 	bzero(flowc, ETID_FLUSH_LEN16 * 16);
5700786099deSNavdeep Parhar 	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
5701786099deSNavdeep Parhar 	    V_FW_FLOWC_WR_NPARAMS(0) | F_FW_WR_COMPL);
5702786099deSNavdeep Parhar 	flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) |
5703786099deSNavdeep Parhar 	    V_FW_WR_FLOWID(cst->etid));
5704786099deSNavdeep Parhar 
5705786099deSNavdeep Parhar 	commit_wrq_wr(cst->eo_txq, flowc, &cookie);
5706786099deSNavdeep Parhar 
5707786099deSNavdeep Parhar 	cst->flags |= EO_FLUSH_RPL_PENDING;
5708786099deSNavdeep Parhar 	MPASS(cst->tx_credits >= ETID_FLUSH_LEN16);
5709786099deSNavdeep Parhar 	cst->tx_credits -= ETID_FLUSH_LEN16;
5710786099deSNavdeep Parhar 	cst->ncompl++;
5711786099deSNavdeep Parhar }
5712786099deSNavdeep Parhar 
5713786099deSNavdeep Parhar static void
5714e38a50e8SJohn Baldwin write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr,
5715786099deSNavdeep Parhar     struct mbuf *m0, int compl)
5716786099deSNavdeep Parhar {
5717786099deSNavdeep Parhar 	struct cpl_tx_pkt_core *cpl;
5718786099deSNavdeep Parhar 	uint64_t ctrl1;
5719786099deSNavdeep Parhar 	uint32_t ctrl;	/* used in many unrelated places */
5720786099deSNavdeep Parhar 	int len16, pktlen, nsegs, immhdrs;
5721786099deSNavdeep Parhar 	caddr_t dst;
5722786099deSNavdeep Parhar 	uintptr_t p;
5723786099deSNavdeep Parhar 	struct ulptx_sgl *usgl;
5724786099deSNavdeep Parhar 	struct sglist sg;
5725786099deSNavdeep Parhar 	struct sglist_seg segs[38];	/* XXX: find real limit.  XXX: get off the stack */
5726786099deSNavdeep Parhar 
5727786099deSNavdeep Parhar 	mtx_assert(&cst->lock, MA_OWNED);
5728786099deSNavdeep Parhar 	M_ASSERTPKTHDR(m0);
5729786099deSNavdeep Parhar 	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
5730786099deSNavdeep Parhar 	    m0->m_pkthdr.l4hlen > 0,
5731786099deSNavdeep Parhar 	    ("%s: ethofld mbuf %p is missing header lengths", __func__, m0));
5732786099deSNavdeep Parhar 
5733786099deSNavdeep Parhar 	len16 = mbuf_eo_len16(m0);
5734786099deSNavdeep Parhar 	nsegs = mbuf_eo_nsegs(m0);
5735786099deSNavdeep Parhar 	pktlen = m0->m_pkthdr.len;
5736786099deSNavdeep Parhar 	ctrl = sizeof(struct cpl_tx_pkt_core);
5737786099deSNavdeep Parhar 	if (needs_tso(m0))
5738786099deSNavdeep Parhar 		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
5739786099deSNavdeep Parhar 	immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen;
5740786099deSNavdeep Parhar 	ctrl += immhdrs;
5741786099deSNavdeep Parhar 
5742786099deSNavdeep Parhar 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) |
5743786099deSNavdeep Parhar 	    V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl));
5744786099deSNavdeep Parhar 	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) |
5745786099deSNavdeep Parhar 	    V_FW_WR_FLOWID(cst->etid));
5746786099deSNavdeep Parhar 	wr->r3 = 0;
57476933902dSNavdeep Parhar 	if (needs_udp_csum(m0)) {
57486933902dSNavdeep Parhar 		wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
57496933902dSNavdeep Parhar 		wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen;
57506933902dSNavdeep Parhar 		wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
57516933902dSNavdeep Parhar 		wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen;
57526933902dSNavdeep Parhar 		wr->u.udpseg.rtplen = 0;
57536933902dSNavdeep Parhar 		wr->u.udpseg.r4 = 0;
57546933902dSNavdeep Parhar 		wr->u.udpseg.mss = htobe16(pktlen - immhdrs);
57556933902dSNavdeep Parhar 		wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
57566933902dSNavdeep Parhar 		wr->u.udpseg.plen = htobe32(pktlen - immhdrs);
57576933902dSNavdeep Parhar 		cpl = (void *)(wr + 1);
57586933902dSNavdeep Parhar 	} else {
57596933902dSNavdeep Parhar 		MPASS(needs_tcp_csum(m0));
5760786099deSNavdeep Parhar 		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
5761786099deSNavdeep Parhar 		wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen;
5762786099deSNavdeep Parhar 		wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
5763786099deSNavdeep Parhar 		wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen;
5764786099deSNavdeep Parhar 		wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0);
5765786099deSNavdeep Parhar 		wr->u.tcpseg.r4 = 0;
5766786099deSNavdeep Parhar 		wr->u.tcpseg.r5 = 0;
5767786099deSNavdeep Parhar 		wr->u.tcpseg.plen = htobe32(pktlen - immhdrs);
5768786099deSNavdeep Parhar 
5769786099deSNavdeep Parhar 		if (needs_tso(m0)) {
5770786099deSNavdeep Parhar 			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
5771786099deSNavdeep Parhar 
5772786099deSNavdeep Parhar 			wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz);
5773786099deSNavdeep Parhar 
57746933902dSNavdeep Parhar 			ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
57756933902dSNavdeep Parhar 			    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
5776c0236bd9SNavdeep Parhar 			    V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
5777c0236bd9SNavdeep Parhar 				ETHER_HDR_LEN) >> 2) |
57786933902dSNavdeep Parhar 			    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
57796933902dSNavdeep Parhar 			    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
5780786099deSNavdeep Parhar 			if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
5781786099deSNavdeep Parhar 				ctrl |= F_LSO_IPV6;
5782786099deSNavdeep Parhar 			lso->lso_ctrl = htobe32(ctrl);
5783786099deSNavdeep Parhar 			lso->ipid_ofst = htobe16(0);
5784786099deSNavdeep Parhar 			lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
5785786099deSNavdeep Parhar 			lso->seqno_offset = htobe32(0);
5786786099deSNavdeep Parhar 			lso->len = htobe32(pktlen);
5787786099deSNavdeep Parhar 
5788786099deSNavdeep Parhar 			cpl = (void *)(lso + 1);
5789786099deSNavdeep Parhar 		} else {
5790786099deSNavdeep Parhar 			wr->u.tcpseg.mss = htobe16(0xffff);
5791786099deSNavdeep Parhar 			cpl = (void *)(wr + 1);
5792786099deSNavdeep Parhar 		}
57936933902dSNavdeep Parhar 	}
5794786099deSNavdeep Parhar 
5795786099deSNavdeep Parhar 	/* Checksum offload must be requested for ethofld. */
5796786099deSNavdeep Parhar 	MPASS(needs_l4_csum(m0));
5797c0236bd9SNavdeep Parhar 	ctrl1 = csum_to_ctrl(cst->adapter, m0);
5798786099deSNavdeep Parhar 
5799786099deSNavdeep Parhar 	/* VLAN tag insertion */
5800786099deSNavdeep Parhar 	if (needs_vlan_insertion(m0)) {
5801786099deSNavdeep Parhar 		ctrl1 |= F_TXPKT_VLAN_VLD |
5802786099deSNavdeep Parhar 		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
5803786099deSNavdeep Parhar 	}
5804786099deSNavdeep Parhar 
5805786099deSNavdeep Parhar 	/* CPL header */
5806786099deSNavdeep Parhar 	cpl->ctrl0 = cst->ctrl0;
5807786099deSNavdeep Parhar 	cpl->pack = 0;
5808786099deSNavdeep Parhar 	cpl->len = htobe16(pktlen);
5809786099deSNavdeep Parhar 	cpl->ctrl1 = htobe64(ctrl1);
5810786099deSNavdeep Parhar 
58116933902dSNavdeep Parhar 	/* Copy Ethernet, IP & TCP/UDP hdrs as immediate data */
5812786099deSNavdeep Parhar 	p = (uintptr_t)(cpl + 1);
5813786099deSNavdeep Parhar 	m_copydata(m0, 0, immhdrs, (void *)p);
5814786099deSNavdeep Parhar 
5815786099deSNavdeep Parhar 	/* SGL */
5816786099deSNavdeep Parhar 	dst = (void *)(cpl + 1);
5817786099deSNavdeep Parhar 	if (nsegs > 0) {
5818786099deSNavdeep Parhar 		int i, pad;
5819786099deSNavdeep Parhar 
5820786099deSNavdeep Parhar 		/* zero-pad upto next 16Byte boundary, if not 16Byte aligned */
5821786099deSNavdeep Parhar 		p += immhdrs;
5822786099deSNavdeep Parhar 		pad = 16 - (immhdrs & 0xf);
5823786099deSNavdeep Parhar 		bzero((void *)p, pad);
5824786099deSNavdeep Parhar 
5825786099deSNavdeep Parhar 		usgl = (void *)(p + pad);
5826786099deSNavdeep Parhar 		usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
5827786099deSNavdeep Parhar 		    V_ULPTX_NSGE(nsegs));
5828786099deSNavdeep Parhar 
5829786099deSNavdeep Parhar 		sglist_init(&sg, nitems(segs), segs);
5830786099deSNavdeep Parhar 		for (; m0 != NULL; m0 = m0->m_next) {
5831786099deSNavdeep Parhar 			if (__predict_false(m0->m_len == 0))
5832786099deSNavdeep Parhar 				continue;
5833786099deSNavdeep Parhar 			if (immhdrs >= m0->m_len) {
5834786099deSNavdeep Parhar 				immhdrs -= m0->m_len;
5835786099deSNavdeep Parhar 				continue;
5836786099deSNavdeep Parhar 			}
58376edfd179SGleb Smirnoff 			if (m0->m_flags & M_EXTPG)
583849b6b60eSGleb Smirnoff 				sglist_append_mbuf_epg(&sg, m0,
583949b6b60eSGleb Smirnoff 				    mtod(m0, vm_offset_t), m0->m_len);
584049b6b60eSGleb Smirnoff                         else
5841786099deSNavdeep Parhar 				sglist_append(&sg, mtod(m0, char *) + immhdrs,
5842786099deSNavdeep Parhar 				    m0->m_len - immhdrs);
5843786099deSNavdeep Parhar 			immhdrs = 0;
5844786099deSNavdeep Parhar 		}
5845786099deSNavdeep Parhar 		MPASS(sg.sg_nseg == nsegs);
5846786099deSNavdeep Parhar 
5847786099deSNavdeep Parhar 		/*
5848786099deSNavdeep Parhar 		 * Zero pad last 8B in case the WR doesn't end on a 16B
5849786099deSNavdeep Parhar 		 * boundary.
5850786099deSNavdeep Parhar 		 */
5851786099deSNavdeep Parhar 		*(uint64_t *)((char *)wr + len16 * 16 - 8) = 0;
5852786099deSNavdeep Parhar 
5853786099deSNavdeep Parhar 		usgl->len0 = htobe32(segs[0].ss_len);
5854786099deSNavdeep Parhar 		usgl->addr0 = htobe64(segs[0].ss_paddr);
5855786099deSNavdeep Parhar 		for (i = 0; i < nsegs - 1; i++) {
5856786099deSNavdeep Parhar 			usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len);
5857786099deSNavdeep Parhar 			usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr);
5858786099deSNavdeep Parhar 		}
5859786099deSNavdeep Parhar 		if (i & 1)
5860786099deSNavdeep Parhar 			usgl->sge[i / 2].len[1] = htobe32(0);
5861786099deSNavdeep Parhar 	}
5862786099deSNavdeep Parhar 
5863786099deSNavdeep Parhar }
5864786099deSNavdeep Parhar 
5865786099deSNavdeep Parhar static void
5866e38a50e8SJohn Baldwin ethofld_tx(struct cxgbe_rate_tag *cst)
5867786099deSNavdeep Parhar {
5868786099deSNavdeep Parhar 	struct mbuf *m;
5869786099deSNavdeep Parhar 	struct wrq_cookie cookie;
5870786099deSNavdeep Parhar 	int next_credits, compl;
5871786099deSNavdeep Parhar 	struct fw_eth_tx_eo_wr *wr;
5872786099deSNavdeep Parhar 
5873786099deSNavdeep Parhar 	mtx_assert(&cst->lock, MA_OWNED);
5874786099deSNavdeep Parhar 
5875786099deSNavdeep Parhar 	while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
5876786099deSNavdeep Parhar 		M_ASSERTPKTHDR(m);
5877786099deSNavdeep Parhar 
5878786099deSNavdeep Parhar 		/* How many len16 credits do we need to send this mbuf. */
5879786099deSNavdeep Parhar 		next_credits = mbuf_eo_len16(m);
5880786099deSNavdeep Parhar 		MPASS(next_credits > 0);
5881786099deSNavdeep Parhar 		if (next_credits > cst->tx_credits) {
5882786099deSNavdeep Parhar 			/*
5883786099deSNavdeep Parhar 			 * Tx will make progress eventually because there is at
5884786099deSNavdeep Parhar 			 * least one outstanding fw4_ack that will return
5885786099deSNavdeep Parhar 			 * credits and kick the tx.
5886786099deSNavdeep Parhar 			 */
5887786099deSNavdeep Parhar 			MPASS(cst->ncompl > 0);
5888786099deSNavdeep Parhar 			return;
5889786099deSNavdeep Parhar 		}
5890786099deSNavdeep Parhar 		wr = start_wrq_wr(cst->eo_txq, next_credits, &cookie);
5891786099deSNavdeep Parhar 		if (__predict_false(wr == NULL)) {
5892786099deSNavdeep Parhar 			/* XXX: wishful thinking, not a real assertion. */
5893786099deSNavdeep Parhar 			MPASS(cst->ncompl > 0);
5894786099deSNavdeep Parhar 			return;
5895786099deSNavdeep Parhar 		}
5896786099deSNavdeep Parhar 		cst->tx_credits -= next_credits;
5897786099deSNavdeep Parhar 		cst->tx_nocompl += next_credits;
5898786099deSNavdeep Parhar 		compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
5899e38a50e8SJohn Baldwin 		ETHER_BPF_MTAP(cst->com.com.ifp, m);
5900786099deSNavdeep Parhar 		write_ethofld_wr(cst, wr, m, compl);
5901786099deSNavdeep Parhar 		commit_wrq_wr(cst->eo_txq, wr, &cookie);
5902786099deSNavdeep Parhar 		if (compl) {
5903786099deSNavdeep Parhar 			cst->ncompl++;
5904786099deSNavdeep Parhar 			cst->tx_nocompl	= 0;
5905786099deSNavdeep Parhar 		}
5906786099deSNavdeep Parhar 		(void) mbufq_dequeue(&cst->pending_tx);
5907fb3bc596SJohn Baldwin 
5908fb3bc596SJohn Baldwin 		/*
5909fb3bc596SJohn Baldwin 		 * Drop the mbuf's reference on the tag now rather
5910fb3bc596SJohn Baldwin 		 * than waiting until m_freem().  This ensures that
5911e38a50e8SJohn Baldwin 		 * cxgbe_rate_tag_free gets called when the inp drops
5912fb3bc596SJohn Baldwin 		 * its reference on the tag and there are no more
5913fb3bc596SJohn Baldwin 		 * mbufs in the pending_tx queue and can flush any
5914fb3bc596SJohn Baldwin 		 * pending requests.  Otherwise if the last mbuf
5915fb3bc596SJohn Baldwin 		 * doesn't request a completion the etid will never be
5916fb3bc596SJohn Baldwin 		 * released.
5917fb3bc596SJohn Baldwin 		 */
5918fb3bc596SJohn Baldwin 		m->m_pkthdr.snd_tag = NULL;
5919fb3bc596SJohn Baldwin 		m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
5920e38a50e8SJohn Baldwin 		m_snd_tag_rele(&cst->com.com);
5921fb3bc596SJohn Baldwin 
5922786099deSNavdeep Parhar 		mbufq_enqueue(&cst->pending_fwack, m);
5923786099deSNavdeep Parhar 	}
5924786099deSNavdeep Parhar }
5925786099deSNavdeep Parhar 
5926786099deSNavdeep Parhar int
5927786099deSNavdeep Parhar ethofld_transmit(struct ifnet *ifp, struct mbuf *m0)
5928786099deSNavdeep Parhar {
5929e38a50e8SJohn Baldwin 	struct cxgbe_rate_tag *cst;
5930786099deSNavdeep Parhar 	int rc;
5931786099deSNavdeep Parhar 
5932786099deSNavdeep Parhar 	MPASS(m0->m_nextpkt == NULL);
5933fb3bc596SJohn Baldwin 	MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG);
5934786099deSNavdeep Parhar 	MPASS(m0->m_pkthdr.snd_tag != NULL);
5935e38a50e8SJohn Baldwin 	cst = mst_to_crt(m0->m_pkthdr.snd_tag);
5936786099deSNavdeep Parhar 
5937786099deSNavdeep Parhar 	mtx_lock(&cst->lock);
5938786099deSNavdeep Parhar 	MPASS(cst->flags & EO_SND_TAG_REF);
5939786099deSNavdeep Parhar 
5940786099deSNavdeep Parhar 	if (__predict_false(cst->flags & EO_FLOWC_PENDING)) {
5941786099deSNavdeep Parhar 		struct vi_info *vi = ifp->if_softc;
5942786099deSNavdeep Parhar 		struct port_info *pi = vi->pi;
5943786099deSNavdeep Parhar 		struct adapter *sc = pi->adapter;
5944786099deSNavdeep Parhar 		const uint32_t rss_mask = vi->rss_size - 1;
5945786099deSNavdeep Parhar 		uint32_t rss_hash;
5946786099deSNavdeep Parhar 
5947786099deSNavdeep Parhar 		cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq];
5948786099deSNavdeep Parhar 		if (M_HASHTYPE_ISHASH(m0))
5949786099deSNavdeep Parhar 			rss_hash = m0->m_pkthdr.flowid;
5950786099deSNavdeep Parhar 		else
5951786099deSNavdeep Parhar 			rss_hash = arc4random();
5952786099deSNavdeep Parhar 		/* We assume RSS hashing */
5953786099deSNavdeep Parhar 		cst->iqid = vi->rss[rss_hash & rss_mask];
5954786099deSNavdeep Parhar 		cst->eo_txq += rss_hash % vi->nofldtxq;
5955786099deSNavdeep Parhar 		rc = send_etid_flowc_wr(cst, pi, vi);
5956786099deSNavdeep Parhar 		if (rc != 0)
5957786099deSNavdeep Parhar 			goto done;
5958786099deSNavdeep Parhar 	}
5959786099deSNavdeep Parhar 
5960786099deSNavdeep Parhar 	if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) {
5961786099deSNavdeep Parhar 		rc = ENOBUFS;
5962786099deSNavdeep Parhar 		goto done;
5963786099deSNavdeep Parhar 	}
5964786099deSNavdeep Parhar 
5965786099deSNavdeep Parhar 	mbufq_enqueue(&cst->pending_tx, m0);
5966786099deSNavdeep Parhar 	cst->plen += m0->m_pkthdr.len;
5967786099deSNavdeep Parhar 
5968fb3bc596SJohn Baldwin 	/*
5969fb3bc596SJohn Baldwin 	 * Hold an extra reference on the tag while generating work
5970fb3bc596SJohn Baldwin 	 * requests to ensure that we don't try to free the tag during
5971fb3bc596SJohn Baldwin 	 * ethofld_tx() in case we are sending the final mbuf after
5972fb3bc596SJohn Baldwin 	 * the inp was freed.
5973fb3bc596SJohn Baldwin 	 */
5974e38a50e8SJohn Baldwin 	m_snd_tag_ref(&cst->com.com);
5975786099deSNavdeep Parhar 	ethofld_tx(cst);
5976fb3bc596SJohn Baldwin 	mtx_unlock(&cst->lock);
5977e38a50e8SJohn Baldwin 	m_snd_tag_rele(&cst->com.com);
5978fb3bc596SJohn Baldwin 	return (0);
5979fb3bc596SJohn Baldwin 
5980786099deSNavdeep Parhar done:
5981786099deSNavdeep Parhar 	mtx_unlock(&cst->lock);
5982786099deSNavdeep Parhar 	if (__predict_false(rc != 0))
5983786099deSNavdeep Parhar 		m_freem(m0);
5984786099deSNavdeep Parhar 	return (rc);
5985786099deSNavdeep Parhar }
5986786099deSNavdeep Parhar 
5987786099deSNavdeep Parhar static int
5988786099deSNavdeep Parhar ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
5989786099deSNavdeep Parhar {
5990786099deSNavdeep Parhar 	struct adapter *sc = iq->adapter;
5991786099deSNavdeep Parhar 	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
5992786099deSNavdeep Parhar 	struct mbuf *m;
5993786099deSNavdeep Parhar 	u_int etid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
5994e38a50e8SJohn Baldwin 	struct cxgbe_rate_tag *cst;
5995786099deSNavdeep Parhar 	uint8_t credits = cpl->credits;
5996786099deSNavdeep Parhar 
5997786099deSNavdeep Parhar 	cst = lookup_etid(sc, etid);
5998786099deSNavdeep Parhar 	mtx_lock(&cst->lock);
5999786099deSNavdeep Parhar 	if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) {
6000786099deSNavdeep Parhar 		MPASS(credits >= ETID_FLOWC_LEN16);
6001786099deSNavdeep Parhar 		credits -= ETID_FLOWC_LEN16;
6002786099deSNavdeep Parhar 		cst->flags &= ~EO_FLOWC_RPL_PENDING;
6003786099deSNavdeep Parhar 	}
6004786099deSNavdeep Parhar 
6005786099deSNavdeep Parhar 	KASSERT(cst->ncompl > 0,
6006786099deSNavdeep Parhar 	    ("%s: etid %u (%p) wasn't expecting completion.",
6007786099deSNavdeep Parhar 	    __func__, etid, cst));
6008786099deSNavdeep Parhar 	cst->ncompl--;
6009786099deSNavdeep Parhar 
6010786099deSNavdeep Parhar 	while (credits > 0) {
6011786099deSNavdeep Parhar 		m = mbufq_dequeue(&cst->pending_fwack);
6012786099deSNavdeep Parhar 		if (__predict_false(m == NULL)) {
6013786099deSNavdeep Parhar 			/*
6014786099deSNavdeep Parhar 			 * The remaining credits are for the final flush that
6015786099deSNavdeep Parhar 			 * was issued when the tag was freed by the kernel.
6016786099deSNavdeep Parhar 			 */
6017786099deSNavdeep Parhar 			MPASS((cst->flags &
6018786099deSNavdeep Parhar 			    (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) ==
6019786099deSNavdeep Parhar 			    EO_FLUSH_RPL_PENDING);
6020786099deSNavdeep Parhar 			MPASS(credits == ETID_FLUSH_LEN16);
6021786099deSNavdeep Parhar 			MPASS(cst->tx_credits + cpl->credits == cst->tx_total);
6022786099deSNavdeep Parhar 			MPASS(cst->ncompl == 0);
6023786099deSNavdeep Parhar 
6024786099deSNavdeep Parhar 			cst->flags &= ~EO_FLUSH_RPL_PENDING;
6025786099deSNavdeep Parhar 			cst->tx_credits += cpl->credits;
6026e38a50e8SJohn Baldwin 			cxgbe_rate_tag_free_locked(cst);
6027786099deSNavdeep Parhar 			return (0);	/* cst is gone. */
6028786099deSNavdeep Parhar 		}
6029786099deSNavdeep Parhar 		KASSERT(m != NULL,
6030786099deSNavdeep Parhar 		    ("%s: too many credits (%u, %u)", __func__, cpl->credits,
6031786099deSNavdeep Parhar 		    credits));
6032786099deSNavdeep Parhar 		KASSERT(credits >= mbuf_eo_len16(m),
6033786099deSNavdeep Parhar 		    ("%s: too few credits (%u, %u, %u)", __func__,
6034786099deSNavdeep Parhar 		    cpl->credits, credits, mbuf_eo_len16(m)));
6035786099deSNavdeep Parhar 		credits -= mbuf_eo_len16(m);
6036786099deSNavdeep Parhar 		cst->plen -= m->m_pkthdr.len;
6037786099deSNavdeep Parhar 		m_freem(m);
6038786099deSNavdeep Parhar 	}
6039786099deSNavdeep Parhar 
6040786099deSNavdeep Parhar 	cst->tx_credits += cpl->credits;
6041786099deSNavdeep Parhar 	MPASS(cst->tx_credits <= cst->tx_total);
6042786099deSNavdeep Parhar 
6043fb3bc596SJohn Baldwin 	if (cst->flags & EO_SND_TAG_REF) {
6044fb3bc596SJohn Baldwin 		/*
6045fb3bc596SJohn Baldwin 		 * As with ethofld_transmit(), hold an extra reference
6046fb3bc596SJohn Baldwin 		 * so that the tag is stable across ethold_tx().
6047fb3bc596SJohn Baldwin 		 */
6048e38a50e8SJohn Baldwin 		m_snd_tag_ref(&cst->com.com);
6049786099deSNavdeep Parhar 		m = mbufq_first(&cst->pending_tx);
6050786099deSNavdeep Parhar 		if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
6051786099deSNavdeep Parhar 			ethofld_tx(cst);
6052786099deSNavdeep Parhar 		mtx_unlock(&cst->lock);
6053e38a50e8SJohn Baldwin 		m_snd_tag_rele(&cst->com.com);
6054fb3bc596SJohn Baldwin 	} else {
6055fb3bc596SJohn Baldwin 		/*
6056fb3bc596SJohn Baldwin 		 * There shouldn't be any pending packets if the tag
6057fb3bc596SJohn Baldwin 		 * was freed by the kernel since any pending packet
6058fb3bc596SJohn Baldwin 		 * should hold a reference to the tag.
6059fb3bc596SJohn Baldwin 		 */
6060fb3bc596SJohn Baldwin 		MPASS(mbufq_first(&cst->pending_tx) == NULL);
6061fb3bc596SJohn Baldwin 		mtx_unlock(&cst->lock);
6062fb3bc596SJohn Baldwin 	}
6063786099deSNavdeep Parhar 
6064786099deSNavdeep Parhar 	return (0);
6065786099deSNavdeep Parhar }
6066786099deSNavdeep Parhar #endif
6067