xref: /freebsd/sys/dev/cxgbe/t4_sge.c (revision 59bc8ce0355473fe15023bd6ec554f14bcdf3c5c)
154e4ee71SNavdeep Parhar /*-
254e4ee71SNavdeep Parhar  * Copyright (c) 2011 Chelsio Communications, Inc.
354e4ee71SNavdeep Parhar  * All rights reserved.
454e4ee71SNavdeep Parhar  * Written by: Navdeep Parhar <np@FreeBSD.org>
554e4ee71SNavdeep Parhar  *
654e4ee71SNavdeep Parhar  * Redistribution and use in source and binary forms, with or without
754e4ee71SNavdeep Parhar  * modification, are permitted provided that the following conditions
854e4ee71SNavdeep Parhar  * are met:
954e4ee71SNavdeep Parhar  * 1. Redistributions of source code must retain the above copyright
1054e4ee71SNavdeep Parhar  *    notice, this list of conditions and the following disclaimer.
1154e4ee71SNavdeep Parhar  * 2. Redistributions in binary form must reproduce the above copyright
1254e4ee71SNavdeep Parhar  *    notice, this list of conditions and the following disclaimer in the
1354e4ee71SNavdeep Parhar  *    documentation and/or other materials provided with the distribution.
1454e4ee71SNavdeep Parhar  *
1554e4ee71SNavdeep Parhar  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1654e4ee71SNavdeep Parhar  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1754e4ee71SNavdeep Parhar  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1854e4ee71SNavdeep Parhar  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
1954e4ee71SNavdeep Parhar  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2054e4ee71SNavdeep Parhar  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2154e4ee71SNavdeep Parhar  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2254e4ee71SNavdeep Parhar  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2354e4ee71SNavdeep Parhar  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2454e4ee71SNavdeep Parhar  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2554e4ee71SNavdeep Parhar  * SUCH DAMAGE.
2654e4ee71SNavdeep Parhar  */
2754e4ee71SNavdeep Parhar 
2854e4ee71SNavdeep Parhar #include <sys/cdefs.h>
2954e4ee71SNavdeep Parhar __FBSDID("$FreeBSD$");
3054e4ee71SNavdeep Parhar 
3154e4ee71SNavdeep Parhar #include "opt_inet.h"
3254e4ee71SNavdeep Parhar 
3354e4ee71SNavdeep Parhar #include <sys/types.h>
3454e4ee71SNavdeep Parhar #include <sys/mbuf.h>
3554e4ee71SNavdeep Parhar #include <sys/socket.h>
3654e4ee71SNavdeep Parhar #include <sys/kernel.h>
37ecb79ca4SNavdeep Parhar #include <sys/malloc.h>
38ecb79ca4SNavdeep Parhar #include <sys/queue.h>
39ecb79ca4SNavdeep Parhar #include <sys/taskqueue.h>
4054e4ee71SNavdeep Parhar #include <sys/sysctl.h>
4154e4ee71SNavdeep Parhar #include <net/bpf.h>
4254e4ee71SNavdeep Parhar #include <net/ethernet.h>
4354e4ee71SNavdeep Parhar #include <net/if.h>
4454e4ee71SNavdeep Parhar #include <net/if_vlan_var.h>
4554e4ee71SNavdeep Parhar #include <netinet/in.h>
4654e4ee71SNavdeep Parhar #include <netinet/ip.h>
4754e4ee71SNavdeep Parhar #include <netinet/tcp.h>
4854e4ee71SNavdeep Parhar 
4954e4ee71SNavdeep Parhar #include "common/common.h"
5054e4ee71SNavdeep Parhar #include "common/t4_regs.h"
5154e4ee71SNavdeep Parhar #include "common/t4_regs_values.h"
5254e4ee71SNavdeep Parhar #include "common/t4_msg.h"
5354e4ee71SNavdeep Parhar #include "common/t4fw_interface.h"
5454e4ee71SNavdeep Parhar 
/*
 * Metadata for one free list buffer size class: the buffer length, the
 * matching mbuf storage type (as returned by m_gettype), and the UMA zone
 * the buffers are allocated from (as returned by m_getzone).
 */
struct fl_buf_info {
	int size;		/* buffer size in bytes */
	int type;		/* mbuf type for this size (m_gettype) */
	uma_zone_t zone;	/* UMA zone backing this size (m_getzone) */
};

/* Filled up by t4_sge_modload */
static struct fl_buf_info fl_buf_info[FL_BUF_SIZES];

/* Accessors for the entries of fl_buf_info[]. */
#define FL_BUF_SIZE(x)	(fl_buf_info[x].size)
#define FL_BUF_TYPE(x)	(fl_buf_info[x].type)
#define FL_BUF_ZONE(x)	(fl_buf_info[x].zone)
6754e4ee71SNavdeep Parhar 
enum {
	FL_PKTSHIFT = 2		/* rx payload offset; programmed via V_PKTSHIFT in t4_sge_init */
};

/* Free list alignment; feeds V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) in t4_sge_init. */
#define FL_ALIGN	min(CACHE_LINE_SIZE, 32)
/* Egress status page length (bytes); programmed via V_EGRSTATUSPAGESIZE. */
#if CACHE_LINE_SIZE > 64
#define SPG_LEN		128
#else
#define SPG_LEN		64
#endif
7854e4ee71SNavdeep Parhar 
/* Used to track coalesced tx work request */
struct txpkts {
	uint64_t *flitp;	/* ptr to flit where next pkt should start */
	uint8_t npkt;		/* # of packets in this work request */
	uint8_t nflits;		/* # of flits used by this work request */
	uint16_t plen;		/* total payload (sum of all packets) */
};

/* A packet's SGL.  This + m_pkthdr has all info needed for tx */
struct sgl {
	int nsegs;		/* # of segments in the SGL, 0 means imm. tx */
	int nflits;		/* # of flits needed for the SGL */
	bus_dma_segment_t seg[TX_SGL_SEGS];	/* the DMA segments */
};
9354e4ee71SNavdeep Parhar 
9456599263SNavdeep Parhar static void t4_evt_rx(void *);
9556599263SNavdeep Parhar static void t4_eth_rx(void *);
9654e4ee71SNavdeep Parhar static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
9754e4ee71SNavdeep Parhar     int, iq_intr_handler_t *, char *);
9854e4ee71SNavdeep Parhar static inline void init_fl(struct sge_fl *, int, char *);
99f7dfe243SNavdeep Parhar static inline void init_eq(struct sge_eq *, int, char *);
10054e4ee71SNavdeep Parhar static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
10154e4ee71SNavdeep Parhar     bus_addr_t *, void **);
10254e4ee71SNavdeep Parhar static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
10354e4ee71SNavdeep Parhar     void *);
10454e4ee71SNavdeep Parhar static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
105bc14b14dSNavdeep Parhar     int, int);
10654e4ee71SNavdeep Parhar static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
10756599263SNavdeep Parhar static int alloc_intrq(struct adapter *, int, int, int);
10856599263SNavdeep Parhar static int free_intrq(struct sge_iq *);
10956599263SNavdeep Parhar static int alloc_fwq(struct adapter *, int);
11056599263SNavdeep Parhar static int free_fwq(struct sge_iq *);
11154e4ee71SNavdeep Parhar static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int);
11254e4ee71SNavdeep Parhar static int free_rxq(struct port_info *, struct sge_rxq *);
113f7dfe243SNavdeep Parhar static int alloc_ctrlq(struct adapter *, struct sge_ctrlq *, int);
114f7dfe243SNavdeep Parhar static int free_ctrlq(struct adapter *, struct sge_ctrlq *);
11554e4ee71SNavdeep Parhar static int alloc_txq(struct port_info *, struct sge_txq *, int);
11654e4ee71SNavdeep Parhar static int free_txq(struct port_info *, struct sge_txq *);
11754e4ee71SNavdeep Parhar static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
11854e4ee71SNavdeep Parhar static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
11954e4ee71SNavdeep Parhar static inline void iq_next(struct sge_iq *);
12054e4ee71SNavdeep Parhar static inline void ring_fl_db(struct adapter *, struct sge_fl *);
121fb12416cSNavdeep Parhar static void refill_fl(struct adapter *, struct sge_fl *, int, int);
12254e4ee71SNavdeep Parhar static int alloc_fl_sdesc(struct sge_fl *);
12354e4ee71SNavdeep Parhar static void free_fl_sdesc(struct sge_fl *);
124f7dfe243SNavdeep Parhar static int alloc_tx_maps(struct sge_txq *);
125f7dfe243SNavdeep Parhar static void free_tx_maps(struct sge_txq *);
12654e4ee71SNavdeep Parhar static void set_fl_tag_idx(struct sge_fl *, int);
12754e4ee71SNavdeep Parhar 
12854e4ee71SNavdeep Parhar static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
12954e4ee71SNavdeep Parhar static int free_pkt_sgl(struct sge_txq *, struct sgl *);
13054e4ee71SNavdeep Parhar static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
13154e4ee71SNavdeep Parhar     struct sgl *);
13254e4ee71SNavdeep Parhar static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
13354e4ee71SNavdeep Parhar     struct mbuf *, struct sgl *);
13454e4ee71SNavdeep Parhar static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
13554e4ee71SNavdeep Parhar static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
13654e4ee71SNavdeep Parhar     struct txpkts *, struct mbuf *, struct sgl *);
13754e4ee71SNavdeep Parhar static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
13854e4ee71SNavdeep Parhar static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
139f7dfe243SNavdeep Parhar static inline void ring_eq_db(struct adapter *, struct sge_eq *);
140e874ff7aSNavdeep Parhar static inline int reclaimable(struct sge_eq *);
141f7dfe243SNavdeep Parhar static int reclaim_tx_descs(struct sge_txq *, int, int);
14254e4ee71SNavdeep Parhar static void write_eqflush_wr(struct sge_eq *);
14354e4ee71SNavdeep Parhar static __be64 get_flit(bus_dma_segment_t *, int, int);
144ecb79ca4SNavdeep Parhar static int handle_sge_egr_update(struct adapter *,
145ecb79ca4SNavdeep Parhar     const struct cpl_sge_egr_update *);
14656599263SNavdeep Parhar static void handle_cpl(struct adapter *, struct sge_iq *);
14754e4ee71SNavdeep Parhar 
148f7dfe243SNavdeep Parhar static int ctrl_tx(struct adapter *, struct sge_ctrlq *, struct mbuf *);
14956599263SNavdeep Parhar static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
150f7dfe243SNavdeep Parhar 
1518820ce5fSNavdeep Parhar extern void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
1528820ce5fSNavdeep Parhar 
15394586193SNavdeep Parhar /*
15494586193SNavdeep Parhar  * Called on MOD_LOAD and fills up fl_buf_info[].
15594586193SNavdeep Parhar  */
15694586193SNavdeep Parhar void
15794586193SNavdeep Parhar t4_sge_modload(void)
15894586193SNavdeep Parhar {
15994586193SNavdeep Parhar 	int i;
16094586193SNavdeep Parhar 	int bufsize[FL_BUF_SIZES] = {
16194586193SNavdeep Parhar 		MCLBYTES,
16294586193SNavdeep Parhar #if MJUMPAGESIZE != MCLBYTES
16394586193SNavdeep Parhar 		MJUMPAGESIZE,
16494586193SNavdeep Parhar #endif
16594586193SNavdeep Parhar 		MJUM9BYTES,
16694586193SNavdeep Parhar 		MJUM16BYTES
16794586193SNavdeep Parhar 	};
16894586193SNavdeep Parhar 
16994586193SNavdeep Parhar 	for (i = 0; i < FL_BUF_SIZES; i++) {
17094586193SNavdeep Parhar 		FL_BUF_SIZE(i) = bufsize[i];
17194586193SNavdeep Parhar 		FL_BUF_TYPE(i) = m_gettype(bufsize[i]);
17294586193SNavdeep Parhar 		FL_BUF_ZONE(i) = m_getzone(bufsize[i]);
17394586193SNavdeep Parhar 	}
17494586193SNavdeep Parhar }
17594586193SNavdeep Parhar 
/**
 *	t4_sge_init - initialize SGE
 *	@sc: the adapter
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queues here, instead the driver
 *	top-level must request them individually.
 */
void
t4_sge_init(struct adapter *sc)
{
	struct sge *s = &sc->sge;
	int i;

	/*
	 * Global SGE control: payload shift into rx buffers, ingress padding
	 * boundary, CPL mode, and egress status page size.
	 */
	t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT) |
			 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
			 F_EGRSTATUSPAGESIZE,
			 V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) |
			 V_PKTSHIFT(FL_PKTSHIFT) |
			 F_RXPKTCPLMODE |
			 V_EGRSTATUSPAGESIZE(SPG_LEN == 128));
	/* Host page size, encoded as log2(PAGE_SIZE) - 10. */
	t4_set_reg_field(sc, A_SGE_HOST_PAGE_SIZE,
			 V_HOSTPAGESIZEPF0(M_HOSTPAGESIZEPF0),
			 V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10));

	/* Program the hardware free list buffer sizes from fl_buf_info[]. */
	for (i = 0; i < FL_BUF_SIZES; i++) {
		t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
		    FL_BUF_SIZE(i));
	}

	/* Derive the fl starve threshold from the chip's EGRTHRESHOLD field. */
	i = t4_read_reg(sc, A_SGE_CONM_CTRL);
	s->fl_starve_threshold = G_EGRTHRESHOLD(i) * 2 + 1;

	/* The four interrupt holdoff packet-count thresholds. */
	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
		     V_THRESHOLD_0(s->counter_val[0]) |
		     V_THRESHOLD_1(s->counter_val[1]) |
		     V_THRESHOLD_2(s->counter_val[2]) |
		     V_THRESHOLD_3(s->counter_val[3]));

	/* Holdoff timers, converted from microseconds to core clock ticks. */
	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
		     V_TIMERVALUE0(us_to_core_ticks(sc, s->timer_val[0])) |
		     V_TIMERVALUE1(us_to_core_ticks(sc, s->timer_val[1])));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
		     V_TIMERVALUE2(us_to_core_ticks(sc, s->timer_val[2])) |
		     V_TIMERVALUE3(us_to_core_ticks(sc, s->timer_val[3])));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
		     V_TIMERVALUE4(us_to_core_ticks(sc, s->timer_val[4])) |
		     V_TIMERVALUE5(us_to_core_ticks(sc, s->timer_val[5])));
}
22554e4ee71SNavdeep Parhar 
22654e4ee71SNavdeep Parhar int
22754e4ee71SNavdeep Parhar t4_create_dma_tag(struct adapter *sc)
22854e4ee71SNavdeep Parhar {
22954e4ee71SNavdeep Parhar 	int rc;
23054e4ee71SNavdeep Parhar 
23154e4ee71SNavdeep Parhar 	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
23254e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
23354e4ee71SNavdeep Parhar 	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
23454e4ee71SNavdeep Parhar 	    NULL, &sc->dmat);
23554e4ee71SNavdeep Parhar 	if (rc != 0) {
23654e4ee71SNavdeep Parhar 		device_printf(sc->dev,
23754e4ee71SNavdeep Parhar 		    "failed to create main DMA tag: %d\n", rc);
23854e4ee71SNavdeep Parhar 	}
23954e4ee71SNavdeep Parhar 
24054e4ee71SNavdeep Parhar 	return (rc);
24154e4ee71SNavdeep Parhar }
24254e4ee71SNavdeep Parhar 
24354e4ee71SNavdeep Parhar int
24454e4ee71SNavdeep Parhar t4_destroy_dma_tag(struct adapter *sc)
24554e4ee71SNavdeep Parhar {
24654e4ee71SNavdeep Parhar 	if (sc->dmat)
24754e4ee71SNavdeep Parhar 		bus_dma_tag_destroy(sc->dmat);
24854e4ee71SNavdeep Parhar 
24954e4ee71SNavdeep Parhar 	return (0);
25054e4ee71SNavdeep Parhar }
25154e4ee71SNavdeep Parhar 
/*
 * Allocate and initialize the firmware event queue, control queues, and the
 * interrupt queues.  The adapter owns all of these queues.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	int i, j, rc, intr_idx, qsize;
	struct sge_iq *iq;
	struct sge_ctrlq *ctrlq;
	iq_intr_handler_t *handler;
	char name[16];

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* sysctl nodes for the queues; torn down in t4_teardown_adapter_queues */
	if (sysctl_ctx_init(&sc->ctx) == 0) {
		struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
		struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

		sc->oid_fwq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
		    "fwq", CTLFLAG_RD, NULL, "firmware event queue");
		sc->oid_ctrlq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
		    "ctrlq", CTLFLAG_RD, NULL, "ctrl queues");
		sc->oid_intrq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
		    "intrq", CTLFLAG_RD, NULL, "interrupt queues");
	}

	/*
	 * Interrupt queues.  Their vectors occupy the tail end of the
	 * adapter's interrupt vectors (intr_count - NINTRQ .. intr_count - 1).
	 */
	intr_idx = sc->intr_count - NINTRQ(sc);
	if (sc->flags & INTR_SHARED) {
		/* Shared mode: intrqs are distributed round-robin over ports. */
		qsize = max((sc->sge.nrxq + 1) * 2, INTR_IQ_QSIZE);
		for (i = 0; i < NINTRQ(sc); i++, intr_idx++) {
			snprintf(name, sizeof(name), "%s intrq%d",
			    device_get_nameunit(sc->dev), i);

			iq = &sc->sge.intrq[i];
			init_iq(iq, sc, 0, 0, qsize, INTR_IQ_ESIZE, NULL, name);
			rc = alloc_intrq(sc, i % sc->params.nports, i,
			    intr_idx);

			if (rc != 0) {
				device_printf(sc->dev,
				    "failed to create %s: %d\n", name, rc);
				return (rc);
			}
		}
	} else {
		/* Dedicated mode: one intrq per rx queue, per port. */
		int qidx = 0;
		struct port_info *pi;

		for (i = 0; i < sc->params.nports; i++) {
			pi = sc->port[i];
			qsize = max((pi->nrxq + 1) * 2, INTR_IQ_QSIZE);
			for (j = 0; j < pi->nrxq; j++, qidx++, intr_idx++) {
				snprintf(name, sizeof(name), "%s intrq%d",
				    device_get_nameunit(pi->dev), j);

				iq = &sc->sge.intrq[qidx];
				init_iq(iq, sc, 0, 0, qsize, INTR_IQ_ESIZE,
				    NULL, name);
				rc = alloc_intrq(sc, i, qidx, intr_idx);

				if (rc != 0) {
					device_printf(sc->dev,
					    "failed to create %s: %d\n",
					    name, rc);
					return (rc);
				}
			}
		}
	}

	/*
	 * Firmware event queue.  The handler and vector depend on whether the
	 * adapter has more than T4_EXTRA_INTR vectors available: with extra
	 * vectors the fwq uses vector 1 and no direct handler, otherwise it
	 * shares vector 0 and is drained by t4_evt_rx.
	 */
	snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev));
	if (sc->intr_count > T4_EXTRA_INTR) {
		handler = NULL;
		intr_idx = 1;
	} else {
		handler = t4_evt_rx;
		intr_idx = 0;
	}

	iq = &sc->sge.fwq;
	init_iq(iq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name);
	rc = alloc_fwq(sc, intr_idx);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create firmware event queue: %d\n", rc);

		return (rc);
	}

	/*
	 * Control queues - one per port.
	 */
	ctrlq = &sc->sge.ctrlq[0];
	for (i = 0; i < sc->params.nports; i++, ctrlq++) {
		snprintf(name, sizeof(name), "%s ctrlq%d",
		    device_get_nameunit(sc->dev), i);
		init_eq(&ctrlq->eq, CTRL_EQ_QSIZE, name);

		rc = alloc_ctrlq(sc, ctrlq, i);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to create control queue %d: %d\n", i, rc);
			return (rc);
		}
	}

	return (rc);
}
37054e4ee71SNavdeep Parhar 
37154e4ee71SNavdeep Parhar /*
37254e4ee71SNavdeep Parhar  * Idempotent
37354e4ee71SNavdeep Parhar  */
37454e4ee71SNavdeep Parhar int
375f7dfe243SNavdeep Parhar t4_teardown_adapter_queues(struct adapter *sc)
37654e4ee71SNavdeep Parhar {
37754e4ee71SNavdeep Parhar 	int i;
37854e4ee71SNavdeep Parhar 	struct sge_iq *iq;
37954e4ee71SNavdeep Parhar 
38054e4ee71SNavdeep Parhar 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
38154e4ee71SNavdeep Parhar 
382f7dfe243SNavdeep Parhar 	/* Do this before freeing the queues */
38356599263SNavdeep Parhar 	if (sc->oid_fwq || sc->oid_ctrlq || sc->oid_intrq) {
384f7dfe243SNavdeep Parhar 		sysctl_ctx_free(&sc->ctx);
38556599263SNavdeep Parhar 		sc->oid_fwq = NULL;
386f7dfe243SNavdeep Parhar 		sc->oid_ctrlq = NULL;
38756599263SNavdeep Parhar 		sc->oid_intrq = NULL;
388f7dfe243SNavdeep Parhar 	}
389f7dfe243SNavdeep Parhar 
39056599263SNavdeep Parhar 	for (i = 0; i < sc->params.nports; i++)
391f7dfe243SNavdeep Parhar 		free_ctrlq(sc, &sc->sge.ctrlq[i]);
392f7dfe243SNavdeep Parhar 
39354e4ee71SNavdeep Parhar 	iq = &sc->sge.fwq;
39456599263SNavdeep Parhar 	free_fwq(iq);
39556599263SNavdeep Parhar 
39656599263SNavdeep Parhar 	for (i = 0; i < NINTRQ(sc); i++) {
39756599263SNavdeep Parhar 		iq = &sc->sge.intrq[i];
39856599263SNavdeep Parhar 		free_intrq(iq);
39954e4ee71SNavdeep Parhar 	}
40054e4ee71SNavdeep Parhar 
40154e4ee71SNavdeep Parhar 	return (0);
40254e4ee71SNavdeep Parhar }
40354e4ee71SNavdeep Parhar 
/*
 * Allocate the NIC rx queues (each with an iq and a free list) and tx queues
 * for the given port, along with per-port sysctl nodes.  On any failure the
 * queues created so far are torn down via t4_teardown_eth_queues before
 * returning errno.
 */
int
t4_setup_eth_queues(struct port_info *pi)
{
	int rc = 0, i, intr_idx;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
	char name[16];
	struct adapter *sc = pi->adapter;

	/* sysctl nodes for the queues; freed in t4_teardown_eth_queues */
	if (sysctl_ctx_init(&pi->ctx) == 0) {
		struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
		struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

		pi->oid_rxq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
		    "rxq", CTLFLAG_RD, NULL, "rx queues");
		pi->oid_txq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
		    "txq", CTLFLAG_RD, NULL, "tx queues");
	}

	for_each_rxq(pi, i, rxq) {

		snprintf(name, sizeof(name), "%s rxq%d-iq",
		    device_get_nameunit(pi->dev), i);
		init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
		    pi->qsize_rxq, RX_IQ_ESIZE, t4_eth_rx, name);

		snprintf(name, sizeof(name), "%s rxq%d-fl",
		    device_get_nameunit(pi->dev), i);
		init_fl(&rxq->fl, pi->qsize_rxq / 8, name);

		/*
		 * Pick the interrupt queue/vector that services this rxq; in
		 * shared-interrupt mode the index wraps over the intrqs.
		 */
		intr_idx = pi->first_rxq + i;
		if (sc->flags & INTR_SHARED)
			intr_idx %= NINTRQ(sc);

		rc = alloc_rxq(pi, rxq, intr_idx, i);
		if (rc != 0)
			goto done;
	}

	for_each_txq(pi, i, txq) {

		snprintf(name, sizeof(name), "%s txq%d",
		    device_get_nameunit(pi->dev), i);
		init_eq(&txq->eq, pi->qsize_txq, name);

		rc = alloc_txq(pi, txq, i);
		if (rc != 0)
			goto done;
	}

done:
	/* Partial failure: undo everything that was set up above. */
	if (rc)
		t4_teardown_eth_queues(pi);

	return (rc);
}
46054e4ee71SNavdeep Parhar 
46154e4ee71SNavdeep Parhar /*
46254e4ee71SNavdeep Parhar  * Idempotent
46354e4ee71SNavdeep Parhar  */
46454e4ee71SNavdeep Parhar int
46554e4ee71SNavdeep Parhar t4_teardown_eth_queues(struct port_info *pi)
46654e4ee71SNavdeep Parhar {
46754e4ee71SNavdeep Parhar 	int i;
46854e4ee71SNavdeep Parhar 	struct sge_rxq *rxq;
46954e4ee71SNavdeep Parhar 	struct sge_txq *txq;
47054e4ee71SNavdeep Parhar 
47154e4ee71SNavdeep Parhar 	/* Do this before freeing the queues */
47254e4ee71SNavdeep Parhar 	if (pi->oid_txq || pi->oid_rxq) {
47354e4ee71SNavdeep Parhar 		sysctl_ctx_free(&pi->ctx);
47454e4ee71SNavdeep Parhar 		pi->oid_txq = pi->oid_rxq = NULL;
47554e4ee71SNavdeep Parhar 	}
47654e4ee71SNavdeep Parhar 
47754e4ee71SNavdeep Parhar 	for_each_txq(pi, i, txq) {
47854e4ee71SNavdeep Parhar 		free_txq(pi, txq);
47954e4ee71SNavdeep Parhar 	}
48054e4ee71SNavdeep Parhar 
48154e4ee71SNavdeep Parhar 	for_each_rxq(pi, i, rxq) {
48254e4ee71SNavdeep Parhar 		free_rxq(pi, rxq);
48354e4ee71SNavdeep Parhar 	}
48454e4ee71SNavdeep Parhar 
48554e4ee71SNavdeep Parhar 	return (0);
48654e4ee71SNavdeep Parhar }
48754e4ee71SNavdeep Parhar 
48856599263SNavdeep Parhar /* Deals with errors and the first (and only) interrupt queue */
48954e4ee71SNavdeep Parhar void
49054e4ee71SNavdeep Parhar t4_intr_all(void *arg)
49154e4ee71SNavdeep Parhar {
49254e4ee71SNavdeep Parhar 	struct adapter *sc = arg;
49354e4ee71SNavdeep Parhar 
49454e4ee71SNavdeep Parhar 	t4_intr_err(arg);
49556599263SNavdeep Parhar 	t4_intr(&sc->sge.intrq[0]);
49654e4ee71SNavdeep Parhar }
49754e4ee71SNavdeep Parhar 
/* Deals with interrupts, and a few CPLs, on the given interrupt queue */
void
t4_intr(void *arg)
{
	struct sge_iq *iq = arg, *q;
	struct adapter *sc = iq->adapter;
	struct rsp_ctrl *ctrl;
	const struct rss_header *rss;
	int ndesc_pending = 0, ndesc_total = 0;
	int qid, rsp_type;

	/* Claim the queue; bail if another thread is already servicing it. */
	if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
		return;

	while (is_new_response(iq, &ctrl)) {

		rmb();	/* read the descriptor only after it is seen as new */

		rss = (const void *)iq->cdesc;
		rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);

		/* CPL messages delivered directly on this queue. */
		if (__predict_false(rsp_type == X_RSPD_TYPE_CPL)) {
			handle_cpl(sc, iq);
			goto nextdesc;
		}

		/* Otherwise the entry names another ingress queue to run. */
		qid = ntohl(ctrl->pldbuflen_qid) - sc->sge.iq_start;
		q = sc->sge.iqmap[qid];

		/* Run q's handler only if nobody else is servicing it. */
		if (atomic_cmpset_32(&q->state, IQS_IDLE, IQS_BUSY)) {
			q->handler(q);
			atomic_cmpset_32(&q->state, IQS_BUSY, IQS_IDLE);
		}

		/* NOTE(review): ndesc_total is only written, never read. */
nextdesc:	ndesc_total++;
		/* Return credits periodically to keep the iq from filling. */
		if (++ndesc_pending >= iq->qsize / 4) {
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(ndesc_pending) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(
				V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc_pending = 0;
		}

		iq_next(iq);
	}

	/* Final credit return; re-arm the interrupt with iq->intr_params. */
	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndesc_pending) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
}
55054e4ee71SNavdeep Parhar 
55154e4ee71SNavdeep Parhar /* Deals with error interrupts */
55254e4ee71SNavdeep Parhar void
55354e4ee71SNavdeep Parhar t4_intr_err(void *arg)
55454e4ee71SNavdeep Parhar {
55554e4ee71SNavdeep Parhar 	struct adapter *sc = arg;
55654e4ee71SNavdeep Parhar 
55754e4ee71SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
55854e4ee71SNavdeep Parhar 	t4_slow_intr_handler(sc);
55954e4ee71SNavdeep Parhar }
56054e4ee71SNavdeep Parhar 
56154e4ee71SNavdeep Parhar /* Deals with the firmware event queue */
56254e4ee71SNavdeep Parhar void
56354e4ee71SNavdeep Parhar t4_intr_evt(void *arg)
56454e4ee71SNavdeep Parhar {
56554e4ee71SNavdeep Parhar 	struct sge_iq *iq = arg;
5662be67d29SNavdeep Parhar 
56756599263SNavdeep Parhar 	if (atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY)) {
5682be67d29SNavdeep Parhar 		t4_evt_rx(arg);
5692be67d29SNavdeep Parhar 		atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
5702be67d29SNavdeep Parhar 	}
5712be67d29SNavdeep Parhar }
5722be67d29SNavdeep Parhar 
/*
 * Drain the firmware event queue.  Every entry is expected to be a CPL
 * message; anything else panics.
 */
static void
t4_evt_rx(void *arg)
{
	struct sge_iq *iq = arg;
	struct adapter *sc = iq->adapter;
	struct rsp_ctrl *ctrl;
	/* NOTE(review): ndesc_total is only written, never read. */
	int ndesc_pending = 0, ndesc_total = 0;

	KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__));

	while (is_new_response(iq, &ctrl)) {
		int rsp_type;

		rmb();	/* read the descriptor only after it is seen as new */

		rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
		if (__predict_false(rsp_type != X_RSPD_TYPE_CPL))
			panic("%s: unexpected rsp_type %d", __func__, rsp_type);

		handle_cpl(sc, iq);

		ndesc_total++;
		/* Return credits periodically to keep the iq from filling. */
		if (++ndesc_pending >= iq->qsize / 4) {
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(ndesc_pending) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(
				V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc_pending = 0;
		}

		iq_next(iq);
	}

	/* Final credit return; re-arm the interrupt with iq->intr_params. */
	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndesc_pending) |
	    V_INGRESSQID(iq->cntxt_id) | V_SEINTARM(iq->intr_params));
}
61054e4ee71SNavdeep Parhar 
#ifdef T4_PKT_TIMESTAMP
/* 8 bytes at the front of the mbuf are reserved for the rx timestamp. */
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif
616489eeba9SNavdeep Parhar 
61756599263SNavdeep Parhar static void
6182be67d29SNavdeep Parhar t4_eth_rx(void *arg)
61954e4ee71SNavdeep Parhar {
62054e4ee71SNavdeep Parhar 	struct sge_rxq *rxq = arg;
62154e4ee71SNavdeep Parhar 	struct sge_iq *iq = arg;
62229ca78e1SNavdeep Parhar 	struct adapter *sc = iq->adapter;
62354e4ee71SNavdeep Parhar 	struct rsp_ctrl *ctrl;
62429ca78e1SNavdeep Parhar 	struct ifnet *ifp = rxq->ifp;
6257d29df59SNavdeep Parhar 	struct sge_fl *fl = &rxq->fl;
6267d29df59SNavdeep Parhar 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx], *sd_next;
62754e4ee71SNavdeep Parhar 	const struct rss_header *rss;
62854e4ee71SNavdeep Parhar 	const struct cpl_rx_pkt *cpl;
62954e4ee71SNavdeep Parhar 	uint32_t len;
6307d29df59SNavdeep Parhar 	int ndescs = 0, i;
63154e4ee71SNavdeep Parhar 	struct mbuf *m0, *m;
63254e4ee71SNavdeep Parhar #ifdef INET
63354e4ee71SNavdeep Parhar 	struct lro_ctrl *lro = &rxq->lro;
63454e4ee71SNavdeep Parhar 	struct lro_entry *l;
63554e4ee71SNavdeep Parhar #endif
63654e4ee71SNavdeep Parhar 
6377d29df59SNavdeep Parhar 	prefetch(sd->m);
6387d29df59SNavdeep Parhar 	prefetch(sd->cl);
6397d29df59SNavdeep Parhar 
64054e4ee71SNavdeep Parhar 	iq->intr_next = iq->intr_params;
64154e4ee71SNavdeep Parhar 	while (is_new_response(iq, &ctrl)) {
64254e4ee71SNavdeep Parhar 
64354e4ee71SNavdeep Parhar 		rmb();
64454e4ee71SNavdeep Parhar 
64554e4ee71SNavdeep Parhar 		rss = (const void *)iq->cdesc;
6467d29df59SNavdeep Parhar 		i = G_RSPD_TYPE(ctrl->u.type_gen);
64754e4ee71SNavdeep Parhar 
6487d29df59SNavdeep Parhar 		KASSERT(i == X_RSPD_TYPE_FLBUF && rss->opcode == CPL_RX_PKT,
64956599263SNavdeep Parhar 		    ("%s: unexpected type %d CPL opcode 0x%x",
65056599263SNavdeep Parhar 		    __func__, i, rss->opcode));
65154e4ee71SNavdeep Parhar 
6527d29df59SNavdeep Parhar 		sd_next = sd + 1;
6537d29df59SNavdeep Parhar 		if (__predict_false(fl->cidx + 1 == fl->cap))
6547d29df59SNavdeep Parhar 			sd_next = fl->sdesc;
6557d29df59SNavdeep Parhar 		prefetch(sd_next->m);
6567d29df59SNavdeep Parhar 		prefetch(sd_next->cl);
6577d29df59SNavdeep Parhar 
6587d29df59SNavdeep Parhar 		cpl = (const void *)(rss + 1);
6597d29df59SNavdeep Parhar 
6607d29df59SNavdeep Parhar 		m0 = sd->m;
6617d29df59SNavdeep Parhar 		sd->m = NULL;	/* consumed */
66254e4ee71SNavdeep Parhar 
66354e4ee71SNavdeep Parhar 		len = be32toh(ctrl->pldbuflen_qid);
6647d29df59SNavdeep Parhar 		if (__predict_false((len & F_RSPD_NEWBUF) == 0))
6657d29df59SNavdeep Parhar 			panic("%s: cannot handle packed frames", __func__);
66654e4ee71SNavdeep Parhar 		len = G_RSPD_LEN(len);
6677d29df59SNavdeep Parhar 
6687d29df59SNavdeep Parhar 		bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
6697d29df59SNavdeep Parhar 		    BUS_DMASYNC_POSTREAD);
6707d29df59SNavdeep Parhar 
67194586193SNavdeep Parhar 		m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR);
672489eeba9SNavdeep Parhar 
673489eeba9SNavdeep Parhar #ifdef T4_PKT_TIMESTAMP
674489eeba9SNavdeep Parhar 		*mtod(m0, uint64_t *) =
675489eeba9SNavdeep Parhar 		    be64toh(ctrl->u.last_flit & 0xfffffffffffffff);
676489eeba9SNavdeep Parhar 		m0->m_data += 8;
677489eeba9SNavdeep Parhar 
678489eeba9SNavdeep Parhar 		/*
679489eeba9SNavdeep Parhar 		 * 60 bit timestamp value is *(uint64_t *)m0->m_pktdat.  Note
680489eeba9SNavdeep Parhar 		 * that it is in the leading free-space (see M_LEADINGSPACE) in
681489eeba9SNavdeep Parhar 		 * the mbuf.  The kernel can clobber it during a pullup,
682489eeba9SNavdeep Parhar 		 * m_copymdata, etc.  You need to make sure that the mbuf
683489eeba9SNavdeep Parhar 		 * reaches you unmolested if you care about the timestamp.
684489eeba9SNavdeep Parhar 		 */
685489eeba9SNavdeep Parhar #endif
686489eeba9SNavdeep Parhar 
687489eeba9SNavdeep Parhar 		if (len < RX_COPY_THRESHOLD) {
6887d29df59SNavdeep Parhar 			/* copy data to mbuf, buffer will be recycled */
6897d29df59SNavdeep Parhar 			bcopy(sd->cl, mtod(m0, caddr_t), len);
6907d29df59SNavdeep Parhar 			m0->m_len = len;
6917d29df59SNavdeep Parhar 		} else {
6927d29df59SNavdeep Parhar 			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
6937d29df59SNavdeep Parhar 			m_cljset(m0, sd->cl, FL_BUF_TYPE(sd->tag_idx));
6947d29df59SNavdeep Parhar 			sd->cl = NULL;	/* consumed */
6957d29df59SNavdeep Parhar 			m0->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
69654e4ee71SNavdeep Parhar 		}
69754e4ee71SNavdeep Parhar 
69854e4ee71SNavdeep Parhar 		len -= FL_PKTSHIFT;
69954e4ee71SNavdeep Parhar 		m0->m_len -= FL_PKTSHIFT;
70054e4ee71SNavdeep Parhar 		m0->m_data += FL_PKTSHIFT;
70154e4ee71SNavdeep Parhar 
70254e4ee71SNavdeep Parhar 		m0->m_pkthdr.len = len;
70354e4ee71SNavdeep Parhar 		m0->m_pkthdr.rcvif = ifp;
70454e4ee71SNavdeep Parhar 		m0->m_flags |= M_FLOWID;
70554e4ee71SNavdeep Parhar 		m0->m_pkthdr.flowid = rss->hash_val;
70654e4ee71SNavdeep Parhar 
70754e4ee71SNavdeep Parhar 		if (cpl->csum_calc && !cpl->err_vec &&
70854e4ee71SNavdeep Parhar 		    ifp->if_capenable & IFCAP_RXCSUM) {
70954e4ee71SNavdeep Parhar 			m0->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
71054e4ee71SNavdeep Parhar 			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
71154e4ee71SNavdeep Parhar 			if (cpl->ip_frag)
71254e4ee71SNavdeep Parhar 				m0->m_pkthdr.csum_data = be16toh(cpl->csum);
71354e4ee71SNavdeep Parhar 			else
71454e4ee71SNavdeep Parhar 				m0->m_pkthdr.csum_data = 0xffff;
71554e4ee71SNavdeep Parhar 			rxq->rxcsum++;
71654e4ee71SNavdeep Parhar 		}
71754e4ee71SNavdeep Parhar 
71854e4ee71SNavdeep Parhar 		if (cpl->vlan_ex) {
71954e4ee71SNavdeep Parhar 			m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
72054e4ee71SNavdeep Parhar 			m0->m_flags |= M_VLANTAG;
72154e4ee71SNavdeep Parhar 			rxq->vlan_extraction++;
72254e4ee71SNavdeep Parhar 		}
72354e4ee71SNavdeep Parhar 
7247d29df59SNavdeep Parhar 		i = 1;	/* # of fl sdesc used */
7257d29df59SNavdeep Parhar 		sd = sd_next;
7267d29df59SNavdeep Parhar 		if (__predict_false(++fl->cidx == fl->cap))
7277d29df59SNavdeep Parhar 			fl->cidx = 0;
7287d29df59SNavdeep Parhar 
72954e4ee71SNavdeep Parhar 		len -= m0->m_len;
73054e4ee71SNavdeep Parhar 		m = m0;
73154e4ee71SNavdeep Parhar 		while (len) {
7327d29df59SNavdeep Parhar 			i++;
73354e4ee71SNavdeep Parhar 
7347d29df59SNavdeep Parhar 			sd_next = sd + 1;
7357d29df59SNavdeep Parhar 			if (__predict_false(fl->cidx + 1 == fl->cap))
7367d29df59SNavdeep Parhar 				sd_next = fl->sdesc;
7377d29df59SNavdeep Parhar 			prefetch(sd_next->m);
7387d29df59SNavdeep Parhar 			prefetch(sd_next->cl);
7397d29df59SNavdeep Parhar 
7407d29df59SNavdeep Parhar 			m->m_next = sd->m;
7417d29df59SNavdeep Parhar 			sd->m = NULL;	/* consumed */
74254e4ee71SNavdeep Parhar 			m = m->m_next;
7437d29df59SNavdeep Parhar 
7447d29df59SNavdeep Parhar 			bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
7457d29df59SNavdeep Parhar 			    BUS_DMASYNC_POSTREAD);
7467d29df59SNavdeep Parhar 
74794586193SNavdeep Parhar 			m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
7487d29df59SNavdeep Parhar 			if (len <= MLEN) {
7497d29df59SNavdeep Parhar 				bcopy(sd->cl, mtod(m, caddr_t), len);
7507d29df59SNavdeep Parhar 				m->m_len = len;
7517d29df59SNavdeep Parhar 			} else {
7527d29df59SNavdeep Parhar 				bus_dmamap_unload(fl->tag[sd->tag_idx],
7537d29df59SNavdeep Parhar 				    sd->map);
7547d29df59SNavdeep Parhar 				m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
7557d29df59SNavdeep Parhar 				sd->cl = NULL;	/* consumed */
7567d29df59SNavdeep Parhar 				m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
7577d29df59SNavdeep Parhar 			}
7587d29df59SNavdeep Parhar 
7597d29df59SNavdeep Parhar 			i++;
7607d29df59SNavdeep Parhar 			sd = sd_next;
7617d29df59SNavdeep Parhar 			if (__predict_false(++fl->cidx == fl->cap))
7627d29df59SNavdeep Parhar 				fl->cidx = 0;
7637d29df59SNavdeep Parhar 
76454e4ee71SNavdeep Parhar 			len -= m->m_len;
76554e4ee71SNavdeep Parhar 		}
7667d29df59SNavdeep Parhar 
76754e4ee71SNavdeep Parhar #ifdef INET
76854e4ee71SNavdeep Parhar 		if (cpl->l2info & htobe32(F_RXF_LRO) &&
76954e4ee71SNavdeep Parhar 		    rxq->flags & RXQ_LRO_ENABLED &&
77054e4ee71SNavdeep Parhar 		    tcp_lro_rx(lro, m0, 0) == 0) {
77154e4ee71SNavdeep Parhar 			/* queued for LRO */
77254e4ee71SNavdeep Parhar 		} else
77354e4ee71SNavdeep Parhar #endif
7747d29df59SNavdeep Parhar 		ifp->if_input(ifp, m0);
77554e4ee71SNavdeep Parhar 
77654e4ee71SNavdeep Parhar 		FL_LOCK(fl);
7777d29df59SNavdeep Parhar 		fl->needed += i;
7787d29df59SNavdeep Parhar 		if (fl->needed >= 32)
779fb12416cSNavdeep Parhar 			refill_fl(sc, fl, 64, 32);
78054e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
78154e4ee71SNavdeep Parhar 
78256599263SNavdeep Parhar 		if (++ndescs > 32) {
78354e4ee71SNavdeep Parhar 			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
78454e4ee71SNavdeep Parhar 			    V_CIDXINC(ndescs) |
78554e4ee71SNavdeep Parhar 			    V_INGRESSQID((u32)iq->cntxt_id) |
78654e4ee71SNavdeep Parhar 			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
78754e4ee71SNavdeep Parhar 			ndescs = 0;
78854e4ee71SNavdeep Parhar 		}
78956599263SNavdeep Parhar 
79056599263SNavdeep Parhar 		iq_next(iq);
79154e4ee71SNavdeep Parhar 	}
79254e4ee71SNavdeep Parhar 
79354e4ee71SNavdeep Parhar #ifdef INET
79454e4ee71SNavdeep Parhar 	while (!SLIST_EMPTY(&lro->lro_active)) {
79554e4ee71SNavdeep Parhar 		l = SLIST_FIRST(&lro->lro_active);
79654e4ee71SNavdeep Parhar 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
79754e4ee71SNavdeep Parhar 		tcp_lro_flush(lro, l);
79854e4ee71SNavdeep Parhar 	}
79954e4ee71SNavdeep Parhar #endif
80054e4ee71SNavdeep Parhar 
80154e4ee71SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
80254e4ee71SNavdeep Parhar 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
80354e4ee71SNavdeep Parhar 
80454e4ee71SNavdeep Parhar 	FL_LOCK(fl);
8057d29df59SNavdeep Parhar 	if (fl->needed >= 32)
806fb12416cSNavdeep Parhar 		refill_fl(sc, fl, 128, 8);
80754e4ee71SNavdeep Parhar 	FL_UNLOCK(fl);
80854e4ee71SNavdeep Parhar }
80954e4ee71SNavdeep Parhar 
/*
 * Transmits a management work request (carried in an mbuf) on control
 * queue 0.  Returns ctrl_tx()'s result.
 */
int
t4_mgmt_tx(struct adapter *sc, struct mbuf *m)
{
	return ctrl_tx(sc, &sc->sge.ctrlq[0], m);
}
815f7dfe243SNavdeep Parhar 
/* A flit is 8 bytes, hence the divisions by 8 below. */

/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
#define TXPKTS_PKT_HDR ((\
    sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8)

/* Header of a coalesced tx WR, before SGL of first packet (in flits) */
#define TXPKTS_WR_HDR (\
    sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
    TXPKTS_PKT_HDR)

/* Header of a tx WR, before SGL of first packet (in flits) */
#define TXPKT_WR_HDR ((\
    sizeof(struct fw_eth_tx_pkt_wr) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8 )

/* Header of a tx LSO WR, before SGL of first packet (in flits) */
#define TXPKT_LSO_WR_HDR ((\
    sizeof(struct fw_eth_tx_pkt_wr) + \
    sizeof(struct cpl_tx_pkt_lso) + \
    sizeof(struct cpl_tx_pkt_core) \
    ) / 8 )
84054e4ee71SNavdeep Parhar 
/*
 * Transmits the given mbuf chain, then drains the txq's buf_ring, writing
 * frames to the hardware tx queue either as individual work requests or
 * coalesced into shared "txpkts" WRs when more traffic is waiting.  Stops
 * early when hardware descriptors or DMA resources run out, parking the
 * untransmitted frame (with the rest of its chain re-linked) in txq->m so a
 * later call can resume.  Must be called with the txq lock held.  Always
 * returns 0.
 */
int
t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
{
	struct port_info *pi = (void *)ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_eq *eq = &txq->eq;
	struct buf_ring *br = txq->br;
	struct mbuf *next;
	int rc, coalescing, can_reclaim;
	struct txpkts txpkts;
	struct sgl sgl;

	TXQ_LOCK_ASSERT_OWNED(txq);
	KASSERT(m, ("%s: called with nothing to do.", __func__));

	prefetch(&eq->desc[eq->pidx]);
	prefetch(&txq->sdesc[eq->pidx]);

	txpkts.npkt = 0;/* indicates there's nothing in txpkts */
	coalescing = 0;

	if (eq->avail < 8)
		reclaim_tx_descs(txq, 0, 8);

	/* First the frame(s) we were handed, then whatever is queued. */
	for (; m; m = next ? next : drbr_dequeue(ifp, br)) {

		/* Too few hardware descriptors left for any WR; stop. */
		if (eq->avail < 8)
			break;

		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		/* More frames waiting means coalescing is worthwhile. */
		if (next || buf_ring_peek(br))
			coalescing = 1;

		rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
		if (rc != 0) {
			if (rc == ENOMEM) {

				/* Short of resources, suspend tx */

				m->m_nextpkt = next;
				break;
			}

			/*
			 * Unrecoverable error for this packet, throw it away
			 * and move on to the next.  get_pkt_sgl may already
			 * have freed m (it will be NULL in that case and the
			 * m_freem here is still safe).
			 */

			m_freem(m);
			continue;
		}

		if (coalescing &&
		    add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {

			/* Successfully absorbed into txpkts */

			write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
			goto doorbell;
		}

		/*
		 * We weren't coalescing to begin with, or current frame could
		 * not be coalesced (add_to_txpkts flushes txpkts if a frame
		 * given to it can't be coalesced).  Either way there should be
		 * nothing in txpkts.
		 */
		KASSERT(txpkts.npkt == 0,
		    ("%s: txpkts not empty: %d", __func__, txpkts.npkt));

		/* We're sending out individual packets now */
		coalescing = 0;

		if (eq->avail < 8)
			reclaim_tx_descs(txq, 0, 8);
		rc = write_txpkt_wr(pi, txq, m, &sgl);
		if (rc != 0) {

			/* Short of hardware descriptors, suspend tx */

			/*
			 * This is an unlikely but expensive failure.  We've
			 * done all the hard work (DMA mappings etc.) and now we
			 * can't send out the packet.  What's worse, we have to
			 * spend even more time freeing up everything in sgl.
			 */
			txq->no_desc++;
			free_pkt_sgl(txq, &sgl);

			m->m_nextpkt = next;
			break;
		}

		ETHER_BPF_MTAP(ifp, m);
		/* nsegs == 0 means the payload was copied into the WR. */
		if (sgl.nsegs == 0)
			m_freem(m);

doorbell:
		/* Fewer and fewer doorbells as the queue fills up */
		if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2)))
		    ring_eq_db(sc, eq);

		can_reclaim = reclaimable(eq);
		if (can_reclaim >= 32)
			reclaim_tx_descs(txq, can_reclaim, 32);
	}

	/* Flush out any partially filled coalesced WR. */
	if (txpkts.npkt > 0)
		write_txpkts_wr(txq, &txpkts);

	/*
	 * m not NULL means there was an error but we haven't thrown it away.
	 * This can happen when we're short of tx descriptors (no_desc) or maybe
	 * even DMA maps (no_dmamap).  Either way, a credit flush and reclaim
	 * will get things going again.
	 *
	 * If eq->avail is already 0 we know a credit flush was requested in the
	 * WR that reduced it to 0 so we don't need another flush (we don't have
	 * any descriptor for a flush WR anyway, duh).
	 */
	if (m && eq->avail > 0 && !(eq->flags & EQ_CRFLUSHED)) {
		struct tx_sdesc *txsd = &txq->sdesc[eq->pidx];

		txsd->desc_used = 1;
		txsd->credits = 0;
		write_eqflush_wr(eq);
	}
	txq->m = m;

	if (eq->pending)
		ring_eq_db(sc, eq);

	can_reclaim = reclaimable(eq);
	if (can_reclaim >= 32)
		reclaim_tx_descs(txq, can_reclaim, 128);

	return (0);
}
98354e4ee71SNavdeep Parhar 
98454e4ee71SNavdeep Parhar void
98554e4ee71SNavdeep Parhar t4_update_fl_bufsize(struct ifnet *ifp)
98654e4ee71SNavdeep Parhar {
98754e4ee71SNavdeep Parhar 	struct port_info *pi = ifp->if_softc;
98854e4ee71SNavdeep Parhar 	struct sge_rxq *rxq;
98954e4ee71SNavdeep Parhar 	struct sge_fl *fl;
99054e4ee71SNavdeep Parhar 	int i;
99154e4ee71SNavdeep Parhar 
99254e4ee71SNavdeep Parhar 	for_each_rxq(pi, i, rxq) {
99354e4ee71SNavdeep Parhar 		fl = &rxq->fl;
99454e4ee71SNavdeep Parhar 
99554e4ee71SNavdeep Parhar 		FL_LOCK(fl);
99654e4ee71SNavdeep Parhar 		set_fl_tag_idx(fl, ifp->if_mtu);
99754e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
99854e4ee71SNavdeep Parhar 	}
99954e4ee71SNavdeep Parhar }
100054e4ee71SNavdeep Parhar 
100154e4ee71SNavdeep Parhar /*
100254e4ee71SNavdeep Parhar  * A non-NULL handler indicates this iq will not receive direct interrupts, the
100356599263SNavdeep Parhar  * handler will be invoked by an interrupt queue.
100454e4ee71SNavdeep Parhar  */
100554e4ee71SNavdeep Parhar static inline void
100654e4ee71SNavdeep Parhar init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
100754e4ee71SNavdeep Parhar     int qsize, int esize, iq_intr_handler_t *handler, char *name)
100854e4ee71SNavdeep Parhar {
100954e4ee71SNavdeep Parhar 	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
101054e4ee71SNavdeep Parhar 	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
101154e4ee71SNavdeep Parhar 	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
101254e4ee71SNavdeep Parhar 	    ("%s: bad pktc_idx %d", __func__, pktc_idx));
101354e4ee71SNavdeep Parhar 
101454e4ee71SNavdeep Parhar 	iq->flags = 0;
101554e4ee71SNavdeep Parhar 	iq->adapter = sc;
101654e4ee71SNavdeep Parhar 	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx) |
101754e4ee71SNavdeep Parhar 	    V_QINTR_CNT_EN(pktc_idx >= 0);
101854e4ee71SNavdeep Parhar 	iq->intr_pktc_idx = pktc_idx;
101954e4ee71SNavdeep Parhar 	iq->qsize = roundup(qsize, 16);		/* See FW_IQ_CMD/iqsize */
102054e4ee71SNavdeep Parhar 	iq->esize = max(esize, 16);		/* See FW_IQ_CMD/iqesize */
102154e4ee71SNavdeep Parhar 	iq->handler = handler;
102254e4ee71SNavdeep Parhar 	strlcpy(iq->lockname, name, sizeof(iq->lockname));
102354e4ee71SNavdeep Parhar }
102454e4ee71SNavdeep Parhar 
/* Fills in the software state of a freelist: size and lock name only. */
static inline void
init_fl(struct sge_fl *fl, int qsize, char *name)
{
	fl->qsize = qsize;
	strlcpy(fl->lockname, name, sizeof(fl->lockname));
}
103154e4ee71SNavdeep Parhar 
/* Fills in the software state of an egress queue: size and lock name only. */
static inline void
init_eq(struct sge_eq *eq, int qsize, char *name)
{
	eq->qsize = qsize;
	strlcpy(eq->lockname, name, sizeof(eq->lockname));
}
103854e4ee71SNavdeep Parhar 
/*
 * Allocates a DMA-able ring of len bytes: creates a tag, allocates zeroed
 * coherent memory, and loads the map to obtain the bus address.  On success
 * *tag, *map, *pa, and *va describe the ring; on failure everything built so
 * far is torn down via free_ring and an errno is returned.
 *
 * NOTE(review): the failure path passes *tag/*map/*pa/*va to free_ring even
 * when an earlier step never set them; this is only safe if callers supply
 * zeroed storage for the out-parameters — confirm before calling with
 * uninitialized (e.g. stack) variables.
 */
static int
alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_addr_t *pa, void **va)
{
	int rc;

	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
		goto done;
	}

	rc = bus_dmamem_alloc(*tag, va,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
		goto done;
	}

	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
	if (rc != 0) {
		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
		goto done;
	}
done:
	if (rc)
		free_ring(sc, *tag, *map, *pa, *va);

	return (rc);
}
107054e4ee71SNavdeep Parhar 
107154e4ee71SNavdeep Parhar static int
107254e4ee71SNavdeep Parhar free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
107354e4ee71SNavdeep Parhar     bus_addr_t pa, void *va)
107454e4ee71SNavdeep Parhar {
107554e4ee71SNavdeep Parhar 	if (pa)
107654e4ee71SNavdeep Parhar 		bus_dmamap_unload(tag, map);
107754e4ee71SNavdeep Parhar 	if (va)
107854e4ee71SNavdeep Parhar 		bus_dmamem_free(tag, va, map);
107954e4ee71SNavdeep Parhar 	if (tag)
108054e4ee71SNavdeep Parhar 		bus_dma_tag_destroy(tag);
108154e4ee71SNavdeep Parhar 
108254e4ee71SNavdeep Parhar 	return (0);
108354e4ee71SNavdeep Parhar }
108454e4ee71SNavdeep Parhar 
108554e4ee71SNavdeep Parhar /*
108654e4ee71SNavdeep Parhar  * Allocates the ring for an ingress queue and an optional freelist.  If the
108754e4ee71SNavdeep Parhar  * freelist is specified it will be allocated and then associated with the
108854e4ee71SNavdeep Parhar  * ingress queue.
108954e4ee71SNavdeep Parhar  *
109054e4ee71SNavdeep Parhar  * Returns errno on failure.  Resources allocated up to that point may still be
109154e4ee71SNavdeep Parhar  * allocated.  Caller is responsible for cleanup in case this function fails.
109254e4ee71SNavdeep Parhar  *
109354e4ee71SNavdeep Parhar  * If the ingress queue will take interrupts directly (iq->handler == NULL) then
109454e4ee71SNavdeep Parhar  * the intr_idx specifies the vector, starting from 0.  Otherwise it specifies
109556599263SNavdeep Parhar  * the index of the interrupt queue to which its interrupts will be forwarded.
109654e4ee71SNavdeep Parhar  */
109754e4ee71SNavdeep Parhar static int
109854e4ee71SNavdeep Parhar alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
1099bc14b14dSNavdeep Parhar     int intr_idx, int cong)
110054e4ee71SNavdeep Parhar {
110154e4ee71SNavdeep Parhar 	int rc, i, cntxt_id;
110254e4ee71SNavdeep Parhar 	size_t len;
110354e4ee71SNavdeep Parhar 	struct fw_iq_cmd c;
110454e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
110554e4ee71SNavdeep Parhar 	__be32 v = 0;
110654e4ee71SNavdeep Parhar 
110754e4ee71SNavdeep Parhar 	len = iq->qsize * iq->esize;
110854e4ee71SNavdeep Parhar 	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
110954e4ee71SNavdeep Parhar 	    (void **)&iq->desc);
111054e4ee71SNavdeep Parhar 	if (rc != 0)
111154e4ee71SNavdeep Parhar 		return (rc);
111254e4ee71SNavdeep Parhar 
111354e4ee71SNavdeep Parhar 	bzero(&c, sizeof(c));
111454e4ee71SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
111554e4ee71SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
111654e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_VFN(0));
111754e4ee71SNavdeep Parhar 
111854e4ee71SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
111954e4ee71SNavdeep Parhar 	    FW_LEN16(c));
112054e4ee71SNavdeep Parhar 
112154e4ee71SNavdeep Parhar 	/* Special handling for firmware event queue */
112254e4ee71SNavdeep Parhar 	if (iq == &sc->sge.fwq)
112354e4ee71SNavdeep Parhar 		v |= F_FW_IQ_CMD_IQASYNCH;
112454e4ee71SNavdeep Parhar 
112554e4ee71SNavdeep Parhar 	if (iq->handler) {
112656599263SNavdeep Parhar 		KASSERT(intr_idx < NINTRQ(sc),
112754e4ee71SNavdeep Parhar 		    ("%s: invalid indirect intr_idx %d", __func__, intr_idx));
112854e4ee71SNavdeep Parhar 		v |= F_FW_IQ_CMD_IQANDST;
112956599263SNavdeep Parhar 		v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.intrq[intr_idx].abs_id);
113054e4ee71SNavdeep Parhar 	} else {
113154e4ee71SNavdeep Parhar 		KASSERT(intr_idx < sc->intr_count,
113254e4ee71SNavdeep Parhar 		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
113354e4ee71SNavdeep Parhar 		v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
113454e4ee71SNavdeep Parhar 	}
113554e4ee71SNavdeep Parhar 
113654e4ee71SNavdeep Parhar 	c.type_to_iqandstindex = htobe32(v |
113754e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
113854e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_VIID(pi->viid) |
113954e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
114054e4ee71SNavdeep Parhar 	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
114154e4ee71SNavdeep Parhar 	    F_FW_IQ_CMD_IQGTSMODE |
114254e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
114354e4ee71SNavdeep Parhar 	    V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
114454e4ee71SNavdeep Parhar 	c.iqsize = htobe16(iq->qsize);
114554e4ee71SNavdeep Parhar 	c.iqaddr = htobe64(iq->ba);
1146bc14b14dSNavdeep Parhar 	if (cong >= 0)
1147bc14b14dSNavdeep Parhar 		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
114854e4ee71SNavdeep Parhar 
114954e4ee71SNavdeep Parhar 	if (fl) {
115054e4ee71SNavdeep Parhar 		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
115154e4ee71SNavdeep Parhar 
115254e4ee71SNavdeep Parhar 		for (i = 0; i < FL_BUF_SIZES; i++) {
115354e4ee71SNavdeep Parhar 
115454e4ee71SNavdeep Parhar 			/*
115554e4ee71SNavdeep Parhar 			 * A freelist buffer must be 16 byte aligned as the SGE
115654e4ee71SNavdeep Parhar 			 * uses the low 4 bits of the bus addr to figure out the
115754e4ee71SNavdeep Parhar 			 * buffer size.
115854e4ee71SNavdeep Parhar 			 */
115954e4ee71SNavdeep Parhar 			rc = bus_dma_tag_create(sc->dmat, 16, 0,
116054e4ee71SNavdeep Parhar 			    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
116154e4ee71SNavdeep Parhar 			    FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW,
116254e4ee71SNavdeep Parhar 			    NULL, NULL, &fl->tag[i]);
116354e4ee71SNavdeep Parhar 			if (rc != 0) {
116454e4ee71SNavdeep Parhar 				device_printf(sc->dev,
116554e4ee71SNavdeep Parhar 				    "failed to create fl DMA tag[%d]: %d\n",
116654e4ee71SNavdeep Parhar 				    i, rc);
116754e4ee71SNavdeep Parhar 				return (rc);
116854e4ee71SNavdeep Parhar 			}
116954e4ee71SNavdeep Parhar 		}
117054e4ee71SNavdeep Parhar 		len = fl->qsize * RX_FL_ESIZE;
117154e4ee71SNavdeep Parhar 		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
117254e4ee71SNavdeep Parhar 		    &fl->ba, (void **)&fl->desc);
117354e4ee71SNavdeep Parhar 		if (rc)
117454e4ee71SNavdeep Parhar 			return (rc);
117554e4ee71SNavdeep Parhar 
117654e4ee71SNavdeep Parhar 		/* Allocate space for one software descriptor per buffer. */
117754e4ee71SNavdeep Parhar 		fl->cap = (fl->qsize - SPG_LEN / RX_FL_ESIZE) * 8;
117854e4ee71SNavdeep Parhar 		FL_LOCK(fl);
117954e4ee71SNavdeep Parhar 		set_fl_tag_idx(fl, pi->ifp->if_mtu);
118054e4ee71SNavdeep Parhar 		rc = alloc_fl_sdesc(fl);
118154e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
118254e4ee71SNavdeep Parhar 		if (rc != 0) {
118354e4ee71SNavdeep Parhar 			device_printf(sc->dev,
118454e4ee71SNavdeep Parhar 			    "failed to setup fl software descriptors: %d\n",
118554e4ee71SNavdeep Parhar 			    rc);
118654e4ee71SNavdeep Parhar 			return (rc);
118754e4ee71SNavdeep Parhar 		}
1188fb12416cSNavdeep Parhar 		fl->needed = fl->cap;
118954e4ee71SNavdeep Parhar 
119054e4ee71SNavdeep Parhar 		c.iqns_to_fl0congen =
1191bc14b14dSNavdeep Parhar 		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
1192bc14b14dSNavdeep Parhar 			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
1193bc14b14dSNavdeep Parhar 			F_FW_IQ_CMD_FL0PADEN);
1194bc14b14dSNavdeep Parhar 		if (cong >= 0) {
1195bc14b14dSNavdeep Parhar 			c.iqns_to_fl0congen |=
1196bc14b14dSNavdeep Parhar 				htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
1197bc14b14dSNavdeep Parhar 				    F_FW_IQ_CMD_FL0CONGCIF |
1198bc14b14dSNavdeep Parhar 				    F_FW_IQ_CMD_FL0CONGEN);
1199bc14b14dSNavdeep Parhar 		}
120054e4ee71SNavdeep Parhar 		c.fl0dcaen_to_fl0cidxfthresh =
120154e4ee71SNavdeep Parhar 		    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
120254e4ee71SNavdeep Parhar 			V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
120354e4ee71SNavdeep Parhar 		c.fl0size = htobe16(fl->qsize);
120454e4ee71SNavdeep Parhar 		c.fl0addr = htobe64(fl->ba);
120554e4ee71SNavdeep Parhar 	}
120654e4ee71SNavdeep Parhar 
120754e4ee71SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
120854e4ee71SNavdeep Parhar 	if (rc != 0) {
120954e4ee71SNavdeep Parhar 		device_printf(sc->dev,
121054e4ee71SNavdeep Parhar 		    "failed to create ingress queue: %d\n", rc);
121154e4ee71SNavdeep Parhar 		return (rc);
121254e4ee71SNavdeep Parhar 	}
121354e4ee71SNavdeep Parhar 
121454e4ee71SNavdeep Parhar 	iq->cdesc = iq->desc;
121554e4ee71SNavdeep Parhar 	iq->cidx = 0;
121654e4ee71SNavdeep Parhar 	iq->gen = 1;
121754e4ee71SNavdeep Parhar 	iq->intr_next = iq->intr_params;
121854e4ee71SNavdeep Parhar 	iq->cntxt_id = be16toh(c.iqid);
121954e4ee71SNavdeep Parhar 	iq->abs_id = be16toh(c.physiqid);
122054e4ee71SNavdeep Parhar 	iq->flags |= (IQ_ALLOCATED | IQ_STARTED);
122154e4ee71SNavdeep Parhar 
122254e4ee71SNavdeep Parhar 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
122354e4ee71SNavdeep Parhar 	KASSERT(cntxt_id < sc->sge.niq,
122454e4ee71SNavdeep Parhar 	    ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
122554e4ee71SNavdeep Parhar 	    cntxt_id, sc->sge.niq - 1));
122654e4ee71SNavdeep Parhar 	sc->sge.iqmap[cntxt_id] = iq;
122754e4ee71SNavdeep Parhar 
122854e4ee71SNavdeep Parhar 	if (fl) {
122954e4ee71SNavdeep Parhar 		fl->cntxt_id = be16toh(c.fl0id);
123054e4ee71SNavdeep Parhar 		fl->pidx = fl->cidx = 0;
123154e4ee71SNavdeep Parhar 
12329f1f7ec9SNavdeep Parhar 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
123354e4ee71SNavdeep Parhar 		KASSERT(cntxt_id < sc->sge.neq,
123454e4ee71SNavdeep Parhar 		    ("%s: fl->cntxt_id (%d) more than the max (%d)", __func__,
123554e4ee71SNavdeep Parhar 		    cntxt_id, sc->sge.neq - 1));
123654e4ee71SNavdeep Parhar 		sc->sge.eqmap[cntxt_id] = (void *)fl;
123754e4ee71SNavdeep Parhar 
123854e4ee71SNavdeep Parhar 		FL_LOCK(fl);
12399b4d7b4eSNavdeep Parhar 		/* Just enough to make sure it doesn't starve right away. */
12409b4d7b4eSNavdeep Parhar 		refill_fl(sc, fl, roundup(sc->sge.fl_starve_threshold, 8), 8);
124154e4ee71SNavdeep Parhar 		FL_UNLOCK(fl);
124254e4ee71SNavdeep Parhar 	}
124354e4ee71SNavdeep Parhar 
124454e4ee71SNavdeep Parhar 	/* Enable IQ interrupts */
12452be67d29SNavdeep Parhar 	atomic_store_rel_32(&iq->state, IQS_IDLE);
124654e4ee71SNavdeep Parhar 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
124754e4ee71SNavdeep Parhar 	    V_INGRESSQID(iq->cntxt_id));
124854e4ee71SNavdeep Parhar 
124954e4ee71SNavdeep Parhar 	return (0);
125054e4ee71SNavdeep Parhar }
125154e4ee71SNavdeep Parhar 
125254e4ee71SNavdeep Parhar /*
125354e4ee71SNavdeep Parhar  * This can be called with the iq/fl in any state - fully allocated and
125454e4ee71SNavdeep Parhar  * functional, partially allocated, even all-zeroed out.
125554e4ee71SNavdeep Parhar  */
125654e4ee71SNavdeep Parhar static int
125754e4ee71SNavdeep Parhar free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
125854e4ee71SNavdeep Parhar {
125954e4ee71SNavdeep Parhar 	int i, rc;
126054e4ee71SNavdeep Parhar 	struct adapter *sc = iq->adapter;
126154e4ee71SNavdeep Parhar 	device_t dev;
126254e4ee71SNavdeep Parhar 
126354e4ee71SNavdeep Parhar 	if (sc == NULL)
126454e4ee71SNavdeep Parhar 		return (0);	/* nothing to do */
126554e4ee71SNavdeep Parhar 
126654e4ee71SNavdeep Parhar 	dev = pi ? pi->dev : sc->dev;
126754e4ee71SNavdeep Parhar 
126854e4ee71SNavdeep Parhar 	if (iq->flags & IQ_STARTED) {
126954e4ee71SNavdeep Parhar 		rc = -t4_iq_start_stop(sc, sc->mbox, 0, sc->pf, 0,
127054e4ee71SNavdeep Parhar 		    iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
127154e4ee71SNavdeep Parhar 		if (rc != 0) {
127254e4ee71SNavdeep Parhar 			device_printf(dev,
127354e4ee71SNavdeep Parhar 			    "failed to stop queue %p: %d\n", iq, rc);
127454e4ee71SNavdeep Parhar 			return (rc);
127554e4ee71SNavdeep Parhar 		}
127654e4ee71SNavdeep Parhar 		iq->flags &= ~IQ_STARTED;
12772be67d29SNavdeep Parhar 
12782be67d29SNavdeep Parhar 		/* Synchronize with the interrupt handler */
12792be67d29SNavdeep Parhar 		while (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_DISABLED))
12802be67d29SNavdeep Parhar 			pause("iqfree", hz / 1000);
128154e4ee71SNavdeep Parhar 	}
128254e4ee71SNavdeep Parhar 
128354e4ee71SNavdeep Parhar 	if (iq->flags & IQ_ALLOCATED) {
128454e4ee71SNavdeep Parhar 
128554e4ee71SNavdeep Parhar 		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
128654e4ee71SNavdeep Parhar 		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
128754e4ee71SNavdeep Parhar 		    fl ? fl->cntxt_id : 0xffff, 0xffff);
128854e4ee71SNavdeep Parhar 		if (rc != 0) {
128954e4ee71SNavdeep Parhar 			device_printf(dev,
129054e4ee71SNavdeep Parhar 			    "failed to free queue %p: %d\n", iq, rc);
129154e4ee71SNavdeep Parhar 			return (rc);
129254e4ee71SNavdeep Parhar 		}
129354e4ee71SNavdeep Parhar 		iq->flags &= ~IQ_ALLOCATED;
129454e4ee71SNavdeep Parhar 	}
129554e4ee71SNavdeep Parhar 
129654e4ee71SNavdeep Parhar 	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
129754e4ee71SNavdeep Parhar 
129854e4ee71SNavdeep Parhar 	bzero(iq, sizeof(*iq));
129954e4ee71SNavdeep Parhar 
130054e4ee71SNavdeep Parhar 	if (fl) {
130154e4ee71SNavdeep Parhar 		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
130254e4ee71SNavdeep Parhar 		    fl->desc);
130354e4ee71SNavdeep Parhar 
130454e4ee71SNavdeep Parhar 		if (fl->sdesc) {
130554e4ee71SNavdeep Parhar 			FL_LOCK(fl);
130654e4ee71SNavdeep Parhar 			free_fl_sdesc(fl);
130754e4ee71SNavdeep Parhar 			FL_UNLOCK(fl);
130854e4ee71SNavdeep Parhar 		}
130954e4ee71SNavdeep Parhar 
131054e4ee71SNavdeep Parhar 		if (mtx_initialized(&fl->fl_lock))
131154e4ee71SNavdeep Parhar 			mtx_destroy(&fl->fl_lock);
131254e4ee71SNavdeep Parhar 
131354e4ee71SNavdeep Parhar 		for (i = 0; i < FL_BUF_SIZES; i++) {
131454e4ee71SNavdeep Parhar 			if (fl->tag[i])
131554e4ee71SNavdeep Parhar 				bus_dma_tag_destroy(fl->tag[i]);
131654e4ee71SNavdeep Parhar 		}
131754e4ee71SNavdeep Parhar 
131854e4ee71SNavdeep Parhar 		bzero(fl, sizeof(*fl));
131954e4ee71SNavdeep Parhar 	}
132054e4ee71SNavdeep Parhar 
132154e4ee71SNavdeep Parhar 	return (0);
132254e4ee71SNavdeep Parhar }
132354e4ee71SNavdeep Parhar 
132454e4ee71SNavdeep Parhar static int
132556599263SNavdeep Parhar alloc_intrq(struct adapter *sc, int port_idx, int intrq_idx, int intr_idx)
132654e4ee71SNavdeep Parhar {
132756599263SNavdeep Parhar 	int rc;
132856599263SNavdeep Parhar 	struct sysctl_oid *oid;
132956599263SNavdeep Parhar 	struct sysctl_oid_list *children;
133056599263SNavdeep Parhar 	char name[16];
133156599263SNavdeep Parhar 	struct sge_iq *intrq = &sc->sge.intrq[intrq_idx];
133256599263SNavdeep Parhar 
133356599263SNavdeep Parhar 	rc = alloc_iq_fl(sc->port[port_idx], intrq, NULL, intr_idx, -1);
133456599263SNavdeep Parhar 	if (rc != 0)
133556599263SNavdeep Parhar 		return (rc);
133656599263SNavdeep Parhar 
133756599263SNavdeep Parhar 	children = SYSCTL_CHILDREN(sc->oid_intrq);
133856599263SNavdeep Parhar 
133956599263SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", intrq_idx);
134056599263SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD,
134156599263SNavdeep Parhar 	    NULL, "interrupt queue");
134256599263SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
134356599263SNavdeep Parhar 
134456599263SNavdeep Parhar 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx",
134556599263SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &intrq->cidx, 0, sysctl_uint16, "I",
134656599263SNavdeep Parhar 	    "consumer index");
134756599263SNavdeep Parhar 
134856599263SNavdeep Parhar 	return (rc);
134954e4ee71SNavdeep Parhar }
135054e4ee71SNavdeep Parhar 
135154e4ee71SNavdeep Parhar static int
135256599263SNavdeep Parhar free_intrq(struct sge_iq *iq)
135356599263SNavdeep Parhar {
135456599263SNavdeep Parhar 	return free_iq_fl(NULL, iq, NULL);
135556599263SNavdeep Parhar 
135656599263SNavdeep Parhar }
135756599263SNavdeep Parhar 
135856599263SNavdeep Parhar static int
135956599263SNavdeep Parhar alloc_fwq(struct adapter *sc, int intr_idx)
136056599263SNavdeep Parhar {
136156599263SNavdeep Parhar 	int rc;
136256599263SNavdeep Parhar 	struct sysctl_oid_list *children;
136356599263SNavdeep Parhar 	struct sge_iq *fwq = &sc->sge.fwq;
136456599263SNavdeep Parhar 
136556599263SNavdeep Parhar 	rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1);
136656599263SNavdeep Parhar 	if (rc != 0)
136756599263SNavdeep Parhar 		return (rc);
136856599263SNavdeep Parhar 
136956599263SNavdeep Parhar 	children = SYSCTL_CHILDREN(sc->oid_fwq);
137056599263SNavdeep Parhar 
1371*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id",
1372*59bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I",
1373*59bc8ce0SNavdeep Parhar 	    "absolute id of the queue");
1374*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id",
1375*59bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I",
1376*59bc8ce0SNavdeep Parhar 	    "SGE context id of the queue");
137756599263SNavdeep Parhar 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx",
137856599263SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I",
137956599263SNavdeep Parhar 	    "consumer index");
138056599263SNavdeep Parhar 
138156599263SNavdeep Parhar 	return (rc);
138256599263SNavdeep Parhar }
138356599263SNavdeep Parhar 
138456599263SNavdeep Parhar static int
138556599263SNavdeep Parhar free_fwq(struct sge_iq *iq)
138654e4ee71SNavdeep Parhar {
138754e4ee71SNavdeep Parhar 	return free_iq_fl(NULL, iq, NULL);
138854e4ee71SNavdeep Parhar }
138954e4ee71SNavdeep Parhar 
139054e4ee71SNavdeep Parhar static int
139154e4ee71SNavdeep Parhar alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx)
139254e4ee71SNavdeep Parhar {
139354e4ee71SNavdeep Parhar 	int rc;
139454e4ee71SNavdeep Parhar 	struct sysctl_oid *oid;
139554e4ee71SNavdeep Parhar 	struct sysctl_oid_list *children;
139654e4ee71SNavdeep Parhar 	char name[16];
139754e4ee71SNavdeep Parhar 
1398bc14b14dSNavdeep Parhar 	rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, 1 << pi->tx_chan);
139954e4ee71SNavdeep Parhar 	if (rc != 0)
140054e4ee71SNavdeep Parhar 		return (rc);
140154e4ee71SNavdeep Parhar 
14029b4d7b4eSNavdeep Parhar 	FL_LOCK(&rxq->fl);
14039b4d7b4eSNavdeep Parhar 	refill_fl(pi->adapter, &rxq->fl, rxq->fl.needed / 8, 8);
14049b4d7b4eSNavdeep Parhar 	FL_UNLOCK(&rxq->fl);
14059b4d7b4eSNavdeep Parhar 
140654e4ee71SNavdeep Parhar #ifdef INET
140754e4ee71SNavdeep Parhar 	rc = tcp_lro_init(&rxq->lro);
140854e4ee71SNavdeep Parhar 	if (rc != 0)
140954e4ee71SNavdeep Parhar 		return (rc);
141054e4ee71SNavdeep Parhar 	rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */
141154e4ee71SNavdeep Parhar 
141254e4ee71SNavdeep Parhar 	if (pi->ifp->if_capenable & IFCAP_LRO)
141354e4ee71SNavdeep Parhar 		rxq->flags |= RXQ_LRO_ENABLED;
141454e4ee71SNavdeep Parhar #endif
141529ca78e1SNavdeep Parhar 	rxq->ifp = pi->ifp;
141654e4ee71SNavdeep Parhar 
141754e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(pi->oid_rxq);
141854e4ee71SNavdeep Parhar 
141954e4ee71SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
142054e4ee71SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
142154e4ee71SNavdeep Parhar 	    NULL, "rx queue");
142254e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
142354e4ee71SNavdeep Parhar 
1424af49c942SNavdeep Parhar 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
142556599263SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I",
1426af49c942SNavdeep Parhar 	    "absolute id of the queue");
1427*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
1428*59bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I",
1429*59bc8ce0SNavdeep Parhar 	    "SGE context id of the queue");
1430*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
1431*59bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I",
1432*59bc8ce0SNavdeep Parhar 	    "consumer index");
14337d29df59SNavdeep Parhar #ifdef INET
143454e4ee71SNavdeep Parhar 	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
143554e4ee71SNavdeep Parhar 	    &rxq->lro.lro_queued, 0, NULL);
143654e4ee71SNavdeep Parhar 	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
143754e4ee71SNavdeep Parhar 	    &rxq->lro.lro_flushed, 0, NULL);
14387d29df59SNavdeep Parhar #endif
143954e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
144054e4ee71SNavdeep Parhar 	    &rxq->rxcsum, "# of times hardware assisted with checksum");
144154e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
144254e4ee71SNavdeep Parhar 	    CTLFLAG_RD, &rxq->vlan_extraction,
144354e4ee71SNavdeep Parhar 	    "# of times hardware extracted 802.1Q tag");
144454e4ee71SNavdeep Parhar 
1445*59bc8ce0SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
1446*59bc8ce0SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
1447*59bc8ce0SNavdeep Parhar 	    NULL, "freelist");
1448*59bc8ce0SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
1449*59bc8ce0SNavdeep Parhar 
1450*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
1451*59bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->fl.cntxt_id, 0, sysctl_uint16, "I",
1452*59bc8ce0SNavdeep Parhar 	    "SGE context id of the queue");
1453*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
1454*59bc8ce0SNavdeep Parhar 	    &rxq->fl.cidx, 0, "consumer index");
1455*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
1456*59bc8ce0SNavdeep Parhar 	    &rxq->fl.pidx, 0, "producer index");
1457*59bc8ce0SNavdeep Parhar 
145854e4ee71SNavdeep Parhar 	return (rc);
145954e4ee71SNavdeep Parhar }
146054e4ee71SNavdeep Parhar 
146154e4ee71SNavdeep Parhar static int
146254e4ee71SNavdeep Parhar free_rxq(struct port_info *pi, struct sge_rxq *rxq)
146354e4ee71SNavdeep Parhar {
146454e4ee71SNavdeep Parhar 	int rc;
146554e4ee71SNavdeep Parhar 
146654e4ee71SNavdeep Parhar #ifdef INET
146754e4ee71SNavdeep Parhar 	if (rxq->lro.ifp) {
146854e4ee71SNavdeep Parhar 		tcp_lro_free(&rxq->lro);
146954e4ee71SNavdeep Parhar 		rxq->lro.ifp = NULL;
147054e4ee71SNavdeep Parhar 	}
147154e4ee71SNavdeep Parhar #endif
147254e4ee71SNavdeep Parhar 
147354e4ee71SNavdeep Parhar 	rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
147454e4ee71SNavdeep Parhar 	if (rc == 0)
147554e4ee71SNavdeep Parhar 		bzero(rxq, sizeof(*rxq));
147654e4ee71SNavdeep Parhar 
147754e4ee71SNavdeep Parhar 	return (rc);
147854e4ee71SNavdeep Parhar }
147954e4ee71SNavdeep Parhar 
148054e4ee71SNavdeep Parhar static int
1481f7dfe243SNavdeep Parhar alloc_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq, int idx)
1482f7dfe243SNavdeep Parhar {
1483f7dfe243SNavdeep Parhar 	int rc, cntxt_id;
1484f7dfe243SNavdeep Parhar 	size_t len;
1485f7dfe243SNavdeep Parhar 	struct fw_eq_ctrl_cmd c;
1486f7dfe243SNavdeep Parhar 	struct sge_eq *eq = &ctrlq->eq;
1487f7dfe243SNavdeep Parhar 	char name[16];
1488f7dfe243SNavdeep Parhar 	struct sysctl_oid *oid;
1489f7dfe243SNavdeep Parhar 	struct sysctl_oid_list *children;
1490f7dfe243SNavdeep Parhar 
1491f7dfe243SNavdeep Parhar 	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
1492f7dfe243SNavdeep Parhar 
1493f7dfe243SNavdeep Parhar 	len = eq->qsize * CTRL_EQ_ESIZE;
1494f7dfe243SNavdeep Parhar 	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
1495f7dfe243SNavdeep Parhar 	    &eq->ba, (void **)&eq->desc);
1496f7dfe243SNavdeep Parhar 	if (rc)
1497f7dfe243SNavdeep Parhar 		return (rc);
1498f7dfe243SNavdeep Parhar 
1499f7dfe243SNavdeep Parhar 	eq->cap = eq->qsize - SPG_LEN / CTRL_EQ_ESIZE;
1500f7dfe243SNavdeep Parhar 	eq->spg = (void *)&eq->desc[eq->cap];
1501f7dfe243SNavdeep Parhar 	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
150256599263SNavdeep Parhar 	if (sc->flags & INTR_SHARED)
150356599263SNavdeep Parhar 		eq->iqid = sc->sge.intrq[idx % NINTRQ(sc)].cntxt_id;
150456599263SNavdeep Parhar 	else
150556599263SNavdeep Parhar 		eq->iqid = sc->sge.intrq[sc->port[idx]->first_rxq].cntxt_id;
1506f7dfe243SNavdeep Parhar 
1507f7dfe243SNavdeep Parhar 	bzero(&c, sizeof(c));
1508f7dfe243SNavdeep Parhar 
1509f7dfe243SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
1510f7dfe243SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
1511f7dfe243SNavdeep Parhar 	    V_FW_EQ_CTRL_CMD_VFN(0));
1512f7dfe243SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
1513f7dfe243SNavdeep Parhar 	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
1514f7dfe243SNavdeep Parhar 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */
1515f7dfe243SNavdeep Parhar 	c.physeqid_pkd = htobe32(0);
1516f7dfe243SNavdeep Parhar 	c.fetchszm_to_iqid =
1517f7dfe243SNavdeep Parhar 	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
151856599263SNavdeep Parhar 		V_FW_EQ_CTRL_CMD_PCIECHN(sc->port[idx]->tx_chan) |
151956599263SNavdeep Parhar 		F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
1520f7dfe243SNavdeep Parhar 	c.dcaen_to_eqsize =
1521f7dfe243SNavdeep Parhar 	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1522f7dfe243SNavdeep Parhar 		V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1523f7dfe243SNavdeep Parhar 		V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
1524f7dfe243SNavdeep Parhar 		V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
1525f7dfe243SNavdeep Parhar 	c.eqaddr = htobe64(eq->ba);
1526f7dfe243SNavdeep Parhar 
1527f7dfe243SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1528f7dfe243SNavdeep Parhar 	if (rc != 0) {
1529f7dfe243SNavdeep Parhar 		device_printf(sc->dev,
1530f7dfe243SNavdeep Parhar 		    "failed to create control queue %d: %d\n", idx, rc);
1531f7dfe243SNavdeep Parhar 		return (rc);
1532f7dfe243SNavdeep Parhar 	}
1533f7dfe243SNavdeep Parhar 
1534f7dfe243SNavdeep Parhar 	eq->pidx = eq->cidx = 0;
1535f7dfe243SNavdeep Parhar 	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
1536f7dfe243SNavdeep Parhar 	eq->flags |= (EQ_ALLOCATED | EQ_STARTED);
1537f7dfe243SNavdeep Parhar 
1538f7dfe243SNavdeep Parhar 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1539f7dfe243SNavdeep Parhar 	KASSERT(cntxt_id < sc->sge.neq,
1540f7dfe243SNavdeep Parhar 	    ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1541f7dfe243SNavdeep Parhar 	    cntxt_id, sc->sge.neq - 1));
1542f7dfe243SNavdeep Parhar 	sc->sge.eqmap[cntxt_id] = eq;
1543f7dfe243SNavdeep Parhar 
1544f7dfe243SNavdeep Parhar 	children = SYSCTL_CHILDREN(sc->oid_ctrlq);
1545f7dfe243SNavdeep Parhar 
1546f7dfe243SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
1547f7dfe243SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD,
1548f7dfe243SNavdeep Parhar 	    NULL, "ctrl queue");
1549f7dfe243SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
1550f7dfe243SNavdeep Parhar 
155156599263SNavdeep Parhar 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "pidx",
155256599263SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &ctrlq->eq.pidx, 0, sysctl_uint16, "I",
155356599263SNavdeep Parhar 	    "producer index");
1554f7dfe243SNavdeep Parhar 	SYSCTL_ADD_UINT(&sc->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
1555f7dfe243SNavdeep Parhar 	    &ctrlq->no_desc, 0,
1556f7dfe243SNavdeep Parhar 	    "# of times ctrlq ran out of hardware descriptors");
1557f7dfe243SNavdeep Parhar 
1558f7dfe243SNavdeep Parhar 	return (rc);
1559f7dfe243SNavdeep Parhar }
1560f7dfe243SNavdeep Parhar 
1561f7dfe243SNavdeep Parhar static int
1562f7dfe243SNavdeep Parhar free_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq)
1563f7dfe243SNavdeep Parhar {
1564f7dfe243SNavdeep Parhar 	int rc;
1565f7dfe243SNavdeep Parhar 	struct sge_eq *eq = &ctrlq->eq;
1566f7dfe243SNavdeep Parhar 
1567f7dfe243SNavdeep Parhar 	if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {
1568b5a6d97eSNavdeep Parhar 		rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
1569f7dfe243SNavdeep Parhar 		if (rc != 0) {
1570f7dfe243SNavdeep Parhar 			device_printf(sc->dev,
1571f7dfe243SNavdeep Parhar 			    "failed to free ctrl queue %p: %d\n", eq, rc);
1572f7dfe243SNavdeep Parhar 			return (rc);
1573f7dfe243SNavdeep Parhar 		}
1574f7dfe243SNavdeep Parhar 		eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
1575f7dfe243SNavdeep Parhar 	}
1576f7dfe243SNavdeep Parhar 
1577f7dfe243SNavdeep Parhar 	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
1578f7dfe243SNavdeep Parhar 
1579f7dfe243SNavdeep Parhar 	if (mtx_initialized(&eq->eq_lock))
1580f7dfe243SNavdeep Parhar 		mtx_destroy(&eq->eq_lock);
1581f7dfe243SNavdeep Parhar 
1582f7dfe243SNavdeep Parhar 	bzero(ctrlq, sizeof(*ctrlq));
1583f7dfe243SNavdeep Parhar 	return (0);
1584f7dfe243SNavdeep Parhar }
1585f7dfe243SNavdeep Parhar 
1586f7dfe243SNavdeep Parhar static int
158754e4ee71SNavdeep Parhar alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
158854e4ee71SNavdeep Parhar {
158954e4ee71SNavdeep Parhar 	int rc, cntxt_id;
159054e4ee71SNavdeep Parhar 	size_t len;
159154e4ee71SNavdeep Parhar 	struct adapter *sc = pi->adapter;
159254e4ee71SNavdeep Parhar 	struct fw_eq_eth_cmd c;
159354e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
159454e4ee71SNavdeep Parhar 	char name[16];
159554e4ee71SNavdeep Parhar 	struct sysctl_oid *oid;
159654e4ee71SNavdeep Parhar 	struct sysctl_oid_list *children;
159756599263SNavdeep Parhar 	struct sge_iq *intrq;
159854e4ee71SNavdeep Parhar 
159929ca78e1SNavdeep Parhar 	txq->ifp = pi->ifp;
1600ecb79ca4SNavdeep Parhar 	TASK_INIT(&txq->resume_tx, 0, cxgbe_txq_start, txq);
1601ecb79ca4SNavdeep Parhar 
160254e4ee71SNavdeep Parhar 	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
160354e4ee71SNavdeep Parhar 
160454e4ee71SNavdeep Parhar 	len = eq->qsize * TX_EQ_ESIZE;
160554e4ee71SNavdeep Parhar 	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
160654e4ee71SNavdeep Parhar 	    &eq->ba, (void **)&eq->desc);
160754e4ee71SNavdeep Parhar 	if (rc)
160854e4ee71SNavdeep Parhar 		return (rc);
160954e4ee71SNavdeep Parhar 
161054e4ee71SNavdeep Parhar 	eq->cap = eq->qsize - SPG_LEN / TX_EQ_ESIZE;
161154e4ee71SNavdeep Parhar 	eq->spg = (void *)&eq->desc[eq->cap];
161254e4ee71SNavdeep Parhar 	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
1613f7dfe243SNavdeep Parhar 	txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
161454e4ee71SNavdeep Parhar 	    M_ZERO | M_WAITOK);
1615f7dfe243SNavdeep Parhar 	txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
161656599263SNavdeep Parhar 
161756599263SNavdeep Parhar 	intrq = &sc->sge.intrq[0];
161856599263SNavdeep Parhar 	if (sc->flags & INTR_SHARED)
161956599263SNavdeep Parhar 		eq->iqid = intrq[(pi->first_txq + idx) % NINTRQ(sc)].cntxt_id;
162056599263SNavdeep Parhar 	else
162156599263SNavdeep Parhar 		eq->iqid = intrq[pi->first_rxq + (idx % pi->nrxq)].cntxt_id;
162254e4ee71SNavdeep Parhar 
162354e4ee71SNavdeep Parhar 	rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
162454e4ee71SNavdeep Parhar 	    BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
1625f7dfe243SNavdeep Parhar 	    BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag);
162654e4ee71SNavdeep Parhar 	if (rc != 0) {
162754e4ee71SNavdeep Parhar 		device_printf(sc->dev,
162854e4ee71SNavdeep Parhar 		    "failed to create tx DMA tag: %d\n", rc);
162954e4ee71SNavdeep Parhar 		return (rc);
163054e4ee71SNavdeep Parhar 	}
163154e4ee71SNavdeep Parhar 
1632f7dfe243SNavdeep Parhar 	rc = alloc_tx_maps(txq);
163354e4ee71SNavdeep Parhar 	if (rc != 0) {
163454e4ee71SNavdeep Parhar 		device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
163554e4ee71SNavdeep Parhar 		return (rc);
163654e4ee71SNavdeep Parhar 	}
163754e4ee71SNavdeep Parhar 
163854e4ee71SNavdeep Parhar 	bzero(&c, sizeof(c));
163954e4ee71SNavdeep Parhar 
164054e4ee71SNavdeep Parhar 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
164154e4ee71SNavdeep Parhar 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
164254e4ee71SNavdeep Parhar 	    V_FW_EQ_ETH_CMD_VFN(0));
164354e4ee71SNavdeep Parhar 	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
164454e4ee71SNavdeep Parhar 	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
164554e4ee71SNavdeep Parhar 	c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
164654e4ee71SNavdeep Parhar 	c.fetchszm_to_iqid =
164754e4ee71SNavdeep Parhar 	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
1648bc14b14dSNavdeep Parhar 		V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
1649aa2457e1SNavdeep Parhar 		V_FW_EQ_ETH_CMD_IQID(eq->iqid));
165054e4ee71SNavdeep Parhar 	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
165154e4ee71SNavdeep Parhar 		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
165254e4ee71SNavdeep Parhar 		      V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
165354e4ee71SNavdeep Parhar 		      V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
165454e4ee71SNavdeep Parhar 	c.eqaddr = htobe64(eq->ba);
165554e4ee71SNavdeep Parhar 
165654e4ee71SNavdeep Parhar 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
165754e4ee71SNavdeep Parhar 	if (rc != 0) {
165854e4ee71SNavdeep Parhar 		device_printf(pi->dev,
165954e4ee71SNavdeep Parhar 		    "failed to create egress queue: %d\n", rc);
166054e4ee71SNavdeep Parhar 		return (rc);
166154e4ee71SNavdeep Parhar 	}
166254e4ee71SNavdeep Parhar 
166354e4ee71SNavdeep Parhar 	eq->pidx = eq->cidx = 0;
166454e4ee71SNavdeep Parhar 	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
166554e4ee71SNavdeep Parhar 	eq->flags |= (EQ_ALLOCATED | EQ_STARTED);
166654e4ee71SNavdeep Parhar 
166754e4ee71SNavdeep Parhar 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
166854e4ee71SNavdeep Parhar 	KASSERT(cntxt_id < sc->sge.neq,
166954e4ee71SNavdeep Parhar 	    ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
167054e4ee71SNavdeep Parhar 	    cntxt_id, sc->sge.neq - 1));
167154e4ee71SNavdeep Parhar 	sc->sge.eqmap[cntxt_id] = eq;
167254e4ee71SNavdeep Parhar 
167354e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(pi->oid_txq);
167454e4ee71SNavdeep Parhar 
167554e4ee71SNavdeep Parhar 	snprintf(name, sizeof(name), "%d", idx);
167654e4ee71SNavdeep Parhar 	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
167754e4ee71SNavdeep Parhar 	    NULL, "tx queue");
167854e4ee71SNavdeep Parhar 	children = SYSCTL_CHILDREN(oid);
167954e4ee71SNavdeep Parhar 
1680*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
1681*59bc8ce0SNavdeep Parhar 	    &eq->cntxt_id, 0, "SGE context id of the queue");
1682*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
1683*59bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I",
1684*59bc8ce0SNavdeep Parhar 	    "consumer index");
1685*59bc8ce0SNavdeep Parhar 	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx",
1686*59bc8ce0SNavdeep Parhar 	    CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I",
1687*59bc8ce0SNavdeep Parhar 	    "producer index");
1688*59bc8ce0SNavdeep Parhar 
168954e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
169054e4ee71SNavdeep Parhar 	    &txq->txcsum, "# of times hardware assisted with checksum");
169154e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
169254e4ee71SNavdeep Parhar 	    CTLFLAG_RD, &txq->vlan_insertion,
169354e4ee71SNavdeep Parhar 	    "# of times hardware inserted 802.1Q tag");
169454e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
169554e4ee71SNavdeep Parhar 	    &txq->tso_wrs, "# of IPv4 TSO work requests");
169654e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
169754e4ee71SNavdeep Parhar 	    &txq->imm_wrs, "# of work requests with immediate data");
169854e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
169954e4ee71SNavdeep Parhar 	    &txq->sgl_wrs, "# of work requests with direct SGL");
170054e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
170154e4ee71SNavdeep Parhar 	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
170254e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
170354e4ee71SNavdeep Parhar 	    &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
170454e4ee71SNavdeep Parhar 	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
170554e4ee71SNavdeep Parhar 	    &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");
170654e4ee71SNavdeep Parhar 
170754e4ee71SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
170854e4ee71SNavdeep Parhar 	    &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
170954e4ee71SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
171054e4ee71SNavdeep Parhar 	    &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
171154e4ee71SNavdeep Parhar 	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
171254e4ee71SNavdeep Parhar 	    &txq->egr_update, 0, "egress update notifications from the SGE");
171354e4ee71SNavdeep Parhar 
171454e4ee71SNavdeep Parhar 	return (rc);
171554e4ee71SNavdeep Parhar }
171654e4ee71SNavdeep Parhar 
171754e4ee71SNavdeep Parhar static int
171854e4ee71SNavdeep Parhar free_txq(struct port_info *pi, struct sge_txq *txq)
171954e4ee71SNavdeep Parhar {
172054e4ee71SNavdeep Parhar 	int rc;
172154e4ee71SNavdeep Parhar 	struct adapter *sc = pi->adapter;
172254e4ee71SNavdeep Parhar 	struct sge_eq *eq = &txq->eq;
172354e4ee71SNavdeep Parhar 
172454e4ee71SNavdeep Parhar 	if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {
17252be67d29SNavdeep Parhar 
17262be67d29SNavdeep Parhar 		/*
17272be67d29SNavdeep Parhar 		 * Wait for the response to a credit flush if there's one
17282be67d29SNavdeep Parhar 		 * pending.  Clearing the flag tells handle_sge_egr_update or
17292be67d29SNavdeep Parhar 		 * cxgbe_txq_start (depending on how far the response has made
17302be67d29SNavdeep Parhar 		 * it) that they should ignore the response and wake up free_txq
17312be67d29SNavdeep Parhar 		 * instead.
17322be67d29SNavdeep Parhar 		 *
17332be67d29SNavdeep Parhar 		 * The interface has been marked down by the time we get here
17342be67d29SNavdeep Parhar 		 * (both IFF_UP and IFF_DRV_RUNNING cleared).  qflush has
17352be67d29SNavdeep Parhar 		 * emptied the tx buf_rings and we know nothing new is being
17362be67d29SNavdeep Parhar 		 * queued for tx so we don't have to worry about a new credit
17372be67d29SNavdeep Parhar 		 * flush request.
17382be67d29SNavdeep Parhar 		 */
17392be67d29SNavdeep Parhar 		TXQ_LOCK(txq);
17402be67d29SNavdeep Parhar 		if (eq->flags & EQ_CRFLUSHED) {
17412be67d29SNavdeep Parhar 			eq->flags &= ~EQ_CRFLUSHED;
17422be67d29SNavdeep Parhar 			msleep(txq, &eq->eq_lock, 0, "crflush", 0);
17432be67d29SNavdeep Parhar 		}
17442be67d29SNavdeep Parhar 		TXQ_UNLOCK(txq);
17452be67d29SNavdeep Parhar 
174654e4ee71SNavdeep Parhar 		rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
174754e4ee71SNavdeep Parhar 		if (rc != 0) {
174854e4ee71SNavdeep Parhar 			device_printf(pi->dev,
174954e4ee71SNavdeep Parhar 			    "failed to free egress queue %p: %d\n", eq, rc);
175054e4ee71SNavdeep Parhar 			return (rc);
175154e4ee71SNavdeep Parhar 		}
175254e4ee71SNavdeep Parhar 		eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
175354e4ee71SNavdeep Parhar 	}
175454e4ee71SNavdeep Parhar 
175554e4ee71SNavdeep Parhar 	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
175654e4ee71SNavdeep Parhar 
1757f7dfe243SNavdeep Parhar 	free(txq->sdesc, M_CXGBE);
175854e4ee71SNavdeep Parhar 
1759f7dfe243SNavdeep Parhar 	if (txq->maps)
1760f7dfe243SNavdeep Parhar 		free_tx_maps(txq);
176154e4ee71SNavdeep Parhar 
1762f7dfe243SNavdeep Parhar 	buf_ring_free(txq->br, M_CXGBE);
176354e4ee71SNavdeep Parhar 
1764f7dfe243SNavdeep Parhar 	if (txq->tx_tag)
1765f7dfe243SNavdeep Parhar 		bus_dma_tag_destroy(txq->tx_tag);
176654e4ee71SNavdeep Parhar 
176754e4ee71SNavdeep Parhar 	if (mtx_initialized(&eq->eq_lock))
176854e4ee71SNavdeep Parhar 		mtx_destroy(&eq->eq_lock);
176954e4ee71SNavdeep Parhar 
177054e4ee71SNavdeep Parhar 	bzero(txq, sizeof(*txq));
177154e4ee71SNavdeep Parhar 	return (0);
177254e4ee71SNavdeep Parhar }
177354e4ee71SNavdeep Parhar 
177454e4ee71SNavdeep Parhar static void
177554e4ee71SNavdeep Parhar oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
177654e4ee71SNavdeep Parhar {
177754e4ee71SNavdeep Parhar 	bus_addr_t *ba = arg;
177854e4ee71SNavdeep Parhar 
177954e4ee71SNavdeep Parhar 	KASSERT(nseg == 1,
178054e4ee71SNavdeep Parhar 	    ("%s meant for single segment mappings only.", __func__));
178154e4ee71SNavdeep Parhar 
178254e4ee71SNavdeep Parhar 	*ba = error ? 0 : segs->ds_addr;
178354e4ee71SNavdeep Parhar }
178454e4ee71SNavdeep Parhar 
178554e4ee71SNavdeep Parhar static inline bool
178654e4ee71SNavdeep Parhar is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
178754e4ee71SNavdeep Parhar {
178854e4ee71SNavdeep Parhar 	*ctrl = (void *)((uintptr_t)iq->cdesc +
178954e4ee71SNavdeep Parhar 	    (iq->esize - sizeof(struct rsp_ctrl)));
179054e4ee71SNavdeep Parhar 
179154e4ee71SNavdeep Parhar 	return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
179254e4ee71SNavdeep Parhar }
179354e4ee71SNavdeep Parhar 
179454e4ee71SNavdeep Parhar static inline void
179554e4ee71SNavdeep Parhar iq_next(struct sge_iq *iq)
179654e4ee71SNavdeep Parhar {
179754e4ee71SNavdeep Parhar 	iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
179854e4ee71SNavdeep Parhar 	if (__predict_false(++iq->cidx == iq->qsize - 1)) {
179954e4ee71SNavdeep Parhar 		iq->cidx = 0;
180054e4ee71SNavdeep Parhar 		iq->gen ^= 1;
180154e4ee71SNavdeep Parhar 		iq->cdesc = iq->desc;
180254e4ee71SNavdeep Parhar 	}
180354e4ee71SNavdeep Parhar }
180454e4ee71SNavdeep Parhar 
/* Hardware descriptor index (8 buffers per hw descriptor) for sw index x. */
#define FL_HW_IDX(x) ((x) >> 3)

/*
 * Hand pending freelist buffers to the hardware.  Credits are posted in
 * whole hardware descriptors (8 buffers each); one is held back while pidx
 * and cidx share the same hardware descriptor.
 */
static inline void
ring_fl_db(struct adapter *sc, struct sge_fl *fl)
{
	int ndesc = fl->pending / 8;

	if (FL_HW_IDX(fl->pidx) == FL_HW_IDX(fl->cidx))
		ndesc--;	/* hold back one credit */

	if (ndesc <= 0)
		return;		/* nothing to do */

	/* Make descriptor writes visible before ringing the doorbell. */
	wmb();

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO |
	    V_QID(fl->cntxt_id) | V_PIDX(ndesc));
	fl->pending -= ndesc * 8;
}
182354e4ee71SNavdeep Parhar 
/*
 * Fill up the freelist by upto nbufs and ring its doorbell if the number of
 * buffers ready to be handed to the hardware >= dbthresh.
 *
 * Called with the freelist lock held.  Stops early (without error) if a
 * cluster, mbuf, or DMA mapping cannot be obtained.
 */
static void
refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs, int dbthresh)
{
	__be64 *d = &fl->desc[fl->pidx];
	struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
	bus_dma_tag_t tag;
	bus_addr_t pa;
	caddr_t cl;
	int rc;

	FL_LOCK_ASSERT_OWNED(fl);

	/* Clamp the request to what the freelist actually needs. */
	if (nbufs < 0 || nbufs > fl->needed)
		nbufs = fl->needed;

	while (nbufs--) {

		if (sd->cl != NULL) {

			/*
			 * This happens when a frame small enough to fit
			 * entirely in an mbuf was received in cl last time.
			 * We'd held on to cl and can reuse it now.  Note that
			 * we reuse a cluster of the old size if fl->tag_idx is
			 * no longer the same as sd->tag_idx.
			 */

			KASSERT(*d == sd->ba_tag,
			    ("%s: recyling problem at pidx %d",
			    __func__, fl->pidx));

			d++;
			goto recycled;
		}


		if (fl->tag_idx != sd->tag_idx) {
			bus_dmamap_t map;
			bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
			bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];

			/*
			 * An MTU change can get us here.  Discard the old map
			 * which was created with the old tag, but only if
			 * we're able to get a new one.
			 */
			rc = bus_dmamap_create(newtag, 0, &map);
			if (rc == 0) {
				bus_dmamap_destroy(oldtag, sd->map);
				sd->map = map;
				sd->tag_idx = fl->tag_idx;
			}
		}

		tag = fl->tag[sd->tag_idx];

		/* Get a fresh cluster and DMA-map it for the hardware. */
		cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx));
		if (cl == NULL)
			break;

		rc = bus_dmamap_load(tag, sd->map, cl, FL_BUF_SIZE(sd->tag_idx),
		    oneseg_dma_callback, &pa, 0);
		if (rc != 0 || pa == 0) {
			fl->dmamap_failed++;
			uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl);
			break;
		}

		/* Descriptor carries the bus address with tag_idx encoded. */
		sd->cl = cl;
		*d++ = htobe64(pa | sd->tag_idx);

#ifdef INVARIANTS
		sd->ba_tag = htobe64(pa | sd->tag_idx);
#endif

recycled:
		/* sd->m is never recycled, should always be NULL */
		KASSERT(sd->m == NULL, ("%s: stray mbuf", __func__));

		sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
		if (sd->m == NULL)
			break;

		fl->pending++;
		fl->needed--;
		sd++;
		if (++fl->pidx == fl->cap) {
			/* Wrap around to the start of the ring. */
			fl->pidx = 0;
			sd = fl->sdesc;
			d = fl->desc;
		}
	}

	if (fl->pending >= dbthresh)
		ring_fl_db(sc, fl);
}
192454e4ee71SNavdeep Parhar 
/*
 * Allocate the freelist's software descriptor array and create a DMA map
 * for every entry.  On failure, everything created so far is undone and the
 * bus_dmamap_create error is returned.
 */
static int
alloc_fl_sdesc(struct sge_fl *fl)
{
	struct fl_sdesc *sd;
	bus_dma_tag_t tag;
	int i, rc;

	FL_LOCK_ASSERT_OWNED(fl);

	fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
	    M_ZERO | M_WAITOK);

	tag = fl->tag[fl->tag_idx];
	sd = fl->sdesc;
	for (i = 0; i < fl->cap; i++, sd++) {

		sd->tag_idx = fl->tag_idx;
		rc = bus_dmamap_create(tag, 0, &sd->map);
		if (rc != 0)
			goto failed;
	}

	return (0);
failed:
	/* Unwind: destroy every map (and free any mbuf) created so far. */
	while (--i >= 0) {
		sd--;
		bus_dmamap_destroy(tag, sd->map);
		if (sd->m) {
			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
			m_free(sd->m);
			sd->m = NULL;
		}
	}
	KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));

	free(fl->sdesc, M_CXGBE);
	fl->sdesc = NULL;

	return (rc);
}
196554e4ee71SNavdeep Parhar 
196654e4ee71SNavdeep Parhar static void
196754e4ee71SNavdeep Parhar free_fl_sdesc(struct sge_fl *fl)
196854e4ee71SNavdeep Parhar {
196954e4ee71SNavdeep Parhar 	struct fl_sdesc *sd;
197054e4ee71SNavdeep Parhar 	int i;
197154e4ee71SNavdeep Parhar 
197254e4ee71SNavdeep Parhar 	FL_LOCK_ASSERT_OWNED(fl);
197354e4ee71SNavdeep Parhar 
197454e4ee71SNavdeep Parhar 	sd = fl->sdesc;
197554e4ee71SNavdeep Parhar 	for (i = 0; i < fl->cap; i++, sd++) {
197654e4ee71SNavdeep Parhar 
197754e4ee71SNavdeep Parhar 		if (sd->m) {
197894586193SNavdeep Parhar 			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
197954e4ee71SNavdeep Parhar 			m_free(sd->m);
198054e4ee71SNavdeep Parhar 			sd->m = NULL;
198154e4ee71SNavdeep Parhar 		}
198254e4ee71SNavdeep Parhar 
198354e4ee71SNavdeep Parhar 		if (sd->cl) {
198454e4ee71SNavdeep Parhar 			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
198554e4ee71SNavdeep Parhar 			uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl);
198654e4ee71SNavdeep Parhar 			sd->cl = NULL;
198754e4ee71SNavdeep Parhar 		}
198854e4ee71SNavdeep Parhar 
198954e4ee71SNavdeep Parhar 		bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
199054e4ee71SNavdeep Parhar 	}
199154e4ee71SNavdeep Parhar 
199254e4ee71SNavdeep Parhar 	free(fl->sdesc, M_CXGBE);
199354e4ee71SNavdeep Parhar 	fl->sdesc = NULL;
199454e4ee71SNavdeep Parhar }
199554e4ee71SNavdeep Parhar 
/*
 * Allocate the array of DMA maps used to map outgoing frames for the txq.
 * Returns 0 on success; on failure all maps created so far are destroyed
 * and the bus_dmamap_create error is returned.
 */
static int
alloc_tx_maps(struct sge_txq *txq)
{
	struct tx_map *txm;
	int i, rc, count;

	/*
	 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
	 * limit for any WR).  txq->no_dmamap events shouldn't occur if maps is
	 * sized for the worst case.
	 */
	count = txq->eq.qsize * 10 / 8;
	txq->map_total = txq->map_avail = count;
	txq->map_cidx = txq->map_pidx = 0;

	txq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
	    M_ZERO | M_WAITOK);

	txm = txq->maps;
	for (i = 0; i < count; i++, txm++) {
		rc = bus_dmamap_create(txq->tx_tag, 0, &txm->map);
		if (rc != 0)
			goto failed;
	}

	return (0);
failed:
	/* Unwind the maps created before the failure. */
	while (--i >= 0) {
		txm--;
		bus_dmamap_destroy(txq->tx_tag, txm->map);
	}
	KASSERT(txm == txq->maps, ("%s: EDOOFUS", __func__));

	free(txq->maps, M_CXGBE);
	txq->maps = NULL;

	return (rc);
}
203454e4ee71SNavdeep Parhar 
203554e4ee71SNavdeep Parhar static void
2036f7dfe243SNavdeep Parhar free_tx_maps(struct sge_txq *txq)
203754e4ee71SNavdeep Parhar {
203854e4ee71SNavdeep Parhar 	struct tx_map *txm;
203954e4ee71SNavdeep Parhar 	int i;
204054e4ee71SNavdeep Parhar 
2041f7dfe243SNavdeep Parhar 	txm = txq->maps;
2042f7dfe243SNavdeep Parhar 	for (i = 0; i < txq->map_total; i++, txm++) {
204354e4ee71SNavdeep Parhar 
204454e4ee71SNavdeep Parhar 		if (txm->m) {
2045f7dfe243SNavdeep Parhar 			bus_dmamap_unload(txq->tx_tag, txm->map);
204654e4ee71SNavdeep Parhar 			m_freem(txm->m);
204754e4ee71SNavdeep Parhar 			txm->m = NULL;
204854e4ee71SNavdeep Parhar 		}
204954e4ee71SNavdeep Parhar 
2050f7dfe243SNavdeep Parhar 		bus_dmamap_destroy(txq->tx_tag, txm->map);
205154e4ee71SNavdeep Parhar 	}
205254e4ee71SNavdeep Parhar 
2053f7dfe243SNavdeep Parhar 	free(txq->maps, M_CXGBE);
2054f7dfe243SNavdeep Parhar 	txq->maps = NULL;
205554e4ee71SNavdeep Parhar }
205654e4ee71SNavdeep Parhar 
/*
 * We'll do immediate data tx for non-TSO, but only when not coalescing.  We're
 * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes
 * of immediate data.
 */
#define IMM_LEN ( \
      2 * TX_EQ_ESIZE \
    - sizeof(struct fw_eth_tx_pkt_wr) \
    - sizeof(struct cpl_tx_pkt_core))

/*
 * Returns non-zero on failure, no need to cleanup anything in that case.
 *
 * Note 1: We always try to defrag the mbuf if required and return EFBIG only
 * if the resulting chain still won't fit in a tx descriptor.
 *
 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
 * does not have the TCP header in it.
 */
static int
get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
    int sgl_only)
{
	struct mbuf *m = *fp;
	struct tx_map *txm;
	int rc, defragged = 0, n;

	TXQ_LOCK_ASSERT_OWNED(txq);

	if (m->m_pkthdr.tso_segsz)
		sgl_only = 1;	/* Do not allow immediate data with LSO */

start:	sgl->nsegs = 0;

	if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
		return (0);	/* nsegs = 0 tells caller to use imm. tx */

	if (txq->map_avail == 0) {
		txq->no_dmamap++;
		return (ENOMEM);
	}
	txm = &txq->maps[txq->map_pidx];

	/* See Note 2: pull the leading headers into the first mbuf for TSO. */
	if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
		*fp = m_pullup(m, 50);
		m = *fp;
		if (m == NULL)
			return (ENOBUFS);
	}

	rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
	    &sgl->nsegs, BUS_DMA_NOWAIT);
	if (rc == EFBIG && defragged == 0) {
		/* Too many segments: compact the chain and retry once. */
		m = m_defrag(m, M_DONTWAIT);
		if (m == NULL)
			return (EFBIG);

		defragged = 1;
		*fp = m;
		goto start;
	}
	if (rc != 0)
		return (rc);

	/* The map is now in use; advance the map producer index. */
	txm->m = m;
	txq->map_avail--;
	if (++txq->map_pidx == txq->map_total)
		txq->map_pidx = 0;

	KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
	    ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));

	/*
	 * Store the # of flits required to hold this frame's SGL in nflits.  An
	 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
	 * multiple (len0 + len1, addr0, addr1) tuples.  If addr1 is not used
	 * then len1 must be set to 0.
	 */
	n = sgl->nsegs - 1;
	sgl->nflits = (3 * n) / 2 + (n & 1) + 2;

	return (0);
}
214054e4ee71SNavdeep Parhar 
214154e4ee71SNavdeep Parhar 
214254e4ee71SNavdeep Parhar /*
214354e4ee71SNavdeep Parhar  * Releases all the txq resources used up in the specified sgl.
214454e4ee71SNavdeep Parhar  */
214554e4ee71SNavdeep Parhar static int
214654e4ee71SNavdeep Parhar free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
214754e4ee71SNavdeep Parhar {
214854e4ee71SNavdeep Parhar 	struct tx_map *txm;
214954e4ee71SNavdeep Parhar 
215054e4ee71SNavdeep Parhar 	TXQ_LOCK_ASSERT_OWNED(txq);
215154e4ee71SNavdeep Parhar 
215254e4ee71SNavdeep Parhar 	if (sgl->nsegs == 0)
215354e4ee71SNavdeep Parhar 		return (0);	/* didn't use any map */
215454e4ee71SNavdeep Parhar 
215554e4ee71SNavdeep Parhar 	/* 1 pkt uses exactly 1 map, back it out */
215654e4ee71SNavdeep Parhar 
2157f7dfe243SNavdeep Parhar 	txq->map_avail++;
2158f7dfe243SNavdeep Parhar 	if (txq->map_pidx > 0)
2159f7dfe243SNavdeep Parhar 		txq->map_pidx--;
216054e4ee71SNavdeep Parhar 	else
2161f7dfe243SNavdeep Parhar 		txq->map_pidx = txq->map_total - 1;
216254e4ee71SNavdeep Parhar 
2163f7dfe243SNavdeep Parhar 	txm = &txq->maps[txq->map_pidx];
2164f7dfe243SNavdeep Parhar 	bus_dmamap_unload(txq->tx_tag, txm->map);
216554e4ee71SNavdeep Parhar 	txm->m = NULL;
216654e4ee71SNavdeep Parhar 
216754e4ee71SNavdeep Parhar 	return (0);
216854e4ee71SNavdeep Parhar }
216954e4ee71SNavdeep Parhar 
/*
 * Write a single-frame FW_ETH_TX_PKT work request (with an LSO cpl when TSO
 * is requested) into the egress descriptor ring, using either the caller's
 * SGL or immediate data (sgl->nsegs == 0).  Returns ENOMEM if there aren't
 * enough hardware descriptors available, 0 otherwise.
 */
static int
write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
    struct sgl *sgl)
{
	struct sge_eq *eq = &txq->eq;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;	/* used in many unrelated places */
	uint64_t ctrl1;
	int nflits, ndesc, pktlen;
	struct tx_sdesc *txsd;
	caddr_t dst;

	TXQ_LOCK_ASSERT_OWNED(txq);

	pktlen = m->m_pkthdr.len;

	/*
	 * Do we have enough flits to send this frame out?
	 */
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (m->m_pkthdr.tso_segsz) {
		nflits = TXPKT_LSO_WR_HDR;
		ctrl += sizeof(struct cpl_tx_pkt_lso);
	} else
		nflits = TXPKT_WR_HDR;
	if (sgl->nsegs > 0)
		nflits += sgl->nflits;
	else {
		/* Immediate data: the frame bytes ride inside the WR. */
		nflits += howmany(pktlen, 8);
		ctrl += pktlen;
	}
	ndesc = howmany(nflits, 8);
	if (ndesc > eq->avail)
		return (ENOMEM);

	/* Firmware work request header */
	wr = (void *)&eq->desc[eq->pidx];
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
	    V_FW_WR_IMMDLEN(ctrl));
	ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
	if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) {
		/* Ring about to go full: request an egress update. */
		ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
		eq->flags |= EQ_CRFLUSHED;
	}

	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3 = 0;

	if (m->m_pkthdr.tso_segsz) {
		/* LSO cpl: describe the eth/ip/tcp header layout. */
		struct cpl_tx_pkt_lso *lso = (void *)(wr + 1);
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;

		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
		    F_LSO_LAST_SLICE;

		eh = mtod(m, struct ether_header *);
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ctrl |= V_LSO_ETHHDR_LEN(1);
			ip = (void *)((struct ether_vlan_header *)eh + 1);
		} else
			ip = (void *)(eh + 1);

		tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
		ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
		    V_LSO_TCPHDR_LEN(tcp->th_off);

		lso->lso_ctrl = htobe32(ctrl);
		lso->ipid_ofst = htobe16(0);
		lso->mss = htobe16(m->m_pkthdr.tso_segsz);
		lso->seqno_offset = htobe32(0);
		lso->len = htobe32(pktlen);

		cpl = (void *)(lso + 1);

		txq->tso_wrs++;
	} else
		cpl = (void *)(wr + 1);

	/* Checksum offload */
	ctrl1 = 0;
	if (!(m->m_pkthdr.csum_flags & CSUM_IP))
		ctrl1 |= F_TXPKT_IPCSUM_DIS;
	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
		ctrl1 |= F_TXPKT_L4CSUM_DIS;
	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
		txq->txcsum++;	/* some hardware assistance provided */

	/* VLAN tag insertion */
	if (m->m_flags & M_VLANTAG) {
		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
		txq->vlan_insertion++;
	}

	/* CPL header */
	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* Software descriptor */
	txsd = &txq->sdesc[eq->pidx];
	txsd->desc_used = ndesc;

	/* Consume the hardware descriptors before writing the payload. */
	eq->pending += ndesc;
	eq->avail -= ndesc;
	eq->pidx += ndesc;
	if (eq->pidx >= eq->cap)
		eq->pidx -= eq->cap;

	/* SGL */
	dst = (void *)(cpl + 1);
	if (sgl->nsegs > 0) {
		txsd->credits = 1;
		txq->sgl_wrs++;
		write_sgl_to_txd(eq, sgl, &dst);
	} else {
		/* Immediate data: copy the mbuf chain into the WR. */
		txsd->credits = 0;
		txq->imm_wrs++;
		for (; m; m = m->m_next) {
			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
#ifdef INVARIANTS
			pktlen -= m->m_len;
#endif
		}
#ifdef INVARIANTS
		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
#endif

	}

	txq->txpkt_wrs++;
	return (0);
}
230754e4ee71SNavdeep Parhar 
230854e4ee71SNavdeep Parhar /*
230954e4ee71SNavdeep Parhar  * Returns 0 to indicate that m has been accepted into a coalesced tx work
231054e4ee71SNavdeep Parhar  * request.  It has either been folded into txpkts or txpkts was flushed and m
231154e4ee71SNavdeep Parhar  * has started a new coalesced work request (as the first frame in a fresh
231254e4ee71SNavdeep Parhar  * txpkts).
231354e4ee71SNavdeep Parhar  *
231454e4ee71SNavdeep Parhar  * Returns non-zero to indicate a failure - caller is responsible for
231554e4ee71SNavdeep Parhar  * transmitting m, if there was anything in txpkts it has been flushed.
231654e4ee71SNavdeep Parhar  */
static int
add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
    struct mbuf *m, struct sgl *sgl)
{
	struct sge_eq *eq = &txq->eq;
	int can_coalesce;
	struct tx_sdesc *txsd;
	int flits;

	TXQ_LOCK_ASSERT_OWNED(txq);

	if (txpkts->npkt > 0) {
		/*
		 * An open coalesced WR exists.  m can join it only if it's
		 * non-TSO and the combined WR still fits the flit limit, the
		 * available hardware descriptors, and the 64K payload cap.
		 */
		flits = TXPKTS_PKT_HDR + sgl->nflits;
		can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
		    txpkts->nflits + flits <= TX_WR_FLITS &&
		    txpkts->nflits + flits <= eq->avail * 8 &&
		    txpkts->plen + m->m_pkthdr.len < 65536;

		if (can_coalesce) {
			txpkts->npkt++;
			txpkts->nflits += flits;
			txpkts->plen += m->m_pkthdr.len;

			txsd = &txq->sdesc[eq->pidx];
			txsd->credits++;

			return (0);
		}

		/*
		 * Couldn't coalesce m into txpkts.  The first order of business
		 * is to send txpkts on its way.  Then we'll revisit m.
		 */
		write_txpkts_wr(txq, txpkts);
	}

	/*
	 * Check if we can start a new coalesced tx work request with m as
	 * the first packet in it.
	 */

	KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));

	flits = TXPKTS_WR_HDR + sgl->nflits;
	can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
	    flits <= eq->avail * 8 && flits <= TX_WR_FLITS;

	if (can_coalesce == 0)
		return (EINVAL);

	/*
	 * Start a fresh coalesced tx WR with m as the first frame in it.
	 */
	txpkts->npkt = 1;
	txpkts->nflits = flits;
	txpkts->flitp = &eq->desc[eq->pidx].flit[2];
	txpkts->plen = m->m_pkthdr.len;

	txsd = &txq->sdesc[eq->pidx];
	txsd->credits = 1;

	return (0);
}
238054e4ee71SNavdeep Parhar 
238154e4ee71SNavdeep Parhar /*
238254e4ee71SNavdeep Parhar  * Note that write_txpkts_wr can never run out of hardware descriptors (but
238354e4ee71SNavdeep Parhar  * write_txpkt_wr can).  add_to_txpkts ensures that a frame is accepted for
238454e4ee71SNavdeep Parhar  * coalescing only if sufficient hardware descriptors are available.
238554e4ee71SNavdeep Parhar  */
static void
write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
{
	struct sge_eq *eq = &txq->eq;
	struct fw_eth_tx_pkts_wr *wr;
	struct tx_sdesc *txsd;
	uint32_t ctrl;
	int ndesc;

	TXQ_LOCK_ASSERT_OWNED(txq);

	/* 8 flits per hardware descriptor. */
	ndesc = howmany(txpkts->nflits, 8);

	/* Fill in the WR header at the current producer index. */
	wr = (void *)&eq->desc[eq->pidx];
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) |
	    V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */
	ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
	if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) {
		/*
		 * This WR consumes the last available descriptors; ask the
		 * hardware for an egress update so tx can be resumed once
		 * credits are returned.
		 */
		ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
		eq->flags |= EQ_CRFLUSHED;
	}
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->plen = htobe16(txpkts->plen);
	wr->npkt = txpkts->npkt;
	wr->r3 = wr->type = 0;

	/* Everything else already written */

	/* Record how many descriptors this WR used, for later reclaim. */
	txsd = &txq->sdesc[eq->pidx];
	txsd->desc_used = ndesc;

	KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));

	/* Advance producer state; pidx wraps within [0, cap). */
	eq->pending += ndesc;
	eq->avail -= ndesc;
	eq->pidx += ndesc;
	if (eq->pidx >= eq->cap)
		eq->pidx -= eq->cap;

	txq->txpkts_pkts += txpkts->npkt;
	txq->txpkts_wrs++;
	txpkts->npkt = 0;	/* emptied */
}
242954e4ee71SNavdeep Parhar 
/*
 * Append one frame to an in-progress coalesced WR: write the ULPTX master
 * command, the ULP_TX_SC_IMM subcommand, a CPL_TX_PKT header (with checksum
 * and VLAN offload flags derived from m), and the frame's SGL, starting at
 * txpkts->flitp and wrapping around the descriptor ring as needed.
 */
static inline void
write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
    struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
{
	struct ulp_txpkt *ulpmc;
	struct ulptx_idata *ulpsc;
	struct cpl_tx_pkt_core *cpl;
	struct sge_eq *eq = &txq->eq;
	uintptr_t flitp, start, end;
	uint64_t ctrl;
	caddr_t dst;

	KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));

	/* Writable region of the ring: [desc, spg). */
	start = (uintptr_t)eq->desc;
	end = (uintptr_t)eq->spg;

	/* Checksum offload */
	ctrl = 0;
	if (!(m->m_pkthdr.csum_flags & CSUM_IP))
		ctrl |= F_TXPKT_IPCSUM_DIS;
	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
		ctrl |= F_TXPKT_L4CSUM_DIS;
	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
		txq->txcsum++;	/* some hardware assistance provided */

	/* VLAN tag insertion */
	if (m->m_flags & M_VLANTAG) {
		ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
		txq->vlan_insertion++;
	}

	/*
	 * The previous packet's SGL must have ended at a 16 byte boundary (this
	 * is required by the firmware/hardware).  It follows that flitp cannot
	 * wrap around between the ULPTX master command and ULPTX subcommand (8
	 * bytes each), and that it can not wrap around in the middle of the
	 * cpl_tx_pkt_core either.
	 */
	flitp = (uintptr_t)txpkts->flitp;
	KASSERT((flitp & 0xf) == 0,
	    ("%s: last SGL did not end at 16 byte boundary: %p",
	    __func__, txpkts->flitp));

	/* ULP master command */
	ulpmc = (void *)flitp;
	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(eq->iqid));
	/* Length covers the subcommand, the CPL, and the SGL, in 16B units. */
	ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
	    sizeof(*cpl) + 8 * sgl->nflits, 16));

	/* ULP subcommand */
	ulpsc = (void *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
	    F_ULP_TX_SC_MORE);
	ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));

	/* Wrap to the start of the ring if we've reached the status page. */
	flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
	if (flitp == end)
		flitp = start;

	/* CPL_TX_PKT */
	cpl = (void *)flitp;
	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
	cpl->pack = 0;
	cpl->len = htobe16(m->m_pkthdr.len);
	cpl->ctrl1 = htobe64(ctrl);

	flitp += sizeof(*cpl);
	if (flitp == end)
		flitp = start;

	/* SGL for this frame */
	dst = (caddr_t)flitp;
	txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
	txpkts->flitp = (void *)dst;

	KASSERT(((uintptr_t)dst & 0xf) == 0,
	    ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
}
251154e4ee71SNavdeep Parhar 
251254e4ee71SNavdeep Parhar /*
251354e4ee71SNavdeep Parhar  * If the SGL ends on an address that is not 16 byte aligned, this function will
251454e4ee71SNavdeep Parhar  * add a 0 filled flit at the end.  It returns 1 in that case.
251554e4ee71SNavdeep Parhar  */
static int
write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
{
	__be64 *flitp, *end;
	struct ulptx_sgl *usgl;
	bus_dma_segment_t *seg;
	int i, padded;

	KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
	    ("%s: bad SGL - nsegs=%d, nflits=%d",
	    __func__, sgl->nsegs, sgl->nflits));

	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));

	flitp = (__be64 *)(*to);
	end = flitp + sgl->nflits;
	seg = &sgl->seg[0];
	usgl = (void *)flitp;

	/*
	 * We start at a 16 byte boundary somewhere inside the tx descriptor
	 * ring, so we're at least 16 bytes away from the status page.  There is
	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
	 */

	/* First segment goes in the ulptx_sgl header itself. */
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(sgl->nsegs));
	usgl->len0 = htobe32(seg->ds_len);
	usgl->addr0 = htobe64(seg->ds_addr);
	seg++;

	if ((uintptr_t)end <= (uintptr_t)eq->spg) {

		/* Won't wrap around at all */

		/* Remaining segments are packed as (len, addr) pairs. */
		for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
		}
		/* Odd number of trailing segments: zero the unused len slot. */
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
	} else {

		/* Will wrap somewhere in the rest of the SGL */

		/* 2 flits already written, write the rest flit by flit */
		flitp = (void *)(usgl + 1);
		for (i = 0; i < sgl->nflits - 2; i++) {
			/* Jump back to the ring start at the status page. */
			if ((uintptr_t)flitp == (uintptr_t)eq->spg)
				flitp = (void *)eq->desc;
			*flitp++ = get_flit(seg, sgl->nsegs - 1, i);
		}
		end = flitp;
	}

	/* Pad with a zero flit so the SGL ends on a 16 byte boundary. */
	if ((uintptr_t)end & 0xf) {
		*(uint64_t *)end = 0;
		end++;
		padded = 1;
	} else
		padded = 0;

	/* Return the next write position to the caller, wrapping if needed. */
	if ((uintptr_t)end == (uintptr_t)eq->spg)
		*to = (void *)eq->desc;
	else
		*to = (void *)end;

	return (padded);
}
258654e4ee71SNavdeep Parhar 
258754e4ee71SNavdeep Parhar static inline void
258854e4ee71SNavdeep Parhar copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
258954e4ee71SNavdeep Parhar {
259054e4ee71SNavdeep Parhar 	if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) {
259154e4ee71SNavdeep Parhar 		bcopy(from, *to, len);
259254e4ee71SNavdeep Parhar 		(*to) += len;
259354e4ee71SNavdeep Parhar 	} else {
259454e4ee71SNavdeep Parhar 		int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
259554e4ee71SNavdeep Parhar 
259654e4ee71SNavdeep Parhar 		bcopy(from, *to, portion);
259754e4ee71SNavdeep Parhar 		from += portion;
259854e4ee71SNavdeep Parhar 		portion = len - portion;	/* remaining */
259954e4ee71SNavdeep Parhar 		bcopy(from, (void *)eq->desc, portion);
260054e4ee71SNavdeep Parhar 		(*to) = (caddr_t)eq->desc + portion;
260154e4ee71SNavdeep Parhar 	}
260254e4ee71SNavdeep Parhar }
260354e4ee71SNavdeep Parhar 
/*
 * Ring the egress queue's doorbell to notify the chip of eq->pending newly
 * written tx descriptors.
 */
static inline void
ring_eq_db(struct adapter *sc, struct sge_eq *eq)
{
	/* Make descriptor writes visible before the doorbell MMIO write. */
	wmb();
	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
	    V_QID(eq->cntxt_id) | V_PIDX(eq->pending));
	eq->pending = 0;	/* all pending descriptors handed to hw */
}
261254e4ee71SNavdeep Parhar 
2613e874ff7aSNavdeep Parhar static inline int
2614e874ff7aSNavdeep Parhar reclaimable(struct sge_eq *eq)
261554e4ee71SNavdeep Parhar {
2616e874ff7aSNavdeep Parhar 	unsigned int cidx;
261754e4ee71SNavdeep Parhar 
261854e4ee71SNavdeep Parhar 	cidx = eq->spg->cidx;	/* stable snapshot */
261954e4ee71SNavdeep Parhar 	cidx = be16_to_cpu(cidx);
262054e4ee71SNavdeep Parhar 
262154e4ee71SNavdeep Parhar 	if (cidx >= eq->cidx)
2622e874ff7aSNavdeep Parhar 		return (cidx - eq->cidx);
262354e4ee71SNavdeep Parhar 	else
2624e874ff7aSNavdeep Parhar 		return (cidx + eq->cap - eq->cidx);
2625e874ff7aSNavdeep Parhar }
262654e4ee71SNavdeep Parhar 
2627e874ff7aSNavdeep Parhar /*
2628e874ff7aSNavdeep Parhar  * There are "can_reclaim" tx descriptors ready to be reclaimed.  Reclaim as
2629e874ff7aSNavdeep Parhar  * many as possible but stop when there are around "n" mbufs to free.
2630e874ff7aSNavdeep Parhar  *
2631e874ff7aSNavdeep Parhar  * The actual number reclaimed is provided as the return value.
2632e874ff7aSNavdeep Parhar  */
static int
reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n)
{
	struct tx_sdesc *txsd;
	struct tx_map *txm;
	unsigned int reclaimed, maps;
	struct sge_eq *eq = &txq->eq;

	EQ_LOCK_ASSERT_OWNED(eq);

	/* Caller may pass 0 to have us query the hardware's consumer index. */
	if (can_reclaim == 0)
		can_reclaim = reclaimable(eq);

	/*
	 * Phase 1: walk completed sdescs, totalling descriptors reclaimed and
	 * DMA maps (mbufs) to be freed.  Stop once around "n" maps are queued.
	 */
	maps = reclaimed = 0;
	while (can_reclaim && maps < n) {
		int ndesc;

		txsd = &txq->sdesc[eq->cidx];
		ndesc = txsd->desc_used;

		/* Firmware doesn't return "partial" credits. */
		KASSERT(can_reclaim >= ndesc,
		    ("%s: unexpected number of credits: %d, %d",
		    __func__, can_reclaim, ndesc));

		maps += txsd->credits;

		reclaimed += ndesc;
		can_reclaim -= ndesc;

		eq->cidx += ndesc;
		if (__predict_false(eq->cidx >= eq->cap))
			eq->cidx -= eq->cap;
	}

	/* Warm up the cache line for the first map we'll free. */
	txm = &txq->maps[txq->map_cidx];
	if (maps)
		prefetch(txm->m);

	eq->avail += reclaimed;
	KASSERT(eq->avail < eq->cap,	/* avail tops out at (cap - 1) */
	    ("%s: too many descriptors available", __func__));

	txq->map_avail += maps;
	KASSERT(txq->map_avail <= txq->map_total,
	    ("%s: too many maps available", __func__));

	/*
	 * Phase 2: unload and free the mbufs, prefetching the next map's mbuf
	 * ahead of each free.
	 */
	while (maps--) {
		struct tx_map *next;

		next = txm + 1;
		if (__predict_false(txq->map_cidx + 1 == txq->map_total))
			next = txq->maps;
		prefetch(next->m);

		bus_dmamap_unload(txq->tx_tag, txm->map);
		m_freem(txm->m);
		txm->m = NULL;

		txm = next;
		if (__predict_false(++txq->map_cidx == txq->map_total))
			txq->map_cidx = 0;
	}

	return (reclaimed);
}
269954e4ee71SNavdeep Parhar 
/*
 * Write a FW_EQ_FLUSH_WR at eq->pidx.  The EQUEQ/EQUIQ flags request an
 * egress update from the hardware, so this serves as a credit-flush request
 * for a stalled queue.  Consumes one descriptor.
 */
static void
write_eqflush_wr(struct sge_eq *eq)
{
	struct fw_eq_flush_wr *wr;

	EQ_LOCK_ASSERT_OWNED(eq);
	KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));

	wr = (void *)&eq->desc[eq->pidx];
	bzero(wr, sizeof(*wr));
	wr->opcode = FW_EQ_FLUSH_WR;
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
	    F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);

	/* Note that an update has been requested; advance producer state. */
	eq->flags |= EQ_CRFLUSHED;
	eq->pending++;
	eq->avail--;
	if (++eq->pidx == eq->cap)
		eq->pidx = 0;
}
272054e4ee71SNavdeep Parhar 
272154e4ee71SNavdeep Parhar static __be64
272254e4ee71SNavdeep Parhar get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
272354e4ee71SNavdeep Parhar {
272454e4ee71SNavdeep Parhar 	int i = (idx / 3) * 2;
272554e4ee71SNavdeep Parhar 
272654e4ee71SNavdeep Parhar 	switch (idx % 3) {
272754e4ee71SNavdeep Parhar 	case 0: {
272854e4ee71SNavdeep Parhar 		__be64 rc;
272954e4ee71SNavdeep Parhar 
273054e4ee71SNavdeep Parhar 		rc = htobe32(sgl[i].ds_len);
273154e4ee71SNavdeep Parhar 		if (i + 1 < nsegs)
273254e4ee71SNavdeep Parhar 			rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;
273354e4ee71SNavdeep Parhar 
273454e4ee71SNavdeep Parhar 		return (rc);
273554e4ee71SNavdeep Parhar 	}
273654e4ee71SNavdeep Parhar 	case 1:
273754e4ee71SNavdeep Parhar 		return htobe64(sgl[i].ds_addr);
273854e4ee71SNavdeep Parhar 	case 2:
273954e4ee71SNavdeep Parhar 		return htobe64(sgl[i + 1].ds_addr);
274054e4ee71SNavdeep Parhar 	}
274154e4ee71SNavdeep Parhar 
274254e4ee71SNavdeep Parhar 	return (0);
274354e4ee71SNavdeep Parhar }
274454e4ee71SNavdeep Parhar 
274554e4ee71SNavdeep Parhar static void
274654e4ee71SNavdeep Parhar set_fl_tag_idx(struct sge_fl *fl, int mtu)
274754e4ee71SNavdeep Parhar {
274854e4ee71SNavdeep Parhar 	int i;
274954e4ee71SNavdeep Parhar 
275054e4ee71SNavdeep Parhar 	FL_LOCK_ASSERT_OWNED(fl);
275154e4ee71SNavdeep Parhar 
275254e4ee71SNavdeep Parhar 	for (i = 0; i < FL_BUF_SIZES - 1; i++) {
275354e4ee71SNavdeep Parhar 		if (FL_BUF_SIZE(i) >= (mtu + FL_PKTSHIFT))
275454e4ee71SNavdeep Parhar 			break;
275554e4ee71SNavdeep Parhar 	}
275654e4ee71SNavdeep Parhar 
275754e4ee71SNavdeep Parhar 	fl->tag_idx = i;
275854e4ee71SNavdeep Parhar }
2759ecb79ca4SNavdeep Parhar 
/*
 * Process an SGE egress update: look up the txq by egress queue id and
 * either schedule resume_tx (queue stalled awaiting credits) or wake up the
 * thread waiting in free_txq for the queue to drain.  Returns 0.
 */
static int
handle_sge_egr_update(struct adapter *sc, const struct cpl_sge_egr_update *cpl)
{
	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
	struct sge *s = &sc->sge;
	struct sge_txq *txq;
	struct port_info *pi;

	txq = (void *)s->eqmap[qid - s->eq_start];
	TXQ_LOCK(txq);
	if (txq->eq.flags & EQ_CRFLUSHED) {
		/* Credits were requested earlier; resume transmission. */
		pi = txq->ifp->if_softc;
		taskqueue_enqueue(pi->tq, &txq->resume_tx);
		txq->egr_update++;
	} else
		wakeup_one(txq);	/* txq is going away, wakeup free_txq */
	TXQ_UNLOCK(txq);

	return (0);
}
2780f7dfe243SNavdeep Parhar 
/*
 * Dispatch the CPL message at the ingress queue's current descriptor based
 * on its opcode.  Panics on an opcode this driver does not expect.
 */
static void
handle_cpl(struct adapter *sc, struct sge_iq *iq)
{
	const struct rss_header *rss = (const void *)iq->cdesc;
	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);

	switch (rss->opcode) {
	case CPL_FW4_MSG:
	case CPL_FW6_MSG:
		/* Only firmware command replies are handled here. */
		if (cpl->type == FW6_TYPE_CMD_RPL)
			t4_handle_fw_rpl(sc, cpl->data);
		break;

	case CPL_SGE_EGR_UPDATE:
		handle_sge_egr_update(sc, (const void *)cpl);
		break;

	case CPL_SET_TCB_RPL:
		filter_rpl(sc, (const void *)cpl);
		break;

	default:
		panic("%s: unexpected CPL opcode 0x%x", __func__, rss->opcode);
	}
}
280656599263SNavdeep Parhar 
/*
 * Send a control work request (already fully formed in m0) on the control
 * queue.  m0 is freed on successful transmission.  Returns 0 on success,
 * EMSGSIZE if the WR is too long, or EAGAIN if the queue is out of
 * descriptors (caller may retry; m0 is not freed in the error cases).
 */
static int
ctrl_tx(struct adapter *sc, struct sge_ctrlq *ctrlq, struct mbuf *m0)
{
	struct sge_eq *eq = &ctrlq->eq;
	int rc = 0, ndesc;
	int can_reclaim;
	caddr_t dst;
	struct mbuf *m;

	M_ASSERTPKTHDR(m0);

	if (m0->m_pkthdr.len > SGE_MAX_WR_LEN) {
		log(LOG_ERR, "%s: %s work request too long (%d)",
		    device_get_nameunit(sc->dev), __func__, m0->m_pkthdr.len);
		return (EMSGSIZE);
	}
	ndesc = howmany(m0->m_pkthdr.len, CTRL_EQ_ESIZE);

	EQ_LOCK(eq);

	/*
	 * Reclaim completed descriptors inline; the control queue has no
	 * sdesc/map bookkeeping so only the eq indices need updating.
	 */
	can_reclaim = reclaimable(eq);
	eq->cidx += can_reclaim;
	eq->avail += can_reclaim;
	if (__predict_false(eq->cidx >= eq->cap))
		eq->cidx -= eq->cap;

	if (eq->avail < ndesc) {
		rc = EAGAIN;
		ctrlq->no_desc++;
		goto failed;
	}

	/* Copy the WR into the ring (copy_to_txd handles wrap-around). */
	dst = (void *)&eq->desc[eq->pidx];
	for (m = m0; m; m = m->m_next)
		copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);

	eq->pidx += ndesc;
	if (__predict_false(eq->pidx >= eq->cap))
		eq->pidx -= eq->cap;

	eq->pending += ndesc;
	ring_eq_db(sc, eq);
failed:
	EQ_UNLOCK(eq);
	if (rc == 0)
		m_freem(m0);

	return (rc);
}
2859af49c942SNavdeep Parhar 
/*
 * Sysctl handler that exports a uint16_t as a read-only int.  The value is
 * copied into a local int so sysctl_handle_int operates on correctly sized
 * storage.
 */
static int
sysctl_uint16(SYSCTL_HANDLER_ARGS)
{
	uint16_t *id = arg1;
	int i = *id;

	return sysctl_handle_int(oidp, &i, 0, req);
}
2868