/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/types.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/socketvar.h>
#include <sys/counter.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#include <machine/md_var.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#ifdef DEV_NETMAP
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <net/if_var.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#endif

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

#ifdef T4_PKT_TIMESTAMP
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif

/* Internal mbuf flags stored in PH_loc.eight[1]. */
#define	MC_NOMAP	0x01
#define	MC_RAW_WR	0x02
#define	MC_TLS		0x04

/*
 * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
 * 0-7 are valid values.
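 * A nonzero shift (e.g. 2) can be used to change the alignment of the
 * payload that follows the 14-byte Ethernet header, at the cost of moving
 * the start of the frame off its natural alignment.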
 */
static int fl_pktshift = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0,
    "payload DMA offset in rx buffer (bytes)");

/*
 * Pad ethernet payload up to this boundary.
 * -1: driver should figure out a good value.
 * 0: disable padding.
 * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
 */
int fl_pad = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0,
    "payload pad boundary (bytes)");

/*
 * Status page length.
 * -1: driver should figure out a good value.
 * 64 or 128 are the only other valid values.
 */
static int spg_len = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0,
    "status page size (bytes)");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 * 0: backpressure the channel instead of dropping packets right away.
 * 1: no backpressure, drop packets for the congested queue immediately.
 */
static int cong_drop = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0,
    "Congestion control for RX queues (0 = backpressure, 1 = drop)");

/*
 * Deliver multiple frames in the same free list buffer if they fit.
 * -1: let the driver decide whether to enable buffer packing or not.
 * 0: disable buffer packing.
 * 1: enable buffer packing.
 */
static int buffer_packing = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing,
    0, "Enable buffer packing");

/*
 * Start next frame in a packed buffer at this boundary.
 * -1: driver should figure out a good value.
 * T4: driver will ignore this and use the same value as fl_pad above.
 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
 */
static int fl_pack = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0,
    "payload pack boundary (bytes)");

/*
 * Largest rx cluster size that the driver is allowed to allocate.
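 * Only the mbuf cluster zone sizes (MCLBYTES, MJUMPAGESIZE, MJUM9BYTES,
 * MJUM16BYTES) are meaningful here; see sw_buf_sizes[] below.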
 */
static int largest_rx_cluster = MJUM16BYTES;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN,
    &largest_rx_cluster, 0, "Largest rx cluster (bytes)");

/*
 * Size of cluster allocation that's most likely to succeed.  The driver will
 * fall back to this size if it fails to allocate clusters larger than this.
 */
static int safest_rx_cluster = PAGE_SIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN,
    &safest_rx_cluster, 0, "Safe rx cluster (bytes)");

#ifdef RATELIMIT
/*
 * Knob to control TCP timestamp rewriting, and the granularity of the tick used
 * for rewriting.  -1 and 0-3 are all valid values.
 * -1: hardware should leave the TCP timestamps alone.
 * 0: 1ms
 * 1: 100us
 * 2: 10us
 * 3: 1us
 */
static int tsclk = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tsclk, CTLFLAG_RDTUN, &tsclk, 0,
    "Control TCP timestamp rewriting when using pacing");

static int eo_max_backlog = 1024 * 1024;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, eo_max_backlog, CTLFLAG_RDTUN, &eo_max_backlog,
    0, "Maximum backlog of ratelimited data per flow");
#endif

/*
 * The interrupt holdoff timers are multiplied by this value on T6+.
 * 1 and 3-17 (both inclusive) are legal values.
 */
static int tscale = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0,
    "Interrupt holdoff timer scale on T6+");

/*
 * Number of LRO entries in the lro_ctrl structure per rx queue.
 */
static int lro_entries = TCP_LRO_ENTRIES;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0,
    "Number of LRO entries per RX queue");

/*
 * This enables presorting of frames before they're fed into tcp_lro_rx.
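 * When enabled, frames are queued up as mbufs and sorted by flow before
 * they are submitted to tcp_lro_rx, which improves aggregation when frames
 * from different connections arrive interleaved.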
 */
static int lro_mbufs = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0,
    "Enable presorting of LRO frames");

static int service_iq(struct sge_iq *, int);
static int service_iq_fl(struct sge_iq *, int);
static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
static int eth_rx(struct adapter *, struct sge_rxq *, const struct iq_desc *,
    u_int);
static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
    uint16_t, char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
    int, int);
static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *);
static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
    struct sge_iq *);
static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid *, struct sge_fl *);
static int alloc_fwq(struct adapter *);
static int free_fwq(struct adapter *);
static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int,
    struct sysctl_oid *);
static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
    struct sysctl_oid *);
static int free_rxq(struct vi_info *, struct sge_rxq *);
#ifdef TCP_OFFLOAD
static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
    struct sysctl_oid *);
static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
#endif
#ifdef DEV_NETMAP
static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int,
    struct sysctl_oid *);
static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int,
    struct sysctl_oid *);
static int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
#endif
static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#endif
static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *);
static int free_eq(struct adapter *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
    struct sysctl_oid *);
static int free_wrq(struct adapter *, struct sge_wrq *);
static int alloc_txq(struct vi_info *, struct sge_txq *, int,
    struct sysctl_oid *);
static int free_txq(struct vi_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static int refill_fl(struct adapter *, struct sge_fl *, int);
static void refill_sfl(void *);
static int alloc_fl_sdesc(struct sge_fl *);
static void free_fl_sdesc(struct adapter *, struct sge_fl *);
static int find_refill_source(struct adapter *, int, bool);
static void add_fl_to_sfl(struct adapter *, struct sge_fl *);

static inline void get_pkt_gl(struct mbuf *, struct sglist *);
static inline u_int txpkt_len16(u_int, u_int);
static inline u_int txpkt_vm_len16(u_int, u_int);
static inline u_int txpkts0_len16(u_int);
static inline u_int txpkts1_len16(void);
static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int);
static u_int write_txpkt_wr(struct adapter *, struct sge_txq *, struct mbuf *,
    u_int);
static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
    struct mbuf *);
static int add_to_txpkts_vf(struct adapter *, struct sge_txq *, struct mbuf *,
    int, bool *);
static int add_to_txpkts_pf(struct adapter *, struct sge_txq *, struct mbuf *,
    int, bool *);
static u_int write_txpkts_wr(struct adapter *, struct sge_txq *);
static u_int write_txpkts_vm_wr(struct adapter *, struct sge_txq *);
static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
static inline uint16_t read_hw_cidx(struct sge_eq *);
static inline u_int reclaimable_tx_desc(struct sge_eq *);
static inline u_int total_available_tx_desc(struct sge_eq *);
static u_int reclaim_tx_descs(struct sge_txq *, u_int);
static void tx_reclaim(void *, int);
static __be64 get_flit(struct sglist_seg *, int, int);
static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
static void wrq_tx_drain(void *, int);
static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);

static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
#ifdef RATELIMIT
static inline u_int txpkt_eo_len16(u_int, u_int, u_int);
static int ethofld_fw4_ack(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
#endif

static counter_u64_t extfree_refs;
static counter_u64_t extfree_rels;

an_handler_t t4_an_handler;
fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];

void
t4_register_an_handler(an_handler_t h)
{
	uintptr_t *loc;

	MPASS(h == NULL || t4_an_handler == NULL);

	loc = (uintptr_t *)&t4_an_handler;
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
{
	uintptr_t *loc;

	MPASS(type < nitems(t4_fw_msg_handler));
	MPASS(h == NULL || t4_fw_msg_handler[type] == NULL);
	/*
	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
	 * handler dispatch table.  Reject any attempt to install a handler for
	 * this subtype.
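	 * (The embedded CPL is dispatched via t4_cpl_handler[]; use
	 * t4_register_cpl_handler() for it instead.)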
	 */
	MPASS(type != FW_TYPE_RSSCPL);
	MPASS(type != FW6_TYPE_RSSCPL);

	loc = (uintptr_t *)&t4_fw_msg_handler[type];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_cpl_handler(int opcode, cpl_handler_t h)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(h == NULL || t4_cpl_handler[opcode] == NULL);

	loc = (uintptr_t *)&t4_cpl_handler[opcode];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

static int
set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	u_int tid;
	int cookie;

	MPASS(m == NULL);

	tid = GET_TID(cpl);
	if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) {
		/*
		 * The return code for filter-write is put in the CPL cookie so
		 * we have to rely on the hardware tid (is_ftid) to determine
		 * that this is a response to a filter.
		 */
		cookie = CPL_COOKIE_FILTER;
	} else {
		cookie = G_COOKIE(cpl->cookie);
	}
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < nitems(set_tcb_rpl_handlers));

	return (set_tcb_rpl_handlers[cookie](iq, rss, m));
}

static int
l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	unsigned int cookie;

	MPASS(m == NULL);

	cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER;
	return (l2t_write_rpl_handlers[cookie](iq, rss, m));
}

static int
act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));

	MPASS(m == NULL);
	MPASS(cookie != CPL_COOKIE_RESERVED);

	return (act_open_rpl_handlers[cookie](iq, rss, m));
}

static int
abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	u_int cookie;

	MPASS(m == NULL);
	if (is_hashfilter(sc))
		cookie = CPL_COOKIE_HASHFILTER;
	else
		cookie = CPL_COOKIE_TOM;

	return (abort_rpl_rss_handlers[cookie](iq, rss, m));
}

static int
fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	u_int cookie;

	MPASS(m == NULL);
	if (is_etid(sc, tid))
		cookie = CPL_COOKIE_ETHOFLD;
	else
		cookie = CPL_COOKIE_TOM;

	return (fw4_ack_handlers[cookie](iq, rss, m));
}

static void
t4_init_shared_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler);
	t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler);
	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
	t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
}

void
t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < NUM_CPL_COOKIES);
	MPASS(t4_cpl_handler[opcode] != NULL);

	switch (opcode) {
	case CPL_SET_TCB_RPL:
		loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie];
		break;
	case CPL_L2T_WRITE_RPL:
		loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie];
		break;
	case CPL_ACT_OPEN_RPL:
		loc = (uintptr_t *)&act_open_rpl_handlers[cookie];
		break;
	case CPL_ABORT_RPL_RSS:
		loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie];
		break;
	case CPL_FW4_ACK:
		loc = (uintptr_t *)&fw4_ack_handlers[cookie];
		break;
	default:
		MPASS(0);
		return;
	}
	MPASS(h == NULL || *loc == (uintptr_t)NULL);
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

/*
 * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
 */
void
t4_sge_modload(void)
{

	if (fl_pktshift < 0 || fl_pktshift > 7) {
		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
		    " using 0 instead.\n", fl_pktshift);
		fl_pktshift = 0;
	}

	if (spg_len != 64 && spg_len != 128) {
		int len;

#if defined(__i386__) || defined(__amd64__)
		len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
		len = 64;
#endif
		if (spg_len != -1) {
			printf("Invalid hw.cxgbe.spg_len value (%d),"
			    " using %d instead.\n", spg_len, len);
		}
		spg_len = len;
	}

	if (cong_drop < -1 || cong_drop > 1) {
		printf("Invalid hw.cxgbe.cong_drop value (%d),"
		    " using 0 instead.\n", cong_drop);
		cong_drop = 0;
	}

	if (tscale != 1 && (tscale < 3 || tscale > 17)) {
		printf("Invalid hw.cxgbe.tscale value (%d),"
		    " using 1 instead.\n", tscale);
		tscale = 1;
	}

	extfree_refs = counter_u64_alloc(M_WAITOK);
	extfree_rels = counter_u64_alloc(M_WAITOK);
	counter_u64_zero(extfree_refs);
	counter_u64_zero(extfree_rels);

	t4_init_shared_cpl_handlers();
	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
#ifdef RATELIMIT
	t4_register_shared_cpl_handler(CPL_FW4_ACK, ethofld_fw4_ack,
	    CPL_COOKIE_ETHOFLD);
#endif
	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
}

void
t4_sge_modunload(void)
{

	counter_u64_free(extfree_refs);
	counter_u64_free(extfree_rels);
}

uint64_t
t4_sge_extfree_refs(void)
{
	uint64_t refs, rels;

	rels = counter_u64_fetch(extfree_rels);
	refs = counter_u64_fetch(extfree_refs);

	return (refs - rels);
}

/* max 4096 */
#define MAX_PACK_BOUNDARY 512

static inline void
setup_pad_and_pack_boundaries(struct adapter *sc)
{
	uint32_t v, m;
	int pad, pack, pad_shift;

	pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
	    X_INGPADBOUNDARY_SHIFT;
	pad = fl_pad;
	if (fl_pad < (1 << pad_shift) ||
	    fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
	    !powerof2(fl_pad)) {
		/*
		 * If there is any chance that we might use buffer packing and
		 * the chip is a T4, then pick 64 as the pad/pack boundary.  Set
		 * it to the minimum allowed in all other cases.
		 */
		pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;

		/*
		 * For fl_pad = 0 we'll still write a reasonable value to the
		 * register but all the freelists will opt out of padding.
		 * We'll complain here only if the user tried to set it to a
		 * value greater than 0 that was invalid.
		 */
		if (fl_pad > 0) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
			    " (%d), using %d instead.\n", fl_pad, pad);
		}
	}
	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
	v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	if (is_t4(sc)) {
		if (fl_pack != -1 && fl_pack != pad) {
			/* Complain but carry on. */
			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
			    " using %d instead.\n", fl_pack, pad);
		}
		return;
	}

	pack = fl_pack;
	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
	    !powerof2(fl_pack)) {
		if (sc->params.pci.mps > MAX_PACK_BOUNDARY)
			pack = MAX_PACK_BOUNDARY;
		else
			pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
		MPASS(powerof2(pack));
		if (pack < 16)
			pack = 16;
		if (pack == 32)
			pack = 64;
		if (pack > 4096)
			pack = 4096;
		if (fl_pack != -1) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
			    " (%d), using %d instead.\n", fl_pack, pack);
		}
	}
	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
	if (pack == 16)
		v = V_INGPACKBOUNDARY(0);
	else
		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);

	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
}

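/*
 * Illustration of the encodings above (not additional configuration): on a
 * T5, pad_shift is X_INGPADBOUNDARY_SHIFT (5), so a 64B pad boundary is
 * written as ilog2(64) - 5 = 1 and the hardware decodes the field as
 * 32 << 1 = 64.  The pack boundary in SGE_CONTROL2 uses the same log2
 * encoding except that field value 0 is the special case for 16B, which is
 * presumably why 32 is not a usable pack boundary.
 */
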
/*
 * adap->params.vpd.cclk must be set up before this is called.
 */
void
t4_tweak_chip_settings(struct adapter *sc)
{
	int i, reg;
	uint32_t v, m;
	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32};	/* 63 max */
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sw_buf_sizes[] = {
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};

	KASSERT(sc->flags & MASTER_PF,
	    ("%s: trying to change chip settings when not master.", __func__));

	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
	    V_EGRSTATUSPAGESIZE(spg_len == 128);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	setup_pad_and_pack_boundaries(sc);

	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);

	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096);
	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536);
	reg = A_SGE_FL_BUFFER_SIZE2;
	for (i = 0; i < nitems(sw_buf_sizes); i++) {
		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
		t4_write_reg(sc, reg, sw_buf_sizes[i]);
		reg += 4;
		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
		t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE);
		reg += 4;
	}

	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);

	KASSERT(intr_timer[0] <= timer_max,
	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
	    timer_max));
	for (i = 1; i < nitems(intr_timer); i++) {
		KASSERT(intr_timer[i] >= intr_timer[i - 1],
		    ("%s: timers not listed in increasing order (%d)",
		    __func__, i));

		while (intr_timer[i] > timer_max) {
			if (i == nitems(intr_timer) - 1) {
				intr_timer[i] = timer_max;
				break;
			}
			intr_timer[i] += intr_timer[i - 1];
			intr_timer[i] /= 2;
		}
	}

	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);

	if (chip_id(sc) >= CHELSIO_T6) {
		m = V_TSCALE(M_TSCALE);
		if (tscale == 1)
			v = 0;
		else
			v = V_TSCALE(tscale - 2);
		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);

		if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
			m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(M_WRTHRTHRESH);
			t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
			v &= ~m;
			v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(16);
			t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
		}
	}

	/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);

	/*
	 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP.  These have been
	 * chosen with MAXPHYS = 128K in mind.  The largest DDP buffer that we
	 * may have to deal with is MAXPHYS + 1 page.
	 */
	v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);

	/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
	m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
}

/*
 * SGE wants the buffer to be at least 64B and then a multiple of 16.  Its
 * address must be 16B aligned.  If padding is in use the buffer's start and end
 * need to be aligned to the pad boundary as well.  We'll just make sure that
 * the size is a multiple of the pad boundary here, it is up to the buffer
 * allocation code to make sure the start of the buffer is aligned.
 */
static inline int
hwsz_ok(struct adapter *sc, int hwsz)
{
	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;

	return (hwsz >= 64 && (hwsz & mask) == 0);
}

/*
 * XXX: driver really should be able to deal with unexpected settings.
 */
int
t4_read_chip_settings(struct adapter *sc)
{
	struct sge *s = &sc->sge;
	struct sge_params *sp = &sc->params.sge;
	int i, j, n, rc = 0;
	uint32_t m, v, r;
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sw_buf_sizes[] = {	/* Sorted by size */
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};
	struct rx_buf_info *rxb;

	m = F_RXPKTCPLMODE;
	v = F_RXPKTCPLMODE;
	r = sc->params.sge.sge_control;
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
		rc = EINVAL;
	}

	/*
	 * If this changes then every single use of PAGE_SHIFT in the driver
	 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
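	 * (sp->page_shift is derived from the SGE_HOST_PAGE_SIZE register
	 * setting for this PF.)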
	 */
	if (sp->page_shift != PAGE_SHIFT) {
		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
		rc = EINVAL;
	}

	s->safe_zidx = -1;
	rxb = &s->rx_buf_info[0];
	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
		rxb->size1 = sw_buf_sizes[i];
		rxb->zone = m_getzone(rxb->size1);
		rxb->type = m_gettype(rxb->size1);
		rxb->size2 = 0;
		rxb->hwidx1 = -1;
		rxb->hwidx2 = -1;
		for (j = 0; j < SGE_FLBUF_SIZES; j++) {
			int hwsize = sp->sge_fl_buffer_size[j];

			if (!hwsz_ok(sc, hwsize))
				continue;

			/* hwidx for size1 */
			if (rxb->hwidx1 == -1 && rxb->size1 == hwsize)
				rxb->hwidx1 = j;

			/* hwidx for size2 (buffer packing) */
			if (rxb->size1 - CL_METADATA_SIZE < hwsize)
				continue;
			n = rxb->size1 - hwsize - CL_METADATA_SIZE;
			if (n == 0) {
				rxb->hwidx2 = j;
				rxb->size2 = hwsize;
				break;	/* stop looking */
			}
			if (rxb->hwidx2 != -1) {
				if (n < sp->sge_fl_buffer_size[rxb->hwidx2] -
				    hwsize - CL_METADATA_SIZE) {
					rxb->hwidx2 = j;
					rxb->size2 = hwsize;
				}
			} else if (n <= 2 * CL_METADATA_SIZE) {
				rxb->hwidx2 = j;
				rxb->size2 = hwsize;
			}
		}
		if (rxb->hwidx2 != -1)
			sc->flags |= BUF_PACKING_OK;
		if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster)
			s->safe_zidx = i;
	}

	if (sc->flags & IS_VF)
		return (0);

	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
	if (r != v) {
		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
		rc = EINVAL;
	}

	m = v = F_TDDPTAGTCB;
	r = t4_read_reg(sc, A_ULP_RX_CTL);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
		rc = EINVAL;
	}

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	r = t4_read_reg(sc, A_TP_PARA_REG5);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
		rc = EINVAL;
	}

	t4_init_tp_params(sc, 1);

	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);

	return (rc);
}

int
t4_create_dma_tag(struct adapter *sc)
{
	int rc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
	    NULL, &sc->dmat);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create main DMA tag: %d\n", rc);
	}

	return (rc);
}

void
t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{
	struct sge_params *sp = &sc->params.sge;

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
	    sysctl_bufsizes, "A", "freelist buffer sizes");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
	    NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
	    NULL, sp->pad_boundary, "payload pad boundary (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
	    NULL, sp->spg_len, "status page size (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
	    NULL, cong_drop, "congestion drop setting");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
	    NULL, sp->pack_boundary, "payload pack boundary (bytes)");
}

int
t4_destroy_dma_tag(struct adapter *sc)
{
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);

	return (0);
}

/*
 * Allocate and initialize the firmware event queue, control queues, and special
 * purpose rx queues owned by the adapter.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	sysctl_ctx_init(&sc->ctx);
	sc->flags |= ADAP_SYSCTL_CTX;

	/*
	 * Firmware event queue
	 */
	rc = alloc_fwq(sc);
	if (rc != 0)
		return (rc);

	/*
	 * That's all for the VF driver.
	 */
	if (sc->flags & IS_VF)
		return (rc);

	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	/*
	 * XXX: General purpose rx queues, one per port.
	 */

	/*
	 * Control queues, one per port.
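	 * Each one is bound to the corresponding port's tx channel.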
	 */
	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues");
	for_each_port(sc, i) {
		struct sge_wrq *ctrlq = &sc->sge.ctrlq[i];

		rc = alloc_ctrlq(sc, ctrlq, i, oid);
		if (rc != 0)
			return (rc);
	}

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_adapter_queues(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* Do this before freeing the queue */
	if (sc->flags & ADAP_SYSCTL_CTX) {
		sysctl_ctx_free(&sc->ctx);
		sc->flags &= ~ADAP_SYSCTL_CTX;
	}

	if (!(sc->flags & IS_VF)) {
		for_each_port(sc, i)
			free_wrq(sc, &sc->sge.ctrlq[i]);
	}
	free_fwq(sc);

	return (0);
}

/* Maximum payload that can be delivered with a single iq descriptor */
static inline int
mtu_to_max_payload(struct adapter *sc, int mtu)
{

	/* large enough even when hw VLAN extraction is disabled */
	return (sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
	    ETHER_VLAN_ENCAP_LEN + mtu);
}

int
t4_setup_vi_queues(struct vi_info *vi)
{
	int rc = 0, i, intr_idx, iqidx;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
	int saved_idx;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif
	char name[16];
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
	int maxp, mtu = ifp->if_mtu;
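
	/*
	 * Queues are allocated below in this order: netmap rx/tx (if
	 * enabled), then NIC rx, offload rx, NIC tx, and offload tx queues;
	 * rx queues must exist before the tx queues that reference their
	 * cntxt_ids.
	 */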
1069733b9277SNavdeep Parhar 1070733b9277SNavdeep Parhar /* Interrupt vector to start from (when using multiple vectors) */ 1071f549e352SNavdeep Parhar intr_idx = vi->first_intr; 1072fe2ebb76SJohn Baldwin 1073fe2ebb76SJohn Baldwin #ifdef DEV_NETMAP 107462291463SNavdeep Parhar saved_idx = intr_idx; 107562291463SNavdeep Parhar if (ifp->if_capabilities & IFCAP_NETMAP) { 107662291463SNavdeep Parhar 107762291463SNavdeep Parhar /* netmap is supported with direct interrupts only. */ 1078f549e352SNavdeep Parhar MPASS(!forwarding_intr_to_fwq(sc)); 107962291463SNavdeep Parhar 1080fe2ebb76SJohn Baldwin /* 1081fe2ebb76SJohn Baldwin * We don't have buffers to back the netmap rx queues 1082fe2ebb76SJohn Baldwin * right now so we create the queues in a way that 1083fe2ebb76SJohn Baldwin * doesn't set off any congestion signal in the chip. 1084fe2ebb76SJohn Baldwin */ 108562291463SNavdeep Parhar oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq", 10867029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues"); 1087fe2ebb76SJohn Baldwin for_each_nm_rxq(vi, i, nm_rxq) { 1088fe2ebb76SJohn Baldwin rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid); 1089fe2ebb76SJohn Baldwin if (rc != 0) 1090fe2ebb76SJohn Baldwin goto done; 1091fe2ebb76SJohn Baldwin intr_idx++; 1092fe2ebb76SJohn Baldwin } 1093fe2ebb76SJohn Baldwin 109462291463SNavdeep Parhar oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq", 10957029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues"); 1096fe2ebb76SJohn Baldwin for_each_nm_txq(vi, i, nm_txq) { 1097f549e352SNavdeep Parhar iqidx = vi->first_nm_rxq + (i % vi->nnmrxq); 1098f549e352SNavdeep Parhar rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid); 1099fe2ebb76SJohn Baldwin if (rc != 0) 1100fe2ebb76SJohn Baldwin goto done; 1101fe2ebb76SJohn Baldwin } 1102fe2ebb76SJohn Baldwin } 110362291463SNavdeep Parhar 110462291463SNavdeep Parhar /* Normal rx queues and netmap rx queues share the same interrupts. */ 110562291463SNavdeep Parhar intr_idx = saved_idx; 1106fe2ebb76SJohn Baldwin #endif 1107733b9277SNavdeep Parhar 1108733b9277SNavdeep Parhar /* 1109f549e352SNavdeep Parhar * Allocate rx queues first because a default iqid is required when 1110f549e352SNavdeep Parhar * creating a tx queue. 1111733b9277SNavdeep Parhar */ 11128bf30903SNavdeep Parhar maxp = mtu_to_max_payload(sc, mtu); 1113fe2ebb76SJohn Baldwin oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq", 11147029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues"); 1115fe2ebb76SJohn Baldwin for_each_rxq(vi, i, rxq) { 111654e4ee71SNavdeep Parhar 1117fe2ebb76SJohn Baldwin init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq); 111854e4ee71SNavdeep Parhar 111954e4ee71SNavdeep Parhar snprintf(name, sizeof(name), "%s rxq%d-fl", 1120fe2ebb76SJohn Baldwin device_get_nameunit(vi->dev), i); 1121fe2ebb76SJohn Baldwin init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name); 112254e4ee71SNavdeep Parhar 1123f549e352SNavdeep Parhar rc = alloc_rxq(vi, rxq, 1124f549e352SNavdeep Parhar forwarding_intr_to_fwq(sc) ? 
-1 : intr_idx, i, oid); 112554e4ee71SNavdeep Parhar if (rc != 0) 112654e4ee71SNavdeep Parhar goto done; 1127733b9277SNavdeep Parhar intr_idx++; 1128733b9277SNavdeep Parhar } 112962291463SNavdeep Parhar #ifdef DEV_NETMAP 113062291463SNavdeep Parhar if (ifp->if_capabilities & IFCAP_NETMAP) 113162291463SNavdeep Parhar intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq); 113262291463SNavdeep Parhar #endif 113309fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD 1134fe2ebb76SJohn Baldwin oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq", 11357029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues for offloaded TCP connections"); 1136fe2ebb76SJohn Baldwin for_each_ofld_rxq(vi, i, ofld_rxq) { 1137733b9277SNavdeep Parhar 113808cd1f11SNavdeep Parhar init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx, 1139fe2ebb76SJohn Baldwin vi->qsize_rxq); 1140733b9277SNavdeep Parhar 1141733b9277SNavdeep Parhar snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", 1142fe2ebb76SJohn Baldwin device_get_nameunit(vi->dev), i); 1143fe2ebb76SJohn Baldwin init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name); 1144733b9277SNavdeep Parhar 1145f549e352SNavdeep Parhar rc = alloc_ofld_rxq(vi, ofld_rxq, 1146f549e352SNavdeep Parhar forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid); 1147733b9277SNavdeep Parhar if (rc != 0) 1148733b9277SNavdeep Parhar goto done; 1149733b9277SNavdeep Parhar intr_idx++; 1150733b9277SNavdeep Parhar } 1151733b9277SNavdeep Parhar #endif 1152733b9277SNavdeep Parhar 1153733b9277SNavdeep Parhar /* 1154f549e352SNavdeep Parhar * Now the tx queues. 1155733b9277SNavdeep Parhar */ 11567029da5cSPawel Biernacki oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", 11577029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues"); 1158fe2ebb76SJohn Baldwin for_each_txq(vi, i, txq) { 1159f549e352SNavdeep Parhar iqidx = vi->first_rxq + (i % vi->nrxq); 116054e4ee71SNavdeep Parhar snprintf(name, sizeof(name), "%s txq%d", 1161fe2ebb76SJohn Baldwin device_get_nameunit(vi->dev), i); 1162f549e352SNavdeep Parhar init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, 1163f549e352SNavdeep Parhar sc->sge.rxq[iqidx].iq.cntxt_id, name); 116454e4ee71SNavdeep Parhar 1165fe2ebb76SJohn Baldwin rc = alloc_txq(vi, txq, i, oid); 116654e4ee71SNavdeep Parhar if (rc != 0) 116754e4ee71SNavdeep Parhar goto done; 116854e4ee71SNavdeep Parhar } 1169eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 1170fe2ebb76SJohn Baldwin oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq", 11717029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues for TOE/ETHOFLD"); 1172fe2ebb76SJohn Baldwin for_each_ofld_txq(vi, i, ofld_txq) { 1173298d969cSNavdeep Parhar struct sysctl_oid *oid2; 1174733b9277SNavdeep Parhar 1175733b9277SNavdeep Parhar snprintf(name, sizeof(name), "%s ofld_txq%d", 1176fe2ebb76SJohn Baldwin device_get_nameunit(vi->dev), i); 1177c3a88be4SNavdeep Parhar if (vi->nofldrxq > 0) { 1178eff62dbaSNavdeep Parhar iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq); 1179c3a88be4SNavdeep Parhar init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, 1180c3a88be4SNavdeep Parhar pi->tx_chan, sc->sge.ofld_rxq[iqidx].iq.cntxt_id, 1181c3a88be4SNavdeep Parhar name); 1182c3a88be4SNavdeep Parhar } else { 1183eff62dbaSNavdeep Parhar iqidx = vi->first_rxq + (i % vi->nrxq); 1184c3a88be4SNavdeep Parhar init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, 1185c3a88be4SNavdeep Parhar pi->tx_chan, sc->sge.rxq[iqidx].iq.cntxt_id, name); 1186c3a88be4SNavdeep Parhar } 
1187733b9277SNavdeep Parhar 1188733b9277SNavdeep Parhar snprintf(name, sizeof(name), "%d", i); 1189fe2ebb76SJohn Baldwin oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO, 11907029da5cSPawel Biernacki name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload tx queue"); 1191733b9277SNavdeep Parhar 1192fe2ebb76SJohn Baldwin rc = alloc_wrq(sc, vi, ofld_txq, oid2); 1193298d969cSNavdeep Parhar if (rc != 0) 1194298d969cSNavdeep Parhar goto done; 1195298d969cSNavdeep Parhar } 1196298d969cSNavdeep Parhar #endif 119754e4ee71SNavdeep Parhar done: 119854e4ee71SNavdeep Parhar if (rc) 1199fe2ebb76SJohn Baldwin t4_teardown_vi_queues(vi); 120054e4ee71SNavdeep Parhar 120154e4ee71SNavdeep Parhar return (rc); 120254e4ee71SNavdeep Parhar } 120354e4ee71SNavdeep Parhar 120454e4ee71SNavdeep Parhar /* 120554e4ee71SNavdeep Parhar * Idempotent 120654e4ee71SNavdeep Parhar */ 120754e4ee71SNavdeep Parhar int 1208fe2ebb76SJohn Baldwin t4_teardown_vi_queues(struct vi_info *vi) 120954e4ee71SNavdeep Parhar { 121054e4ee71SNavdeep Parhar int i; 121154e4ee71SNavdeep Parhar struct sge_rxq *rxq; 121254e4ee71SNavdeep Parhar struct sge_txq *txq; 121337310a98SNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 121437310a98SNavdeep Parhar struct port_info *pi = vi->pi; 121537310a98SNavdeep Parhar struct adapter *sc = pi->adapter; 121637310a98SNavdeep Parhar struct sge_wrq *ofld_txq; 121737310a98SNavdeep Parhar #endif 121809fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD 1219733b9277SNavdeep Parhar struct sge_ofld_rxq *ofld_rxq; 1220eff62dbaSNavdeep Parhar #endif 1221298d969cSNavdeep Parhar #ifdef DEV_NETMAP 1222298d969cSNavdeep Parhar struct sge_nm_rxq *nm_rxq; 1223298d969cSNavdeep Parhar struct sge_nm_txq *nm_txq; 1224298d969cSNavdeep Parhar #endif 122554e4ee71SNavdeep Parhar 122654e4ee71SNavdeep Parhar /* Do this before freeing the queues */ 1227fe2ebb76SJohn Baldwin if (vi->flags & VI_SYSCTL_CTX) { 1228fe2ebb76SJohn Baldwin sysctl_ctx_free(&vi->ctx); 1229fe2ebb76SJohn Baldwin vi->flags &= ~VI_SYSCTL_CTX; 123054e4ee71SNavdeep Parhar } 123154e4ee71SNavdeep Parhar 1232fe2ebb76SJohn Baldwin #ifdef DEV_NETMAP 123362291463SNavdeep Parhar if (vi->ifp->if_capabilities & IFCAP_NETMAP) { 1234fe2ebb76SJohn Baldwin for_each_nm_txq(vi, i, nm_txq) { 1235fe2ebb76SJohn Baldwin free_nm_txq(vi, nm_txq); 1236fe2ebb76SJohn Baldwin } 1237fe2ebb76SJohn Baldwin 1238fe2ebb76SJohn Baldwin for_each_nm_rxq(vi, i, nm_rxq) { 1239fe2ebb76SJohn Baldwin free_nm_rxq(vi, nm_rxq); 1240fe2ebb76SJohn Baldwin } 1241fe2ebb76SJohn Baldwin } 1242fe2ebb76SJohn Baldwin #endif 1243fe2ebb76SJohn Baldwin 1244733b9277SNavdeep Parhar /* 1245733b9277SNavdeep Parhar * Take down all the tx queues first, as they reference the rx queues 1246733b9277SNavdeep Parhar * (for egress updates, etc.). 1247733b9277SNavdeep Parhar */ 1248733b9277SNavdeep Parhar 1249fe2ebb76SJohn Baldwin for_each_txq(vi, i, txq) { 1250fe2ebb76SJohn Baldwin free_txq(vi, txq); 125154e4ee71SNavdeep Parhar } 1252eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 1253fe2ebb76SJohn Baldwin for_each_ofld_txq(vi, i, ofld_txq) { 1254733b9277SNavdeep Parhar free_wrq(sc, ofld_txq); 1255733b9277SNavdeep Parhar } 1256733b9277SNavdeep Parhar #endif 1257733b9277SNavdeep Parhar 1258733b9277SNavdeep Parhar /* 1259f549e352SNavdeep Parhar * Then take down the rx queues. 
1260733b9277SNavdeep Parhar */ 1261733b9277SNavdeep Parhar 1262fe2ebb76SJohn Baldwin for_each_rxq(vi, i, rxq) { 1263fe2ebb76SJohn Baldwin free_rxq(vi, rxq); 126454e4ee71SNavdeep Parhar } 126509fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD 1266fe2ebb76SJohn Baldwin for_each_ofld_rxq(vi, i, ofld_rxq) { 1267fe2ebb76SJohn Baldwin free_ofld_rxq(vi, ofld_rxq); 1268733b9277SNavdeep Parhar } 1269733b9277SNavdeep Parhar #endif 1270733b9277SNavdeep Parhar 127154e4ee71SNavdeep Parhar return (0); 127254e4ee71SNavdeep Parhar } 127354e4ee71SNavdeep Parhar 1274733b9277SNavdeep Parhar /* 12753098bcfcSNavdeep Parhar * Interrupt handler when the driver is using only 1 interrupt. This is a very 12763098bcfcSNavdeep Parhar * unusual scenario. 12773098bcfcSNavdeep Parhar * 12783098bcfcSNavdeep Parhar * a) Deals with errors, if any. 12793098bcfcSNavdeep Parhar * b) Services firmware event queue, which is taking interrupts for all other 12803098bcfcSNavdeep Parhar * queues. 1281733b9277SNavdeep Parhar */ 128254e4ee71SNavdeep Parhar void 128354e4ee71SNavdeep Parhar t4_intr_all(void *arg) 128454e4ee71SNavdeep Parhar { 128554e4ee71SNavdeep Parhar struct adapter *sc = arg; 1286733b9277SNavdeep Parhar struct sge_iq *fwq = &sc->sge.fwq; 128754e4ee71SNavdeep Parhar 12883098bcfcSNavdeep Parhar MPASS(sc->intr_count == 1); 12893098bcfcSNavdeep Parhar 12901dca7005SNavdeep Parhar if (sc->intr_type == INTR_INTX) 12911dca7005SNavdeep Parhar t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 12921dca7005SNavdeep Parhar 129354e4ee71SNavdeep Parhar t4_intr_err(arg); 12943098bcfcSNavdeep Parhar t4_intr_evt(fwq); 129554e4ee71SNavdeep Parhar } 129654e4ee71SNavdeep Parhar 12973098bcfcSNavdeep Parhar /* 12983098bcfcSNavdeep Parhar * Interrupt handler for errors (installed directly when multiple interrupts are 12993098bcfcSNavdeep Parhar * being used, or called by t4_intr_all). 13003098bcfcSNavdeep Parhar */ 130154e4ee71SNavdeep Parhar void 130254e4ee71SNavdeep Parhar t4_intr_err(void *arg) 130354e4ee71SNavdeep Parhar { 130454e4ee71SNavdeep Parhar struct adapter *sc = arg; 1305dd3b96ecSNavdeep Parhar uint32_t v; 1306cb7c3f12SNavdeep Parhar const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; 130754e4ee71SNavdeep Parhar 1308cb7c3f12SNavdeep Parhar if (sc->flags & ADAP_ERR) 1309cb7c3f12SNavdeep Parhar return; 1310cb7c3f12SNavdeep Parhar 1311dd3b96ecSNavdeep Parhar v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE)); 1312dd3b96ecSNavdeep Parhar if (v & F_PFSW) { 1313dd3b96ecSNavdeep Parhar sc->swintr++; 1314dd3b96ecSNavdeep Parhar t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v); 1315dd3b96ecSNavdeep Parhar } 1316dd3b96ecSNavdeep Parhar 1317cb7c3f12SNavdeep Parhar t4_slow_intr_handler(sc, verbose); 131854e4ee71SNavdeep Parhar } 131954e4ee71SNavdeep Parhar 13203098bcfcSNavdeep Parhar /* 13213098bcfcSNavdeep Parhar * Interrupt handler for iq-only queues. The firmware event queue is the only 13223098bcfcSNavdeep Parhar * such queue right now. 
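 * The IQS_IDLE -> IQS_BUSY cmpset below is what keeps the handler
 * single-threaded per queue: losing the race simply means another
 * thread is already servicing this iq.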
13233098bcfcSNavdeep Parhar */ 132454e4ee71SNavdeep Parhar void 132554e4ee71SNavdeep Parhar t4_intr_evt(void *arg) 132654e4ee71SNavdeep Parhar { 132754e4ee71SNavdeep Parhar struct sge_iq *iq = arg; 13282be67d29SNavdeep Parhar 1329733b9277SNavdeep Parhar if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1330733b9277SNavdeep Parhar service_iq(iq, 0); 1331da6e3387SNavdeep Parhar (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 13322be67d29SNavdeep Parhar } 13332be67d29SNavdeep Parhar } 13342be67d29SNavdeep Parhar 13353098bcfcSNavdeep Parhar /* 13363098bcfcSNavdeep Parhar * Interrupt handler for iq+fl queues. 13373098bcfcSNavdeep Parhar */ 1338733b9277SNavdeep Parhar void 1339733b9277SNavdeep Parhar t4_intr(void *arg) 13402be67d29SNavdeep Parhar { 13412be67d29SNavdeep Parhar struct sge_iq *iq = arg; 1342733b9277SNavdeep Parhar 1343733b9277SNavdeep Parhar if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 13443098bcfcSNavdeep Parhar service_iq_fl(iq, 0); 1345da6e3387SNavdeep Parhar (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1346733b9277SNavdeep Parhar } 1347733b9277SNavdeep Parhar } 1348733b9277SNavdeep Parhar 13493098bcfcSNavdeep Parhar #ifdef DEV_NETMAP 13503098bcfcSNavdeep Parhar /* 13513098bcfcSNavdeep Parhar * Interrupt handler for netmap rx queues. 13523098bcfcSNavdeep Parhar */ 13533098bcfcSNavdeep Parhar void 13543098bcfcSNavdeep Parhar t4_nm_intr(void *arg) 13553098bcfcSNavdeep Parhar { 13563098bcfcSNavdeep Parhar struct sge_nm_rxq *nm_rxq = arg; 13573098bcfcSNavdeep Parhar 13583098bcfcSNavdeep Parhar if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) { 13593098bcfcSNavdeep Parhar service_nm_rxq(nm_rxq); 1360da6e3387SNavdeep Parhar (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON); 13613098bcfcSNavdeep Parhar } 13623098bcfcSNavdeep Parhar } 13633098bcfcSNavdeep Parhar 13643098bcfcSNavdeep Parhar /* 13653098bcfcSNavdeep Parhar * Interrupt handler for vectors shared between NIC and netmap rx queues. 13663098bcfcSNavdeep Parhar */ 136762291463SNavdeep Parhar void 136862291463SNavdeep Parhar t4_vi_intr(void *arg) 136962291463SNavdeep Parhar { 137062291463SNavdeep Parhar struct irq *irq = arg; 137162291463SNavdeep Parhar 13723098bcfcSNavdeep Parhar MPASS(irq->nm_rxq != NULL); 137362291463SNavdeep Parhar t4_nm_intr(irq->nm_rxq); 13743098bcfcSNavdeep Parhar 13753098bcfcSNavdeep Parhar MPASS(irq->rxq != NULL); 137662291463SNavdeep Parhar t4_intr(irq->rxq); 137762291463SNavdeep Parhar } 13783098bcfcSNavdeep Parhar #endif 137946f48ee5SNavdeep Parhar 1380733b9277SNavdeep Parhar /* 13813098bcfcSNavdeep Parhar * Deals with interrupts on an iq-only (no freelist) queue. 
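 * A response of type X_RSPD_TYPE_FLBUF on such a queue is a driver bug;
 * the handler panics if it sees one.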
1382733b9277SNavdeep Parhar */ 1383733b9277SNavdeep Parhar static int 1384733b9277SNavdeep Parhar service_iq(struct sge_iq *iq, int budget) 1385733b9277SNavdeep Parhar { 1386733b9277SNavdeep Parhar struct sge_iq *q; 138754e4ee71SNavdeep Parhar struct adapter *sc = iq->adapter; 1388b2daa9a9SNavdeep Parhar struct iq_desc *d = &iq->desc[iq->cidx]; 13894d6db4e0SNavdeep Parhar int ndescs = 0, limit; 13903098bcfcSNavdeep Parhar int rsp_type; 1391733b9277SNavdeep Parhar uint32_t lq; 1392733b9277SNavdeep Parhar STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); 1393733b9277SNavdeep Parhar 1394733b9277SNavdeep Parhar KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 13953098bcfcSNavdeep Parhar KASSERT((iq->flags & IQ_HAS_FL) == 0, 13963098bcfcSNavdeep Parhar ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq, 13973098bcfcSNavdeep Parhar iq->flags)); 13983098bcfcSNavdeep Parhar MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); 13993098bcfcSNavdeep Parhar MPASS((iq->flags & IQ_LRO_ENABLED) == 0); 1400733b9277SNavdeep Parhar 14014d6db4e0SNavdeep Parhar limit = budget ? budget : iq->qsize / 16; 14024d6db4e0SNavdeep Parhar 1403733b9277SNavdeep Parhar /* 1404733b9277SNavdeep Parhar * We always come back and check the descriptor ring for new indirect 1405733b9277SNavdeep Parhar * interrupts and other responses after running a single handler. 1406733b9277SNavdeep Parhar */ 1407733b9277SNavdeep Parhar for (;;) { 1408b2daa9a9SNavdeep Parhar while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 140954e4ee71SNavdeep Parhar 141054e4ee71SNavdeep Parhar rmb(); 141154e4ee71SNavdeep Parhar 1412b2daa9a9SNavdeep Parhar rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 1413b2daa9a9SNavdeep Parhar lq = be32toh(d->rsp.pldbuflen_qid); 141454e4ee71SNavdeep Parhar 1415733b9277SNavdeep Parhar switch (rsp_type) { 1416733b9277SNavdeep Parhar case X_RSPD_TYPE_FLBUF: 14173098bcfcSNavdeep Parhar panic("%s: data for an iq (%p) with no freelist", 14183098bcfcSNavdeep Parhar __func__, iq); 141954e4ee71SNavdeep Parhar 14203098bcfcSNavdeep Parhar /* NOTREACHED */ 1421733b9277SNavdeep Parhar 1422733b9277SNavdeep Parhar case X_RSPD_TYPE_CPL: 1423b2daa9a9SNavdeep Parhar KASSERT(d->rss.opcode < NUM_CPL_CMDS, 1424733b9277SNavdeep Parhar ("%s: bad opcode %02x.", __func__, 1425b2daa9a9SNavdeep Parhar d->rss.opcode)); 14263098bcfcSNavdeep Parhar t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL); 1427733b9277SNavdeep Parhar break; 1428733b9277SNavdeep Parhar 1429733b9277SNavdeep Parhar case X_RSPD_TYPE_INTR: 143098005176SNavdeep Parhar /* 143198005176SNavdeep Parhar * There are 1K interrupt-capable queues (qids 0 143298005176SNavdeep Parhar * through 1023). A response type indicating a 143398005176SNavdeep Parhar * forwarded interrupt with a qid >= 1K is an 143498005176SNavdeep Parhar * iWARP async notification. 
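 * Forwarded interrupts with a qid below 1K identify one of this
 * adapter's own queues; the qid is looked up in sc->sge.iqmap (adjusted
 * by iq_start/iq_base) so the target queue can be serviced directly.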
143598005176SNavdeep Parhar */ 14363098bcfcSNavdeep Parhar if (__predict_true(lq >= 1024)) { 1437671bf2b8SNavdeep Parhar t4_an_handler(iq, &d->rsp); 143898005176SNavdeep Parhar break; 143998005176SNavdeep Parhar } 144098005176SNavdeep Parhar 1441ec55567cSJohn Baldwin q = sc->sge.iqmap[lq - sc->sge.iq_start - 1442ec55567cSJohn Baldwin sc->sge.iq_base]; 1443733b9277SNavdeep Parhar if (atomic_cmpset_int(&q->state, IQS_IDLE, 1444733b9277SNavdeep Parhar IQS_BUSY)) { 14453098bcfcSNavdeep Parhar if (service_iq_fl(q, q->qsize / 16) == 0) { 1446da6e3387SNavdeep Parhar (void) atomic_cmpset_int(&q->state, 1447733b9277SNavdeep Parhar IQS_BUSY, IQS_IDLE); 1448733b9277SNavdeep Parhar } else { 1449733b9277SNavdeep Parhar STAILQ_INSERT_TAIL(&iql, q, 1450733b9277SNavdeep Parhar link); 1451733b9277SNavdeep Parhar } 1452733b9277SNavdeep Parhar } 1453733b9277SNavdeep Parhar break; 1454733b9277SNavdeep Parhar 1455733b9277SNavdeep Parhar default: 145698005176SNavdeep Parhar KASSERT(0, 145798005176SNavdeep Parhar ("%s: illegal response type %d on iq %p", 145898005176SNavdeep Parhar __func__, rsp_type, iq)); 145998005176SNavdeep Parhar log(LOG_ERR, 146098005176SNavdeep Parhar "%s: illegal response type %d on iq %p", 146198005176SNavdeep Parhar device_get_nameunit(sc->dev), rsp_type, iq); 146209fe6320SNavdeep Parhar break; 146354e4ee71SNavdeep Parhar } 146456599263SNavdeep Parhar 1465b2daa9a9SNavdeep Parhar d++; 1466b2daa9a9SNavdeep Parhar if (__predict_false(++iq->cidx == iq->sidx)) { 1467b2daa9a9SNavdeep Parhar iq->cidx = 0; 1468b2daa9a9SNavdeep Parhar iq->gen ^= F_RSPD_GEN; 1469b2daa9a9SNavdeep Parhar d = &iq->desc[0]; 1470b2daa9a9SNavdeep Parhar } 1471b2daa9a9SNavdeep Parhar if (__predict_false(++ndescs == limit)) { 1472315048f2SJohn Baldwin t4_write_reg(sc, sc->sge_gts_reg, 1473733b9277SNavdeep Parhar V_CIDXINC(ndescs) | 1474733b9277SNavdeep Parhar V_INGRESSQID(iq->cntxt_id) | 1475733b9277SNavdeep Parhar V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1476733b9277SNavdeep Parhar ndescs = 0; 1477733b9277SNavdeep Parhar 14783098bcfcSNavdeep Parhar if (budget) { 14793098bcfcSNavdeep Parhar return (EINPROGRESS); 14803098bcfcSNavdeep Parhar } 14813098bcfcSNavdeep Parhar } 14823098bcfcSNavdeep Parhar } 14833098bcfcSNavdeep Parhar 14843098bcfcSNavdeep Parhar if (STAILQ_EMPTY(&iql)) 14853098bcfcSNavdeep Parhar break; 14863098bcfcSNavdeep Parhar 14873098bcfcSNavdeep Parhar /* 14883098bcfcSNavdeep Parhar * Process the head only, and send it to the back of the list if 14893098bcfcSNavdeep Parhar * it's still not done. 
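 * Servicing one queue at a time and requeueing the unfinished ones
 * round-robins the work instead of letting a single busy iq starve the
 * rest of the list.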
14903098bcfcSNavdeep Parhar */ 14913098bcfcSNavdeep Parhar q = STAILQ_FIRST(&iql); 14923098bcfcSNavdeep Parhar STAILQ_REMOVE_HEAD(&iql, link); 14933098bcfcSNavdeep Parhar if (service_iq_fl(q, q->qsize / 8) == 0) 1494da6e3387SNavdeep Parhar (void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); 14953098bcfcSNavdeep Parhar else 14963098bcfcSNavdeep Parhar STAILQ_INSERT_TAIL(&iql, q, link); 14973098bcfcSNavdeep Parhar } 14983098bcfcSNavdeep Parhar 14993098bcfcSNavdeep Parhar t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | 15003098bcfcSNavdeep Parhar V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 15013098bcfcSNavdeep Parhar 15023098bcfcSNavdeep Parhar return (0); 15033098bcfcSNavdeep Parhar } 15043098bcfcSNavdeep Parhar 15053098bcfcSNavdeep Parhar static inline int 15063098bcfcSNavdeep Parhar sort_before_lro(struct lro_ctrl *lro) 15073098bcfcSNavdeep Parhar { 15083098bcfcSNavdeep Parhar 15093098bcfcSNavdeep Parhar return (lro->lro_mbuf_max != 0); 15103098bcfcSNavdeep Parhar } 15113098bcfcSNavdeep Parhar 1512e7e08444SNavdeep Parhar static inline uint64_t 1513e7e08444SNavdeep Parhar last_flit_to_ns(struct adapter *sc, uint64_t lf) 1514e7e08444SNavdeep Parhar { 1515e7e08444SNavdeep Parhar uint64_t n = be64toh(lf) & 0xfffffffffffffff; /* 60b, not 64b. */ 1516e7e08444SNavdeep Parhar 1517e7e08444SNavdeep Parhar if (n > UINT64_MAX / 1000000) 1518e7e08444SNavdeep Parhar return (n / sc->params.vpd.cclk * 1000000); 1519e7e08444SNavdeep Parhar else 1520e7e08444SNavdeep Parhar return (n * 1000000 / sc->params.vpd.cclk); 1521e7e08444SNavdeep Parhar } 1522e7e08444SNavdeep Parhar 152346e1e307SNavdeep Parhar static inline void 152446e1e307SNavdeep Parhar move_to_next_rxbuf(struct sge_fl *fl) 152546e1e307SNavdeep Parhar { 152646e1e307SNavdeep Parhar 152746e1e307SNavdeep Parhar fl->rx_offset = 0; 152846e1e307SNavdeep Parhar if (__predict_false((++fl->cidx & 7) == 0)) { 152946e1e307SNavdeep Parhar uint16_t cidx = fl->cidx >> 3; 153046e1e307SNavdeep Parhar 153146e1e307SNavdeep Parhar if (__predict_false(cidx == fl->sidx)) 153246e1e307SNavdeep Parhar fl->cidx = cidx = 0; 153346e1e307SNavdeep Parhar fl->hw_cidx = cidx; 153446e1e307SNavdeep Parhar } 153546e1e307SNavdeep Parhar } 153646e1e307SNavdeep Parhar 15373098bcfcSNavdeep Parhar /* 15383098bcfcSNavdeep Parhar * Deals with interrupts on an iq+fl queue. 
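 * Unlike service_iq() above, this path may hold back a descriptor
 * credit for LRO (IQ_ADJ_CREDIT), refills the freelist as buffers are
 * consumed, and feeds rx packets to eth_rx().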
15393098bcfcSNavdeep Parhar */ 15403098bcfcSNavdeep Parhar static int 15413098bcfcSNavdeep Parhar service_iq_fl(struct sge_iq *iq, int budget) 15423098bcfcSNavdeep Parhar { 15433098bcfcSNavdeep Parhar struct sge_rxq *rxq = iq_to_rxq(iq); 15443098bcfcSNavdeep Parhar struct sge_fl *fl; 15453098bcfcSNavdeep Parhar struct adapter *sc = iq->adapter; 15463098bcfcSNavdeep Parhar struct iq_desc *d = &iq->desc[iq->cidx]; 154746e1e307SNavdeep Parhar int ndescs, limit; 154846e1e307SNavdeep Parhar int rsp_type, starved; 15493098bcfcSNavdeep Parhar uint32_t lq; 15503098bcfcSNavdeep Parhar uint16_t fl_hw_cidx; 15513098bcfcSNavdeep Parhar struct mbuf *m0; 15523098bcfcSNavdeep Parhar #if defined(INET) || defined(INET6) 15533098bcfcSNavdeep Parhar const struct timeval lro_timeout = {0, sc->lro_timeout}; 15543098bcfcSNavdeep Parhar struct lro_ctrl *lro = &rxq->lro; 15553098bcfcSNavdeep Parhar #endif 15563098bcfcSNavdeep Parhar 15573098bcfcSNavdeep Parhar KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 15583098bcfcSNavdeep Parhar MPASS(iq->flags & IQ_HAS_FL); 15593098bcfcSNavdeep Parhar 156046e1e307SNavdeep Parhar ndescs = 0; 15613098bcfcSNavdeep Parhar #if defined(INET) || defined(INET6) 15623098bcfcSNavdeep Parhar if (iq->flags & IQ_ADJ_CREDIT) { 15633098bcfcSNavdeep Parhar MPASS(sort_before_lro(lro)); 15643098bcfcSNavdeep Parhar iq->flags &= ~IQ_ADJ_CREDIT; 15653098bcfcSNavdeep Parhar if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) { 15663098bcfcSNavdeep Parhar tcp_lro_flush_all(lro); 15673098bcfcSNavdeep Parhar t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) | 15683098bcfcSNavdeep Parhar V_INGRESSQID((u32)iq->cntxt_id) | 15693098bcfcSNavdeep Parhar V_SEINTARM(iq->intr_params)); 15703098bcfcSNavdeep Parhar return (0); 15713098bcfcSNavdeep Parhar } 15723098bcfcSNavdeep Parhar ndescs = 1; 15733098bcfcSNavdeep Parhar } 15743098bcfcSNavdeep Parhar #else 15753098bcfcSNavdeep Parhar MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); 15763098bcfcSNavdeep Parhar #endif 15773098bcfcSNavdeep Parhar 157846e1e307SNavdeep Parhar limit = budget ? 
budget : iq->qsize / 16; 157946e1e307SNavdeep Parhar fl = &rxq->fl; 158046e1e307SNavdeep Parhar fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ 15813098bcfcSNavdeep Parhar while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 15823098bcfcSNavdeep Parhar 15833098bcfcSNavdeep Parhar rmb(); 15843098bcfcSNavdeep Parhar 15853098bcfcSNavdeep Parhar m0 = NULL; 15863098bcfcSNavdeep Parhar rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 15873098bcfcSNavdeep Parhar lq = be32toh(d->rsp.pldbuflen_qid); 15883098bcfcSNavdeep Parhar 15893098bcfcSNavdeep Parhar switch (rsp_type) { 15903098bcfcSNavdeep Parhar case X_RSPD_TYPE_FLBUF: 159146e1e307SNavdeep Parhar if (lq & F_RSPD_NEWBUF) { 159246e1e307SNavdeep Parhar if (fl->rx_offset > 0) 159346e1e307SNavdeep Parhar move_to_next_rxbuf(fl); 159446e1e307SNavdeep Parhar lq = G_RSPD_LEN(lq); 159546e1e307SNavdeep Parhar } 159646e1e307SNavdeep Parhar if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) { 159746e1e307SNavdeep Parhar FL_LOCK(fl); 159846e1e307SNavdeep Parhar refill_fl(sc, fl, 64); 159946e1e307SNavdeep Parhar FL_UNLOCK(fl); 160046e1e307SNavdeep Parhar fl_hw_cidx = fl->hw_cidx; 160146e1e307SNavdeep Parhar } 16023098bcfcSNavdeep Parhar 16031486d2deSNavdeep Parhar if (d->rss.opcode == CPL_RX_PKT) { 16041486d2deSNavdeep Parhar if (__predict_true(eth_rx(sc, rxq, d, lq) == 0)) 16051486d2deSNavdeep Parhar break; 16061486d2deSNavdeep Parhar goto out; 16071486d2deSNavdeep Parhar } 16083098bcfcSNavdeep Parhar m0 = get_fl_payload(sc, fl, lq); 16093098bcfcSNavdeep Parhar if (__predict_false(m0 == NULL)) 16103098bcfcSNavdeep Parhar goto out; 1611e7e08444SNavdeep Parhar 16123098bcfcSNavdeep Parhar /* fall through */ 16133098bcfcSNavdeep Parhar 16143098bcfcSNavdeep Parhar case X_RSPD_TYPE_CPL: 16153098bcfcSNavdeep Parhar KASSERT(d->rss.opcode < NUM_CPL_CMDS, 16163098bcfcSNavdeep Parhar ("%s: bad opcode %02x.", __func__, d->rss.opcode)); 16173098bcfcSNavdeep Parhar t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); 16183098bcfcSNavdeep Parhar break; 16193098bcfcSNavdeep Parhar 16203098bcfcSNavdeep Parhar case X_RSPD_TYPE_INTR: 16213098bcfcSNavdeep Parhar 16223098bcfcSNavdeep Parhar /* 16233098bcfcSNavdeep Parhar * There are 1K interrupt-capable queues (qids 0 16243098bcfcSNavdeep Parhar * through 1023). A response type indicating a 16253098bcfcSNavdeep Parhar * forwarded interrupt with a qid >= 1K is an 16263098bcfcSNavdeep Parhar * iWARP async notification. That is the only 16273098bcfcSNavdeep Parhar * acceptable indirect interrupt on this queue. 
16283098bcfcSNavdeep Parhar */ 16293098bcfcSNavdeep Parhar if (__predict_false(lq < 1024)) { 16303098bcfcSNavdeep Parhar panic("%s: indirect interrupt on iq_fl %p " 16313098bcfcSNavdeep Parhar "with qid %u", __func__, iq, lq); 16323098bcfcSNavdeep Parhar } 16333098bcfcSNavdeep Parhar 16343098bcfcSNavdeep Parhar t4_an_handler(iq, &d->rsp); 16353098bcfcSNavdeep Parhar break; 16363098bcfcSNavdeep Parhar 16373098bcfcSNavdeep Parhar default: 16383098bcfcSNavdeep Parhar KASSERT(0, ("%s: illegal response type %d on iq %p", 16393098bcfcSNavdeep Parhar __func__, rsp_type, iq)); 16403098bcfcSNavdeep Parhar log(LOG_ERR, "%s: illegal response type %d on iq %p", 16413098bcfcSNavdeep Parhar device_get_nameunit(sc->dev), rsp_type, iq); 16423098bcfcSNavdeep Parhar break; 16433098bcfcSNavdeep Parhar } 16443098bcfcSNavdeep Parhar 16453098bcfcSNavdeep Parhar d++; 16463098bcfcSNavdeep Parhar if (__predict_false(++iq->cidx == iq->sidx)) { 16473098bcfcSNavdeep Parhar iq->cidx = 0; 16483098bcfcSNavdeep Parhar iq->gen ^= F_RSPD_GEN; 16493098bcfcSNavdeep Parhar d = &iq->desc[0]; 16503098bcfcSNavdeep Parhar } 16513098bcfcSNavdeep Parhar if (__predict_false(++ndescs == limit)) { 16523098bcfcSNavdeep Parhar t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | 16533098bcfcSNavdeep Parhar V_INGRESSQID(iq->cntxt_id) | 16543098bcfcSNavdeep Parhar V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 16553098bcfcSNavdeep Parhar 1656480e603cSNavdeep Parhar #if defined(INET) || defined(INET6) 1657480e603cSNavdeep Parhar if (iq->flags & IQ_LRO_ENABLED && 165846f48ee5SNavdeep Parhar !sort_before_lro(lro) && 1659480e603cSNavdeep Parhar sc->lro_timeout != 0) { 16603098bcfcSNavdeep Parhar tcp_lro_flush_inactive(lro, &lro_timeout); 1661480e603cSNavdeep Parhar } 1662480e603cSNavdeep Parhar #endif 166346e1e307SNavdeep Parhar if (budget) 1664733b9277SNavdeep Parhar return (EINPROGRESS); 166546e1e307SNavdeep Parhar ndescs = 0; 16664d6db4e0SNavdeep Parhar } 1667861e42b2SNavdeep Parhar } 16683098bcfcSNavdeep Parhar out: 1669a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6) 1670733b9277SNavdeep Parhar if (iq->flags & IQ_LRO_ENABLED) { 167146f48ee5SNavdeep Parhar if (ndescs > 0 && lro->lro_mbuf_count > 8) { 167246f48ee5SNavdeep Parhar MPASS(sort_before_lro(lro)); 167346f48ee5SNavdeep Parhar /* hold back one credit and don't flush LRO state */ 167446f48ee5SNavdeep Parhar iq->flags |= IQ_ADJ_CREDIT; 167546f48ee5SNavdeep Parhar ndescs--; 167646f48ee5SNavdeep Parhar } else { 16776dd38b87SSepherosa Ziehau tcp_lro_flush_all(lro); 1678733b9277SNavdeep Parhar } 167946f48ee5SNavdeep Parhar } 1680733b9277SNavdeep Parhar #endif 1681733b9277SNavdeep Parhar 1682315048f2SJohn Baldwin t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | 1683733b9277SNavdeep Parhar V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 1684733b9277SNavdeep Parhar 1685733b9277SNavdeep Parhar FL_LOCK(fl); 168638035ed6SNavdeep Parhar starved = refill_fl(sc, fl, 64); 1687733b9277SNavdeep Parhar FL_UNLOCK(fl); 1688733b9277SNavdeep Parhar if (__predict_false(starved != 0)) 1689733b9277SNavdeep Parhar add_fl_to_sfl(sc, fl); 1690733b9277SNavdeep Parhar 1691733b9277SNavdeep Parhar return (0); 1692733b9277SNavdeep Parhar } 1693733b9277SNavdeep Parhar 169438035ed6SNavdeep Parhar static inline struct cluster_metadata * 169546e1e307SNavdeep Parhar cl_metadata(struct fl_sdesc *sd) 16961458bff9SNavdeep Parhar { 16971458bff9SNavdeep Parhar 169846e1e307SNavdeep Parhar return ((void *)(sd->cl + sd->moff)); 16991458bff9SNavdeep Parhar } 17001458bff9SNavdeep Parhar 
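/*
 * The cluster metadata (shared refcount, source zone, and buffer pointer)
 * lives inside the rx cluster itself, sd->moff bytes in (see cl_metadata()
 * above).  rxb_free() below is the free routine for such clusters and runs
 * only when the last reference to the cluster is released.
 */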
170115c28f87SGleb Smirnoff static void 1702e8fd18f3SGleb Smirnoff rxb_free(struct mbuf *m) 17031458bff9SNavdeep Parhar { 1704d6f79b27SNavdeep Parhar struct cluster_metadata *clm = m->m_ext.ext_arg1; 17051458bff9SNavdeep Parhar 1706d6f79b27SNavdeep Parhar uma_zfree(clm->zone, clm->cl); 170782eff304SNavdeep Parhar counter_u64_add(extfree_rels, 1); 17081458bff9SNavdeep Parhar } 17091458bff9SNavdeep Parhar 171038035ed6SNavdeep Parhar /* 171146e1e307SNavdeep Parhar * The mbuf returned comes from zone_mbuf and carries the payload in one of these 171246e1e307SNavdeep Parhar * ways 171346e1e307SNavdeep Parhar * a) complete frame inside the mbuf 171446e1e307SNavdeep Parhar * b) m_cljset (for clusters without metadata) 171546e1e307SNavdeep Parhar * c) m_extaddref (cluster with metadata) 171638035ed6SNavdeep Parhar */ 17171458bff9SNavdeep Parhar static struct mbuf * 1718b741402cSNavdeep Parhar get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, 1719b741402cSNavdeep Parhar int remaining) 172038035ed6SNavdeep Parhar { 172138035ed6SNavdeep Parhar struct mbuf *m; 172238035ed6SNavdeep Parhar struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 172346e1e307SNavdeep Parhar struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; 172446e1e307SNavdeep Parhar struct cluster_metadata *clm; 1725b741402cSNavdeep Parhar int len, blen; 172638035ed6SNavdeep Parhar caddr_t payload; 172738035ed6SNavdeep Parhar 1728e3207e19SNavdeep Parhar if (fl->flags & FL_BUF_PACKING) { 172946e1e307SNavdeep Parhar u_int l, pad; 1730b741402cSNavdeep Parhar 173146e1e307SNavdeep Parhar blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */ 173246e1e307SNavdeep Parhar len = min(remaining, blen); 173346e1e307SNavdeep Parhar payload = sd->cl + fl->rx_offset; 173446e1e307SNavdeep Parhar 173546e1e307SNavdeep Parhar l = fr_offset + len; 173646e1e307SNavdeep Parhar pad = roundup2(l, fl->buf_boundary) - l; 173746e1e307SNavdeep Parhar if (fl->rx_offset + len + pad < rxb->size2) 1738b741402cSNavdeep Parhar blen = len + pad; 173946e1e307SNavdeep Parhar MPASS(fl->rx_offset + blen <= rxb->size2); 1740e3207e19SNavdeep Parhar } else { 1741e3207e19SNavdeep Parhar MPASS(fl->rx_offset == 0); /* not packing */ 174246e1e307SNavdeep Parhar blen = rxb->size1; 174346e1e307SNavdeep Parhar len = min(remaining, blen); 174446e1e307SNavdeep Parhar payload = sd->cl; 1745e3207e19SNavdeep Parhar } 174638035ed6SNavdeep Parhar 174746e1e307SNavdeep Parhar if (fr_offset == 0) { 174846e1e307SNavdeep Parhar m = m_gethdr(M_NOWAIT, MT_DATA); 174946e1e307SNavdeep Parhar if (__predict_false(m == NULL)) 175046e1e307SNavdeep Parhar return (NULL); 175146e1e307SNavdeep Parhar m->m_pkthdr.len = remaining; 175246e1e307SNavdeep Parhar } else { 175346e1e307SNavdeep Parhar m = m_get(M_NOWAIT, MT_DATA); 175446e1e307SNavdeep Parhar if (__predict_false(m == NULL)) 175546e1e307SNavdeep Parhar return (NULL); 175646e1e307SNavdeep Parhar } 175746e1e307SNavdeep Parhar m->m_len = len; 1758b741402cSNavdeep Parhar 175938035ed6SNavdeep Parhar if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { 176038035ed6SNavdeep Parhar /* copy data to mbuf */ 176138035ed6SNavdeep Parhar bcopy(payload, mtod(m, caddr_t), len); 176246e1e307SNavdeep Parhar if (fl->flags & FL_BUF_PACKING) { 176346e1e307SNavdeep Parhar fl->rx_offset += blen; 176446e1e307SNavdeep Parhar MPASS(fl->rx_offset <= rxb->size2); 176546e1e307SNavdeep Parhar if (fl->rx_offset < rxb->size2) 176646e1e307SNavdeep Parhar return (m); /* without advancing the cidx */ 176746e1e307SNavdeep Parhar } 176846e1e307SNavdeep Parhar }
else if (fl->flags & FL_BUF_PACKING) { 176946e1e307SNavdeep Parhar clm = cl_metadata(sd); 1770a9c4062aSNavdeep Parhar if (sd->nmbuf++ == 0) { 1771a9c4062aSNavdeep Parhar clm->refcount = 1; 177246e1e307SNavdeep Parhar clm->zone = rxb->zone; 1773d6f79b27SNavdeep Parhar clm->cl = sd->cl; 1774a9c4062aSNavdeep Parhar counter_u64_add(extfree_refs, 1); 1775a9c4062aSNavdeep Parhar } 1776d6f79b27SNavdeep Parhar m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm, 1777d6f79b27SNavdeep Parhar NULL); 177838035ed6SNavdeep Parhar 177946e1e307SNavdeep Parhar fl->rx_offset += blen; 178046e1e307SNavdeep Parhar MPASS(fl->rx_offset <= rxb->size2); 178146e1e307SNavdeep Parhar if (fl->rx_offset < rxb->size2) 178246e1e307SNavdeep Parhar return (m); /* without advancing the cidx */ 1783ccc69b2fSNavdeep Parhar } else { 178446e1e307SNavdeep Parhar m_cljset(m, sd->cl, rxb->type); 178538035ed6SNavdeep Parhar sd->cl = NULL; /* consumed, not a recycle candidate */ 178638035ed6SNavdeep Parhar } 178738035ed6SNavdeep Parhar 178846e1e307SNavdeep Parhar move_to_next_rxbuf(fl); 178938035ed6SNavdeep Parhar 179038035ed6SNavdeep Parhar return (m); 179138035ed6SNavdeep Parhar } 179238035ed6SNavdeep Parhar 179338035ed6SNavdeep Parhar static struct mbuf * 179446e1e307SNavdeep Parhar get_fl_payload(struct adapter *sc, struct sge_fl *fl, const u_int plen) 17951458bff9SNavdeep Parhar { 179638035ed6SNavdeep Parhar struct mbuf *m0, *m, **pnext; 1797b741402cSNavdeep Parhar u_int remaining; 17981458bff9SNavdeep Parhar 17994d6db4e0SNavdeep Parhar if (__predict_false(fl->flags & FL_BUF_RESUME)) { 1800368541baSNavdeep Parhar M_ASSERTPKTHDR(fl->m0); 180146e1e307SNavdeep Parhar MPASS(fl->m0->m_pkthdr.len == plen); 180246e1e307SNavdeep Parhar MPASS(fl->remaining < plen); 18031458bff9SNavdeep Parhar 180438035ed6SNavdeep Parhar m0 = fl->m0; 180538035ed6SNavdeep Parhar pnext = fl->pnext; 1806b741402cSNavdeep Parhar remaining = fl->remaining; 18074d6db4e0SNavdeep Parhar fl->flags &= ~FL_BUF_RESUME; 180838035ed6SNavdeep Parhar goto get_segment; 18091458bff9SNavdeep Parhar } 18101458bff9SNavdeep Parhar 18111458bff9SNavdeep Parhar /* 181238035ed6SNavdeep Parhar * Payload starts at rx_offset in the current hw buffer. Its length is 181338035ed6SNavdeep Parhar * 'len' and it may span multiple hw buffers. 
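 * (The length is the 'plen' parameter here.)  If an mbuf allocation
 * fails partway through, the partial chain is parked on the fl with
 * FL_BUF_RESUME set and reassembly restarts from the same spot on the
 * next call.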
18141458bff9SNavdeep Parhar */ 18151458bff9SNavdeep Parhar 181646e1e307SNavdeep Parhar m0 = get_scatter_segment(sc, fl, 0, plen); 1817368541baSNavdeep Parhar if (m0 == NULL) 18184d6db4e0SNavdeep Parhar return (NULL); 181946e1e307SNavdeep Parhar remaining = plen - m0->m_len; 182038035ed6SNavdeep Parhar pnext = &m0->m_next; 1821b741402cSNavdeep Parhar while (remaining > 0) { 182238035ed6SNavdeep Parhar get_segment: 182338035ed6SNavdeep Parhar MPASS(fl->rx_offset == 0); 182446e1e307SNavdeep Parhar m = get_scatter_segment(sc, fl, plen - remaining, remaining); 18254d6db4e0SNavdeep Parhar if (__predict_false(m == NULL)) { 182638035ed6SNavdeep Parhar fl->m0 = m0; 182738035ed6SNavdeep Parhar fl->pnext = pnext; 1828b741402cSNavdeep Parhar fl->remaining = remaining; 18294d6db4e0SNavdeep Parhar fl->flags |= FL_BUF_RESUME; 18304d6db4e0SNavdeep Parhar return (NULL); 18311458bff9SNavdeep Parhar } 183238035ed6SNavdeep Parhar *pnext = m; 183338035ed6SNavdeep Parhar pnext = &m->m_next; 1834b741402cSNavdeep Parhar remaining -= m->m_len; 1835733b9277SNavdeep Parhar } 183638035ed6SNavdeep Parhar *pnext = NULL; 18374d6db4e0SNavdeep Parhar 1838dbbf46c4SNavdeep Parhar M_ASSERTPKTHDR(m0); 1839733b9277SNavdeep Parhar return (m0); 1840733b9277SNavdeep Parhar } 1841733b9277SNavdeep Parhar 1842733b9277SNavdeep Parhar static int 184387bbb333SNavdeep Parhar skip_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, 184487bbb333SNavdeep Parhar int remaining) 184587bbb333SNavdeep Parhar { 184687bbb333SNavdeep Parhar struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 184787bbb333SNavdeep Parhar struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; 184887bbb333SNavdeep Parhar int len, blen; 184987bbb333SNavdeep Parhar 185087bbb333SNavdeep Parhar if (fl->flags & FL_BUF_PACKING) { 185187bbb333SNavdeep Parhar u_int l, pad; 185287bbb333SNavdeep Parhar 185387bbb333SNavdeep Parhar blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */ 185487bbb333SNavdeep Parhar len = min(remaining, blen); 185587bbb333SNavdeep Parhar 185687bbb333SNavdeep Parhar l = fr_offset + len; 185787bbb333SNavdeep Parhar pad = roundup2(l, fl->buf_boundary) - l; 185887bbb333SNavdeep Parhar if (fl->rx_offset + len + pad < rxb->size2) 185987bbb333SNavdeep Parhar blen = len + pad; 186087bbb333SNavdeep Parhar fl->rx_offset += blen; 186187bbb333SNavdeep Parhar MPASS(fl->rx_offset <= rxb->size2); 186287bbb333SNavdeep Parhar if (fl->rx_offset < rxb->size2) 186387bbb333SNavdeep Parhar return (len); /* without advancing the cidx */ 186487bbb333SNavdeep Parhar } else { 186587bbb333SNavdeep Parhar MPASS(fl->rx_offset == 0); /* not packing */ 186687bbb333SNavdeep Parhar blen = rxb->size1; 186787bbb333SNavdeep Parhar len = min(remaining, blen); 186887bbb333SNavdeep Parhar } 186987bbb333SNavdeep Parhar move_to_next_rxbuf(fl); 187087bbb333SNavdeep Parhar return (len); 187187bbb333SNavdeep Parhar } 187287bbb333SNavdeep Parhar 187387bbb333SNavdeep Parhar static inline void 187487bbb333SNavdeep Parhar skip_fl_payload(struct adapter *sc, struct sge_fl *fl, int plen) 187587bbb333SNavdeep Parhar { 187687bbb333SNavdeep Parhar int remaining, fr_offset, len; 187787bbb333SNavdeep Parhar 187887bbb333SNavdeep Parhar fr_offset = 0; 187987bbb333SNavdeep Parhar remaining = plen; 188087bbb333SNavdeep Parhar while (remaining > 0) { 188187bbb333SNavdeep Parhar len = skip_scatter_segment(sc, fl, fr_offset, remaining); 188287bbb333SNavdeep Parhar fr_offset += len; 188387bbb333SNavdeep Parhar remaining -= len; 188487bbb333SNavdeep Parhar } 188587bbb333SNavdeep Parhar } 
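/*
 * A minimal standalone sketch (not driver code) of the buffer-packing
 * technique used by get_scatter_segment() above: several mbufs share one
 * buffer via m_extaddref(), with the shared refcount kept in metadata at
 * the tail of the buffer itself.  All names here (pack_meta, pack_init,
 * pack_free, pack_carve) are invented for illustration; the buffer is
 * assumed to come from malloc(9) rather than a UMA zone, to leave a
 * suitably aligned tail for the metadata, and the freelist's recycling
 * logic (sd->nmbuf, the owner reference) is omitted for brevity.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

struct pack_meta {
	u_int	refcount;	/* shared by every mbuf carved from the buffer */
	char	*cl;		/* start of the backing buffer */
};

/* Reserve space for the metadata at the tail of the buffer. */
static struct pack_meta *
pack_init(char *cl, int size)
{
	struct pack_meta *pm = (struct pack_meta *)(cl + size - sizeof(*pm));

	pm->refcount = 0;
	pm->cl = cl;
	return (pm);
}

/* Free routine; runs once, after the last reference is dropped. */
static void
pack_free(struct mbuf *m)
{
	struct pack_meta *pm = m->m_ext.ext_arg1;

	free(pm->cl, M_DEVBUF);
}

/* Hand out 'len' bytes at offset 'off' of 'cl' as an independent mbuf. */
static struct mbuf *
pack_carve(char *cl, struct pack_meta *pm, int off, int len)
{
	struct mbuf *m;

	m = m_get(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	/* Bumps pm->refcount; pack_free runs when it drops back to 0. */
	m_extaddref(m, cl + off, len, &pm->refcount, pack_free, pm, NULL);
	m->m_len = len;
	return (m);
}
/*
 * With pack_init() run once per buffer, repeated pack_carve() calls mirror
 * what get_scatter_segment() does as it walks rx_offset through a packed
 * cluster.
 */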
188687bbb333SNavdeep Parhar 188787bbb333SNavdeep Parhar static inline int 188887bbb333SNavdeep Parhar get_segment_len(struct adapter *sc, struct sge_fl *fl, int plen) 188987bbb333SNavdeep Parhar { 189087bbb333SNavdeep Parhar int len; 189187bbb333SNavdeep Parhar struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 189287bbb333SNavdeep Parhar struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx]; 189387bbb333SNavdeep Parhar 189487bbb333SNavdeep Parhar if (fl->flags & FL_BUF_PACKING) 189587bbb333SNavdeep Parhar len = rxb->size2 - fl->rx_offset; 189687bbb333SNavdeep Parhar else 189787bbb333SNavdeep Parhar len = rxb->size1; 189887bbb333SNavdeep Parhar 189987bbb333SNavdeep Parhar return (min(plen, len)); 190087bbb333SNavdeep Parhar } 190187bbb333SNavdeep Parhar 190287bbb333SNavdeep Parhar static int 19031486d2deSNavdeep Parhar eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d, 19041486d2deSNavdeep Parhar u_int plen) 1905733b9277SNavdeep Parhar { 19061486d2deSNavdeep Parhar struct mbuf *m0; 1907733b9277SNavdeep Parhar struct ifnet *ifp = rxq->ifp; 19081486d2deSNavdeep Parhar struct sge_fl *fl = &rxq->fl; 190987bbb333SNavdeep Parhar struct vi_info *vi = ifp->if_softc; 19101486d2deSNavdeep Parhar const struct cpl_rx_pkt *cpl; 1911a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6) 1912733b9277SNavdeep Parhar struct lro_ctrl *lro = &rxq->lro; 1913733b9277SNavdeep Parhar #endif 191470ca6229SNavdeep Parhar static const int sw_hashtype[4][2] = { 191570ca6229SNavdeep Parhar {M_HASHTYPE_NONE, M_HASHTYPE_NONE}, 191670ca6229SNavdeep Parhar {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6}, 191770ca6229SNavdeep Parhar {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6}, 191870ca6229SNavdeep Parhar {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6}, 191970ca6229SNavdeep Parhar }; 1920733b9277SNavdeep Parhar 19211486d2deSNavdeep Parhar MPASS(plen > sc->params.sge.fl_pktshift); 192287bbb333SNavdeep Parhar if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) && 192387bbb333SNavdeep Parhar __predict_true((fl->flags & FL_BUF_RESUME) == 0)) { 192487bbb333SNavdeep Parhar struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 192587bbb333SNavdeep Parhar caddr_t frame; 192687bbb333SNavdeep Parhar int rc, slen; 192787bbb333SNavdeep Parhar 192887bbb333SNavdeep Parhar slen = get_segment_len(sc, fl, plen) - 192987bbb333SNavdeep Parhar sc->params.sge.fl_pktshift; 193087bbb333SNavdeep Parhar frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift; 193187bbb333SNavdeep Parhar CURVNET_SET_QUIET(ifp->if_vnet); 193287bbb333SNavdeep Parhar rc = pfil_run_hooks(vi->pfil, frame, ifp, 193387bbb333SNavdeep Parhar slen | PFIL_MEMPTR | PFIL_IN, NULL); 193487bbb333SNavdeep Parhar CURVNET_RESTORE(); 193587bbb333SNavdeep Parhar if (rc == PFIL_DROPPED || rc == PFIL_CONSUMED) { 193687bbb333SNavdeep Parhar skip_fl_payload(sc, fl, plen); 193787bbb333SNavdeep Parhar return (0); 193887bbb333SNavdeep Parhar } 193987bbb333SNavdeep Parhar if (rc == PFIL_REALLOCED) { 194087bbb333SNavdeep Parhar skip_fl_payload(sc, fl, plen); 194187bbb333SNavdeep Parhar m0 = pfil_mem2mbuf(frame); 194287bbb333SNavdeep Parhar goto have_mbuf; 194387bbb333SNavdeep Parhar } 194487bbb333SNavdeep Parhar } 194587bbb333SNavdeep Parhar 19461486d2deSNavdeep Parhar m0 = get_fl_payload(sc, fl, plen); 19471486d2deSNavdeep Parhar if (__predict_false(m0 == NULL)) 19481486d2deSNavdeep Parhar return (ENOMEM); 1949733b9277SNavdeep Parhar 195090e7434aSNavdeep Parhar m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; 195190e7434aSNavdeep Parhar m0->m_len -= 
sc->params.sge.fl_pktshift; 195290e7434aSNavdeep Parhar m0->m_data += sc->params.sge.fl_pktshift; 195354e4ee71SNavdeep Parhar 195487bbb333SNavdeep Parhar have_mbuf: 195554e4ee71SNavdeep Parhar m0->m_pkthdr.rcvif = ifp; 19561486d2deSNavdeep Parhar M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); 19571486d2deSNavdeep Parhar m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); 195854e4ee71SNavdeep Parhar 19591486d2deSNavdeep Parhar cpl = (const void *)(&d->rss + 1); 19601de8c69dSNavdeep Parhar if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) { 19619600bf00SNavdeep Parhar if (ifp->if_capenable & IFCAP_RXCSUM && 19629600bf00SNavdeep Parhar cpl->l2info & htobe32(F_RXF_IP)) { 1963932b1a5fSNavdeep Parhar m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 196454e4ee71SNavdeep Parhar CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 19659600bf00SNavdeep Parhar rxq->rxcsum++; 19669600bf00SNavdeep Parhar } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 19679600bf00SNavdeep Parhar cpl->l2info & htobe32(F_RXF_IP6)) { 1968932b1a5fSNavdeep Parhar m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 19699600bf00SNavdeep Parhar CSUM_PSEUDO_HDR); 19709600bf00SNavdeep Parhar rxq->rxcsum++; 19719600bf00SNavdeep Parhar } 19729600bf00SNavdeep Parhar 19739600bf00SNavdeep Parhar if (__predict_false(cpl->ip_frag)) 197454e4ee71SNavdeep Parhar m0->m_pkthdr.csum_data = be16toh(cpl->csum); 197554e4ee71SNavdeep Parhar else 197654e4ee71SNavdeep Parhar m0->m_pkthdr.csum_data = 0xffff; 197754e4ee71SNavdeep Parhar } 197854e4ee71SNavdeep Parhar 197954e4ee71SNavdeep Parhar if (cpl->vlan_ex) { 198054e4ee71SNavdeep Parhar m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 198154e4ee71SNavdeep Parhar m0->m_flags |= M_VLANTAG; 198254e4ee71SNavdeep Parhar rxq->vlan_extraction++; 198354e4ee71SNavdeep Parhar } 198454e4ee71SNavdeep Parhar 19851486d2deSNavdeep Parhar if (rxq->iq.flags & IQ_RX_TIMESTAMP) { 19861486d2deSNavdeep Parhar /* 19871486d2deSNavdeep Parhar * Fill up rcv_tstmp but do not set M_TSTMP. 19881486d2deSNavdeep Parhar * rcv_tstmp is not in the format that the 19891486d2deSNavdeep Parhar * kernel expects and we don't want to mislead 19901486d2deSNavdeep Parhar * it. For now this is only for custom code 19911486d2deSNavdeep Parhar * that knows how to interpret cxgbe's stamp. 
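 * last_flit_to_ns() above converts the raw 60-bit timestamp to
 * nanoseconds using the adapter's core clock frequency.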
19921486d2deSNavdeep Parhar */ 19931486d2deSNavdeep Parhar m0->m_pkthdr.rcv_tstmp = 19941486d2deSNavdeep Parhar last_flit_to_ns(sc, d->rsp.u.last_flit); 19951486d2deSNavdeep Parhar #ifdef notyet 19961486d2deSNavdeep Parhar m0->m_flags |= M_TSTMP; 19971486d2deSNavdeep Parhar #endif 19981486d2deSNavdeep Parhar } 19991486d2deSNavdeep Parhar 200050575ce1SAndrew Gallatin #ifdef NUMA 200150575ce1SAndrew Gallatin m0->m_pkthdr.numa_domain = ifp->if_numa_domain; 200250575ce1SAndrew Gallatin #endif 2003a1ea9a82SNavdeep Parhar #if defined(INET) || defined(INET6) 20041486d2deSNavdeep Parhar if (rxq->iq.flags & IQ_LRO_ENABLED && 20059087a3dfSNavdeep Parhar (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 || 20069087a3dfSNavdeep Parhar M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) { 200746f48ee5SNavdeep Parhar if (sort_before_lro(lro)) { 200846f48ee5SNavdeep Parhar tcp_lro_queue_mbuf(lro, m0); 200946f48ee5SNavdeep Parhar return (0); /* queued for sort, then LRO */ 201046f48ee5SNavdeep Parhar } 201146f48ee5SNavdeep Parhar if (tcp_lro_rx(lro, m0, 0) == 0) 201246f48ee5SNavdeep Parhar return (0); /* queued for LRO */ 201346f48ee5SNavdeep Parhar } 201454e4ee71SNavdeep Parhar #endif 20157d29df59SNavdeep Parhar ifp->if_input(ifp, m0); 201654e4ee71SNavdeep Parhar 2017733b9277SNavdeep Parhar return (0); 201854e4ee71SNavdeep Parhar } 201954e4ee71SNavdeep Parhar 2020733b9277SNavdeep Parhar /* 20217951040fSNavdeep Parhar * Must drain the wrq or make sure that someone else will. 20227951040fSNavdeep Parhar */ 20237951040fSNavdeep Parhar static void 20247951040fSNavdeep Parhar wrq_tx_drain(void *arg, int n) 20257951040fSNavdeep Parhar { 20267951040fSNavdeep Parhar struct sge_wrq *wrq = arg; 20277951040fSNavdeep Parhar struct sge_eq *eq = &wrq->eq; 20287951040fSNavdeep Parhar 20297951040fSNavdeep Parhar EQ_LOCK(eq); 20307951040fSNavdeep Parhar if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 20317951040fSNavdeep Parhar drain_wrq_wr_list(wrq->adapter, wrq); 20327951040fSNavdeep Parhar EQ_UNLOCK(eq); 20337951040fSNavdeep Parhar } 20347951040fSNavdeep Parhar 20357951040fSNavdeep Parhar static void 20367951040fSNavdeep Parhar drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) 20377951040fSNavdeep Parhar { 20387951040fSNavdeep Parhar struct sge_eq *eq = &wrq->eq; 20397951040fSNavdeep Parhar u_int available, dbdiff; /* # of hardware descriptors */ 20407951040fSNavdeep Parhar u_int n; 20417951040fSNavdeep Parhar struct wrqe *wr; 20427951040fSNavdeep Parhar struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 20437951040fSNavdeep Parhar 20447951040fSNavdeep Parhar EQ_LOCK_ASSERT_OWNED(eq); 20457951040fSNavdeep Parhar MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); 20467951040fSNavdeep Parhar wr = STAILQ_FIRST(&wrq->wr_list); 20477951040fSNavdeep Parhar MPASS(wr != NULL); /* Must be called with something useful to do */ 2048cda2ab0eSNavdeep Parhar MPASS(eq->pidx == eq->dbidx); 2049cda2ab0eSNavdeep Parhar dbdiff = 0; 20507951040fSNavdeep Parhar 20517951040fSNavdeep Parhar do { 20527951040fSNavdeep Parhar eq->cidx = read_hw_cidx(eq); 20537951040fSNavdeep Parhar if (eq->pidx == eq->cidx) 20547951040fSNavdeep Parhar available = eq->sidx - 1; 20557951040fSNavdeep Parhar else 20567951040fSNavdeep Parhar available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 20577951040fSNavdeep Parhar 20587951040fSNavdeep Parhar MPASS(wr->wrq == wrq); 20597951040fSNavdeep Parhar n = howmany(wr->wr_len, EQ_ESIZE); 20607951040fSNavdeep Parhar if (available < n) 2061cda2ab0eSNavdeep Parhar break; 20627951040fSNavdeep 
Parhar 20637951040fSNavdeep Parhar dst = (void *)&eq->desc[eq->pidx]; 20647951040fSNavdeep Parhar if (__predict_true(eq->sidx - eq->pidx > n)) { 20657951040fSNavdeep Parhar /* Won't wrap, won't end exactly at the status page. */ 20667951040fSNavdeep Parhar bcopy(&wr->wr[0], dst, wr->wr_len); 20677951040fSNavdeep Parhar eq->pidx += n; 20687951040fSNavdeep Parhar } else { 20697951040fSNavdeep Parhar int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; 20707951040fSNavdeep Parhar 20717951040fSNavdeep Parhar bcopy(&wr->wr[0], dst, first_portion); 20727951040fSNavdeep Parhar if (wr->wr_len > first_portion) { 20737951040fSNavdeep Parhar bcopy(&wr->wr[first_portion], &eq->desc[0], 20747951040fSNavdeep Parhar wr->wr_len - first_portion); 20757951040fSNavdeep Parhar } 20767951040fSNavdeep Parhar eq->pidx = n - (eq->sidx - eq->pidx); 20777951040fSNavdeep Parhar } 20780459a175SNavdeep Parhar wrq->tx_wrs_copied++; 20797951040fSNavdeep Parhar 20807951040fSNavdeep Parhar if (available < eq->sidx / 4 && 20817951040fSNavdeep Parhar atomic_cmpset_int(&eq->equiq, 0, 1)) { 2082ddf09ad6SNavdeep Parhar /* 2083ddf09ad6SNavdeep Parhar * XXX: This is not 100% reliable with some 2084ddf09ad6SNavdeep Parhar * types of WRs. But this is a very unusual 2085ddf09ad6SNavdeep Parhar * situation for an ofld/ctrl queue anyway. 2086ddf09ad6SNavdeep Parhar */ 20877951040fSNavdeep Parhar dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 20887951040fSNavdeep Parhar F_FW_WR_EQUEQ); 20897951040fSNavdeep Parhar } 20907951040fSNavdeep Parhar 20917951040fSNavdeep Parhar dbdiff += n; 20927951040fSNavdeep Parhar if (dbdiff >= 16) { 20937951040fSNavdeep Parhar ring_eq_db(sc, eq, dbdiff); 20947951040fSNavdeep Parhar dbdiff = 0; 20957951040fSNavdeep Parhar } 20967951040fSNavdeep Parhar 20977951040fSNavdeep Parhar STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 20987951040fSNavdeep Parhar free_wrqe(wr); 20997951040fSNavdeep Parhar MPASS(wrq->nwr_pending > 0); 21007951040fSNavdeep Parhar wrq->nwr_pending--; 21017951040fSNavdeep Parhar MPASS(wrq->ndesc_needed >= n); 21027951040fSNavdeep Parhar wrq->ndesc_needed -= n; 21037951040fSNavdeep Parhar } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); 21047951040fSNavdeep Parhar 21057951040fSNavdeep Parhar if (dbdiff) 21067951040fSNavdeep Parhar ring_eq_db(sc, eq, dbdiff); 21077951040fSNavdeep Parhar } 21087951040fSNavdeep Parhar 21097951040fSNavdeep Parhar /* 2110733b9277SNavdeep Parhar * Doesn't fail. Holds on to work requests it can't send right away. 2111733b9277SNavdeep Parhar */ 211209fe6320SNavdeep Parhar void 211309fe6320SNavdeep Parhar t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 2114733b9277SNavdeep Parhar { 2115733b9277SNavdeep Parhar #ifdef INVARIANTS 21167951040fSNavdeep Parhar struct sge_eq *eq = &wrq->eq; 2117733b9277SNavdeep Parhar #endif 2118733b9277SNavdeep Parhar 21197951040fSNavdeep Parhar EQ_LOCK_ASSERT_OWNED(eq); 21207951040fSNavdeep Parhar MPASS(wr != NULL); 21217951040fSNavdeep Parhar MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); 21227951040fSNavdeep Parhar MPASS((wr->wr_len & 0x7) == 0); 2123733b9277SNavdeep Parhar 21247951040fSNavdeep Parhar STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 21257951040fSNavdeep Parhar wrq->nwr_pending++; 21267951040fSNavdeep Parhar wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); 2127733b9277SNavdeep Parhar 21287951040fSNavdeep Parhar if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) 21297951040fSNavdeep Parhar return; /* commit_wrq_wr will drain wr_list as well. 
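 * Everything left on wr_list is copied into the descriptor ring by
 * drain_wrq_wr_list(), which handles a WR that wraps past the end of
 * the ring by splitting the copy in two.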
*/ 2130733b9277SNavdeep Parhar 21317951040fSNavdeep Parhar drain_wrq_wr_list(sc, wrq); 2132733b9277SNavdeep Parhar 21337951040fSNavdeep Parhar /* Doorbell must have caught up to the pidx. */ 21347951040fSNavdeep Parhar MPASS(eq->pidx == eq->dbidx); 213554e4ee71SNavdeep Parhar } 213654e4ee71SNavdeep Parhar 213754e4ee71SNavdeep Parhar void 213854e4ee71SNavdeep Parhar t4_update_fl_bufsize(struct ifnet *ifp) 213954e4ee71SNavdeep Parhar { 2140fe2ebb76SJohn Baldwin struct vi_info *vi = ifp->if_softc; 21417c228be3SNavdeep Parhar struct adapter *sc = vi->adapter; 214254e4ee71SNavdeep Parhar struct sge_rxq *rxq; 21436eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD 21446eb3180fSNavdeep Parhar struct sge_ofld_rxq *ofld_rxq; 21456eb3180fSNavdeep Parhar #endif 214654e4ee71SNavdeep Parhar struct sge_fl *fl; 214738035ed6SNavdeep Parhar int i, maxp, mtu = ifp->if_mtu; 214854e4ee71SNavdeep Parhar 21498bf30903SNavdeep Parhar maxp = mtu_to_max_payload(sc, mtu); 2150fe2ebb76SJohn Baldwin for_each_rxq(vi, i, rxq) { 215154e4ee71SNavdeep Parhar fl = &rxq->fl; 215254e4ee71SNavdeep Parhar 215354e4ee71SNavdeep Parhar FL_LOCK(fl); 215446e1e307SNavdeep Parhar fl->zidx = find_refill_source(sc, maxp, 215546e1e307SNavdeep Parhar fl->flags & FL_BUF_PACKING); 215654e4ee71SNavdeep Parhar FL_UNLOCK(fl); 215754e4ee71SNavdeep Parhar } 21586eb3180fSNavdeep Parhar #ifdef TCP_OFFLOAD 2159fe2ebb76SJohn Baldwin for_each_ofld_rxq(vi, i, ofld_rxq) { 21606eb3180fSNavdeep Parhar fl = &ofld_rxq->fl; 21616eb3180fSNavdeep Parhar 21626eb3180fSNavdeep Parhar FL_LOCK(fl); 216346e1e307SNavdeep Parhar fl->zidx = find_refill_source(sc, maxp, 216446e1e307SNavdeep Parhar fl->flags & FL_BUF_PACKING); 21656eb3180fSNavdeep Parhar FL_UNLOCK(fl); 21666eb3180fSNavdeep Parhar } 21676eb3180fSNavdeep Parhar #endif 216854e4ee71SNavdeep Parhar } 216954e4ee71SNavdeep Parhar 21707951040fSNavdeep Parhar static inline int 21717951040fSNavdeep Parhar mbuf_nsegs(struct mbuf *m) 2172733b9277SNavdeep Parhar { 21730835ddc7SNavdeep Parhar 21747951040fSNavdeep Parhar M_ASSERTPKTHDR(m); 21757951040fSNavdeep Parhar KASSERT(m->m_pkthdr.l5hlen > 0, 21767951040fSNavdeep Parhar ("%s: mbuf %p missing information on # of segments.", __func__, m)); 21777951040fSNavdeep Parhar 21787951040fSNavdeep Parhar return (m->m_pkthdr.l5hlen); 21797951040fSNavdeep Parhar } 21807951040fSNavdeep Parhar 21817951040fSNavdeep Parhar static inline void 21827951040fSNavdeep Parhar set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) 21837951040fSNavdeep Parhar { 21847951040fSNavdeep Parhar 21857951040fSNavdeep Parhar M_ASSERTPKTHDR(m); 21867951040fSNavdeep Parhar m->m_pkthdr.l5hlen = nsegs; 21877951040fSNavdeep Parhar } 21887951040fSNavdeep Parhar 21897951040fSNavdeep Parhar static inline int 21905cdaef71SJohn Baldwin mbuf_cflags(struct mbuf *m) 21915cdaef71SJohn Baldwin { 21925cdaef71SJohn Baldwin 21935cdaef71SJohn Baldwin M_ASSERTPKTHDR(m); 21945cdaef71SJohn Baldwin return (m->m_pkthdr.PH_loc.eight[4]); 21955cdaef71SJohn Baldwin } 21965cdaef71SJohn Baldwin 21975cdaef71SJohn Baldwin static inline void 21985cdaef71SJohn Baldwin set_mbuf_cflags(struct mbuf *m, uint8_t flags) 21995cdaef71SJohn Baldwin { 22005cdaef71SJohn Baldwin 22015cdaef71SJohn Baldwin M_ASSERTPKTHDR(m); 22025cdaef71SJohn Baldwin m->m_pkthdr.PH_loc.eight[4] = flags; 22035cdaef71SJohn Baldwin } 22045cdaef71SJohn Baldwin 22055cdaef71SJohn Baldwin static inline int 22067951040fSNavdeep Parhar mbuf_len16(struct mbuf *m) 22077951040fSNavdeep Parhar { 22087951040fSNavdeep Parhar int n; 22097951040fSNavdeep Parhar 22107951040fSNavdeep Parhar 
M_ASSERTPKTHDR(m); 22117951040fSNavdeep Parhar n = m->m_pkthdr.PH_loc.eight[0]; 2212bddf7343SJohn Baldwin if (!(mbuf_cflags(m) & MC_TLS)) 22137951040fSNavdeep Parhar MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 22147951040fSNavdeep Parhar 22157951040fSNavdeep Parhar return (n); 22167951040fSNavdeep Parhar } 22177951040fSNavdeep Parhar 22187951040fSNavdeep Parhar static inline void 22197951040fSNavdeep Parhar set_mbuf_len16(struct mbuf *m, uint8_t len16) 22207951040fSNavdeep Parhar { 22217951040fSNavdeep Parhar 22227951040fSNavdeep Parhar M_ASSERTPKTHDR(m); 22237951040fSNavdeep Parhar m->m_pkthdr.PH_loc.eight[0] = len16; 22247951040fSNavdeep Parhar } 22257951040fSNavdeep Parhar 2226786099deSNavdeep Parhar #ifdef RATELIMIT 2227786099deSNavdeep Parhar static inline int 2228786099deSNavdeep Parhar mbuf_eo_nsegs(struct mbuf *m) 2229786099deSNavdeep Parhar { 2230786099deSNavdeep Parhar 2231786099deSNavdeep Parhar M_ASSERTPKTHDR(m); 2232786099deSNavdeep Parhar return (m->m_pkthdr.PH_loc.eight[1]); 2233786099deSNavdeep Parhar } 2234786099deSNavdeep Parhar 2235786099deSNavdeep Parhar static inline void 2236786099deSNavdeep Parhar set_mbuf_eo_nsegs(struct mbuf *m, uint8_t nsegs) 2237786099deSNavdeep Parhar { 2238786099deSNavdeep Parhar 2239786099deSNavdeep Parhar M_ASSERTPKTHDR(m); 2240786099deSNavdeep Parhar m->m_pkthdr.PH_loc.eight[1] = nsegs; 2241786099deSNavdeep Parhar } 2242786099deSNavdeep Parhar 2243786099deSNavdeep Parhar static inline int 2244786099deSNavdeep Parhar mbuf_eo_len16(struct mbuf *m) 2245786099deSNavdeep Parhar { 2246786099deSNavdeep Parhar int n; 2247786099deSNavdeep Parhar 2248786099deSNavdeep Parhar M_ASSERTPKTHDR(m); 2249786099deSNavdeep Parhar n = m->m_pkthdr.PH_loc.eight[2]; 2250786099deSNavdeep Parhar MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 2251786099deSNavdeep Parhar 2252786099deSNavdeep Parhar return (n); 2253786099deSNavdeep Parhar } 2254786099deSNavdeep Parhar 2255786099deSNavdeep Parhar static inline void 2256786099deSNavdeep Parhar set_mbuf_eo_len16(struct mbuf *m, uint8_t len16) 2257786099deSNavdeep Parhar { 2258786099deSNavdeep Parhar 2259786099deSNavdeep Parhar M_ASSERTPKTHDR(m); 2260786099deSNavdeep Parhar m->m_pkthdr.PH_loc.eight[2] = len16; 2261786099deSNavdeep Parhar } 2262786099deSNavdeep Parhar 2263786099deSNavdeep Parhar static inline int 2264786099deSNavdeep Parhar mbuf_eo_tsclk_tsoff(struct mbuf *m) 2265786099deSNavdeep Parhar { 2266786099deSNavdeep Parhar 2267786099deSNavdeep Parhar M_ASSERTPKTHDR(m); 2268786099deSNavdeep Parhar return (m->m_pkthdr.PH_loc.eight[3]); 2269786099deSNavdeep Parhar } 2270786099deSNavdeep Parhar 2271786099deSNavdeep Parhar static inline void 2272786099deSNavdeep Parhar set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_tsoff) 2273786099deSNavdeep Parhar { 2274786099deSNavdeep Parhar 2275786099deSNavdeep Parhar M_ASSERTPKTHDR(m); 2276786099deSNavdeep Parhar m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff; 2277786099deSNavdeep Parhar } 2278786099deSNavdeep Parhar 2279786099deSNavdeep Parhar static inline int 2280e38a50e8SJohn Baldwin needs_eo(struct cxgbe_snd_tag *cst) 2281786099deSNavdeep Parhar { 2282786099deSNavdeep Parhar 2283e38a50e8SJohn Baldwin return (cst != NULL && cst->type == IF_SND_TAG_TYPE_RATE_LIMIT); 2284786099deSNavdeep Parhar } 2285786099deSNavdeep Parhar #endif 2286786099deSNavdeep Parhar 22875cdaef71SJohn Baldwin /* 22885cdaef71SJohn Baldwin * Try to allocate an mbuf to contain a raw work request. 
To make it 22895cdaef71SJohn Baldwin * easy to construct the work request, don't allocate a chain but a 22905cdaef71SJohn Baldwin * single mbuf. 22915cdaef71SJohn Baldwin */ 22925cdaef71SJohn Baldwin struct mbuf * 22935cdaef71SJohn Baldwin alloc_wr_mbuf(int len, int how) 22945cdaef71SJohn Baldwin { 22955cdaef71SJohn Baldwin struct mbuf *m; 22965cdaef71SJohn Baldwin 22975cdaef71SJohn Baldwin if (len <= MHLEN) 22985cdaef71SJohn Baldwin m = m_gethdr(how, MT_DATA); 22995cdaef71SJohn Baldwin else if (len <= MCLBYTES) 23005cdaef71SJohn Baldwin m = m_getcl(how, MT_DATA, M_PKTHDR); 23015cdaef71SJohn Baldwin else 23025cdaef71SJohn Baldwin m = NULL; 23035cdaef71SJohn Baldwin if (m == NULL) 23045cdaef71SJohn Baldwin return (NULL); 23055cdaef71SJohn Baldwin m->m_pkthdr.len = len; 23065cdaef71SJohn Baldwin m->m_len = len; 23075cdaef71SJohn Baldwin set_mbuf_cflags(m, MC_RAW_WR); 23085cdaef71SJohn Baldwin set_mbuf_len16(m, howmany(len, 16)); 23095cdaef71SJohn Baldwin return (m); 23105cdaef71SJohn Baldwin } 23115cdaef71SJohn Baldwin 23127951040fSNavdeep Parhar static inline int 2313c0236bd9SNavdeep Parhar needs_hwcsum(struct mbuf *m) 2314c0236bd9SNavdeep Parhar { 2315c0236bd9SNavdeep Parhar 2316c0236bd9SNavdeep Parhar M_ASSERTPKTHDR(m); 2317c0236bd9SNavdeep Parhar 2318c0236bd9SNavdeep Parhar return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_IP | 2319c0236bd9SNavdeep Parhar CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6)); 2320c0236bd9SNavdeep Parhar } 2321c0236bd9SNavdeep Parhar 2322c0236bd9SNavdeep Parhar static inline int 23237951040fSNavdeep Parhar needs_tso(struct mbuf *m) 23247951040fSNavdeep Parhar { 23257951040fSNavdeep Parhar 23267951040fSNavdeep Parhar M_ASSERTPKTHDR(m); 23277951040fSNavdeep Parhar 2328a6a8ff35SNavdeep Parhar return (m->m_pkthdr.csum_flags & CSUM_TSO); 23297951040fSNavdeep Parhar } 23307951040fSNavdeep Parhar 23317951040fSNavdeep Parhar static inline int 23327951040fSNavdeep Parhar needs_l3_csum(struct mbuf *m) 23337951040fSNavdeep Parhar { 23347951040fSNavdeep Parhar 23357951040fSNavdeep Parhar M_ASSERTPKTHDR(m); 23367951040fSNavdeep Parhar 2337a6a8ff35SNavdeep Parhar return (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)); 23387951040fSNavdeep Parhar } 23397951040fSNavdeep Parhar 23407951040fSNavdeep Parhar static inline int 2341c0236bd9SNavdeep Parhar needs_tcp_csum(struct mbuf *m) 2342c0236bd9SNavdeep Parhar { 2343c0236bd9SNavdeep Parhar 2344c0236bd9SNavdeep Parhar M_ASSERTPKTHDR(m); 2345c0236bd9SNavdeep Parhar return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TCP_IPV6 | CSUM_TSO)); 2346c0236bd9SNavdeep Parhar } 2347c0236bd9SNavdeep Parhar 2348c0236bd9SNavdeep Parhar #ifdef RATELIMIT 2349c0236bd9SNavdeep Parhar static inline int 23507951040fSNavdeep Parhar needs_l4_csum(struct mbuf *m) 23517951040fSNavdeep Parhar { 23527951040fSNavdeep Parhar 23537951040fSNavdeep Parhar M_ASSERTPKTHDR(m); 23547951040fSNavdeep Parhar 2355a6a8ff35SNavdeep Parhar return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 2356a6a8ff35SNavdeep Parhar CSUM_TCP_IPV6 | CSUM_TSO)); 23577951040fSNavdeep Parhar } 23587951040fSNavdeep Parhar 23597951040fSNavdeep Parhar static inline int 2360786099deSNavdeep Parhar needs_udp_csum(struct mbuf *m) 2361786099deSNavdeep Parhar { 2362786099deSNavdeep Parhar 2363786099deSNavdeep Parhar M_ASSERTPKTHDR(m); 2364786099deSNavdeep Parhar return (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)); 2365786099deSNavdeep Parhar } 2366c3fce948SNavdeep Parhar #endif 2367786099deSNavdeep Parhar 2368786099deSNavdeep Parhar static inline int 
23697951040fSNavdeep Parhar needs_vlan_insertion(struct mbuf *m) 23707951040fSNavdeep Parhar { 23717951040fSNavdeep Parhar 23727951040fSNavdeep Parhar M_ASSERTPKTHDR(m); 23737951040fSNavdeep Parhar 2374a6a8ff35SNavdeep Parhar return (m->m_flags & M_VLANTAG); 23757951040fSNavdeep Parhar } 23767951040fSNavdeep Parhar 23777951040fSNavdeep Parhar static void * 23787951040fSNavdeep Parhar m_advance(struct mbuf **pm, int *poffset, int len) 23797951040fSNavdeep Parhar { 23807951040fSNavdeep Parhar struct mbuf *m = *pm; 23817951040fSNavdeep Parhar int offset = *poffset; 23827951040fSNavdeep Parhar uintptr_t p = 0; 23837951040fSNavdeep Parhar 23847951040fSNavdeep Parhar MPASS(len > 0); 23857951040fSNavdeep Parhar 2386e06ab612SJohn Baldwin for (;;) { 23877951040fSNavdeep Parhar if (offset + len < m->m_len) { 23887951040fSNavdeep Parhar offset += len; 23897951040fSNavdeep Parhar p = mtod(m, uintptr_t) + offset; 23907951040fSNavdeep Parhar break; 23917951040fSNavdeep Parhar } 23927951040fSNavdeep Parhar len -= m->m_len - offset; 23937951040fSNavdeep Parhar m = m->m_next; 23947951040fSNavdeep Parhar offset = 0; 23957951040fSNavdeep Parhar MPASS(m != NULL); 23967951040fSNavdeep Parhar } 23977951040fSNavdeep Parhar *poffset = offset; 23987951040fSNavdeep Parhar *pm = m; 23997951040fSNavdeep Parhar return ((void *)p); 24007951040fSNavdeep Parhar } 24017951040fSNavdeep Parhar 2402d76bbe17SJohn Baldwin static inline int 2403d76bbe17SJohn Baldwin count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr) 2404d76bbe17SJohn Baldwin { 2405d76bbe17SJohn Baldwin vm_paddr_t paddr; 2406d76bbe17SJohn Baldwin int i, len, off, pglen, pgoff, seglen, segoff; 2407d76bbe17SJohn Baldwin int nsegs = 0; 2408d76bbe17SJohn Baldwin 2409365e8da4SGleb Smirnoff M_ASSERTEXTPG(m); 2410d76bbe17SJohn Baldwin off = mtod(m, vm_offset_t); 2411d76bbe17SJohn Baldwin len = m->m_len; 2412d76bbe17SJohn Baldwin off += skip; 2413d76bbe17SJohn Baldwin len -= skip; 2414d76bbe17SJohn Baldwin 24157b6c99d0SGleb Smirnoff if (m->m_epg_hdrlen != 0) { 24167b6c99d0SGleb Smirnoff if (off >= m->m_epg_hdrlen) { 24177b6c99d0SGleb Smirnoff off -= m->m_epg_hdrlen; 2418d76bbe17SJohn Baldwin } else { 24197b6c99d0SGleb Smirnoff seglen = m->m_epg_hdrlen - off; 2420d76bbe17SJohn Baldwin segoff = off; 2421d76bbe17SJohn Baldwin seglen = min(seglen, len); 2422d76bbe17SJohn Baldwin off = 0; 2423d76bbe17SJohn Baldwin len -= seglen; 2424d76bbe17SJohn Baldwin paddr = pmap_kextract( 24250c103266SGleb Smirnoff (vm_offset_t)&m->m_epg_hdr[segoff]); 2426d76bbe17SJohn Baldwin if (*nextaddr != paddr) 2427d76bbe17SJohn Baldwin nsegs++; 2428d76bbe17SJohn Baldwin *nextaddr = paddr + seglen; 2429d76bbe17SJohn Baldwin } 2430d76bbe17SJohn Baldwin } 24317b6c99d0SGleb Smirnoff pgoff = m->m_epg_1st_off; 24327b6c99d0SGleb Smirnoff for (i = 0; i < m->m_epg_npgs && len > 0; i++) { 2433c4ee38f8SGleb Smirnoff pglen = m_epg_pagelen(m, i, pgoff); 2434d76bbe17SJohn Baldwin if (off >= pglen) { 2435d76bbe17SJohn Baldwin off -= pglen; 2436d76bbe17SJohn Baldwin pgoff = 0; 2437d76bbe17SJohn Baldwin continue; 2438d76bbe17SJohn Baldwin } 2439d76bbe17SJohn Baldwin seglen = pglen - off; 2440d76bbe17SJohn Baldwin segoff = pgoff + off; 2441d76bbe17SJohn Baldwin off = 0; 2442d76bbe17SJohn Baldwin seglen = min(seglen, len); 2443d76bbe17SJohn Baldwin len -= seglen; 24440c103266SGleb Smirnoff paddr = m->m_epg_pa[i] + segoff; 2445d76bbe17SJohn Baldwin if (*nextaddr != paddr) 2446d76bbe17SJohn Baldwin nsegs++; 2447d76bbe17SJohn Baldwin *nextaddr = paddr + seglen; 2448d76bbe17SJohn Baldwin pgoff = 0; 
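		/* Only the first page may begin at a non-zero offset. */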
2449d76bbe17SJohn Baldwin 	}
2450d76bbe17SJohn Baldwin 	if (len != 0) {
24517b6c99d0SGleb Smirnoff 		seglen = min(len, m->m_epg_trllen - off);
2452d76bbe17SJohn Baldwin 		len -= seglen;
24530c103266SGleb Smirnoff 		paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]);
2454d76bbe17SJohn Baldwin 		if (*nextaddr != paddr)
2455d76bbe17SJohn Baldwin 			nsegs++;
2456d76bbe17SJohn Baldwin 		*nextaddr = paddr + seglen;
2457d76bbe17SJohn Baldwin 	}
2458d76bbe17SJohn Baldwin 
2459d76bbe17SJohn Baldwin 	return (nsegs);
2460d76bbe17SJohn Baldwin }
2461d76bbe17SJohn Baldwin 
2462d76bbe17SJohn Baldwin 
24637951040fSNavdeep Parhar /*
24647951040fSNavdeep Parhar  * Can deal with empty mbufs in the chain that have m_len = 0, but the chain
2465786099deSNavdeep Parhar  * must have at least one mbuf that's not empty.  It is possible for this
2466786099deSNavdeep Parhar  * routine to return 0 if skip accounts for all the contents of the mbuf chain.
24677951040fSNavdeep Parhar  */
24687951040fSNavdeep Parhar static inline int
2469d76bbe17SJohn Baldwin count_mbuf_nsegs(struct mbuf *m, int skip, uint8_t *cflags)
24707951040fSNavdeep Parhar {
2471d76bbe17SJohn Baldwin 	vm_paddr_t nextaddr, paddr;
247277e9044cSNavdeep Parhar 	vm_offset_t va;
24737951040fSNavdeep Parhar 	int len, nsegs;
24747951040fSNavdeep Parhar 
2475786099deSNavdeep Parhar 	M_ASSERTPKTHDR(m);
2476786099deSNavdeep Parhar 	MPASS(m->m_pkthdr.len > 0);
2477786099deSNavdeep Parhar 	MPASS(m->m_pkthdr.len >= skip);
24787951040fSNavdeep Parhar 
24797951040fSNavdeep Parhar 	nsegs = 0;
2480d76bbe17SJohn Baldwin 	nextaddr = 0;
24817951040fSNavdeep Parhar 	for (; m; m = m->m_next) {
24827951040fSNavdeep Parhar 		len = m->m_len;
24837951040fSNavdeep Parhar 		if (__predict_false(len == 0))
24847951040fSNavdeep Parhar 			continue;
2485786099deSNavdeep Parhar 		if (skip >= len) {
2486786099deSNavdeep Parhar 			skip -= len;
2487786099deSNavdeep Parhar 			continue;
2488786099deSNavdeep Parhar 		}
24896edfd179SGleb Smirnoff 		if ((m->m_flags & M_EXTPG) != 0) {
2490d76bbe17SJohn Baldwin 			*cflags |= MC_NOMAP;
2491d76bbe17SJohn Baldwin 			nsegs += count_mbuf_ext_pgs(m, skip, &nextaddr);
2492d76bbe17SJohn Baldwin 			skip = 0;
2493d76bbe17SJohn Baldwin 			continue;
2494d76bbe17SJohn Baldwin 		}
2495786099deSNavdeep Parhar 		va = mtod(m, vm_offset_t) + skip;
2496786099deSNavdeep Parhar 		len -= skip;
2497786099deSNavdeep Parhar 		skip = 0;
2498d76bbe17SJohn Baldwin 		paddr = pmap_kextract(va);
2499786099deSNavdeep Parhar 		nsegs += sglist_count((void *)(uintptr_t)va, len);
2500d76bbe17SJohn Baldwin 		if (paddr == nextaddr)
25017951040fSNavdeep Parhar 			nsegs--;
2502d76bbe17SJohn Baldwin 		nextaddr = pmap_kextract(va + len - 1) + 1;
25037951040fSNavdeep Parhar 	}
25047951040fSNavdeep Parhar 
25057951040fSNavdeep Parhar 	return (nsegs);
25067951040fSNavdeep Parhar }
25077951040fSNavdeep Parhar 
25087951040fSNavdeep Parhar /*
25097951040fSNavdeep Parhar  * Analyze the mbuf to determine its tx needs.  The mbuf passed in may change:
25107951040fSNavdeep Parhar  * a) caller can assume it's been freed if this function returns with an error.
25117951040fSNavdeep Parhar  * b) it may get defragmented if the gather list is too long for the hardware.
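 * On success the results of the analysis are cached in the pkthdr: the
 * gather-list segment count and the WR length in 16-byte units (via
 * set_mbuf_nsegs/set_mbuf_len16), plus the MC_* flags via set_mbuf_cflags.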
25127951040fSNavdeep Parhar */ 25137951040fSNavdeep Parhar int 25146af45170SJohn Baldwin parse_pkt(struct adapter *sc, struct mbuf **mp) 25157951040fSNavdeep Parhar { 25167951040fSNavdeep Parhar struct mbuf *m0 = *mp, *m; 25177951040fSNavdeep Parhar int rc, nsegs, defragged = 0, offset; 25187951040fSNavdeep Parhar struct ether_header *eh; 25197951040fSNavdeep Parhar void *l3hdr; 25207951040fSNavdeep Parhar #if defined(INET) || defined(INET6) 25217951040fSNavdeep Parhar struct tcphdr *tcp; 25227951040fSNavdeep Parhar #endif 2523bddf7343SJohn Baldwin #if defined(KERN_TLS) || defined(RATELIMIT) 2524e38a50e8SJohn Baldwin struct cxgbe_snd_tag *cst; 2525e38a50e8SJohn Baldwin #endif 25267951040fSNavdeep Parhar uint16_t eh_type; 2527d76bbe17SJohn Baldwin uint8_t cflags; 25287951040fSNavdeep Parhar 2529d76bbe17SJohn Baldwin cflags = 0; 25307951040fSNavdeep Parhar M_ASSERTPKTHDR(m0); 25317951040fSNavdeep Parhar if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { 25327951040fSNavdeep Parhar rc = EINVAL; 25337951040fSNavdeep Parhar fail: 25347951040fSNavdeep Parhar m_freem(m0); 25357951040fSNavdeep Parhar *mp = NULL; 25367951040fSNavdeep Parhar return (rc); 25377951040fSNavdeep Parhar } 25387951040fSNavdeep Parhar restart: 25397951040fSNavdeep Parhar /* 25407951040fSNavdeep Parhar * First count the number of gather list segments in the payload. 25417951040fSNavdeep Parhar * Defrag the mbuf if nsegs exceeds the hardware limit. 25427951040fSNavdeep Parhar */ 25437951040fSNavdeep Parhar M_ASSERTPKTHDR(m0); 25447951040fSNavdeep Parhar MPASS(m0->m_pkthdr.len > 0); 2545d76bbe17SJohn Baldwin nsegs = count_mbuf_nsegs(m0, 0, &cflags); 2546bddf7343SJohn Baldwin #if defined(KERN_TLS) || defined(RATELIMIT) 2547e38a50e8SJohn Baldwin if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) 2548e38a50e8SJohn Baldwin cst = mst_to_cst(m0->m_pkthdr.snd_tag); 2549e38a50e8SJohn Baldwin else 2550e38a50e8SJohn Baldwin cst = NULL; 2551e38a50e8SJohn Baldwin #endif 2552bddf7343SJohn Baldwin #ifdef KERN_TLS 2553bddf7343SJohn Baldwin if (cst != NULL && cst->type == IF_SND_TAG_TYPE_TLS) { 2554bddf7343SJohn Baldwin int len16; 2555bddf7343SJohn Baldwin 2556bddf7343SJohn Baldwin cflags |= MC_TLS; 2557bddf7343SJohn Baldwin set_mbuf_cflags(m0, cflags); 2558bddf7343SJohn Baldwin rc = t6_ktls_parse_pkt(m0, &nsegs, &len16); 2559bddf7343SJohn Baldwin if (rc != 0) 2560bddf7343SJohn Baldwin goto fail; 2561bddf7343SJohn Baldwin set_mbuf_nsegs(m0, nsegs); 2562bddf7343SJohn Baldwin set_mbuf_len16(m0, len16); 2563bddf7343SJohn Baldwin return (0); 2564bddf7343SJohn Baldwin } 2565bddf7343SJohn Baldwin #endif 25667951040fSNavdeep Parhar if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) { 25677951040fSNavdeep Parhar if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { 25687951040fSNavdeep Parhar rc = EFBIG; 25697951040fSNavdeep Parhar goto fail; 25707951040fSNavdeep Parhar } 25717951040fSNavdeep Parhar *mp = m0 = m; /* update caller's copy after defrag */ 25727951040fSNavdeep Parhar goto restart; 25737951040fSNavdeep Parhar } 25747951040fSNavdeep Parhar 2575d76bbe17SJohn Baldwin if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && 2576d76bbe17SJohn Baldwin !(cflags & MC_NOMAP))) { 25777951040fSNavdeep Parhar m0 = m_pullup(m0, m0->m_pkthdr.len); 25787951040fSNavdeep Parhar if (m0 == NULL) { 25797951040fSNavdeep Parhar /* Should have left well enough alone. 
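			 * m_pullup frees the chain when it fails, so
			 * there is nothing left to clean up here.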
*/ 25807951040fSNavdeep Parhar rc = EFBIG; 25817951040fSNavdeep Parhar goto fail; 25827951040fSNavdeep Parhar } 25837951040fSNavdeep Parhar *mp = m0; /* update caller's copy after pullup */ 25847951040fSNavdeep Parhar goto restart; 25857951040fSNavdeep Parhar } 25867951040fSNavdeep Parhar set_mbuf_nsegs(m0, nsegs); 2587d76bbe17SJohn Baldwin set_mbuf_cflags(m0, cflags); 25886af45170SJohn Baldwin if (sc->flags & IS_VF) 25896af45170SJohn Baldwin set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0))); 25906af45170SJohn Baldwin else 25917951040fSNavdeep Parhar set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0))); 25927951040fSNavdeep Parhar 2593786099deSNavdeep Parhar #ifdef RATELIMIT 2594786099deSNavdeep Parhar /* 2595786099deSNavdeep Parhar * Ethofld is limited to TCP and UDP for now, and only when L4 hw 2596786099deSNavdeep Parhar * checksumming is enabled. needs_l4_csum happens to check for all the 2597786099deSNavdeep Parhar * right things. 2598786099deSNavdeep Parhar */ 2599e38a50e8SJohn Baldwin if (__predict_false(needs_eo(cst) && !needs_l4_csum(m0))) { 2600fb3bc596SJohn Baldwin m_snd_tag_rele(m0->m_pkthdr.snd_tag); 2601786099deSNavdeep Parhar m0->m_pkthdr.snd_tag = NULL; 2602fb3bc596SJohn Baldwin m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; 2603e38a50e8SJohn Baldwin cst = NULL; 2604fb3bc596SJohn Baldwin } 2605786099deSNavdeep Parhar #endif 2606786099deSNavdeep Parhar 2607c0236bd9SNavdeep Parhar if (!needs_hwcsum(m0) 2608786099deSNavdeep Parhar #ifdef RATELIMIT 2609c0236bd9SNavdeep Parhar && !needs_eo(cst) 2610786099deSNavdeep Parhar #endif 2611c0236bd9SNavdeep Parhar ) 26127951040fSNavdeep Parhar return (0); 26137951040fSNavdeep Parhar 26147951040fSNavdeep Parhar m = m0; 26157951040fSNavdeep Parhar eh = mtod(m, struct ether_header *); 26167951040fSNavdeep Parhar eh_type = ntohs(eh->ether_type); 26177951040fSNavdeep Parhar if (eh_type == ETHERTYPE_VLAN) { 26187951040fSNavdeep Parhar struct ether_vlan_header *evh = (void *)eh; 26197951040fSNavdeep Parhar 26207951040fSNavdeep Parhar eh_type = ntohs(evh->evl_proto); 26217951040fSNavdeep Parhar m0->m_pkthdr.l2hlen = sizeof(*evh); 26227951040fSNavdeep Parhar } else 26237951040fSNavdeep Parhar m0->m_pkthdr.l2hlen = sizeof(*eh); 26247951040fSNavdeep Parhar 26257951040fSNavdeep Parhar offset = 0; 26267951040fSNavdeep Parhar l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 26277951040fSNavdeep Parhar 26287951040fSNavdeep Parhar switch (eh_type) { 26297951040fSNavdeep Parhar #ifdef INET6 26307951040fSNavdeep Parhar case ETHERTYPE_IPV6: 26317951040fSNavdeep Parhar { 26327951040fSNavdeep Parhar struct ip6_hdr *ip6 = l3hdr; 26337951040fSNavdeep Parhar 26346af45170SJohn Baldwin MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP); 26357951040fSNavdeep Parhar 26367951040fSNavdeep Parhar m0->m_pkthdr.l3hlen = sizeof(*ip6); 26377951040fSNavdeep Parhar break; 26387951040fSNavdeep Parhar } 26397951040fSNavdeep Parhar #endif 26407951040fSNavdeep Parhar #ifdef INET 26417951040fSNavdeep Parhar case ETHERTYPE_IP: 26427951040fSNavdeep Parhar { 26437951040fSNavdeep Parhar struct ip *ip = l3hdr; 26447951040fSNavdeep Parhar 26457951040fSNavdeep Parhar m0->m_pkthdr.l3hlen = ip->ip_hl * 4; 26467951040fSNavdeep Parhar break; 26477951040fSNavdeep Parhar } 26487951040fSNavdeep Parhar #endif 26497951040fSNavdeep Parhar default: 26507951040fSNavdeep Parhar panic("%s: ethertype 0x%04x unknown. 
if_cxgbe must be compiled" 26517951040fSNavdeep Parhar " with the same INET/INET6 options as the kernel.", 26527951040fSNavdeep Parhar __func__, eh_type); 26537951040fSNavdeep Parhar } 26547951040fSNavdeep Parhar 26557951040fSNavdeep Parhar #if defined(INET) || defined(INET6) 2656786099deSNavdeep Parhar if (needs_tcp_csum(m0)) { 26577951040fSNavdeep Parhar tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); 26587951040fSNavdeep Parhar m0->m_pkthdr.l4hlen = tcp->th_off * 4; 2659786099deSNavdeep Parhar #ifdef RATELIMIT 2660786099deSNavdeep Parhar if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) { 2661786099deSNavdeep Parhar set_mbuf_eo_tsclk_tsoff(m0, 2662786099deSNavdeep Parhar V_FW_ETH_TX_EO_WR_TSCLK(tsclk) | 2663786099deSNavdeep Parhar V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1)); 2664786099deSNavdeep Parhar } else 2665786099deSNavdeep Parhar set_mbuf_eo_tsclk_tsoff(m0, 0); 2666e9edde41SGleb Smirnoff } else if (needs_udp_csum(m0)) { 2667786099deSNavdeep Parhar m0->m_pkthdr.l4hlen = sizeof(struct udphdr); 2668786099deSNavdeep Parhar #endif 26696af45170SJohn Baldwin } 2670786099deSNavdeep Parhar #ifdef RATELIMIT 2671e38a50e8SJohn Baldwin if (needs_eo(cst)) { 2672786099deSNavdeep Parhar u_int immhdrs; 2673786099deSNavdeep Parhar 2674786099deSNavdeep Parhar /* EO WRs have the headers in the WR and not the GL. */ 2675786099deSNavdeep Parhar immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + 2676786099deSNavdeep Parhar m0->m_pkthdr.l4hlen; 2677d76bbe17SJohn Baldwin cflags = 0; 2678d76bbe17SJohn Baldwin nsegs = count_mbuf_nsegs(m0, immhdrs, &cflags); 2679d76bbe17SJohn Baldwin MPASS(cflags == mbuf_cflags(m0)); 2680786099deSNavdeep Parhar set_mbuf_eo_nsegs(m0, nsegs); 2681786099deSNavdeep Parhar set_mbuf_eo_len16(m0, 2682786099deSNavdeep Parhar txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0))); 2683786099deSNavdeep Parhar } 2684786099deSNavdeep Parhar #endif 26857951040fSNavdeep Parhar #endif 26867951040fSNavdeep Parhar MPASS(m0 == *mp); 26877951040fSNavdeep Parhar return (0); 26887951040fSNavdeep Parhar } 26897951040fSNavdeep Parhar 26907951040fSNavdeep Parhar void * 26917951040fSNavdeep Parhar start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) 26927951040fSNavdeep Parhar { 26937951040fSNavdeep Parhar struct sge_eq *eq = &wrq->eq; 26947951040fSNavdeep Parhar struct adapter *sc = wrq->adapter; 26957951040fSNavdeep Parhar int ndesc, available; 26967951040fSNavdeep Parhar struct wrqe *wr; 26977951040fSNavdeep Parhar void *w; 26987951040fSNavdeep Parhar 26997951040fSNavdeep Parhar MPASS(len16 > 0); 27000cadedfcSNavdeep Parhar ndesc = tx_len16_to_desc(len16); 27017951040fSNavdeep Parhar MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); 27027951040fSNavdeep Parhar 27037951040fSNavdeep Parhar EQ_LOCK(eq); 27047951040fSNavdeep Parhar 27058d6ae10aSNavdeep Parhar if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 27067951040fSNavdeep Parhar drain_wrq_wr_list(sc, wrq); 27077951040fSNavdeep Parhar 27087951040fSNavdeep Parhar if (!STAILQ_EMPTY(&wrq->wr_list)) { 27097951040fSNavdeep Parhar slowpath: 27107951040fSNavdeep Parhar EQ_UNLOCK(eq); 27117951040fSNavdeep Parhar wr = alloc_wrqe(len16 * 16, wrq); 27127951040fSNavdeep Parhar if (__predict_false(wr == NULL)) 27137951040fSNavdeep Parhar return (NULL); 27147951040fSNavdeep Parhar cookie->pidx = -1; 27157951040fSNavdeep Parhar cookie->ndesc = ndesc; 27167951040fSNavdeep Parhar return (&wr->wr); 27177951040fSNavdeep Parhar } 27187951040fSNavdeep Parhar 27197951040fSNavdeep Parhar eq->cidx = read_hw_cidx(eq); 
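	/*
	 * One descriptor is always held back so that a completely full ring
	 * (pidx just behind cidx) remains distinguishable from an empty one
	 * (pidx == cidx).
	 */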
27207951040fSNavdeep Parhar if (eq->pidx == eq->cidx) 27217951040fSNavdeep Parhar available = eq->sidx - 1; 27227951040fSNavdeep Parhar else 27237951040fSNavdeep Parhar available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 27247951040fSNavdeep Parhar if (available < ndesc) 27257951040fSNavdeep Parhar goto slowpath; 27267951040fSNavdeep Parhar 27277951040fSNavdeep Parhar cookie->pidx = eq->pidx; 27287951040fSNavdeep Parhar cookie->ndesc = ndesc; 27297951040fSNavdeep Parhar TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); 27307951040fSNavdeep Parhar 27317951040fSNavdeep Parhar w = &eq->desc[eq->pidx]; 27327951040fSNavdeep Parhar IDXINCR(eq->pidx, ndesc, eq->sidx); 2733f50c49ccSNavdeep Parhar if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { 27347951040fSNavdeep Parhar w = &wrq->ss[0]; 27357951040fSNavdeep Parhar wrq->ss_pidx = cookie->pidx; 27367951040fSNavdeep Parhar wrq->ss_len = len16 * 16; 27377951040fSNavdeep Parhar } 27387951040fSNavdeep Parhar 27397951040fSNavdeep Parhar EQ_UNLOCK(eq); 27407951040fSNavdeep Parhar 27417951040fSNavdeep Parhar return (w); 27427951040fSNavdeep Parhar } 27437951040fSNavdeep Parhar 27447951040fSNavdeep Parhar void 27457951040fSNavdeep Parhar commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) 27467951040fSNavdeep Parhar { 27477951040fSNavdeep Parhar struct sge_eq *eq = &wrq->eq; 27487951040fSNavdeep Parhar struct adapter *sc = wrq->adapter; 27497951040fSNavdeep Parhar int ndesc, pidx; 27507951040fSNavdeep Parhar struct wrq_cookie *prev, *next; 27517951040fSNavdeep Parhar 27527951040fSNavdeep Parhar if (cookie->pidx == -1) { 27537951040fSNavdeep Parhar struct wrqe *wr = __containerof(w, struct wrqe, wr); 27547951040fSNavdeep Parhar 27557951040fSNavdeep Parhar t4_wrq_tx(sc, wr); 27567951040fSNavdeep Parhar return; 27577951040fSNavdeep Parhar } 27587951040fSNavdeep Parhar 27597951040fSNavdeep Parhar if (__predict_false(w == &wrq->ss[0])) { 27607951040fSNavdeep Parhar int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; 27617951040fSNavdeep Parhar 27627951040fSNavdeep Parhar MPASS(wrq->ss_len > n); /* WR had better wrap around. */ 27637951040fSNavdeep Parhar bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); 27647951040fSNavdeep Parhar bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); 27657951040fSNavdeep Parhar wrq->tx_wrs_ss++; 27667951040fSNavdeep Parhar } else 27677951040fSNavdeep Parhar wrq->tx_wrs_direct++; 27687951040fSNavdeep Parhar 27697951040fSNavdeep Parhar EQ_LOCK(eq); 27708d6ae10aSNavdeep Parhar ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ 27718d6ae10aSNavdeep Parhar pidx = cookie->pidx; 27728d6ae10aSNavdeep Parhar MPASS(pidx >= 0 && pidx < eq->sidx); 27737951040fSNavdeep Parhar prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); 27747951040fSNavdeep Parhar next = TAILQ_NEXT(cookie, link); 27757951040fSNavdeep Parhar if (prev == NULL) { 27767951040fSNavdeep Parhar MPASS(pidx == eq->dbidx); 27772e09fe91SNavdeep Parhar if (next == NULL || ndesc >= 16) { 27782e09fe91SNavdeep Parhar int available; 27792e09fe91SNavdeep Parhar struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 27802e09fe91SNavdeep Parhar 27812e09fe91SNavdeep Parhar /* 27822e09fe91SNavdeep Parhar * Note that the WR via which we'll request tx updates 27832e09fe91SNavdeep Parhar * is at pidx and not eq->pidx, which has moved on 27842e09fe91SNavdeep Parhar * already. 
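			 * (This cookie was at the head of the incomplete
			 * list, so its pidx is still eq->dbidx; see the
			 * MPASS above.)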
27852e09fe91SNavdeep Parhar */ 27862e09fe91SNavdeep Parhar dst = (void *)&eq->desc[pidx]; 27872e09fe91SNavdeep Parhar available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 27882e09fe91SNavdeep Parhar if (available < eq->sidx / 4 && 27892e09fe91SNavdeep Parhar atomic_cmpset_int(&eq->equiq, 0, 1)) { 2790ddf09ad6SNavdeep Parhar /* 2791ddf09ad6SNavdeep Parhar * XXX: This is not 100% reliable with some 2792ddf09ad6SNavdeep Parhar * types of WRs. But this is a very unusual 2793ddf09ad6SNavdeep Parhar * situation for an ofld/ctrl queue anyway. 2794ddf09ad6SNavdeep Parhar */ 27952e09fe91SNavdeep Parhar dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 27962e09fe91SNavdeep Parhar F_FW_WR_EQUEQ); 27972e09fe91SNavdeep Parhar } 27982e09fe91SNavdeep Parhar 27997951040fSNavdeep Parhar ring_eq_db(wrq->adapter, eq, ndesc); 28002e09fe91SNavdeep Parhar } else { 28017951040fSNavdeep Parhar MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); 28027951040fSNavdeep Parhar next->pidx = pidx; 28037951040fSNavdeep Parhar next->ndesc += ndesc; 28047951040fSNavdeep Parhar } 28057951040fSNavdeep Parhar } else { 28067951040fSNavdeep Parhar MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); 28077951040fSNavdeep Parhar prev->ndesc += ndesc; 28087951040fSNavdeep Parhar } 28097951040fSNavdeep Parhar TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); 28107951040fSNavdeep Parhar 28117951040fSNavdeep Parhar if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 28127951040fSNavdeep Parhar drain_wrq_wr_list(sc, wrq); 28137951040fSNavdeep Parhar 28147951040fSNavdeep Parhar #ifdef INVARIANTS 28157951040fSNavdeep Parhar if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { 28167951040fSNavdeep Parhar /* Doorbell must have caught up to the pidx. */ 28177951040fSNavdeep Parhar MPASS(wrq->eq.pidx == wrq->eq.dbidx); 28187951040fSNavdeep Parhar } 28197951040fSNavdeep Parhar #endif 28207951040fSNavdeep Parhar EQ_UNLOCK(eq); 28217951040fSNavdeep Parhar } 28227951040fSNavdeep Parhar 28237951040fSNavdeep Parhar static u_int 28247951040fSNavdeep Parhar can_resume_eth_tx(struct mp_ring *r) 28257951040fSNavdeep Parhar { 28267951040fSNavdeep Parhar struct sge_eq *eq = r->cookie; 28277951040fSNavdeep Parhar 28287951040fSNavdeep Parhar return (total_available_tx_desc(eq) > eq->sidx / 8); 28297951040fSNavdeep Parhar } 28307951040fSNavdeep Parhar 2831d735920dSNavdeep Parhar static inline bool 28327951040fSNavdeep Parhar cannot_use_txpkts(struct mbuf *m) 28337951040fSNavdeep Parhar { 28347951040fSNavdeep Parhar /* maybe put a GL limit too, to avoid silliness? 
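	 * For now only TSO, raw work request, and TLS mbufs are excluded
	 * from txpkts coalescing.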
*/ 28357951040fSNavdeep Parhar 2836bddf7343SJohn Baldwin return (needs_tso(m) || (mbuf_cflags(m) & (MC_RAW_WR | MC_TLS)) != 0); 28377951040fSNavdeep Parhar } 28387951040fSNavdeep Parhar 28391404daa7SNavdeep Parhar static inline int 28401404daa7SNavdeep Parhar discard_tx(struct sge_eq *eq) 28411404daa7SNavdeep Parhar { 28421404daa7SNavdeep Parhar 28431404daa7SNavdeep Parhar return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); 28441404daa7SNavdeep Parhar } 28451404daa7SNavdeep Parhar 28465cdaef71SJohn Baldwin static inline int 2847d735920dSNavdeep Parhar wr_can_update_eq(void *p) 28485cdaef71SJohn Baldwin { 2849d735920dSNavdeep Parhar struct fw_eth_tx_pkts_wr *wr = p; 28505cdaef71SJohn Baldwin 28515cdaef71SJohn Baldwin switch (G_FW_WR_OP(be32toh(wr->op_pkd))) { 28525cdaef71SJohn Baldwin case FW_ULPTX_WR: 28535cdaef71SJohn Baldwin case FW_ETH_TX_PKT_WR: 28545cdaef71SJohn Baldwin case FW_ETH_TX_PKTS_WR: 2855693a9dfcSNavdeep Parhar case FW_ETH_TX_PKTS2_WR: 28565cdaef71SJohn Baldwin case FW_ETH_TX_PKT_VM_WR: 2857d735920dSNavdeep Parhar case FW_ETH_TX_PKTS_VM_WR: 28585cdaef71SJohn Baldwin return (1); 28595cdaef71SJohn Baldwin default: 28605cdaef71SJohn Baldwin return (0); 28615cdaef71SJohn Baldwin } 28625cdaef71SJohn Baldwin } 28635cdaef71SJohn Baldwin 2864d735920dSNavdeep Parhar static inline void 2865d735920dSNavdeep Parhar set_txupdate_flags(struct sge_txq *txq, u_int avail, 2866d735920dSNavdeep Parhar struct fw_eth_tx_pkt_wr *wr) 2867d735920dSNavdeep Parhar { 2868d735920dSNavdeep Parhar struct sge_eq *eq = &txq->eq; 2869d735920dSNavdeep Parhar struct txpkts *txp = &txq->txp; 2870d735920dSNavdeep Parhar 2871d735920dSNavdeep Parhar if ((txp->npkt > 0 || avail < eq->sidx / 2) && 2872d735920dSNavdeep Parhar atomic_cmpset_int(&eq->equiq, 0, 1)) { 2873d735920dSNavdeep Parhar wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); 2874d735920dSNavdeep Parhar eq->equeqidx = eq->pidx; 2875d735920dSNavdeep Parhar } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 2876d735920dSNavdeep Parhar wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 2877d735920dSNavdeep Parhar eq->equeqidx = eq->pidx; 2878d735920dSNavdeep Parhar } 2879d735920dSNavdeep Parhar } 2880d735920dSNavdeep Parhar 28817951040fSNavdeep Parhar /* 28827951040fSNavdeep Parhar * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to 28837951040fSNavdeep Parhar * be consumed. Return the actual number consumed. 0 indicates a stall. 
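 * On return *coalescing is true if packets are still buffered in txq->txp,
 * waiting to be coalesced into a later work request.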
28847951040fSNavdeep Parhar */ 28857951040fSNavdeep Parhar static u_int 2886d735920dSNavdeep Parhar eth_tx(struct mp_ring *r, u_int cidx, u_int pidx, bool *coalescing) 28877951040fSNavdeep Parhar { 28887951040fSNavdeep Parhar struct sge_txq *txq = r->cookie; 28897951040fSNavdeep Parhar struct ifnet *ifp = txq->ifp; 2890d735920dSNavdeep Parhar struct sge_eq *eq = &txq->eq; 2891d735920dSNavdeep Parhar struct txpkts *txp = &txq->txp; 2892fe2ebb76SJohn Baldwin struct vi_info *vi = ifp->if_softc; 28937c228be3SNavdeep Parhar struct adapter *sc = vi->adapter; 28947951040fSNavdeep Parhar u_int total, remaining; /* # of packets */ 2895d735920dSNavdeep Parhar u_int n, avail, dbdiff; /* # of hardware descriptors */ 2896d735920dSNavdeep Parhar int i, rc; 2897d735920dSNavdeep Parhar struct mbuf *m0; 2898d735920dSNavdeep Parhar bool snd; 2899d735920dSNavdeep Parhar void *wr; /* start of the last WR written to the ring */ 2900d735920dSNavdeep Parhar 2901d735920dSNavdeep Parhar TXQ_LOCK_ASSERT_OWNED(txq); 29027951040fSNavdeep Parhar 29037951040fSNavdeep Parhar remaining = IDXDIFF(pidx, cidx, r->size); 29041404daa7SNavdeep Parhar if (__predict_false(discard_tx(eq))) { 2905d735920dSNavdeep Parhar for (i = 0; i < txp->npkt; i++) 2906d735920dSNavdeep Parhar m_freem(txp->mb[i]); 2907d735920dSNavdeep Parhar txp->npkt = 0; 29087951040fSNavdeep Parhar while (cidx != pidx) { 29097951040fSNavdeep Parhar m0 = r->items[cidx]; 29107951040fSNavdeep Parhar m_freem(m0); 29117951040fSNavdeep Parhar if (++cidx == r->size) 29127951040fSNavdeep Parhar cidx = 0; 29137951040fSNavdeep Parhar } 2914d735920dSNavdeep Parhar reclaim_tx_descs(txq, eq->sidx); 2915d735920dSNavdeep Parhar *coalescing = false; 2916d735920dSNavdeep Parhar return (remaining); /* emptied */ 29177951040fSNavdeep Parhar } 29187951040fSNavdeep Parhar 29197951040fSNavdeep Parhar /* How many hardware descriptors do we have readily available. */ 2920d735920dSNavdeep Parhar if (eq->pidx == eq->cidx) { 2921d735920dSNavdeep Parhar avail = eq->sidx - 1; 2922d735920dSNavdeep Parhar if (txp->score++ >= 5) 2923d735920dSNavdeep Parhar txp->score = 5; /* tx is completely idle, reset. 
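					 * The score drifts toward 5 here;
					 * successful coalescing pushes it
					 * toward 10 and an empty mp_ring
					 * decays it toward 1.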
*/ 2924d735920dSNavdeep Parhar } else 2925d735920dSNavdeep Parhar avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 29267951040fSNavdeep Parhar 2927d735920dSNavdeep Parhar total = 0; 2928d735920dSNavdeep Parhar if (remaining == 0) { 2929d735920dSNavdeep Parhar if (txp->score-- == 1) /* egr_update had to drain txpkts */ 2930d735920dSNavdeep Parhar txp->score = 1; 2931d735920dSNavdeep Parhar goto send_txpkts; 2932d735920dSNavdeep Parhar } 2933d735920dSNavdeep Parhar 2934d735920dSNavdeep Parhar dbdiff = 0; 2935d735920dSNavdeep Parhar MPASS(remaining > 0); 29367951040fSNavdeep Parhar while (remaining > 0) { 29377951040fSNavdeep Parhar m0 = r->items[cidx]; 29387951040fSNavdeep Parhar M_ASSERTPKTHDR(m0); 29397951040fSNavdeep Parhar MPASS(m0->m_nextpkt == NULL); 29407951040fSNavdeep Parhar 2941d735920dSNavdeep Parhar if (avail < 2 * SGE_MAX_WR_NDESC) 2942d735920dSNavdeep Parhar avail += reclaim_tx_descs(txq, 64); 2943d735920dSNavdeep Parhar 2944d735920dSNavdeep Parhar if (txp->npkt > 0 || remaining > 1 || txp->score > 3 || 2945d735920dSNavdeep Parhar atomic_load_int(&txq->eq.equiq) != 0) { 2946d735920dSNavdeep Parhar if (sc->flags & IS_VF) 2947d735920dSNavdeep Parhar rc = add_to_txpkts_vf(sc, txq, m0, avail, &snd); 2948d735920dSNavdeep Parhar else 2949d735920dSNavdeep Parhar rc = add_to_txpkts_pf(sc, txq, m0, avail, &snd); 2950d735920dSNavdeep Parhar } else { 2951d735920dSNavdeep Parhar snd = false; 2952d735920dSNavdeep Parhar rc = EINVAL; 2953d735920dSNavdeep Parhar } 2954d735920dSNavdeep Parhar if (snd) { 2955d735920dSNavdeep Parhar MPASS(txp->npkt > 0); 2956d735920dSNavdeep Parhar for (i = 0; i < txp->npkt; i++) 2957d735920dSNavdeep Parhar ETHER_BPF_MTAP(ifp, txp->mb[i]); 2958d735920dSNavdeep Parhar if (txp->npkt > 1) { 2959d735920dSNavdeep Parhar if (txp->score++ >= 10) 2960d735920dSNavdeep Parhar txp->score = 10; 2961d735920dSNavdeep Parhar MPASS(avail >= tx_len16_to_desc(txp->len16)); 2962d735920dSNavdeep Parhar if (sc->flags & IS_VF) 2963d735920dSNavdeep Parhar n = write_txpkts_vm_wr(sc, txq); 2964d735920dSNavdeep Parhar else 2965d735920dSNavdeep Parhar n = write_txpkts_wr(sc, txq); 2966d735920dSNavdeep Parhar } else { 2967d735920dSNavdeep Parhar MPASS(avail >= 2968d735920dSNavdeep Parhar tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 2969d735920dSNavdeep Parhar if (sc->flags & IS_VF) 2970d735920dSNavdeep Parhar n = write_txpkt_vm_wr(sc, txq, 2971d735920dSNavdeep Parhar txp->mb[0]); 2972d735920dSNavdeep Parhar else 2973d735920dSNavdeep Parhar n = write_txpkt_wr(sc, txq, txp->mb[0], 2974d735920dSNavdeep Parhar avail); 2975d735920dSNavdeep Parhar } 2976d735920dSNavdeep Parhar MPASS(n <= SGE_MAX_WR_NDESC); 2977d735920dSNavdeep Parhar avail -= n; 2978d735920dSNavdeep Parhar dbdiff += n; 2979d735920dSNavdeep Parhar wr = &eq->desc[eq->pidx]; 2980d735920dSNavdeep Parhar IDXINCR(eq->pidx, n, eq->sidx); 2981d735920dSNavdeep Parhar txp->npkt = 0; /* emptied */ 2982d735920dSNavdeep Parhar } 2983d735920dSNavdeep Parhar if (rc == 0) { 2984d735920dSNavdeep Parhar /* m0 was coalesced into txq->txpkts. */ 2985d735920dSNavdeep Parhar goto next_mbuf; 2986d735920dSNavdeep Parhar } 2987d735920dSNavdeep Parhar if (rc == EAGAIN) { 2988d735920dSNavdeep Parhar /* 2989d735920dSNavdeep Parhar * m0 is suitable for tx coalescing but could not be 2990d735920dSNavdeep Parhar * combined with the existing txq->txpkts, which has now 2991d735920dSNavdeep Parhar * been transmitted. Start a new txpkts with m0. 
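			 * The continue below replays the same ring entry,
			 * so add_to_txpkts runs again, this time against an
			 * empty txp.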
2992d735920dSNavdeep Parhar */ 2993d735920dSNavdeep Parhar MPASS(snd); 2994d735920dSNavdeep Parhar MPASS(txp->npkt == 0); 2995d735920dSNavdeep Parhar continue; 29967951040fSNavdeep Parhar } 29977951040fSNavdeep Parhar 2998d735920dSNavdeep Parhar MPASS(rc != 0 && rc != EAGAIN); 2999d735920dSNavdeep Parhar MPASS(txp->npkt == 0); 3000d735920dSNavdeep Parhar wr = &eq->desc[eq->pidx]; 3001bddf7343SJohn Baldwin if (mbuf_cflags(m0) & MC_RAW_WR) { 3002d735920dSNavdeep Parhar n = write_raw_wr(txq, wr, m0, avail); 3003bddf7343SJohn Baldwin #ifdef KERN_TLS 3004bddf7343SJohn Baldwin } else if (mbuf_cflags(m0) & MC_TLS) { 3005bddf7343SJohn Baldwin ETHER_BPF_MTAP(ifp, m0); 3006d735920dSNavdeep Parhar n = t6_ktls_write_wr(txq, wr, m0, mbuf_nsegs(m0), 3007d735920dSNavdeep Parhar avail); 3008bddf7343SJohn Baldwin #endif 30097951040fSNavdeep Parhar } else { 3010d735920dSNavdeep Parhar n = tx_len16_to_desc(mbuf_len16(m0)); 3011d735920dSNavdeep Parhar if (__predict_false(avail < n)) { 3012d735920dSNavdeep Parhar avail += reclaim_tx_descs(txq, 32); 3013d735920dSNavdeep Parhar if (avail < n) 3014d735920dSNavdeep Parhar break; /* out of descriptors */ 30157951040fSNavdeep Parhar } 3016*3bbb68f0SNavdeep Parhar ETHER_BPF_MTAP(ifp, m0); 3017d735920dSNavdeep Parhar if (sc->flags & IS_VF) 3018d735920dSNavdeep Parhar n = write_txpkt_vm_wr(sc, txq, m0); 3019d735920dSNavdeep Parhar else 3020d735920dSNavdeep Parhar n = write_txpkt_wr(sc, txq, m0, avail); 3021d735920dSNavdeep Parhar } 3022d735920dSNavdeep Parhar MPASS(n >= 1 && n <= avail); 3023bddf7343SJohn Baldwin if (!(mbuf_cflags(m0) & MC_TLS)) 3024bddf7343SJohn Baldwin MPASS(n <= SGE_MAX_WR_NDESC); 30257951040fSNavdeep Parhar 3026d735920dSNavdeep Parhar avail -= n; 30277951040fSNavdeep Parhar dbdiff += n; 30287951040fSNavdeep Parhar IDXINCR(eq->pidx, n, eq->sidx); 30297951040fSNavdeep Parhar 3030d735920dSNavdeep Parhar if (dbdiff >= 512 / EQ_ESIZE) { /* X_FETCHBURSTMAX_512B */ 3031d735920dSNavdeep Parhar if (wr_can_update_eq(wr)) 3032d735920dSNavdeep Parhar set_txupdate_flags(txq, avail, wr); 30337951040fSNavdeep Parhar ring_eq_db(sc, eq, dbdiff); 3034d735920dSNavdeep Parhar avail += reclaim_tx_descs(txq, 32); 30357951040fSNavdeep Parhar dbdiff = 0; 30367951040fSNavdeep Parhar } 3037d735920dSNavdeep Parhar next_mbuf: 3038d735920dSNavdeep Parhar total++; 3039d735920dSNavdeep Parhar remaining--; 3040d735920dSNavdeep Parhar if (__predict_false(++cidx == r->size)) 3041d735920dSNavdeep Parhar cidx = 0; 30427951040fSNavdeep Parhar } 30437951040fSNavdeep Parhar if (dbdiff != 0) { 3044d735920dSNavdeep Parhar if (wr_can_update_eq(wr)) 3045d735920dSNavdeep Parhar set_txupdate_flags(txq, avail, wr); 30467951040fSNavdeep Parhar ring_eq_db(sc, eq, dbdiff); 30477951040fSNavdeep Parhar reclaim_tx_descs(txq, 32); 3048d735920dSNavdeep Parhar } else if (eq->pidx == eq->cidx && txp->npkt > 0 && 3049d735920dSNavdeep Parhar atomic_load_int(&txq->eq.equiq) == 0) { 3050d735920dSNavdeep Parhar /* 3051d735920dSNavdeep Parhar * If nothing was submitted to the chip for tx (it was coalesced 3052d735920dSNavdeep Parhar * into txpkts instead) and there is no tx update outstanding 3053d735920dSNavdeep Parhar * then we need to send txpkts now. 
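 * Nothing else is guaranteed to trigger a later flush, so without this
 * the held packets could linger in txp indefinitely.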
3054d735920dSNavdeep Parhar */ 3055d735920dSNavdeep Parhar send_txpkts: 3056d735920dSNavdeep Parhar MPASS(txp->npkt > 0); 3057d735920dSNavdeep Parhar for (i = 0; i < txp->npkt; i++) 3058d735920dSNavdeep Parhar ETHER_BPF_MTAP(ifp, txp->mb[i]); 3059d735920dSNavdeep Parhar if (txp->npkt > 1) { 3060d735920dSNavdeep Parhar MPASS(avail >= tx_len16_to_desc(txp->len16)); 3061d735920dSNavdeep Parhar if (sc->flags & IS_VF) 3062d735920dSNavdeep Parhar n = write_txpkts_vm_wr(sc, txq); 3063d735920dSNavdeep Parhar else 3064d735920dSNavdeep Parhar n = write_txpkts_wr(sc, txq); 3065d735920dSNavdeep Parhar } else { 3066d735920dSNavdeep Parhar MPASS(avail >= 3067d735920dSNavdeep Parhar tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 3068d735920dSNavdeep Parhar if (sc->flags & IS_VF) 3069d735920dSNavdeep Parhar n = write_txpkt_vm_wr(sc, txq, txp->mb[0]); 3070d735920dSNavdeep Parhar else 3071d735920dSNavdeep Parhar n = write_txpkt_wr(sc, txq, txp->mb[0], avail); 30727951040fSNavdeep Parhar } 3073d735920dSNavdeep Parhar MPASS(n <= SGE_MAX_WR_NDESC); 3074d735920dSNavdeep Parhar wr = &eq->desc[eq->pidx]; 3075d735920dSNavdeep Parhar IDXINCR(eq->pidx, n, eq->sidx); 3076d735920dSNavdeep Parhar txp->npkt = 0; /* emptied */ 3077d735920dSNavdeep Parhar 3078d735920dSNavdeep Parhar MPASS(wr_can_update_eq(wr)); 3079d735920dSNavdeep Parhar set_txupdate_flags(txq, avail - n, wr); 3080d735920dSNavdeep Parhar ring_eq_db(sc, eq, n); 3081d735920dSNavdeep Parhar reclaim_tx_descs(txq, 32); 3082d735920dSNavdeep Parhar } 3083d735920dSNavdeep Parhar *coalescing = txp->npkt > 0; 30847951040fSNavdeep Parhar 30857951040fSNavdeep Parhar return (total); 3086733b9277SNavdeep Parhar } 3087733b9277SNavdeep Parhar 308854e4ee71SNavdeep Parhar static inline void 308954e4ee71SNavdeep Parhar init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 3090b2daa9a9SNavdeep Parhar int qsize) 309154e4ee71SNavdeep Parhar { 3092b2daa9a9SNavdeep Parhar 309354e4ee71SNavdeep Parhar KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 309454e4ee71SNavdeep Parhar ("%s: bad tmr_idx %d", __func__, tmr_idx)); 309554e4ee71SNavdeep Parhar KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 309654e4ee71SNavdeep Parhar ("%s: bad pktc_idx %d", __func__, pktc_idx)); 309754e4ee71SNavdeep Parhar 309854e4ee71SNavdeep Parhar iq->flags = 0; 309954e4ee71SNavdeep Parhar iq->adapter = sc; 31007a32954cSNavdeep Parhar iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 31017a32954cSNavdeep Parhar iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 31027a32954cSNavdeep Parhar if (pktc_idx >= 0) { 31037a32954cSNavdeep Parhar iq->intr_params |= F_QINTR_CNT_EN; 310454e4ee71SNavdeep Parhar iq->intr_pktc_idx = pktc_idx; 31057a32954cSNavdeep Parhar } 3106d14b0ac1SNavdeep Parhar iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 310790e7434aSNavdeep Parhar iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; 310854e4ee71SNavdeep Parhar } 310954e4ee71SNavdeep Parhar 311054e4ee71SNavdeep Parhar static inline void 3111e3207e19SNavdeep Parhar init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 311254e4ee71SNavdeep Parhar { 31131458bff9SNavdeep Parhar 311454e4ee71SNavdeep Parhar fl->qsize = qsize; 311590e7434aSNavdeep Parhar fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 311654e4ee71SNavdeep Parhar strlcpy(fl->lockname, name, sizeof(fl->lockname)); 3117e3207e19SNavdeep Parhar if (sc->flags & BUF_PACKING_OK && 3118e3207e19SNavdeep Parhar ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 3119e3207e19SNavdeep Parhar (is_t4(sc) && 
buffer_packing == 1)))/* T4: disabled unless 1 */ 31201458bff9SNavdeep Parhar fl->flags |= FL_BUF_PACKING; 312146e1e307SNavdeep Parhar fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING); 312246e1e307SNavdeep Parhar fl->safe_zidx = sc->sge.safe_zidx; 312354e4ee71SNavdeep Parhar } 312454e4ee71SNavdeep Parhar 312554e4ee71SNavdeep Parhar static inline void 312690e7434aSNavdeep Parhar init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize, 312790e7434aSNavdeep Parhar uint8_t tx_chan, uint16_t iqid, char *name) 312854e4ee71SNavdeep Parhar { 3129733b9277SNavdeep Parhar KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 3130733b9277SNavdeep Parhar 3131733b9277SNavdeep Parhar eq->flags = eqtype & EQ_TYPEMASK; 3132733b9277SNavdeep Parhar eq->tx_chan = tx_chan; 3133733b9277SNavdeep Parhar eq->iqid = iqid; 313490e7434aSNavdeep Parhar eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 3135f7dfe243SNavdeep Parhar strlcpy(eq->lockname, name, sizeof(eq->lockname)); 313654e4ee71SNavdeep Parhar } 313754e4ee71SNavdeep Parhar 313854e4ee71SNavdeep Parhar static int 313954e4ee71SNavdeep Parhar alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 314054e4ee71SNavdeep Parhar bus_dmamap_t *map, bus_addr_t *pa, void **va) 314154e4ee71SNavdeep Parhar { 314254e4ee71SNavdeep Parhar int rc; 314354e4ee71SNavdeep Parhar 314454e4ee71SNavdeep Parhar rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 314554e4ee71SNavdeep Parhar BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 314654e4ee71SNavdeep Parhar if (rc != 0) { 314754e4ee71SNavdeep Parhar device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 314854e4ee71SNavdeep Parhar goto done; 314954e4ee71SNavdeep Parhar } 315054e4ee71SNavdeep Parhar 315154e4ee71SNavdeep Parhar rc = bus_dmamem_alloc(*tag, va, 315254e4ee71SNavdeep Parhar BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 315354e4ee71SNavdeep Parhar if (rc != 0) { 315454e4ee71SNavdeep Parhar device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 315554e4ee71SNavdeep Parhar goto done; 315654e4ee71SNavdeep Parhar } 315754e4ee71SNavdeep Parhar 315854e4ee71SNavdeep Parhar rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 315954e4ee71SNavdeep Parhar if (rc != 0) { 316054e4ee71SNavdeep Parhar device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 316154e4ee71SNavdeep Parhar goto done; 316254e4ee71SNavdeep Parhar } 316354e4ee71SNavdeep Parhar done: 316454e4ee71SNavdeep Parhar if (rc) 316554e4ee71SNavdeep Parhar free_ring(sc, *tag, *map, *pa, *va); 316654e4ee71SNavdeep Parhar 316754e4ee71SNavdeep Parhar return (rc); 316854e4ee71SNavdeep Parhar } 316954e4ee71SNavdeep Parhar 317054e4ee71SNavdeep Parhar static int 317154e4ee71SNavdeep Parhar free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 317254e4ee71SNavdeep Parhar bus_addr_t pa, void *va) 317354e4ee71SNavdeep Parhar { 317454e4ee71SNavdeep Parhar if (pa) 317554e4ee71SNavdeep Parhar bus_dmamap_unload(tag, map); 317654e4ee71SNavdeep Parhar if (va) 317754e4ee71SNavdeep Parhar bus_dmamem_free(tag, va, map); 317854e4ee71SNavdeep Parhar if (tag) 317954e4ee71SNavdeep Parhar bus_dma_tag_destroy(tag); 318054e4ee71SNavdeep Parhar 318154e4ee71SNavdeep Parhar return (0); 318254e4ee71SNavdeep Parhar } 318354e4ee71SNavdeep Parhar 318454e4ee71SNavdeep Parhar /* 318554e4ee71SNavdeep Parhar * Allocates the ring for an ingress queue and an optional freelist. 
If the 318654e4ee71SNavdeep Parhar * freelist is specified it will be allocated and then associated with the 318754e4ee71SNavdeep Parhar * ingress queue. 318854e4ee71SNavdeep Parhar * 318954e4ee71SNavdeep Parhar * Returns errno on failure. Resources allocated up to that point may still be 319054e4ee71SNavdeep Parhar * allocated. Caller is responsible for cleanup in case this function fails. 319154e4ee71SNavdeep Parhar * 3192f549e352SNavdeep Parhar * If the ingress queue will take interrupts directly then the intr_idx 3193f549e352SNavdeep Parhar * specifies the vector, starting from 0. -1 means the interrupts for this 3194f549e352SNavdeep Parhar * queue should be forwarded to the fwq. 319554e4ee71SNavdeep Parhar */ 319654e4ee71SNavdeep Parhar static int 3197fe2ebb76SJohn Baldwin alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl, 3198bc14b14dSNavdeep Parhar int intr_idx, int cong) 319954e4ee71SNavdeep Parhar { 320054e4ee71SNavdeep Parhar int rc, i, cntxt_id; 320154e4ee71SNavdeep Parhar size_t len; 320254e4ee71SNavdeep Parhar struct fw_iq_cmd c; 3203fe2ebb76SJohn Baldwin struct port_info *pi = vi->pi; 320454e4ee71SNavdeep Parhar struct adapter *sc = iq->adapter; 320590e7434aSNavdeep Parhar struct sge_params *sp = &sc->params.sge; 320654e4ee71SNavdeep Parhar __be32 v = 0; 320754e4ee71SNavdeep Parhar 3208b2daa9a9SNavdeep Parhar len = iq->qsize * IQ_ESIZE; 320954e4ee71SNavdeep Parhar rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 321054e4ee71SNavdeep Parhar (void **)&iq->desc); 321154e4ee71SNavdeep Parhar if (rc != 0) 321254e4ee71SNavdeep Parhar return (rc); 321354e4ee71SNavdeep Parhar 321454e4ee71SNavdeep Parhar bzero(&c, sizeof(c)); 321554e4ee71SNavdeep Parhar c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 321654e4ee71SNavdeep Parhar F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 321754e4ee71SNavdeep Parhar V_FW_IQ_CMD_VFN(0)); 321854e4ee71SNavdeep Parhar 321954e4ee71SNavdeep Parhar c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 322054e4ee71SNavdeep Parhar FW_LEN16(c)); 322154e4ee71SNavdeep Parhar 322254e4ee71SNavdeep Parhar /* Special handling for firmware event queue */ 322354e4ee71SNavdeep Parhar if (iq == &sc->sge.fwq) 322454e4ee71SNavdeep Parhar v |= F_FW_IQ_CMD_IQASYNCH; 322554e4ee71SNavdeep Parhar 3226f549e352SNavdeep Parhar if (intr_idx < 0) { 3227f549e352SNavdeep Parhar /* Forwarded interrupts, all headed to fwq */ 3228f549e352SNavdeep Parhar v |= F_FW_IQ_CMD_IQANDST; 3229f549e352SNavdeep Parhar v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); 3230f549e352SNavdeep Parhar } else { 323154e4ee71SNavdeep Parhar KASSERT(intr_idx < sc->intr_count, 323254e4ee71SNavdeep Parhar ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 323354e4ee71SNavdeep Parhar v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 3234f549e352SNavdeep Parhar } 323554e4ee71SNavdeep Parhar 323654e4ee71SNavdeep Parhar c.type_to_iqandstindex = htobe32(v | 323754e4ee71SNavdeep Parhar V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 3238fe2ebb76SJohn Baldwin V_FW_IQ_CMD_VIID(vi->viid) | 323954e4ee71SNavdeep Parhar V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 324054e4ee71SNavdeep Parhar c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 324154e4ee71SNavdeep Parhar F_FW_IQ_CMD_IQGTSMODE | 324254e4ee71SNavdeep Parhar V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 3243b2daa9a9SNavdeep Parhar V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 324454e4ee71SNavdeep Parhar c.iqsize = htobe16(iq->qsize); 324554e4ee71SNavdeep Parhar 
c.iqaddr = htobe64(iq->ba); 3246bc14b14dSNavdeep Parhar if (cong >= 0) 3247bc14b14dSNavdeep Parhar c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 324854e4ee71SNavdeep Parhar 324954e4ee71SNavdeep Parhar if (fl) { 325054e4ee71SNavdeep Parhar mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 325154e4ee71SNavdeep Parhar 3252b2daa9a9SNavdeep Parhar len = fl->qsize * EQ_ESIZE; 325354e4ee71SNavdeep Parhar rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 325454e4ee71SNavdeep Parhar &fl->ba, (void **)&fl->desc); 325554e4ee71SNavdeep Parhar if (rc) 325654e4ee71SNavdeep Parhar return (rc); 325754e4ee71SNavdeep Parhar 325854e4ee71SNavdeep Parhar /* Allocate space for one software descriptor per buffer. */ 325954e4ee71SNavdeep Parhar rc = alloc_fl_sdesc(fl); 326054e4ee71SNavdeep Parhar if (rc != 0) { 326154e4ee71SNavdeep Parhar device_printf(sc->dev, 326254e4ee71SNavdeep Parhar "failed to setup fl software descriptors: %d\n", 326354e4ee71SNavdeep Parhar rc); 326454e4ee71SNavdeep Parhar return (rc); 326554e4ee71SNavdeep Parhar } 32664d6db4e0SNavdeep Parhar 32674d6db4e0SNavdeep Parhar if (fl->flags & FL_BUF_PACKING) { 326890e7434aSNavdeep Parhar fl->lowat = roundup2(sp->fl_starve_threshold2, 8); 326990e7434aSNavdeep Parhar fl->buf_boundary = sp->pack_boundary; 32704d6db4e0SNavdeep Parhar } else { 327190e7434aSNavdeep Parhar fl->lowat = roundup2(sp->fl_starve_threshold, 8); 3272e3207e19SNavdeep Parhar fl->buf_boundary = 16; 32734d6db4e0SNavdeep Parhar } 327490e7434aSNavdeep Parhar if (fl_pad && fl->buf_boundary < sp->pad_boundary) 327590e7434aSNavdeep Parhar fl->buf_boundary = sp->pad_boundary; 327654e4ee71SNavdeep Parhar 3277214c3582SNavdeep Parhar c.iqns_to_fl0congen |= 3278bc14b14dSNavdeep Parhar htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 3279bc14b14dSNavdeep Parhar F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 32801458bff9SNavdeep Parhar (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 32811458bff9SNavdeep Parhar (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 32821458bff9SNavdeep Parhar 0)); 3283bc14b14dSNavdeep Parhar if (cong >= 0) { 3284bc14b14dSNavdeep Parhar c.iqns_to_fl0congen |= 3285bc14b14dSNavdeep Parhar htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 3286bc14b14dSNavdeep Parhar F_FW_IQ_CMD_FL0CONGCIF | 3287bc14b14dSNavdeep Parhar F_FW_IQ_CMD_FL0CONGEN); 3288bc14b14dSNavdeep Parhar } 328954e4ee71SNavdeep Parhar c.fl0dcaen_to_fl0cidxfthresh = 3290ed7e5640SNavdeep Parhar htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 3291adb0cd84SNavdeep Parhar X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) | 3292ed7e5640SNavdeep Parhar V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 
3293ed7e5640SNavdeep Parhar X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 329454e4ee71SNavdeep Parhar c.fl0size = htobe16(fl->qsize); 329554e4ee71SNavdeep Parhar c.fl0addr = htobe64(fl->ba); 329654e4ee71SNavdeep Parhar } 329754e4ee71SNavdeep Parhar 329854e4ee71SNavdeep Parhar rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 329954e4ee71SNavdeep Parhar if (rc != 0) { 330054e4ee71SNavdeep Parhar device_printf(sc->dev, 330154e4ee71SNavdeep Parhar "failed to create ingress queue: %d\n", rc); 330254e4ee71SNavdeep Parhar return (rc); 330354e4ee71SNavdeep Parhar } 330454e4ee71SNavdeep Parhar 330554e4ee71SNavdeep Parhar iq->cidx = 0; 3306b2daa9a9SNavdeep Parhar iq->gen = F_RSPD_GEN; 330754e4ee71SNavdeep Parhar iq->intr_next = iq->intr_params; 330854e4ee71SNavdeep Parhar iq->cntxt_id = be16toh(c.iqid); 330954e4ee71SNavdeep Parhar iq->abs_id = be16toh(c.physiqid); 3310733b9277SNavdeep Parhar iq->flags |= IQ_ALLOCATED; 331154e4ee71SNavdeep Parhar 331254e4ee71SNavdeep Parhar cntxt_id = iq->cntxt_id - sc->sge.iq_start; 3313733b9277SNavdeep Parhar if (cntxt_id >= sc->sge.niq) { 3314733b9277SNavdeep Parhar panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 3315733b9277SNavdeep Parhar cntxt_id, sc->sge.niq - 1); 3316733b9277SNavdeep Parhar } 331754e4ee71SNavdeep Parhar sc->sge.iqmap[cntxt_id] = iq; 331854e4ee71SNavdeep Parhar 331954e4ee71SNavdeep Parhar if (fl) { 33204d6db4e0SNavdeep Parhar u_int qid; 33214d6db4e0SNavdeep Parhar 33224d6db4e0SNavdeep Parhar iq->flags |= IQ_HAS_FL; 332354e4ee71SNavdeep Parhar fl->cntxt_id = be16toh(c.fl0id); 332454e4ee71SNavdeep Parhar fl->pidx = fl->cidx = 0; 332554e4ee71SNavdeep Parhar 33269f1f7ec9SNavdeep Parhar cntxt_id = fl->cntxt_id - sc->sge.eq_start; 3327733b9277SNavdeep Parhar if (cntxt_id >= sc->sge.neq) { 3328733b9277SNavdeep Parhar panic("%s: fl->cntxt_id (%d) more than the max (%d)", 3329733b9277SNavdeep Parhar __func__, cntxt_id, sc->sge.neq - 1); 3330733b9277SNavdeep Parhar } 333154e4ee71SNavdeep Parhar sc->sge.eqmap[cntxt_id] = (void *)fl; 333254e4ee71SNavdeep Parhar 33334d6db4e0SNavdeep Parhar qid = fl->cntxt_id; 33344d6db4e0SNavdeep Parhar if (isset(&sc->doorbells, DOORBELL_UDB)) { 333590e7434aSNavdeep Parhar uint32_t s_qpp = sc->params.sge.eq_s_qpp; 33364d6db4e0SNavdeep Parhar uint32_t mask = (1 << s_qpp) - 1; 33374d6db4e0SNavdeep Parhar volatile uint8_t *udb; 33384d6db4e0SNavdeep Parhar 33394d6db4e0SNavdeep Parhar udb = sc->udbs_base + UDBS_DB_OFFSET; 33404d6db4e0SNavdeep Parhar udb += (qid >> s_qpp) << PAGE_SHIFT; 33414d6db4e0SNavdeep Parhar qid &= mask; 33424d6db4e0SNavdeep Parhar if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { 33434d6db4e0SNavdeep Parhar udb += qid << UDBS_SEG_SHIFT; 33444d6db4e0SNavdeep Parhar qid = 0; 33454d6db4e0SNavdeep Parhar } 33464d6db4e0SNavdeep Parhar fl->udb = (volatile void *)udb; 33474d6db4e0SNavdeep Parhar } 3348d1205d09SNavdeep Parhar fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; 33494d6db4e0SNavdeep Parhar 335054e4ee71SNavdeep Parhar FL_LOCK(fl); 3351733b9277SNavdeep Parhar /* Enough to make sure the SGE doesn't think it's starved */ 3352733b9277SNavdeep Parhar refill_fl(sc, fl, fl->lowat); 335354e4ee71SNavdeep Parhar FL_UNLOCK(fl); 335454e4ee71SNavdeep Parhar } 335554e4ee71SNavdeep Parhar 33568c0ca00bSNavdeep Parhar if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) { 3357ba41ec48SNavdeep Parhar uint32_t param, val; 3358ba41ec48SNavdeep Parhar 3359ba41ec48SNavdeep Parhar param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 3360ba41ec48SNavdeep Parhar 
	if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", iq->cntxt_id, rc);
		}
	}

	/* Enable IQ interrupts */
	atomic_store_rel_int(&iq->state, IQS_IDLE);
	t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
	    V_INGRESSQID(iq->cntxt_id));

	return (0);
}

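/*
 * Tears down an iq (and its freelist, if any).  Safe to call on a queue that
 * was only partially constructed: the IQ_ALLOCATED flag tells us whether the
 * firmware context needs to be freed, and the rings and locks are checked
 * individually before release.
 */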
static int
free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
{
	int rc;
	struct adapter *sc = iq->adapter;
	device_t dev;

	if (sc == NULL)
		return (0);	/* nothing to do */

	dev = vi ? vi->dev : sc->dev;

	if (iq->flags & IQ_ALLOCATED) {
		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
		    fl ? fl->cntxt_id : 0xffff, 0xffff);
		if (rc != 0) {
			device_printf(dev,
			    "failed to free queue %p: %d\n", iq, rc);
			return (rc);
		}
		iq->flags &= ~IQ_ALLOCATED;
	}

	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);

	bzero(iq, sizeof(*iq));

	if (fl) {
		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
		    fl->desc);

		if (fl->sdesc)
			free_fl_sdesc(sc, fl);

		if (mtx_initialized(&fl->fl_lock))
			mtx_destroy(&fl->fl_lock);

		bzero(fl, sizeof(*fl));
	}

	return (0);
}

static void
add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
    struct sge_iq *iq)
{
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba,
	    "bus address of descriptor ring");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    iq->qsize * IQ_ESIZE, "descriptor ring size in bytes");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &iq->abs_id, 0,
	    sysctl_uint16, "I", "absolute id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &iq->cntxt_id, 0,
	    sysctl_uint16, "I", "SGE context id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &iq->cidx, 0,
	    sysctl_uint16, "I", "consumer index");
}

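/*
 * Adds an "fl" node with the freelist's state and counters under the given
 * queue oid.  Illustrative only (exact names depend on the device and queue
 * in question): the result is readable with sysctl(8), along the lines of
 * "sysctl dev.<port>.<inst>.rxq.0.fl.cidx".
 */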
static void
add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *oid, struct sge_fl *fl)
{
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &fl->ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    fl->sidx * EQ_ESIZE + sc->params.sge.spg_len,
	    "desc ring size in bytes");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &fl->cntxt_id, 0,
	    sysctl_uint16, "I", "SGE context id of the freelist");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL,
	    fl_pad ? 1 : 0, "padding enabled");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL,
	    fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx,
	    0, "consumer index");
	if (fl->flags & FL_BUF_PACKING) {
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset",
		    CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset");
	}
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx,
	    0, "producer index");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated",
	    CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled",
	    CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled",
	    CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)");
}

static int
alloc_fwq(struct adapter *sc)
{
	int rc, intr_idx;
	struct sge_iq *fwq = &sc->sge.fwq;
	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
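	/*
	 * Pick the interrupt vector for the firmware event queue: a VF always
	 * uses vector 0; the PF uses vector 1 when more than one vector is
	 * available and falls back to vector 0 otherwise.
	 */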
	if (sc->flags & IS_VF)
		intr_idx = 0;
	else
		intr_idx = sc->intr_count > 1 ? 1 : 0;
	rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create firmware event queue: %d\n", rc);
		return (rc);
	}

	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue");
	add_iq_sysctls(&sc->ctx, oid, fwq);

	return (0);
}

static int
free_fwq(struct adapter *sc)
{
	return free_iq_fl(NULL, &sc->sge.fwq, NULL);
}

static int
alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	char name[16];
	struct sysctl_oid_list *children;

	snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev),
	    idx);
	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan,
	    sc->sge.fwq.cntxt_id, name);

	children = SYSCTL_CHILDREN(oid);
	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ctrl queue");
	rc = alloc_wrq(sc, NULL, ctrlq, oid);

	return (rc);
}

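/*
 * Maps the cong_drop knob to the 'cong' argument of alloc_iq_fl for an rx
 * queue: -1 disables the congestion context entirely, 1 requests
 * drop-on-congestion (channel map 0), and anything else (0 by default)
 * backpressures the port's rx channels instead.
 */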
int
tnl_cong(struct port_info *pi, int drop)
{

	if (drop == -1)
		return (-1);
	else if (drop == 1)
		return (0);
	else
		return (pi->rx_e_chan_map);
}

static int
alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	struct adapter *sc = vi->adapter;
	struct sysctl_oid_list *children;
	char name[16];

	rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx,
	    tnl_cong(vi->pi, cong_drop));
	if (rc != 0)
		return (rc);

	if (idx == 0)
		sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id;
	else
		KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id,
		    ("iq_base mismatch"));
	KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF,
	    ("PF with non-zero iq_base"));

	/*
	 * The freelist is just barely above the starvation threshold right
	 * now; fill it up a bit more.
	 */
	FL_LOCK(&rxq->fl);
	refill_fl(sc, &rxq->fl, 128);
	FL_UNLOCK(&rxq->fl);

#if defined(INET) || defined(INET6)
	rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs);
	if (rc != 0)
		return (rc);
	MPASS(rxq->lro.ifp == vi->ifp);	/* also indicates LRO init'ed */

	if (vi->ifp->if_capenable & IFCAP_LRO)
		rxq->iq.flags |= IQ_LRO_ENABLED;
#endif
	if (vi->ifp->if_capenable & IFCAP_HWRXTSTMP)
		rxq->iq.flags |= IQ_RX_TIMESTAMP;
	rxq->ifp = vi->ifp;

	children = SYSCTL_CHILDREN(oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue");
	children = SYSCTL_CHILDREN(oid);

	add_iq_sysctls(&vi->ctx, oid, &rxq->iq);
#if defined(INET) || defined(INET6)
	SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
	    &rxq->lro.lro_queued, 0, NULL);
	SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
	    &rxq->lro.lro_flushed, 0, NULL);
#endif
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
	    &rxq->rxcsum, "# of times hardware assisted with checksum");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction",
	    CTLFLAG_RD, &rxq->vlan_extraction,
	    "# of times hardware extracted 802.1Q tag");

	add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl);

	return (rc);
}

static int
free_rxq(struct vi_info *vi, struct sge_rxq *rxq)
{
	int rc;

#if defined(INET) || defined(INET6)
	if (rxq->lro.ifp) {
		tcp_lro_free(&rxq->lro);
		rxq->lro.ifp = NULL;
	}
#endif

	rc = free_iq_fl(vi, &rxq->iq, &rxq->fl);
	if (rc == 0)
		bzero(rxq, sizeof(*rxq));

	return (rc);
}

#ifdef TCP_OFFLOAD
static int
alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq,
    int intr_idx, int idx, struct sysctl_oid *oid)
{
	struct port_info *pi = vi->pi;
	int rc;
	struct sysctl_oid_list *children;
	char name[16];

	rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 0);
	if (rc != 0)
		return (rc);

	children = SYSCTL_CHILDREN(oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue");
	add_iq_sysctls(&vi->ctx, oid, &ofld_rxq->iq);
	add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl);

	return (rc);
}

static int
free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
{
	int rc;

	rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl);
	if (rc == 0)
		bzero(ofld_rxq, sizeof(*ofld_rxq));

	return (rc);
}
#endif

#ifdef DEV_NETMAP
static int
alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
    int idx, struct sysctl_oid *oid)
{
	int rc;
	struct sysctl_oid_list *children;
	struct sysctl_ctx_list *ctx;
	char name[16];
	size_t len;
	struct adapter *sc = vi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);

	MPASS(na != NULL);

	len = vi->qsize_rxq * IQ_ESIZE;
	rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map,
	    &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc);
	if (rc != 0)
		return (rc);

	len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map,
	    &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc);
	if (rc != 0)
		return (rc);

	nm_rxq->vi = vi;
	nm_rxq->nid = idx;
	nm_rxq->iq_cidx = 0;
	nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE;
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	nm_rxq->fl_sidx = na->num_rx_desc;
	nm_rxq->fl_sidx2 = nm_rxq->fl_sidx;	/* copy for rxsync cacheline */
	nm_rxq->intr_idx = intr_idx;
	nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;

	ctx = &vi->ctx;
	children = SYSCTL_CHILDREN(oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->iq_abs_id,
	    0, sysctl_uint16, "I", "absolute id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->iq_cntxt_id,
	    0, sysctl_uint16, "I", "SGE context id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->iq_cidx, 0,
	    sysctl_uint16, "I", "consumer index");

	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_rxq->fl_cntxt_id,
	    0, sysctl_uint16, "I", "SGE context id of the freelist");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
	    &nm_rxq->fl_cidx, 0, "consumer index");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
	    &nm_rxq->fl_pidx, 0, "producer index");

	return (rc);
}

static int
free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->adapter;

	if (vi->flags & VI_INIT_DONE)
		MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID);
	else
		MPASS(nm_rxq->iq_cntxt_id == 0);

	free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map,
	    nm_rxq->iq_ba, nm_rxq->iq_desc);
	free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba,
	    nm_rxq->fl_desc);

	return (0);
}

static int
alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	size_t len;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	char name[16];
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
	    &nm_txq->ba, (void **)&nm_txq->desc);
	if (rc)
		return (rc);

	nm_txq->pidx = nm_txq->cidx = 0;
	nm_txq->sidx = na->num_tx_desc;
	nm_txq->nid = idx;
	nm_txq->iqidx = iqidx;
	nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
	    V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
	if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0))
		nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
	else
		nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
	nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &nm_txq->cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_txq->cidx, 0,
	    sysctl_uint16, "I", "consumer index");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &nm_txq->pidx, 0,
	    sysctl_uint16, "I", "producer index");

	return (rc);
}

static int
free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->adapter;

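	/*
	 * The hardware context should be gone by now: if the VI was ever
	 * initialized, the netmap teardown path is expected to have freed it
	 * and left the INVALID marker behind; otherwise it was never created
	 * and the id is still zero.
	 */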
	if (vi->flags & VI_INIT_DONE)
		MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID);
	else
		MPASS(nm_txq->cntxt_id == 0);

	free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
	    nm_txq->desc);

	return (0);
}
#endif

/*
 * Returns a reasonable automatic cidx flush threshold for a given queue size.
 */
static u_int
qsize_to_fthresh(int qsize)
{
	u_int fthresh;

	while (!powerof2(qsize))
		qsize++;
	fthresh = ilog2(qsize);
	if (fthresh > X_CIDXFLUSHTHRESH_128)
		fthresh = X_CIDXFLUSHTHRESH_128;

	return (fthresh);
}

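/*
 * Worked example (illustrative): a 1024-entry queue plus one status-page
 * entry rounds up to 2048, so fthresh = ilog2(2048) = 11, which is clamped
 * to X_CIDXFLUSHTHRESH_128.  Note that the result is the encoded field
 * value, not an entry count.
 */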
static int
ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
{
	int rc, cntxt_id;
	struct fw_eq_ctrl_cmd c;
	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;

	bzero(&c, sizeof(c));

	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
	    V_FW_EQ_CTRL_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
	c.physeqid_pkd = htobe32(0);
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
		V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
		F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
	    V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) |
	    V_FW_EQ_CTRL_CMD_EQSIZE(qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create control queue %d: %d\n", eq->tx_chan, rc);
		return (rc);
	}
	eq->flags |= EQ_ALLOCATED;

	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = eq;

	return (rc);
}

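/*
 * Ethernet tx queues differ from the ctrl/offload queues in this file in two
 * ways: host flow control is off (HOSTFCMODE_NONE) and the chip generates
 * egress updates automatically instead (a hedged reading of the AUTOEQU*
 * flags below), so no explicit cidx flush threshold is programmed here.
 */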
static int
eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
	int rc, cntxt_id;
	struct fw_eq_eth_cmd c;
	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;

	bzero(&c, sizeof(c));

	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
		V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
		V_FW_EQ_ETH_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create Ethernet egress queue: %d\n", rc);
		return (rc);
	}
	eq->flags |= EQ_ALLOCATED;

	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = eq;

	return (rc);
}

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
static int
ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
	int rc, cntxt_id;
	struct fw_eq_ofld_cmd c;
	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;

	bzero(&c, sizeof(c));

	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
	    V_FW_EQ_OFLD_CMD_VFN(0));
	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
	c.fetchszm_to_iqid =
	    htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
		V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
		F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
	    V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) |
	    V_FW_EQ_OFLD_CMD_EQSIZE(qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create egress queue for TCP offload: %d\n", rc);
		return (rc);
	}
	eq->flags |= EQ_ALLOCATED;

	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = eq;

	return (rc);
}
#endif

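/*
 * Common egress queue setup: allocate the descriptor ring, dispatch to the
 * type-specific firmware command above, then work out which doorbell
 * mechanism (and user doorbell address, if any) this queue will use.
 */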
static int
alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
	int rc, qsize;
	size_t len;

	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);

	qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
	len = qsize * EQ_ESIZE;
	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
	    &eq->ba, (void **)&eq->desc);
	if (rc)
		return (rc);

	eq->pidx = eq->cidx = eq->dbidx = 0;
	/* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. */
	eq->equeqidx = 0;
	eq->doorbells = sc->doorbells;

	switch (eq->flags & EQ_TYPEMASK) {
	case EQ_CTRL:
		rc = ctrl_eq_alloc(sc, eq);
		break;

	case EQ_ETH:
		rc = eth_eq_alloc(sc, vi, eq);
		break;

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	case EQ_OFLD:
		rc = ofld_eq_alloc(sc, vi, eq);
		break;
#endif

	default:
		panic("%s: invalid eq type %d.", __func__,
		    eq->flags & EQ_TYPEMASK);
	}
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to allocate egress queue(%d): %d\n",
		    eq->flags & EQ_TYPEMASK, rc);
	}

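	/*
	 * User doorbell setup.  eq_s_qpp is log2(queues per user-doorbell
	 * page), so the shifts below split the context id into a page offset
	 * and an id within that page.  A queue whose in-page id maps to one
	 * of the page's doorbell segments gets the segment folded into its
	 * doorbell address (udb_qid becomes 0); otherwise it keeps a nonzero
	 * udb_qid and the WCWR doorbell option is dropped.
	 */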
	if (isset(&eq->doorbells, DOORBELL_UDB) ||
	    isset(&eq->doorbells, DOORBELL_UDBWC) ||
	    isset(&eq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT;	/* pg offset */
		eq->udb_qid = eq->cntxt_id & mask;		/* id in page */
		if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&eq->doorbells, DOORBELL_WCWR);
		else {
			udb += eq->udb_qid << UDBS_SEG_SHIFT;	/* seg offset */
			eq->udb_qid = 0;
		}
		eq->udb = (volatile void *)udb;
	}

	return (rc);
}

static int
free_eq(struct adapter *sc, struct sge_eq *eq)
{
	int rc;

	if (eq->flags & EQ_ALLOCATED) {
		switch (eq->flags & EQ_TYPEMASK) {
		case EQ_CTRL:
			rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0,
			    eq->cntxt_id);
			break;

		case EQ_ETH:
			rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0,
			    eq->cntxt_id);
			break;

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
		case EQ_OFLD:
			rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0,
			    eq->cntxt_id);
			break;
#endif

		default:
			panic("%s: invalid eq type %d.", __func__,
			    eq->flags & EQ_TYPEMASK);
		}
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to free egress queue (%d): %d\n",
			    eq->flags & EQ_TYPEMASK, rc);
			return (rc);
		}
		eq->flags &= ~EQ_ALLOCATED;
	}

	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);

	if (mtx_initialized(&eq->eq_lock))
		mtx_destroy(&eq->eq_lock);

	bzero(eq, sizeof(*eq));
	return (0);
}

static int
alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq,
    struct sysctl_oid *oid)
{
	int rc;
	struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx;
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	rc = alloc_eq(sc, vi, &wrq->eq);
	if (rc)
		return (rc);

	wrq->adapter = sc;
	TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq);
	TAILQ_INIT(&wrq->incomplete_wrs);
	STAILQ_INIT(&wrq->wr_list);
	wrq->nwr_pending = 0;
	wrq->ndesc_needed = 0;

	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &wrq->eq.ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len,
	    "desc ring size in bytes");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &wrq->eq.cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &wrq->eq.cidx, 0,
	    sysctl_uint16, "I", "consumer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &wrq->eq.pidx, 0,
	    sysctl_uint16, "I", "producer index");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
	    wrq->eq.sidx, "status page index");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD,
	    &wrq->tx_wrs_direct, "# of work requests (direct)");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD,
	    &wrq->tx_wrs_copied, "# of work requests (copied)");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD,
	    &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)");

	return (rc);
}

static int
free_wrq(struct adapter *sc, struct sge_wrq *wrq)
{
	int rc;

	rc = free_eq(sc, &wrq->eq);
	if (rc)
		return (rc);

	bzero(wrq, sizeof(*wrq));
	return (0);
}

static int
alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_eq *eq = &txq->eq;
	struct txpkts *txp;
	char name[16];
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx,
	    M_CXGBE, &eq->eq_lock, M_WAITOK);
	if (rc != 0) {
		device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc);
		return (rc);
	}

	rc = alloc_eq(sc, vi, eq);
	if (rc != 0) {
		mp_ring_free(txq->r);
		txq->r = NULL;
		return (rc);
	}

	/* Can't fail after this point. */

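	/*
	 * eq_base is the constant offset between an egress queue's context
	 * id and its absolute (physical) id.  It is learned from the first
	 * txq and asserted on the rest; only a VF is expected to see a
	 * nonzero offset.
	 */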
	if (idx == 0)
		sc->sge.eq_base = eq->abs_id - eq->cntxt_id;
	else
		KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id,
		    ("eq_base mismatch"));
	KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF,
	    ("PF with non-zero eq_base"));

	TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq);
	txq->ifp = vi->ifp;
	txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	if (sc->flags & IS_VF)
		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
		    V_TXPKT_INTF(pi->tx_chan));
	else
		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
		    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
		    V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
	txq->tc_idx = -1;
	txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE,
	    M_ZERO | M_WAITOK);

	txp = &txq->txp;
	txp->score = 5;
	MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr);
	txq->txp.max_npkt = min(nitems(txp->mb),
	    sc->params.max_pkts_per_eth_tx_pkts_wr);

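	/*
	 * max_npkt caps how many packets a single txpkts work request may
	 * coalesce.  The limit comes from the firmware; the MPASS above
	 * confirms that the staging array can hold at least that many, so
	 * the min() is just defensive.
	 */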
	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &eq->ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    eq->sidx * EQ_ESIZE + sc->params.sge.spg_len,
	    "desc ring size in bytes");
	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
	    &eq->abs_id, 0, "absolute id of the queue");
	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &eq->cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &eq->cidx, 0,
	    sysctl_uint16, "I", "consumer index");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, &eq->pidx, 0,
	    sysctl_uint16, "I", "producer index");
	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
	    eq->sidx, "status page index");

	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, vi, idx, sysctl_tc,
	    "I", "traffic class (-1 means none)");

	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
	    &txq->txcsum, "# of times hardware assisted with checksum");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion",
	    CTLFLAG_RD, &txq->vlan_insertion,
	    "# of times hardware inserted 802.1Q tag");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
	    &txq->tso_wrs, "# of TSO work requests");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
	    &txq->imm_wrs, "# of work requests with immediate data");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
	    &txq->sgl_wrs, "# of work requests with direct SGL");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs",
	    CTLFLAG_RD, &txq->txpkts0_wrs,
	    "# of txpkts (type 0) work requests");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs",
	    CTLFLAG_RD, &txq->txpkts1_wrs,
	    "# of txpkts (type 1) work requests");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts",
	    CTLFLAG_RD, &txq->txpkts0_pkts,
	    "# of frames tx'd using type0 txpkts work requests");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts",
	    CTLFLAG_RD, &txq->txpkts1_pkts,
	    "# of frames tx'd using type1 txpkts work requests");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD,
	    &txq->raw_wrs, "# of raw work requests (non-packets)");

#ifdef KERN_TLS
	if (sc->flags & KERN_TLS_OK) {
		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
		    "kern_tls_records", CTLFLAG_RD, &txq->kern_tls_records,
		    "# of NIC TLS records transmitted");
		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
		    "kern_tls_short", CTLFLAG_RD, &txq->kern_tls_short,
		    "# of short NIC TLS records transmitted");
		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
		    "kern_tls_partial", CTLFLAG_RD, &txq->kern_tls_partial,
		    "# of partial NIC TLS records transmitted");
		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
		    "kern_tls_full", CTLFLAG_RD, &txq->kern_tls_full,
		    "# of full NIC TLS records transmitted");
		SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO,
		    "kern_tls_octets", CTLFLAG_RD, &txq->kern_tls_octets,
		    "# of payload octets in transmitted NIC TLS records");
records"); 4293bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4294bddf7343SJohn Baldwin "kern_tls_waste", CTLFLAG_RD, &txq->kern_tls_waste, 4295bddf7343SJohn Baldwin "# of octets DMAd but not transmitted in NIC TLS records"); 4296bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4297bddf7343SJohn Baldwin "kern_tls_options", CTLFLAG_RD, &txq->kern_tls_options, 4298bddf7343SJohn Baldwin "# of NIC TLS options-only packets transmitted"); 4299bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4300bddf7343SJohn Baldwin "kern_tls_header", CTLFLAG_RD, &txq->kern_tls_header, 4301bddf7343SJohn Baldwin "# of NIC TLS header-only packets transmitted"); 4302bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4303bddf7343SJohn Baldwin "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin, 4304bddf7343SJohn Baldwin "# of NIC TLS FIN-only packets transmitted"); 4305bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4306bddf7343SJohn Baldwin "kern_tls_fin_short", CTLFLAG_RD, &txq->kern_tls_fin_short, 4307bddf7343SJohn Baldwin "# of NIC TLS padded FIN packets on short TLS records"); 4308bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4309bddf7343SJohn Baldwin "kern_tls_cbc", CTLFLAG_RD, &txq->kern_tls_cbc, 4310bddf7343SJohn Baldwin "# of NIC TLS sessions using AES-CBC"); 4311bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4312bddf7343SJohn Baldwin "kern_tls_gcm", CTLFLAG_RD, &txq->kern_tls_gcm, 4313bddf7343SJohn Baldwin "# of NIC TLS sessions using AES-GCM"); 4314bddf7343SJohn Baldwin } 4315bddf7343SJohn Baldwin #endif 4316d735920dSNavdeep Parhar mp_ring_sysctls(txq->r, &vi->ctx, children); 431754e4ee71SNavdeep Parhar 43187951040fSNavdeep Parhar return (0); 431954e4ee71SNavdeep Parhar } 432054e4ee71SNavdeep Parhar 432154e4ee71SNavdeep Parhar static int 4322fe2ebb76SJohn Baldwin free_txq(struct vi_info *vi, struct sge_txq *txq) 432354e4ee71SNavdeep Parhar { 432454e4ee71SNavdeep Parhar int rc; 43257c228be3SNavdeep Parhar struct adapter *sc = vi->adapter; 432654e4ee71SNavdeep Parhar struct sge_eq *eq = &txq->eq; 432754e4ee71SNavdeep Parhar 4328733b9277SNavdeep Parhar rc = free_eq(sc, eq); 4329733b9277SNavdeep Parhar if (rc) 433054e4ee71SNavdeep Parhar return (rc); 433154e4ee71SNavdeep Parhar 43327951040fSNavdeep Parhar sglist_free(txq->gl); 4333f7dfe243SNavdeep Parhar free(txq->sdesc, M_CXGBE); 43347951040fSNavdeep Parhar mp_ring_free(txq->r); 433554e4ee71SNavdeep Parhar 433654e4ee71SNavdeep Parhar bzero(txq, sizeof(*txq)); 433754e4ee71SNavdeep Parhar return (0); 433854e4ee71SNavdeep Parhar } 433954e4ee71SNavdeep Parhar 434054e4ee71SNavdeep Parhar static void 434154e4ee71SNavdeep Parhar oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 434254e4ee71SNavdeep Parhar { 434354e4ee71SNavdeep Parhar bus_addr_t *ba = arg; 434454e4ee71SNavdeep Parhar 434554e4ee71SNavdeep Parhar KASSERT(nseg == 1, 434654e4ee71SNavdeep Parhar ("%s meant for single segment mappings only.", __func__)); 434754e4ee71SNavdeep Parhar 434854e4ee71SNavdeep Parhar *ba = error ? 
static inline void
ring_fl_db(struct adapter *sc, struct sge_fl *fl)
{
	uint32_t n, v;

	n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx);
	MPASS(n > 0);

	wmb();
	v = fl->dbval | V_PIDX(n);
	if (fl->udb)
		*fl->udb = htole32(v);
	else
		t4_write_reg(sc, sc->sge_kdoorbell_reg, v);
	IDXINCR(fl->dbidx, n, fl->sidx);
}

/*
 * Fills up the freelist by allocating up to 'n' buffers.  Buffers that are
 * recycled do not count towards this allocation budget.
 *
 * Returns non-zero to indicate that this freelist should be added to the list
 * of starving freelists.
 */
static int
refill_fl(struct adapter *sc, struct sge_fl *fl, int n)
{
	__be64 *d;
	struct fl_sdesc *sd;
	uintptr_t pa;
	caddr_t cl;
	struct rx_buf_info *rxb;
	struct cluster_metadata *clm;
	uint16_t max_pidx;
	uint16_t hw_cidx = fl->hw_cidx;		/* stable snapshot */

	FL_LOCK_ASSERT_OWNED(fl);

	/*
	 * We always stop at the beginning of the hardware descriptor that's
	 * just before the one with the hw cidx.  This is to avoid hw pidx =
	 * hw cidx, which would mean an empty freelist to the chip.
	 */
	max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1;
	if (fl->pidx == max_pidx * 8)
		return (0);

	d = &fl->desc[fl->pidx];
	sd = &fl->sdesc[fl->pidx];

	while (n > 0) {

		if (sd->cl != NULL) {

			if (sd->nmbuf == 0) {
				/*
				 * Fast recycle without involving any atomics on
				 * the cluster's metadata (if the cluster has
				 * metadata).  This happens when all frames
				 * received in the cluster were small enough to
				 * fit within a single mbuf each.
				 */
				fl->cl_fast_recycled++;
				goto recycled;
			}

			/*
			 * Cluster is guaranteed to have metadata.  Clusters
Clusters 441938035ed6SNavdeep Parhar * without metadata always take the fast recycle path 442038035ed6SNavdeep Parhar * when they're recycled. 442138035ed6SNavdeep Parhar */ 442246e1e307SNavdeep Parhar clm = cl_metadata(sd); 442338035ed6SNavdeep Parhar MPASS(clm != NULL); 44241458bff9SNavdeep Parhar 442538035ed6SNavdeep Parhar if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 442638035ed6SNavdeep Parhar fl->cl_recycled++; 442782eff304SNavdeep Parhar counter_u64_add(extfree_rels, 1); 442854e4ee71SNavdeep Parhar goto recycled; 442954e4ee71SNavdeep Parhar } 44301458bff9SNavdeep Parhar sd->cl = NULL; /* gave up my reference */ 44311458bff9SNavdeep Parhar } 443238035ed6SNavdeep Parhar MPASS(sd->cl == NULL); 443346e1e307SNavdeep Parhar rxb = &sc->sge.rx_buf_info[fl->zidx]; 443446e1e307SNavdeep Parhar cl = uma_zalloc(rxb->zone, M_NOWAIT); 44352b9010f0SNavdeep Parhar if (__predict_false(cl == NULL)) { 44362b9010f0SNavdeep Parhar if (fl->zidx != fl->safe_zidx) { 443746e1e307SNavdeep Parhar rxb = &sc->sge.rx_buf_info[fl->safe_zidx]; 443846e1e307SNavdeep Parhar cl = uma_zalloc(rxb->zone, M_NOWAIT); 44392b9010f0SNavdeep Parhar } 44402b9010f0SNavdeep Parhar if (cl == NULL) 444154e4ee71SNavdeep Parhar break; 444254e4ee71SNavdeep Parhar } 444338035ed6SNavdeep Parhar fl->cl_allocated++; 44444d6db4e0SNavdeep Parhar n--; 444554e4ee71SNavdeep Parhar 444638035ed6SNavdeep Parhar pa = pmap_kextract((vm_offset_t)cl); 444754e4ee71SNavdeep Parhar sd->cl = cl; 444846e1e307SNavdeep Parhar sd->zidx = fl->zidx; 444946e1e307SNavdeep Parhar 445046e1e307SNavdeep Parhar if (fl->flags & FL_BUF_PACKING) { 445146e1e307SNavdeep Parhar *d = htobe64(pa | rxb->hwidx2); 445246e1e307SNavdeep Parhar sd->moff = rxb->size2; 445346e1e307SNavdeep Parhar } else { 445446e1e307SNavdeep Parhar *d = htobe64(pa | rxb->hwidx1); 445546e1e307SNavdeep Parhar sd->moff = 0; 445646e1e307SNavdeep Parhar } 44577d29df59SNavdeep Parhar recycled: 4458c3fb7725SNavdeep Parhar sd->nmbuf = 0; 445938035ed6SNavdeep Parhar d++; 446054e4ee71SNavdeep Parhar sd++; 446146e1e307SNavdeep Parhar if (__predict_false((++fl->pidx & 7) == 0)) { 446246e1e307SNavdeep Parhar uint16_t pidx = fl->pidx >> 3; 44634d6db4e0SNavdeep Parhar 44644d6db4e0SNavdeep Parhar if (__predict_false(pidx == fl->sidx)) { 446554e4ee71SNavdeep Parhar fl->pidx = 0; 44664d6db4e0SNavdeep Parhar pidx = 0; 446754e4ee71SNavdeep Parhar sd = fl->sdesc; 446854e4ee71SNavdeep Parhar d = fl->desc; 446954e4ee71SNavdeep Parhar } 447046e1e307SNavdeep Parhar if (n < 8 || pidx == max_pidx) 44714d6db4e0SNavdeep Parhar break; 44724d6db4e0SNavdeep Parhar 44734d6db4e0SNavdeep Parhar if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 44744d6db4e0SNavdeep Parhar ring_fl_db(sc, fl); 44754d6db4e0SNavdeep Parhar } 447654e4ee71SNavdeep Parhar } 4477fb12416cSNavdeep Parhar 447846e1e307SNavdeep Parhar if ((fl->pidx >> 3) != fl->dbidx) 4479fb12416cSNavdeep Parhar ring_fl_db(sc, fl); 4480733b9277SNavdeep Parhar 4481733b9277SNavdeep Parhar return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 4482733b9277SNavdeep Parhar } 4483733b9277SNavdeep Parhar 4484733b9277SNavdeep Parhar /* 4485733b9277SNavdeep Parhar * Attempt to refill all starving freelists. 
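 * A freelist is put on the starving list when refill_fl() leaves it running
 * low; the callout below retries each one and reschedules itself every hz/5
 * ticks until no starving freelists remain.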
4486733b9277SNavdeep Parhar */ 4487733b9277SNavdeep Parhar static void 4488733b9277SNavdeep Parhar refill_sfl(void *arg) 4489733b9277SNavdeep Parhar { 4490733b9277SNavdeep Parhar struct adapter *sc = arg; 4491733b9277SNavdeep Parhar struct sge_fl *fl, *fl_temp; 4492733b9277SNavdeep Parhar 4493fe2ebb76SJohn Baldwin mtx_assert(&sc->sfl_lock, MA_OWNED); 4494733b9277SNavdeep Parhar TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 4495733b9277SNavdeep Parhar FL_LOCK(fl); 4496733b9277SNavdeep Parhar refill_fl(sc, fl, 64); 4497733b9277SNavdeep Parhar if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 4498733b9277SNavdeep Parhar TAILQ_REMOVE(&sc->sfl, fl, link); 4499733b9277SNavdeep Parhar fl->flags &= ~FL_STARVING; 4500733b9277SNavdeep Parhar } 4501733b9277SNavdeep Parhar FL_UNLOCK(fl); 4502733b9277SNavdeep Parhar } 4503733b9277SNavdeep Parhar 4504733b9277SNavdeep Parhar if (!TAILQ_EMPTY(&sc->sfl)) 4505733b9277SNavdeep Parhar callout_schedule(&sc->sfl_callout, hz / 5); 450654e4ee71SNavdeep Parhar } 450754e4ee71SNavdeep Parhar 450854e4ee71SNavdeep Parhar static int 450954e4ee71SNavdeep Parhar alloc_fl_sdesc(struct sge_fl *fl) 451054e4ee71SNavdeep Parhar { 451154e4ee71SNavdeep Parhar 45124d6db4e0SNavdeep Parhar fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, 451354e4ee71SNavdeep Parhar M_ZERO | M_WAITOK); 451454e4ee71SNavdeep Parhar 451554e4ee71SNavdeep Parhar return (0); 451654e4ee71SNavdeep Parhar } 451754e4ee71SNavdeep Parhar 451854e4ee71SNavdeep Parhar static void 45191458bff9SNavdeep Parhar free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 452054e4ee71SNavdeep Parhar { 452154e4ee71SNavdeep Parhar struct fl_sdesc *sd; 452238035ed6SNavdeep Parhar struct cluster_metadata *clm; 452354e4ee71SNavdeep Parhar int i; 452454e4ee71SNavdeep Parhar 452554e4ee71SNavdeep Parhar sd = fl->sdesc; 45264d6db4e0SNavdeep Parhar for (i = 0; i < fl->sidx * 8; i++, sd++) { 452738035ed6SNavdeep Parhar if (sd->cl == NULL) 452838035ed6SNavdeep Parhar continue; 452954e4ee71SNavdeep Parhar 453082eff304SNavdeep Parhar if (sd->nmbuf == 0) 453146e1e307SNavdeep Parhar uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl); 453246e1e307SNavdeep Parhar else if (fl->flags & FL_BUF_PACKING) { 453346e1e307SNavdeep Parhar clm = cl_metadata(sd); 453446e1e307SNavdeep Parhar if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 453546e1e307SNavdeep Parhar uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, 453646e1e307SNavdeep Parhar sd->cl); 453782eff304SNavdeep Parhar counter_u64_add(extfree_rels, 1); 453854e4ee71SNavdeep Parhar } 453946e1e307SNavdeep Parhar } 454038035ed6SNavdeep Parhar sd->cl = NULL; 454154e4ee71SNavdeep Parhar } 454254e4ee71SNavdeep Parhar 454354e4ee71SNavdeep Parhar free(fl->sdesc, M_CXGBE); 454454e4ee71SNavdeep Parhar fl->sdesc = NULL; 454554e4ee71SNavdeep Parhar } 454654e4ee71SNavdeep Parhar 45477951040fSNavdeep Parhar static inline void 45487951040fSNavdeep Parhar get_pkt_gl(struct mbuf *m, struct sglist *gl) 454954e4ee71SNavdeep Parhar { 45507951040fSNavdeep Parhar int rc; 455154e4ee71SNavdeep Parhar 45527951040fSNavdeep Parhar M_ASSERTPKTHDR(m); 455354e4ee71SNavdeep Parhar 45547951040fSNavdeep Parhar sglist_reset(gl); 45557951040fSNavdeep Parhar rc = sglist_append_mbuf(gl, m); 45567951040fSNavdeep Parhar if (__predict_false(rc != 0)) { 45577951040fSNavdeep Parhar panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " 45587951040fSNavdeep Parhar "with %d.", __func__, m, mbuf_nsegs(m), rc); 455954e4ee71SNavdeep Parhar } 456054e4ee71SNavdeep Parhar 45617951040fSNavdeep Parhar 
KASSERT(gl->sg_nseg == mbuf_nsegs(m), 45627951040fSNavdeep Parhar ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, 45637951040fSNavdeep Parhar mbuf_nsegs(m), gl->sg_nseg)); 45647951040fSNavdeep Parhar KASSERT(gl->sg_nseg > 0 && 45657951040fSNavdeep Parhar gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS), 45667951040fSNavdeep Parhar ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, 45677951040fSNavdeep Parhar gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)); 456854e4ee71SNavdeep Parhar } 456954e4ee71SNavdeep Parhar 457054e4ee71SNavdeep Parhar /* 45717951040fSNavdeep Parhar * len16 for a txpkt WR with a GL. Includes the firmware work request header. 457254e4ee71SNavdeep Parhar */ 45737951040fSNavdeep Parhar static inline u_int 45747951040fSNavdeep Parhar txpkt_len16(u_int nsegs, u_int tso) 45757951040fSNavdeep Parhar { 45767951040fSNavdeep Parhar u_int n; 45777951040fSNavdeep Parhar 45787951040fSNavdeep Parhar MPASS(nsegs > 0); 45797951040fSNavdeep Parhar 45807951040fSNavdeep Parhar nsegs--; /* first segment is part of ulptx_sgl */ 45817951040fSNavdeep Parhar n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + 45827951040fSNavdeep Parhar sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 45837951040fSNavdeep Parhar if (tso) 45847951040fSNavdeep Parhar n += sizeof(struct cpl_tx_pkt_lso_core); 45857951040fSNavdeep Parhar 45867951040fSNavdeep Parhar return (howmany(n, 16)); 45877951040fSNavdeep Parhar } 458854e4ee71SNavdeep Parhar 458954e4ee71SNavdeep Parhar /* 45906af45170SJohn Baldwin * len16 for a txpkt_vm WR with a GL. Includes the firmware work 45916af45170SJohn Baldwin * request header. 45926af45170SJohn Baldwin */ 45936af45170SJohn Baldwin static inline u_int 45946af45170SJohn Baldwin txpkt_vm_len16(u_int nsegs, u_int tso) 45956af45170SJohn Baldwin { 45966af45170SJohn Baldwin u_int n; 45976af45170SJohn Baldwin 45986af45170SJohn Baldwin MPASS(nsegs > 0); 45996af45170SJohn Baldwin 46006af45170SJohn Baldwin nsegs--; /* first segment is part of ulptx_sgl */ 46016af45170SJohn Baldwin n = sizeof(struct fw_eth_tx_pkt_vm_wr) + 46026af45170SJohn Baldwin sizeof(struct cpl_tx_pkt_core) + 46036af45170SJohn Baldwin sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 46046af45170SJohn Baldwin if (tso) 46056af45170SJohn Baldwin n += sizeof(struct cpl_tx_pkt_lso_core); 46066af45170SJohn Baldwin 46076af45170SJohn Baldwin return (howmany(n, 16)); 46086af45170SJohn Baldwin } 46096af45170SJohn Baldwin 46106af45170SJohn Baldwin /* 46117951040fSNavdeep Parhar * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work 46127951040fSNavdeep Parhar * request header. 
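 * A rough worked example, assuming the usual sizes (8B ulp_txpkt, 8B
 * ulptx_idata, 16B cpl_tx_pkt_core, 16B ulptx_sgl): nsegs = 3 works out to
 * 8 + 8 + 16 + 16 + 24 = 72 bytes, i.e. len16 = howmany(72, 16) = 5.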
46137951040fSNavdeep Parhar */ 46147951040fSNavdeep Parhar static inline u_int 46157951040fSNavdeep Parhar txpkts0_len16(u_int nsegs) 46167951040fSNavdeep Parhar { 46177951040fSNavdeep Parhar u_int n; 46187951040fSNavdeep Parhar 46197951040fSNavdeep Parhar MPASS(nsegs > 0); 46207951040fSNavdeep Parhar 46217951040fSNavdeep Parhar nsegs--; /* first segment is part of ulptx_sgl */ 46227951040fSNavdeep Parhar n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + 46237951040fSNavdeep Parhar sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 46247951040fSNavdeep Parhar 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 46257951040fSNavdeep Parhar 46267951040fSNavdeep Parhar return (howmany(n, 16)); 46277951040fSNavdeep Parhar } 46287951040fSNavdeep Parhar 46297951040fSNavdeep Parhar /* 46307951040fSNavdeep Parhar * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work 46317951040fSNavdeep Parhar * request header. 46327951040fSNavdeep Parhar */ 46337951040fSNavdeep Parhar static inline u_int 46347951040fSNavdeep Parhar txpkts1_len16(void) 46357951040fSNavdeep Parhar { 46367951040fSNavdeep Parhar u_int n; 46377951040fSNavdeep Parhar 46387951040fSNavdeep Parhar n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); 46397951040fSNavdeep Parhar 46407951040fSNavdeep Parhar return (howmany(n, 16)); 46417951040fSNavdeep Parhar } 46427951040fSNavdeep Parhar 46437951040fSNavdeep Parhar static inline u_int 46447951040fSNavdeep Parhar imm_payload(u_int ndesc) 46457951040fSNavdeep Parhar { 46467951040fSNavdeep Parhar u_int n; 46477951040fSNavdeep Parhar 46487951040fSNavdeep Parhar n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - 46497951040fSNavdeep Parhar sizeof(struct cpl_tx_pkt_core); 46507951040fSNavdeep Parhar 46517951040fSNavdeep Parhar return (n); 46527951040fSNavdeep Parhar } 46537951040fSNavdeep Parhar 4654c0236bd9SNavdeep Parhar static inline uint64_t 4655c0236bd9SNavdeep Parhar csum_to_ctrl(struct adapter *sc, struct mbuf *m) 4656c0236bd9SNavdeep Parhar { 4657c0236bd9SNavdeep Parhar uint64_t ctrl; 4658c0236bd9SNavdeep Parhar int csum_type; 4659c0236bd9SNavdeep Parhar 4660c0236bd9SNavdeep Parhar M_ASSERTPKTHDR(m); 4661c0236bd9SNavdeep Parhar 4662c0236bd9SNavdeep Parhar if (needs_hwcsum(m) == 0) 4663c0236bd9SNavdeep Parhar return (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); 4664c0236bd9SNavdeep Parhar 4665c0236bd9SNavdeep Parhar ctrl = 0; 4666c0236bd9SNavdeep Parhar if (needs_l3_csum(m) == 0) 4667c0236bd9SNavdeep Parhar ctrl |= F_TXPKT_IPCSUM_DIS; 4668c0236bd9SNavdeep Parhar switch (m->m_pkthdr.csum_flags & 4669c0236bd9SNavdeep Parhar (CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)) { 4670c0236bd9SNavdeep Parhar case CSUM_IP_TCP: 4671c0236bd9SNavdeep Parhar csum_type = TX_CSUM_TCPIP; 4672c0236bd9SNavdeep Parhar break; 4673c0236bd9SNavdeep Parhar case CSUM_IP_UDP: 4674c0236bd9SNavdeep Parhar csum_type = TX_CSUM_UDPIP; 4675c0236bd9SNavdeep Parhar break; 4676c0236bd9SNavdeep Parhar case CSUM_IP6_TCP: 4677c0236bd9SNavdeep Parhar csum_type = TX_CSUM_TCPIP6; 4678c0236bd9SNavdeep Parhar break; 4679c0236bd9SNavdeep Parhar case CSUM_IP6_UDP: 4680c0236bd9SNavdeep Parhar csum_type = TX_CSUM_UDPIP6; 4681c0236bd9SNavdeep Parhar break; 4682c0236bd9SNavdeep Parhar default: 4683c0236bd9SNavdeep Parhar /* needs_hwcsum told us that at least some hwcsum is needed. 
*/ 4684c0236bd9SNavdeep Parhar MPASS(ctrl == 0); 4685c0236bd9SNavdeep Parhar MPASS(m->m_pkthdr.csum_flags & CSUM_IP); 4686c0236bd9SNavdeep Parhar ctrl |= F_TXPKT_L4CSUM_DIS; 4687c0236bd9SNavdeep Parhar csum_type = TX_CSUM_IP; 4688c0236bd9SNavdeep Parhar break; 4689c0236bd9SNavdeep Parhar } 4690c0236bd9SNavdeep Parhar 4691c0236bd9SNavdeep Parhar MPASS(m->m_pkthdr.l2hlen > 0); 4692c0236bd9SNavdeep Parhar MPASS(m->m_pkthdr.l3hlen > 0); 4693c0236bd9SNavdeep Parhar ctrl |= V_TXPKT_CSUM_TYPE(csum_type) | 4694c0236bd9SNavdeep Parhar V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen); 4695c0236bd9SNavdeep Parhar if (chip_id(sc) <= CHELSIO_T5) 4696c0236bd9SNavdeep Parhar ctrl |= V_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN); 4697c0236bd9SNavdeep Parhar else 4698c0236bd9SNavdeep Parhar ctrl |= V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN); 4699c0236bd9SNavdeep Parhar 4700c0236bd9SNavdeep Parhar return (ctrl); 4701c0236bd9SNavdeep Parhar } 4702c0236bd9SNavdeep Parhar 47037951040fSNavdeep Parhar /* 47046af45170SJohn Baldwin * Write a VM txpkt WR for this packet to the hardware descriptors, update the 47056af45170SJohn Baldwin * software descriptor, and advance the pidx. It is guaranteed that enough 47066af45170SJohn Baldwin * descriptors are available. 47076af45170SJohn Baldwin * 47086af45170SJohn Baldwin * The return value is the # of hardware descriptors used. 47096af45170SJohn Baldwin */ 47106af45170SJohn Baldwin static u_int 4711d735920dSNavdeep Parhar write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0) 47126af45170SJohn Baldwin { 4713d735920dSNavdeep Parhar struct sge_eq *eq; 4714d735920dSNavdeep Parhar struct fw_eth_tx_pkt_vm_wr *wr; 47156af45170SJohn Baldwin struct tx_sdesc *txsd; 47166af45170SJohn Baldwin struct cpl_tx_pkt_core *cpl; 47176af45170SJohn Baldwin uint32_t ctrl; /* used in many unrelated places */ 47186af45170SJohn Baldwin uint64_t ctrl1; 4719c0236bd9SNavdeep Parhar int len16, ndesc, pktlen, nsegs; 47206af45170SJohn Baldwin caddr_t dst; 47216af45170SJohn Baldwin 47226af45170SJohn Baldwin TXQ_LOCK_ASSERT_OWNED(txq); 47236af45170SJohn Baldwin M_ASSERTPKTHDR(m0); 47246af45170SJohn Baldwin 47256af45170SJohn Baldwin len16 = mbuf_len16(m0); 47266af45170SJohn Baldwin nsegs = mbuf_nsegs(m0); 47276af45170SJohn Baldwin pktlen = m0->m_pkthdr.len; 47286af45170SJohn Baldwin ctrl = sizeof(struct cpl_tx_pkt_core); 47296af45170SJohn Baldwin if (needs_tso(m0)) 47306af45170SJohn Baldwin ctrl += sizeof(struct cpl_tx_pkt_lso_core); 47310cadedfcSNavdeep Parhar ndesc = tx_len16_to_desc(len16); 47326af45170SJohn Baldwin 47336af45170SJohn Baldwin /* Firmware work request header */ 4734d735920dSNavdeep Parhar eq = &txq->eq; 4735d735920dSNavdeep Parhar wr = (void *)&eq->desc[eq->pidx]; 47366af45170SJohn Baldwin wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | 47376af45170SJohn Baldwin V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 47386af45170SJohn Baldwin 47396af45170SJohn Baldwin ctrl = V_FW_WR_LEN16(len16); 47406af45170SJohn Baldwin wr->equiq_to_len16 = htobe32(ctrl); 47416af45170SJohn Baldwin wr->r3[0] = 0; 47426af45170SJohn Baldwin wr->r3[1] = 0; 47436af45170SJohn Baldwin 47446af45170SJohn Baldwin /* 47456af45170SJohn Baldwin * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci. 47466af45170SJohn Baldwin * vlantci is ignored unless the ethtype is 0x8100, so it's 47476af45170SJohn Baldwin * simpler to always copy it rather than making it 47486af45170SJohn Baldwin * conditional. 
Also, it seems that we do not have to set 47496af45170SJohn Baldwin * vlantci or fake the ethtype when doing VLAN tag insertion. 47506af45170SJohn Baldwin */ 47516af45170SJohn Baldwin m_copydata(m0, 0, sizeof(struct ether_header) + 2, wr->ethmacdst); 47526af45170SJohn Baldwin 47536af45170SJohn Baldwin if (needs_tso(m0)) { 47546af45170SJohn Baldwin struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 47556af45170SJohn Baldwin 47566af45170SJohn Baldwin KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 47576af45170SJohn Baldwin m0->m_pkthdr.l4hlen > 0, 47586af45170SJohn Baldwin ("%s: mbuf %p needs TSO but missing header lengths", 47596af45170SJohn Baldwin __func__, m0)); 47606af45170SJohn Baldwin 47616af45170SJohn Baldwin ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 4762c0236bd9SNavdeep Parhar F_LSO_LAST_SLICE | V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - 4763c0236bd9SNavdeep Parhar ETHER_HDR_LEN) >> 2) | 4764c0236bd9SNavdeep Parhar V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | 4765c0236bd9SNavdeep Parhar V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 47666af45170SJohn Baldwin if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 47676af45170SJohn Baldwin ctrl |= F_LSO_IPV6; 47686af45170SJohn Baldwin 47696af45170SJohn Baldwin lso->lso_ctrl = htobe32(ctrl); 47706af45170SJohn Baldwin lso->ipid_ofst = htobe16(0); 47716af45170SJohn Baldwin lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 47726af45170SJohn Baldwin lso->seqno_offset = htobe32(0); 47736af45170SJohn Baldwin lso->len = htobe32(pktlen); 47746af45170SJohn Baldwin 47756af45170SJohn Baldwin cpl = (void *)(lso + 1); 47766af45170SJohn Baldwin 47776af45170SJohn Baldwin txq->tso_wrs++; 4778c0236bd9SNavdeep Parhar } else 47796af45170SJohn Baldwin cpl = (void *)(wr + 1); 47806af45170SJohn Baldwin 47816af45170SJohn Baldwin /* Checksum offload */ 4782c0236bd9SNavdeep Parhar ctrl1 = csum_to_ctrl(sc, m0); 4783c0236bd9SNavdeep Parhar if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 47846af45170SJohn Baldwin txq->txcsum++; /* some hardware assistance provided */ 47856af45170SJohn Baldwin 47866af45170SJohn Baldwin /* VLAN tag insertion */ 47876af45170SJohn Baldwin if (needs_vlan_insertion(m0)) { 47886af45170SJohn Baldwin ctrl1 |= F_TXPKT_VLAN_VLD | 47896af45170SJohn Baldwin V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 47906af45170SJohn Baldwin txq->vlan_insertion++; 47916af45170SJohn Baldwin } 47926af45170SJohn Baldwin 47936af45170SJohn Baldwin /* CPL header */ 47946af45170SJohn Baldwin cpl->ctrl0 = txq->cpl_ctrl0; 47956af45170SJohn Baldwin cpl->pack = 0; 47966af45170SJohn Baldwin cpl->len = htobe16(pktlen); 47976af45170SJohn Baldwin cpl->ctrl1 = htobe64(ctrl1); 47986af45170SJohn Baldwin 47996af45170SJohn Baldwin /* SGL */ 48006af45170SJohn Baldwin dst = (void *)(cpl + 1); 48016af45170SJohn Baldwin 48026af45170SJohn Baldwin /* 48036af45170SJohn Baldwin * A packet using TSO will use up an entire descriptor for the 48046af45170SJohn Baldwin * firmware work request header, LSO CPL, and TX_PKT_XT CPL. 48056af45170SJohn Baldwin * If this descriptor is the last descriptor in the ring, wrap 48066af45170SJohn Baldwin * around to the front of the ring explicitly for the start of 48076af45170SJohn Baldwin * the sgl. 
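 * (Assuming the usual layouts, that is a 32B VM WR header plus a 16B LSO
 * CPL plus a 16B TX_PKT CPL: exactly one 64B hardware descriptor.)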
48086af45170SJohn Baldwin */ 48096af45170SJohn Baldwin if (dst == (void *)&eq->desc[eq->sidx]) { 48106af45170SJohn Baldwin dst = (void *)&eq->desc[0]; 48116af45170SJohn Baldwin write_gl_to_txd(txq, m0, &dst, 0); 48126af45170SJohn Baldwin } else 48136af45170SJohn Baldwin write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 48146af45170SJohn Baldwin txq->sgl_wrs++; 48156af45170SJohn Baldwin txq->txpkt_wrs++; 48166af45170SJohn Baldwin 48176af45170SJohn Baldwin txsd = &txq->sdesc[eq->pidx]; 48186af45170SJohn Baldwin txsd->m = m0; 48196af45170SJohn Baldwin txsd->desc_used = ndesc; 48206af45170SJohn Baldwin 48216af45170SJohn Baldwin return (ndesc); 48226af45170SJohn Baldwin } 48236af45170SJohn Baldwin 48246af45170SJohn Baldwin /* 48255cdaef71SJohn Baldwin * Write a raw WR to the hardware descriptors, update the software 48265cdaef71SJohn Baldwin * descriptor, and advance the pidx. It is guaranteed that enough 48275cdaef71SJohn Baldwin * descriptors are available. 48285cdaef71SJohn Baldwin * 48295cdaef71SJohn Baldwin * The return value is the # of hardware descriptors used. 48305cdaef71SJohn Baldwin */ 48315cdaef71SJohn Baldwin static u_int 48325cdaef71SJohn Baldwin write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available) 48335cdaef71SJohn Baldwin { 48345cdaef71SJohn Baldwin struct sge_eq *eq = &txq->eq; 48355cdaef71SJohn Baldwin struct tx_sdesc *txsd; 48365cdaef71SJohn Baldwin struct mbuf *m; 48375cdaef71SJohn Baldwin caddr_t dst; 48385cdaef71SJohn Baldwin int len16, ndesc; 48395cdaef71SJohn Baldwin 48405cdaef71SJohn Baldwin len16 = mbuf_len16(m0); 48410cadedfcSNavdeep Parhar ndesc = tx_len16_to_desc(len16); 48425cdaef71SJohn Baldwin MPASS(ndesc <= available); 48435cdaef71SJohn Baldwin 48445cdaef71SJohn Baldwin dst = wr; 48455cdaef71SJohn Baldwin for (m = m0; m != NULL; m = m->m_next) 48465cdaef71SJohn Baldwin copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 48475cdaef71SJohn Baldwin 48485cdaef71SJohn Baldwin txq->raw_wrs++; 48495cdaef71SJohn Baldwin 48505cdaef71SJohn Baldwin txsd = &txq->sdesc[eq->pidx]; 48515cdaef71SJohn Baldwin txsd->m = m0; 48525cdaef71SJohn Baldwin txsd->desc_used = ndesc; 48535cdaef71SJohn Baldwin 48545cdaef71SJohn Baldwin return (ndesc); 48555cdaef71SJohn Baldwin } 48565cdaef71SJohn Baldwin 48575cdaef71SJohn Baldwin /* 48587951040fSNavdeep Parhar * Write a txpkt WR for this packet to the hardware descriptors, update the 48597951040fSNavdeep Parhar * software descriptor, and advance the pidx. It is guaranteed that enough 48607951040fSNavdeep Parhar * descriptors are available. 486154e4ee71SNavdeep Parhar * 48627951040fSNavdeep Parhar * The return value is the # of hardware descriptors used. 
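 * Frames in mapped storage whose payload fits in two descriptors' worth of
 * immediate space are copied directly into the WR; everything else is
 * written out as an SGL.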
486354e4ee71SNavdeep Parhar */ 48647951040fSNavdeep Parhar static u_int 4865d735920dSNavdeep Parhar write_txpkt_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0, 4866d735920dSNavdeep Parhar u_int available) 486754e4ee71SNavdeep Parhar { 4868d735920dSNavdeep Parhar struct sge_eq *eq; 4869d735920dSNavdeep Parhar struct fw_eth_tx_pkt_wr *wr; 48707951040fSNavdeep Parhar struct tx_sdesc *txsd; 487154e4ee71SNavdeep Parhar struct cpl_tx_pkt_core *cpl; 487254e4ee71SNavdeep Parhar uint32_t ctrl; /* used in many unrelated places */ 487354e4ee71SNavdeep Parhar uint64_t ctrl1; 48747951040fSNavdeep Parhar int len16, ndesc, pktlen, nsegs; 487554e4ee71SNavdeep Parhar caddr_t dst; 487654e4ee71SNavdeep Parhar 487754e4ee71SNavdeep Parhar TXQ_LOCK_ASSERT_OWNED(txq); 48787951040fSNavdeep Parhar M_ASSERTPKTHDR(m0); 487954e4ee71SNavdeep Parhar 48807951040fSNavdeep Parhar len16 = mbuf_len16(m0); 48817951040fSNavdeep Parhar nsegs = mbuf_nsegs(m0); 48827951040fSNavdeep Parhar pktlen = m0->m_pkthdr.len; 488354e4ee71SNavdeep Parhar ctrl = sizeof(struct cpl_tx_pkt_core); 48847951040fSNavdeep Parhar if (needs_tso(m0)) 48852a5f6b0eSNavdeep Parhar ctrl += sizeof(struct cpl_tx_pkt_lso_core); 4886d76bbe17SJohn Baldwin else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) && 4887d76bbe17SJohn Baldwin available >= 2) { 48887951040fSNavdeep Parhar /* Immediate data. Recalculate len16 and set nsegs to 0. */ 4889ecb79ca4SNavdeep Parhar ctrl += pktlen; 48907951040fSNavdeep Parhar len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + 48917951040fSNavdeep Parhar sizeof(struct cpl_tx_pkt_core) + pktlen, 16); 48927951040fSNavdeep Parhar nsegs = 0; 489354e4ee71SNavdeep Parhar } 48940cadedfcSNavdeep Parhar ndesc = tx_len16_to_desc(len16); 48957951040fSNavdeep Parhar MPASS(ndesc <= available); 489654e4ee71SNavdeep Parhar 489754e4ee71SNavdeep Parhar /* Firmware work request header */ 4898d735920dSNavdeep Parhar eq = &txq->eq; 4899d735920dSNavdeep Parhar wr = (void *)&eq->desc[eq->pidx]; 490054e4ee71SNavdeep Parhar wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 4901733b9277SNavdeep Parhar V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 49026b49a4ecSNavdeep Parhar 49037951040fSNavdeep Parhar ctrl = V_FW_WR_LEN16(len16); 490454e4ee71SNavdeep Parhar wr->equiq_to_len16 = htobe32(ctrl); 490554e4ee71SNavdeep Parhar wr->r3 = 0; 490654e4ee71SNavdeep Parhar 49077951040fSNavdeep Parhar if (needs_tso(m0)) { 49082a5f6b0eSNavdeep Parhar struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 49097951040fSNavdeep Parhar 49107951040fSNavdeep Parhar KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 49117951040fSNavdeep Parhar m0->m_pkthdr.l4hlen > 0, 49127951040fSNavdeep Parhar ("%s: mbuf %p needs TSO but missing header lengths", 49137951040fSNavdeep Parhar __func__, m0)); 491454e4ee71SNavdeep Parhar 491554e4ee71SNavdeep Parhar ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 4916c0236bd9SNavdeep Parhar F_LSO_LAST_SLICE | V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - 4917c0236bd9SNavdeep Parhar ETHER_HDR_LEN) >> 2) | 4918c0236bd9SNavdeep Parhar V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | 4919c0236bd9SNavdeep Parhar V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 49207951040fSNavdeep Parhar if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4921a1ea9a82SNavdeep Parhar ctrl |= F_LSO_IPV6; 492254e4ee71SNavdeep Parhar 492354e4ee71SNavdeep Parhar lso->lso_ctrl = htobe32(ctrl); 492454e4ee71SNavdeep Parhar lso->ipid_ofst = htobe16(0); 49257951040fSNavdeep Parhar lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 492654e4ee71SNavdeep Parhar 
lso->seqno_offset = htobe32(0); 4927ecb79ca4SNavdeep Parhar lso->len = htobe32(pktlen); 492854e4ee71SNavdeep Parhar 492954e4ee71SNavdeep Parhar cpl = (void *)(lso + 1); 493054e4ee71SNavdeep Parhar 493154e4ee71SNavdeep Parhar txq->tso_wrs++; 493254e4ee71SNavdeep Parhar } else 493354e4ee71SNavdeep Parhar cpl = (void *)(wr + 1); 493454e4ee71SNavdeep Parhar 493554e4ee71SNavdeep Parhar /* Checksum offload */ 4936c0236bd9SNavdeep Parhar ctrl1 = csum_to_ctrl(sc, m0); 4937c0236bd9SNavdeep Parhar if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 493854e4ee71SNavdeep Parhar txq->txcsum++; /* some hardware assistance provided */ 493954e4ee71SNavdeep Parhar 494054e4ee71SNavdeep Parhar /* VLAN tag insertion */ 49417951040fSNavdeep Parhar if (needs_vlan_insertion(m0)) { 49427951040fSNavdeep Parhar ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 494354e4ee71SNavdeep Parhar txq->vlan_insertion++; 494454e4ee71SNavdeep Parhar } 494554e4ee71SNavdeep Parhar 494654e4ee71SNavdeep Parhar /* CPL header */ 49477951040fSNavdeep Parhar cpl->ctrl0 = txq->cpl_ctrl0; 494854e4ee71SNavdeep Parhar cpl->pack = 0; 4949ecb79ca4SNavdeep Parhar cpl->len = htobe16(pktlen); 495054e4ee71SNavdeep Parhar cpl->ctrl1 = htobe64(ctrl1); 495154e4ee71SNavdeep Parhar 495254e4ee71SNavdeep Parhar /* SGL */ 495354e4ee71SNavdeep Parhar dst = (void *)(cpl + 1); 49547951040fSNavdeep Parhar if (nsegs > 0) { 49557951040fSNavdeep Parhar 49567951040fSNavdeep Parhar write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 495754e4ee71SNavdeep Parhar txq->sgl_wrs++; 495854e4ee71SNavdeep Parhar } else { 49597951040fSNavdeep Parhar struct mbuf *m; 49607951040fSNavdeep Parhar 49617951040fSNavdeep Parhar for (m = m0; m != NULL; m = m->m_next) { 496254e4ee71SNavdeep Parhar copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 4963ecb79ca4SNavdeep Parhar #ifdef INVARIANTS 4964ecb79ca4SNavdeep Parhar pktlen -= m->m_len; 4965ecb79ca4SNavdeep Parhar #endif 496654e4ee71SNavdeep Parhar } 4967ecb79ca4SNavdeep Parhar #ifdef INVARIANTS 4968ecb79ca4SNavdeep Parhar KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 4969ecb79ca4SNavdeep Parhar #endif 49707951040fSNavdeep Parhar txq->imm_wrs++; 497154e4ee71SNavdeep Parhar } 497254e4ee71SNavdeep Parhar 497354e4ee71SNavdeep Parhar txq->txpkt_wrs++; 497454e4ee71SNavdeep Parhar 4975f7dfe243SNavdeep Parhar txsd = &txq->sdesc[eq->pidx]; 49767951040fSNavdeep Parhar txsd->m = m0; 497754e4ee71SNavdeep Parhar txsd->desc_used = ndesc; 497854e4ee71SNavdeep Parhar 49797951040fSNavdeep Parhar return (ndesc); 498054e4ee71SNavdeep Parhar } 498154e4ee71SNavdeep Parhar 4982d735920dSNavdeep Parhar static inline bool 4983d735920dSNavdeep Parhar cmp_l2hdr(struct txpkts *txp, struct mbuf *m) 498454e4ee71SNavdeep Parhar { 4985d735920dSNavdeep Parhar int len; 49867951040fSNavdeep Parhar 4987d735920dSNavdeep Parhar MPASS(txp->npkt > 0); 4988d735920dSNavdeep Parhar MPASS(m->m_len >= 16); /* type1 implies 1 GL with all of the frame. 
*/ 49897951040fSNavdeep Parhar 4990d735920dSNavdeep Parhar if (txp->ethtype == be16toh(ETHERTYPE_VLAN)) 4991d735920dSNavdeep Parhar len = sizeof(struct ether_vlan_header); 4992d735920dSNavdeep Parhar else 4993d735920dSNavdeep Parhar len = sizeof(struct ether_header); 4994d735920dSNavdeep Parhar 4995d735920dSNavdeep Parhar return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0); 49967951040fSNavdeep Parhar } 49977951040fSNavdeep Parhar 4998d735920dSNavdeep Parhar static inline void 4999d735920dSNavdeep Parhar save_l2hdr(struct txpkts *txp, struct mbuf *m) 5000d735920dSNavdeep Parhar { 5001d735920dSNavdeep Parhar MPASS(m->m_len >= 16); /* type1 implies 1 GL with all of the frame. */ 50027951040fSNavdeep Parhar 5003d735920dSNavdeep Parhar memcpy(&txp->ethmacdst[0], mtod(m, const void *), 16); 5004d735920dSNavdeep Parhar } 50057951040fSNavdeep Parhar 5006d735920dSNavdeep Parhar static int 5007d735920dSNavdeep Parhar add_to_txpkts_vf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m, 5008d735920dSNavdeep Parhar int avail, bool *send) 5009d735920dSNavdeep Parhar { 5010d735920dSNavdeep Parhar struct txpkts *txp = &txq->txp; 5011d735920dSNavdeep Parhar 5012d735920dSNavdeep Parhar MPASS(sc->flags & IS_VF); 5013d735920dSNavdeep Parhar 5014d735920dSNavdeep Parhar /* Cannot have TSO and coalesce at the same time. */ 5015d735920dSNavdeep Parhar if (cannot_use_txpkts(m)) { 5016d735920dSNavdeep Parhar cannot_coalesce: 5017d735920dSNavdeep Parhar *send = txp->npkt > 0; 5018d735920dSNavdeep Parhar return (EINVAL); 5019d735920dSNavdeep Parhar } 5020d735920dSNavdeep Parhar 5021d735920dSNavdeep Parhar /* VF allows coalescing of type 1 (1 GL) only */ 5022d735920dSNavdeep Parhar if (mbuf_nsegs(m) > 1) 5023d735920dSNavdeep Parhar goto cannot_coalesce; 5024d735920dSNavdeep Parhar 5025d735920dSNavdeep Parhar *send = false; 5026d735920dSNavdeep Parhar if (txp->npkt > 0) { 5027d735920dSNavdeep Parhar MPASS(tx_len16_to_desc(txp->len16) <= avail); 5028d735920dSNavdeep Parhar MPASS(txp->npkt < txp->max_npkt); 5029d735920dSNavdeep Parhar MPASS(txp->wr_type == 1); /* VF supports type 1 only */ 5030d735920dSNavdeep Parhar 5031d735920dSNavdeep Parhar if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) { 5032d735920dSNavdeep Parhar retry_after_send: 5033d735920dSNavdeep Parhar *send = true; 5034d735920dSNavdeep Parhar return (EAGAIN); 5035d735920dSNavdeep Parhar } 5036d735920dSNavdeep Parhar if (m->m_pkthdr.len + txp->plen > 65535) 5037d735920dSNavdeep Parhar goto retry_after_send; 5038d735920dSNavdeep Parhar if (cmp_l2hdr(txp, m)) 5039d735920dSNavdeep Parhar goto retry_after_send; 5040d735920dSNavdeep Parhar 5041d735920dSNavdeep Parhar txp->len16 += txpkts1_len16(); 5042d735920dSNavdeep Parhar txp->plen += m->m_pkthdr.len; 5043d735920dSNavdeep Parhar txp->mb[txp->npkt++] = m; 5044d735920dSNavdeep Parhar if (txp->npkt == txp->max_npkt) 5045d735920dSNavdeep Parhar *send = true; 5046d735920dSNavdeep Parhar } else { 5047d735920dSNavdeep Parhar txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) + 5048d735920dSNavdeep Parhar txpkts1_len16(); 5049d735920dSNavdeep Parhar if (tx_len16_to_desc(txp->len16) > avail) 5050d735920dSNavdeep Parhar goto cannot_coalesce; 5051d735920dSNavdeep Parhar txp->npkt = 1; 5052d735920dSNavdeep Parhar txp->wr_type = 1; 5053d735920dSNavdeep Parhar txp->plen = m->m_pkthdr.len; 5054d735920dSNavdeep Parhar txp->mb[0] = m; 5055d735920dSNavdeep Parhar save_l2hdr(txp, m); 5056d735920dSNavdeep Parhar } 50577951040fSNavdeep Parhar return (0); 50587951040fSNavdeep Parhar } 
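/*
 * Both add_to_txpkts_vf() above and add_to_txpkts_pf() below share the same
 * contract with their caller.  A minimal sketch of the expected usage; the
 * flush/retry structure is illustrative, not the driver's actual transmit
 * loop:
 *
 *	rc = add_to_txpkts_vf(sc, txq, m, avail, &send);
 *	if (rc == 0) {
 *		mbuf was coalesced; if send is set the WR is full and
 *		should be flushed now with write_txpkts_vm_wr().
 *	} else if (rc == EAGAIN) {
 *		mbuf did not fit: flush the open WR, then retry the mbuf.
 *	} else {
 *		EINVAL: mbuf cannot be coalesced at all.  Flush any open WR
 *		(send is set iff one exists) and transmit the mbuf by itself.
 *	}
 */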
50597951040fSNavdeep Parhar 50607951040fSNavdeep Parhar static int 5061d735920dSNavdeep Parhar add_to_txpkts_pf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m, 5062d735920dSNavdeep Parhar int avail, bool *send) 50637951040fSNavdeep Parhar { 5064d735920dSNavdeep Parhar struct txpkts *txp = &txq->txp; 5065d735920dSNavdeep Parhar int nsegs; 5066d735920dSNavdeep Parhar 5067d735920dSNavdeep Parhar MPASS(!(sc->flags & IS_VF)); 5068d735920dSNavdeep Parhar 5069d735920dSNavdeep Parhar /* Cannot have TSO and coalesce at the same time. */ 5070d735920dSNavdeep Parhar if (cannot_use_txpkts(m)) { 5071d735920dSNavdeep Parhar cannot_coalesce: 5072d735920dSNavdeep Parhar *send = txp->npkt > 0; 5073d735920dSNavdeep Parhar return (EINVAL); 5074d735920dSNavdeep Parhar } 5075d735920dSNavdeep Parhar 5076d735920dSNavdeep Parhar *send = false; 5077d735920dSNavdeep Parhar nsegs = mbuf_nsegs(m); 5078d735920dSNavdeep Parhar if (txp->npkt == 0) { 5079d735920dSNavdeep Parhar if (m->m_pkthdr.len > 65535) 5080d735920dSNavdeep Parhar goto cannot_coalesce; 5081d735920dSNavdeep Parhar if (nsegs > 1) { 5082d735920dSNavdeep Parhar txp->wr_type = 0; 5083d735920dSNavdeep Parhar txp->len16 = 5084d735920dSNavdeep Parhar howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + 5085d735920dSNavdeep Parhar txpkts0_len16(nsegs); 5086d735920dSNavdeep Parhar } else { 5087d735920dSNavdeep Parhar txp->wr_type = 1; 5088d735920dSNavdeep Parhar txp->len16 = 5089d735920dSNavdeep Parhar howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + 5090d735920dSNavdeep Parhar txpkts1_len16(); 5091d735920dSNavdeep Parhar } 5092d735920dSNavdeep Parhar if (tx_len16_to_desc(txp->len16) > avail) 5093d735920dSNavdeep Parhar goto cannot_coalesce; 5094d735920dSNavdeep Parhar txp->npkt = 1; 5095d735920dSNavdeep Parhar txp->plen = m->m_pkthdr.len; 5096d735920dSNavdeep Parhar txp->mb[0] = m; 5097d735920dSNavdeep Parhar } else { 5098d735920dSNavdeep Parhar MPASS(tx_len16_to_desc(txp->len16) <= avail); 5099d735920dSNavdeep Parhar MPASS(txp->npkt < txp->max_npkt); 5100d735920dSNavdeep Parhar 5101d735920dSNavdeep Parhar if (m->m_pkthdr.len + txp->plen > 65535) { 5102d735920dSNavdeep Parhar retry_after_send: 5103d735920dSNavdeep Parhar *send = true; 5104d735920dSNavdeep Parhar return (EAGAIN); 5105d735920dSNavdeep Parhar } 51067951040fSNavdeep Parhar 51077951040fSNavdeep Parhar MPASS(txp->wr_type == 0 || txp->wr_type == 1); 5108d735920dSNavdeep Parhar if (txp->wr_type == 0) { 5109d735920dSNavdeep Parhar if (tx_len16_to_desc(txp->len16 + 5110d735920dSNavdeep Parhar txpkts0_len16(nsegs)) > min(avail, SGE_MAX_WR_NDESC)) 5111d735920dSNavdeep Parhar goto retry_after_send; 5112d735920dSNavdeep Parhar txp->len16 += txpkts0_len16(nsegs); 5113d735920dSNavdeep Parhar } else { 5114d735920dSNavdeep Parhar if (nsegs != 1) 5115d735920dSNavdeep Parhar goto retry_after_send; 5116d735920dSNavdeep Parhar if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > 5117d735920dSNavdeep Parhar avail) 5118d735920dSNavdeep Parhar goto retry_after_send; 5119d735920dSNavdeep Parhar txp->len16 += txpkts1_len16(); 5120d735920dSNavdeep Parhar } 51217951040fSNavdeep Parhar 5122d735920dSNavdeep Parhar txp->plen += m->m_pkthdr.len; 5123d735920dSNavdeep Parhar txp->mb[txp->npkt++] = m; 5124d735920dSNavdeep Parhar if (txp->npkt == txp->max_npkt) 5125d735920dSNavdeep Parhar *send = true; 5126d735920dSNavdeep Parhar } 51277951040fSNavdeep Parhar return (0); 51287951040fSNavdeep Parhar } 51297951040fSNavdeep Parhar 51307951040fSNavdeep Parhar /* 51317951040fSNavdeep Parhar * Write a txpkts WR for the packets in 
txp to the hardware descriptors, update 51327951040fSNavdeep Parhar * the software descriptor, and advance the pidx. It is guaranteed that enough 51337951040fSNavdeep Parhar * descriptors are available. 51347951040fSNavdeep Parhar * 51357951040fSNavdeep Parhar * The return value is the # of hardware descriptors used. 51367951040fSNavdeep Parhar */ 51377951040fSNavdeep Parhar static u_int 5138d735920dSNavdeep Parhar write_txpkts_wr(struct adapter *sc, struct sge_txq *txq) 51397951040fSNavdeep Parhar { 5140d735920dSNavdeep Parhar const struct txpkts *txp = &txq->txp; 51417951040fSNavdeep Parhar struct sge_eq *eq = &txq->eq; 5142d735920dSNavdeep Parhar struct fw_eth_tx_pkts_wr *wr; 51437951040fSNavdeep Parhar struct tx_sdesc *txsd; 51447951040fSNavdeep Parhar struct cpl_tx_pkt_core *cpl; 51457951040fSNavdeep Parhar uint64_t ctrl1; 5146d735920dSNavdeep Parhar int ndesc, i, checkwrap; 5147d735920dSNavdeep Parhar struct mbuf *m, *last; 51487951040fSNavdeep Parhar void *flitp; 51497951040fSNavdeep Parhar 51507951040fSNavdeep Parhar TXQ_LOCK_ASSERT_OWNED(txq); 51517951040fSNavdeep Parhar MPASS(txp->npkt > 0); 51527951040fSNavdeep Parhar MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 51537951040fSNavdeep Parhar 5154d735920dSNavdeep Parhar wr = (void *)&eq->desc[eq->pidx]; 51557951040fSNavdeep Parhar wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 5156d735920dSNavdeep Parhar wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); 51577951040fSNavdeep Parhar wr->plen = htobe16(txp->plen); 51587951040fSNavdeep Parhar wr->npkt = txp->npkt; 51597951040fSNavdeep Parhar wr->r3 = 0; 51607951040fSNavdeep Parhar wr->type = txp->wr_type; 51617951040fSNavdeep Parhar flitp = wr + 1; 51627951040fSNavdeep Parhar 51637951040fSNavdeep Parhar /* 51647951040fSNavdeep Parhar * At this point we are 16B into a hardware descriptor. If checkwrap is 51657951040fSNavdeep Parhar * set then we know the WR is going to wrap around somewhere. We'll 51667951040fSNavdeep Parhar * check for that at appropriate points. 
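 * (Both a packet's CPL header and the start of its SGL can land exactly on
 * the end of the ring, hence the wrap checks before each is written.)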
51677951040fSNavdeep Parhar */ 5168d735920dSNavdeep Parhar ndesc = tx_len16_to_desc(txp->len16); 5169d735920dSNavdeep Parhar last = NULL; 51707951040fSNavdeep Parhar checkwrap = eq->sidx - ndesc < eq->pidx; 5171d735920dSNavdeep Parhar for (i = 0; i < txp->npkt; i++) { 5172d735920dSNavdeep Parhar m = txp->mb[i]; 51737951040fSNavdeep Parhar if (txp->wr_type == 0) { 517454e4ee71SNavdeep Parhar struct ulp_txpkt *ulpmc; 517554e4ee71SNavdeep Parhar struct ulptx_idata *ulpsc; 517654e4ee71SNavdeep Parhar 51777951040fSNavdeep Parhar /* ULP master command */ 51787951040fSNavdeep Parhar ulpmc = flitp; 51797951040fSNavdeep Parhar ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 51807951040fSNavdeep Parhar V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); 5181d735920dSNavdeep Parhar ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m))); 518254e4ee71SNavdeep Parhar 51837951040fSNavdeep Parhar /* ULP subcommand */ 51847951040fSNavdeep Parhar ulpsc = (void *)(ulpmc + 1); 51857951040fSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 51867951040fSNavdeep Parhar F_ULP_TX_SC_MORE); 51877951040fSNavdeep Parhar ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 51887951040fSNavdeep Parhar 51897951040fSNavdeep Parhar cpl = (void *)(ulpsc + 1); 51907951040fSNavdeep Parhar if (checkwrap && 51917951040fSNavdeep Parhar (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) 51927951040fSNavdeep Parhar cpl = (void *)&eq->desc[0]; 51937951040fSNavdeep Parhar } else { 51947951040fSNavdeep Parhar cpl = flitp; 51957951040fSNavdeep Parhar } 519654e4ee71SNavdeep Parhar 519754e4ee71SNavdeep Parhar /* Checksum offload */ 5198c0236bd9SNavdeep Parhar ctrl1 = csum_to_ctrl(sc, m); 5199c0236bd9SNavdeep Parhar if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 520054e4ee71SNavdeep Parhar txq->txcsum++; /* some hardware assistance provided */ 520154e4ee71SNavdeep Parhar 520254e4ee71SNavdeep Parhar /* VLAN tag insertion */ 52037951040fSNavdeep Parhar if (needs_vlan_insertion(m)) { 52047951040fSNavdeep Parhar ctrl1 |= F_TXPKT_VLAN_VLD | 52057951040fSNavdeep Parhar V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 520654e4ee71SNavdeep Parhar txq->vlan_insertion++; 520754e4ee71SNavdeep Parhar } 520854e4ee71SNavdeep Parhar 52097951040fSNavdeep Parhar /* CPL header */ 52107951040fSNavdeep Parhar cpl->ctrl0 = txq->cpl_ctrl0; 521154e4ee71SNavdeep Parhar cpl->pack = 0; 521254e4ee71SNavdeep Parhar cpl->len = htobe16(m->m_pkthdr.len); 52137951040fSNavdeep Parhar cpl->ctrl1 = htobe64(ctrl1); 521454e4ee71SNavdeep Parhar 52157951040fSNavdeep Parhar flitp = cpl + 1; 52167951040fSNavdeep Parhar if (checkwrap && 52177951040fSNavdeep Parhar (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 52187951040fSNavdeep Parhar flitp = (void *)&eq->desc[0]; 521954e4ee71SNavdeep Parhar 52207951040fSNavdeep Parhar write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap); 522154e4ee71SNavdeep Parhar 5222d735920dSNavdeep Parhar if (last != NULL) 5223d735920dSNavdeep Parhar last->m_nextpkt = m; 5224d735920dSNavdeep Parhar last = m; 52257951040fSNavdeep Parhar } 52267951040fSNavdeep Parhar 5227d735920dSNavdeep Parhar txq->sgl_wrs++; 5228a59a1477SNavdeep Parhar if (txp->wr_type == 0) { 5229a59a1477SNavdeep Parhar txq->txpkts0_pkts += txp->npkt; 5230a59a1477SNavdeep Parhar txq->txpkts0_wrs++; 5231a59a1477SNavdeep Parhar } else { 5232a59a1477SNavdeep Parhar txq->txpkts1_pkts += txp->npkt; 5233a59a1477SNavdeep Parhar txq->txpkts1_wrs++; 5234a59a1477SNavdeep Parhar } 5235a59a1477SNavdeep Parhar 52367951040fSNavdeep Parhar txsd = &txq->sdesc[eq->pidx]; 
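	/*
	 * The mbufs were chained via m_nextpkt above, so recording mb[0]
	 * alone is enough for reclaim_tx_descs() to free the whole batch.
	 */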
5237d735920dSNavdeep Parhar txsd->m = txp->mb[0]; 5238d735920dSNavdeep Parhar txsd->desc_used = ndesc; 5239d735920dSNavdeep Parhar 5240d735920dSNavdeep Parhar return (ndesc); 5241d735920dSNavdeep Parhar } 5242d735920dSNavdeep Parhar 5243d735920dSNavdeep Parhar static u_int 5244d735920dSNavdeep Parhar write_txpkts_vm_wr(struct adapter *sc, struct sge_txq *txq) 5245d735920dSNavdeep Parhar { 5246d735920dSNavdeep Parhar const struct txpkts *txp = &txq->txp; 5247d735920dSNavdeep Parhar struct sge_eq *eq = &txq->eq; 5248d735920dSNavdeep Parhar struct fw_eth_tx_pkts_vm_wr *wr; 5249d735920dSNavdeep Parhar struct tx_sdesc *txsd; 5250d735920dSNavdeep Parhar struct cpl_tx_pkt_core *cpl; 5251d735920dSNavdeep Parhar uint64_t ctrl1; 5252d735920dSNavdeep Parhar int ndesc, i; 5253d735920dSNavdeep Parhar struct mbuf *m, *last; 5254d735920dSNavdeep Parhar void *flitp; 5255d735920dSNavdeep Parhar 5256d735920dSNavdeep Parhar TXQ_LOCK_ASSERT_OWNED(txq); 5257d735920dSNavdeep Parhar MPASS(txp->npkt > 0); 5258d735920dSNavdeep Parhar MPASS(txp->wr_type == 1); /* VF supports type 1 only */ 5259d735920dSNavdeep Parhar MPASS(txp->mb[0] != NULL); 5260d735920dSNavdeep Parhar MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 5261d735920dSNavdeep Parhar 5262d735920dSNavdeep Parhar wr = (void *)&eq->desc[eq->pidx]; 5263d735920dSNavdeep Parhar wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); 5264d735920dSNavdeep Parhar wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); 5265d735920dSNavdeep Parhar wr->r3 = 0; 5266d735920dSNavdeep Parhar wr->plen = htobe16(txp->plen); 5267d735920dSNavdeep Parhar wr->npkt = txp->npkt; 5268d735920dSNavdeep Parhar wr->r4 = 0; 5269d735920dSNavdeep Parhar memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16); 5270d735920dSNavdeep Parhar flitp = wr + 1; 5271d735920dSNavdeep Parhar 5272d735920dSNavdeep Parhar /* 5273d735920dSNavdeep Parhar * At this point we are 32B into a hardware descriptor. Each mbuf in 5274d735920dSNavdeep Parhar * the WR will take 32B so we check for the end of the descriptor ring 5275d735920dSNavdeep Parhar * before writing odd mbufs (mb[1], 3, 5, ..) 
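 * (each 64B descriptor holds two such 32B chunks, so only odd-indexed
 * mbufs can land exactly on the end of the ring).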
5276d735920dSNavdeep Parhar */ 5277d735920dSNavdeep Parhar ndesc = tx_len16_to_desc(txp->len16); 5278d735920dSNavdeep Parhar last = NULL; 5279d735920dSNavdeep Parhar for (i = 0; i < txp->npkt; i++) { 5280d735920dSNavdeep Parhar m = txp->mb[i]; 5281d735920dSNavdeep Parhar if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 5282d735920dSNavdeep Parhar flitp = &eq->desc[0]; 5283d735920dSNavdeep Parhar cpl = flitp; 5284d735920dSNavdeep Parhar 5285d735920dSNavdeep Parhar /* Checksum offload */ 5286d735920dSNavdeep Parhar ctrl1 = csum_to_ctrl(sc, m); 5287d735920dSNavdeep Parhar if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 5288d735920dSNavdeep Parhar txq->txcsum++; /* some hardware assistance provided */ 5289d735920dSNavdeep Parhar 5290d735920dSNavdeep Parhar /* VLAN tag insertion */ 5291d735920dSNavdeep Parhar if (needs_vlan_insertion(m)) { 5292d735920dSNavdeep Parhar ctrl1 |= F_TXPKT_VLAN_VLD | 5293d735920dSNavdeep Parhar V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 5294d735920dSNavdeep Parhar txq->vlan_insertion++; 5295d735920dSNavdeep Parhar } 5296d735920dSNavdeep Parhar 5297d735920dSNavdeep Parhar /* CPL header */ 5298d735920dSNavdeep Parhar cpl->ctrl0 = txq->cpl_ctrl0; 5299d735920dSNavdeep Parhar cpl->pack = 0; 5300d735920dSNavdeep Parhar cpl->len = htobe16(m->m_pkthdr.len); 5301d735920dSNavdeep Parhar cpl->ctrl1 = htobe64(ctrl1); 5302d735920dSNavdeep Parhar 5303d735920dSNavdeep Parhar flitp = cpl + 1; 5304d735920dSNavdeep Parhar MPASS(mbuf_nsegs(m) == 1); 5305d735920dSNavdeep Parhar write_gl_to_txd(txq, m, (caddr_t *)(&flitp), 0); 5306d735920dSNavdeep Parhar 5307d735920dSNavdeep Parhar if (last != NULL) 5308d735920dSNavdeep Parhar last->m_nextpkt = m; 5309d735920dSNavdeep Parhar last = m; 5310d735920dSNavdeep Parhar } 5311d735920dSNavdeep Parhar 5312d735920dSNavdeep Parhar txq->sgl_wrs++; 5313d735920dSNavdeep Parhar txq->txpkts1_pkts += txp->npkt; 5314d735920dSNavdeep Parhar txq->txpkts1_wrs++; 5315d735920dSNavdeep Parhar 5316d735920dSNavdeep Parhar txsd = &txq->sdesc[eq->pidx]; 5317d735920dSNavdeep Parhar txsd->m = txp->mb[0]; 53187951040fSNavdeep Parhar txsd->desc_used = ndesc; 53197951040fSNavdeep Parhar 53207951040fSNavdeep Parhar return (ndesc); 532154e4ee71SNavdeep Parhar } 532254e4ee71SNavdeep Parhar 532354e4ee71SNavdeep Parhar /* 532454e4ee71SNavdeep Parhar * If the SGL ends on an address that is not 16 byte aligned, this function will 53257951040fSNavdeep Parhar * add a 0 filled flit at the end. 
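 * This keeps the destination pointer 16-byte aligned for whatever is
 * written to the ring next.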
532654e4ee71SNavdeep Parhar */ 53277951040fSNavdeep Parhar static void 53287951040fSNavdeep Parhar write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap) 532954e4ee71SNavdeep Parhar { 53307951040fSNavdeep Parhar struct sge_eq *eq = &txq->eq; 53317951040fSNavdeep Parhar struct sglist *gl = txq->gl; 53327951040fSNavdeep Parhar struct sglist_seg *seg; 53337951040fSNavdeep Parhar __be64 *flitp, *wrap; 533454e4ee71SNavdeep Parhar struct ulptx_sgl *usgl; 53357951040fSNavdeep Parhar int i, nflits, nsegs; 533654e4ee71SNavdeep Parhar 533754e4ee71SNavdeep Parhar KASSERT(((uintptr_t)(*to) & 0xf) == 0, 533854e4ee71SNavdeep Parhar ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 53397951040fSNavdeep Parhar MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 53407951040fSNavdeep Parhar MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 534154e4ee71SNavdeep Parhar 53427951040fSNavdeep Parhar get_pkt_gl(m, gl); 53437951040fSNavdeep Parhar nsegs = gl->sg_nseg; 53447951040fSNavdeep Parhar MPASS(nsegs > 0); 53457951040fSNavdeep Parhar 53467951040fSNavdeep Parhar nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; 534754e4ee71SNavdeep Parhar flitp = (__be64 *)(*to); 53487951040fSNavdeep Parhar wrap = (__be64 *)(&eq->desc[eq->sidx]); 53497951040fSNavdeep Parhar seg = &gl->sg_segs[0]; 535054e4ee71SNavdeep Parhar usgl = (void *)flitp; 535154e4ee71SNavdeep Parhar 535254e4ee71SNavdeep Parhar /* 535354e4ee71SNavdeep Parhar * We start at a 16 byte boundary somewhere inside the tx descriptor 535454e4ee71SNavdeep Parhar * ring, so we're at least 16 bytes away from the status page. There is 535554e4ee71SNavdeep Parhar * no chance of a wrap around in the middle of usgl (which is 16 bytes). 535654e4ee71SNavdeep Parhar */ 535754e4ee71SNavdeep Parhar 535854e4ee71SNavdeep Parhar usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 53597951040fSNavdeep Parhar V_ULPTX_NSGE(nsegs)); 53607951040fSNavdeep Parhar usgl->len0 = htobe32(seg->ss_len); 53617951040fSNavdeep Parhar usgl->addr0 = htobe64(seg->ss_paddr); 536254e4ee71SNavdeep Parhar seg++; 536354e4ee71SNavdeep Parhar 53647951040fSNavdeep Parhar if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { 536554e4ee71SNavdeep Parhar 536654e4ee71SNavdeep Parhar /* Won't wrap around at all */ 536754e4ee71SNavdeep Parhar 53687951040fSNavdeep Parhar for (i = 0; i < nsegs - 1; i++, seg++) { 53697951040fSNavdeep Parhar usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); 53707951040fSNavdeep Parhar usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); 537154e4ee71SNavdeep Parhar } 537254e4ee71SNavdeep Parhar if (i & 1) 537354e4ee71SNavdeep Parhar usgl->sge[i / 2].len[1] = htobe32(0); 53747951040fSNavdeep Parhar flitp += nflits; 537554e4ee71SNavdeep Parhar } else { 537654e4ee71SNavdeep Parhar 537754e4ee71SNavdeep Parhar /* Will wrap somewhere in the rest of the SGL */ 537854e4ee71SNavdeep Parhar 537954e4ee71SNavdeep Parhar /* 2 flits already written, write the rest flit by flit */ 538054e4ee71SNavdeep Parhar flitp = (void *)(usgl + 1); 53817951040fSNavdeep Parhar for (i = 0; i < nflits - 2; i++) { 53827951040fSNavdeep Parhar if (flitp == wrap) 538354e4ee71SNavdeep Parhar flitp = (void *)eq->desc; 53847951040fSNavdeep Parhar *flitp++ = get_flit(seg, nsegs - 1, i); 538554e4ee71SNavdeep Parhar } 538654e4ee71SNavdeep Parhar } 538754e4ee71SNavdeep Parhar 53887951040fSNavdeep Parhar if (nflits & 1) { 53897951040fSNavdeep Parhar MPASS(((uintptr_t)flitp) & 0xf); 53907951040fSNavdeep Parhar *flitp++ = 0; 
53917951040fSNavdeep Parhar } 539254e4ee71SNavdeep Parhar 53937951040fSNavdeep Parhar MPASS((((uintptr_t)flitp) & 0xf) == 0); 53947951040fSNavdeep Parhar if (__predict_false(flitp == wrap)) 539554e4ee71SNavdeep Parhar *to = (void *)eq->desc; 539654e4ee71SNavdeep Parhar else 53977951040fSNavdeep Parhar *to = (void *)flitp; 539854e4ee71SNavdeep Parhar } 539954e4ee71SNavdeep Parhar 540054e4ee71SNavdeep Parhar static inline void 540154e4ee71SNavdeep Parhar copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 540254e4ee71SNavdeep Parhar { 54037951040fSNavdeep Parhar 54047951040fSNavdeep Parhar MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 54057951040fSNavdeep Parhar MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 54067951040fSNavdeep Parhar 54077951040fSNavdeep Parhar if (__predict_true((uintptr_t)(*to) + len <= 54087951040fSNavdeep Parhar (uintptr_t)&eq->desc[eq->sidx])) { 540954e4ee71SNavdeep Parhar bcopy(from, *to, len); 541054e4ee71SNavdeep Parhar (*to) += len; 541154e4ee71SNavdeep Parhar } else { 54127951040fSNavdeep Parhar int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); 541354e4ee71SNavdeep Parhar 541454e4ee71SNavdeep Parhar bcopy(from, *to, portion); 541554e4ee71SNavdeep Parhar from += portion; 541654e4ee71SNavdeep Parhar portion = len - portion; /* remaining */ 541754e4ee71SNavdeep Parhar bcopy(from, (void *)eq->desc, portion); 541854e4ee71SNavdeep Parhar (*to) = (caddr_t)eq->desc + portion; 541954e4ee71SNavdeep Parhar } 542054e4ee71SNavdeep Parhar } 542154e4ee71SNavdeep Parhar 542254e4ee71SNavdeep Parhar static inline void 54237951040fSNavdeep Parhar ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) 542454e4ee71SNavdeep Parhar { 54257951040fSNavdeep Parhar u_int db; 54267951040fSNavdeep Parhar 54277951040fSNavdeep Parhar MPASS(n > 0); 5428d14b0ac1SNavdeep Parhar 5429d14b0ac1SNavdeep Parhar db = eq->doorbells; 54307951040fSNavdeep Parhar if (n > 1) 543177ad3c41SNavdeep Parhar clrbit(&db, DOORBELL_WCWR); 5432d14b0ac1SNavdeep Parhar wmb(); 5433d14b0ac1SNavdeep Parhar 5434d14b0ac1SNavdeep Parhar switch (ffs(db) - 1) { 5435d14b0ac1SNavdeep Parhar case DOORBELL_UDB: 54367951040fSNavdeep Parhar *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 54377951040fSNavdeep Parhar break; 5438d14b0ac1SNavdeep Parhar 543977ad3c41SNavdeep Parhar case DOORBELL_WCWR: { 5440d14b0ac1SNavdeep Parhar volatile uint64_t *dst, *src; 5441d14b0ac1SNavdeep Parhar int i; 5442d14b0ac1SNavdeep Parhar 5443d14b0ac1SNavdeep Parhar /* 5444d14b0ac1SNavdeep Parhar * Queues whose 128B doorbell segment fits in the page do not 5445d14b0ac1SNavdeep Parhar * use relative qid (udb_qid is always 0). Only queues with 544677ad3c41SNavdeep Parhar * doorbell segments can do WCWR. 
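 * (A WCWR pushes the whole 64B descriptor through the write-combined
 * doorbell window, which saves the chip a DMA read of that descriptor.)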
5447d14b0ac1SNavdeep Parhar */ 54487951040fSNavdeep Parhar KASSERT(eq->udb_qid == 0 && n == 1, 5449d14b0ac1SNavdeep Parhar ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 54507951040fSNavdeep Parhar __func__, eq->doorbells, n, eq->dbidx, eq)); 5451d14b0ac1SNavdeep Parhar 5452d14b0ac1SNavdeep Parhar dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 5453d14b0ac1SNavdeep Parhar UDBS_DB_OFFSET); 54547951040fSNavdeep Parhar i = eq->dbidx; 5455d14b0ac1SNavdeep Parhar src = (void *)&eq->desc[i]; 5456d14b0ac1SNavdeep Parhar while (src != (void *)&eq->desc[i + 1]) 5457d14b0ac1SNavdeep Parhar *dst++ = *src++; 5458d14b0ac1SNavdeep Parhar wmb(); 54597951040fSNavdeep Parhar break; 5460d14b0ac1SNavdeep Parhar } 5461d14b0ac1SNavdeep Parhar 5462d14b0ac1SNavdeep Parhar case DOORBELL_UDBWC: 54637951040fSNavdeep Parhar *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 5464d14b0ac1SNavdeep Parhar wmb(); 54657951040fSNavdeep Parhar break; 5466d14b0ac1SNavdeep Parhar 5467d14b0ac1SNavdeep Parhar case DOORBELL_KDB: 5468315048f2SJohn Baldwin t4_write_reg(sc, sc->sge_kdoorbell_reg, 54697951040fSNavdeep Parhar V_QID(eq->cntxt_id) | V_PIDX(n)); 54707951040fSNavdeep Parhar break; 547154e4ee71SNavdeep Parhar } 547254e4ee71SNavdeep Parhar 54737951040fSNavdeep Parhar IDXINCR(eq->dbidx, n, eq->sidx); 54747951040fSNavdeep Parhar } 54757951040fSNavdeep Parhar 54767951040fSNavdeep Parhar static inline u_int 54777951040fSNavdeep Parhar reclaimable_tx_desc(struct sge_eq *eq) 547854e4ee71SNavdeep Parhar { 54797951040fSNavdeep Parhar uint16_t hw_cidx; 548054e4ee71SNavdeep Parhar 54817951040fSNavdeep Parhar hw_cidx = read_hw_cidx(eq); 54827951040fSNavdeep Parhar return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); 54837951040fSNavdeep Parhar } 548454e4ee71SNavdeep Parhar 54857951040fSNavdeep Parhar static inline u_int 54867951040fSNavdeep Parhar total_available_tx_desc(struct sge_eq *eq) 54877951040fSNavdeep Parhar { 54887951040fSNavdeep Parhar uint16_t hw_cidx, pidx; 54897951040fSNavdeep Parhar 54907951040fSNavdeep Parhar hw_cidx = read_hw_cidx(eq); 54917951040fSNavdeep Parhar pidx = eq->pidx; 54927951040fSNavdeep Parhar 54937951040fSNavdeep Parhar if (pidx == hw_cidx) 54947951040fSNavdeep Parhar return (eq->sidx - 1); 549554e4ee71SNavdeep Parhar else 54967951040fSNavdeep Parhar return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); 54977951040fSNavdeep Parhar } 54987951040fSNavdeep Parhar 54997951040fSNavdeep Parhar static inline uint16_t 55007951040fSNavdeep Parhar read_hw_cidx(struct sge_eq *eq) 55017951040fSNavdeep Parhar { 55027951040fSNavdeep Parhar struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 55037951040fSNavdeep Parhar uint16_t cidx = spg->cidx; /* stable snapshot */ 55047951040fSNavdeep Parhar 55057951040fSNavdeep Parhar return (be16toh(cidx)); 5506e874ff7aSNavdeep Parhar } 550754e4ee71SNavdeep Parhar 5508e874ff7aSNavdeep Parhar /* 55097951040fSNavdeep Parhar * Reclaim 'n' descriptors approximately. 
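 * "Approximately" because descriptors are reclaimed a whole WR at a time,
 * so the result may overshoot 'n' (or fall short if fewer are ready).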
5510e874ff7aSNavdeep Parhar */ 55117951040fSNavdeep Parhar static u_int 55127951040fSNavdeep Parhar reclaim_tx_descs(struct sge_txq *txq, u_int n) 5513e874ff7aSNavdeep Parhar { 5514e874ff7aSNavdeep Parhar struct tx_sdesc *txsd; 5515f7dfe243SNavdeep Parhar struct sge_eq *eq = &txq->eq; 55167951040fSNavdeep Parhar u_int can_reclaim, reclaimed; 551754e4ee71SNavdeep Parhar 5518733b9277SNavdeep Parhar TXQ_LOCK_ASSERT_OWNED(txq); 55197951040fSNavdeep Parhar MPASS(n > 0); 5520e874ff7aSNavdeep Parhar 55217951040fSNavdeep Parhar reclaimed = 0; 55227951040fSNavdeep Parhar can_reclaim = reclaimable_tx_desc(eq); 55237951040fSNavdeep Parhar while (can_reclaim && reclaimed < n) { 552454e4ee71SNavdeep Parhar int ndesc; 55257951040fSNavdeep Parhar struct mbuf *m, *nextpkt; 552654e4ee71SNavdeep Parhar 5527f7dfe243SNavdeep Parhar txsd = &txq->sdesc[eq->cidx]; 552854e4ee71SNavdeep Parhar ndesc = txsd->desc_used; 552954e4ee71SNavdeep Parhar 553054e4ee71SNavdeep Parhar /* Firmware doesn't return "partial" credits. */ 553154e4ee71SNavdeep Parhar KASSERT(can_reclaim >= ndesc, 553254e4ee71SNavdeep Parhar ("%s: unexpected number of credits: %d, %d", 553354e4ee71SNavdeep Parhar __func__, can_reclaim, ndesc)); 5534dcd50a20SJohn Baldwin KASSERT(ndesc != 0, 5535dcd50a20SJohn Baldwin ("%s: descriptor with no credits: cidx %d", 5536dcd50a20SJohn Baldwin __func__, eq->cidx)); 553754e4ee71SNavdeep Parhar 55387951040fSNavdeep Parhar for (m = txsd->m; m != NULL; m = nextpkt) { 55397951040fSNavdeep Parhar nextpkt = m->m_nextpkt; 55407951040fSNavdeep Parhar m->m_nextpkt = NULL; 55417951040fSNavdeep Parhar m_freem(m); 55427951040fSNavdeep Parhar } 554354e4ee71SNavdeep Parhar reclaimed += ndesc; 554454e4ee71SNavdeep Parhar can_reclaim -= ndesc; 55457951040fSNavdeep Parhar IDXINCR(eq->cidx, ndesc, eq->sidx); 554654e4ee71SNavdeep Parhar } 554754e4ee71SNavdeep Parhar 554854e4ee71SNavdeep Parhar return (reclaimed); 554954e4ee71SNavdeep Parhar } 555054e4ee71SNavdeep Parhar 555154e4ee71SNavdeep Parhar static void 55527951040fSNavdeep Parhar tx_reclaim(void *arg, int n) 555354e4ee71SNavdeep Parhar { 55547951040fSNavdeep Parhar struct sge_txq *txq = arg; 55557951040fSNavdeep Parhar struct sge_eq *eq = &txq->eq; 555654e4ee71SNavdeep Parhar 55577951040fSNavdeep Parhar do { 55587951040fSNavdeep Parhar if (TXQ_TRYLOCK(txq) == 0) 55597951040fSNavdeep Parhar break; 55607951040fSNavdeep Parhar n = reclaim_tx_descs(txq, 32); 55617951040fSNavdeep Parhar if (eq->cidx == eq->pidx) 55627951040fSNavdeep Parhar eq->equeqidx = eq->pidx; 55637951040fSNavdeep Parhar TXQ_UNLOCK(txq); 55647951040fSNavdeep Parhar } while (n > 0); 556554e4ee71SNavdeep Parhar } 556654e4ee71SNavdeep Parhar 556754e4ee71SNavdeep Parhar static __be64 55687951040fSNavdeep Parhar get_flit(struct sglist_seg *segs, int nsegs, int idx) 556954e4ee71SNavdeep Parhar { 557054e4ee71SNavdeep Parhar int i = (idx / 3) * 2; 557154e4ee71SNavdeep Parhar 557254e4ee71SNavdeep Parhar switch (idx % 3) { 557354e4ee71SNavdeep Parhar case 0: { 5574f078ecf6SWojciech Macek uint64_t rc; 557554e4ee71SNavdeep Parhar 5576f078ecf6SWojciech Macek rc = (uint64_t)segs[i].ss_len << 32; 557754e4ee71SNavdeep Parhar if (i + 1 < nsegs) 5578f078ecf6SWojciech Macek rc |= (uint64_t)(segs[i + 1].ss_len); 557954e4ee71SNavdeep Parhar 5580f078ecf6SWojciech Macek return (htobe64(rc)); 558154e4ee71SNavdeep Parhar } 558254e4ee71SNavdeep Parhar case 1: 55837951040fSNavdeep Parhar return (htobe64(segs[i].ss_paddr)); 558454e4ee71SNavdeep Parhar case 2: 55857951040fSNavdeep Parhar return (htobe64(segs[i + 1].ss_paddr)); 
static __be64
get_flit(struct sglist_seg *segs, int nsegs, int idx)
{
	int i = (idx / 3) * 2;

	switch (idx % 3) {
	case 0: {
		uint64_t rc;

		rc = (uint64_t)segs[i].ss_len << 32;
		if (i + 1 < nsegs)
			rc |= (uint64_t)(segs[i + 1].ss_len);

		return (htobe64(rc));
	}
	case 1:
		return (htobe64(segs[i].ss_paddr));
	case 2:
		return (htobe64(segs[i + 1].ss_paddr));
	}

	return (0);
}

static int
find_refill_source(struct adapter *sc, int maxp, bool packing)
{
	int i, zidx = -1;
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0];

	if (packing) {
		for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
			if (rxb->hwidx2 == -1)
				continue;
			if (rxb->size1 < PAGE_SIZE &&
			    rxb->size1 < largest_rx_cluster)
				continue;
			if (rxb->size1 > largest_rx_cluster)
				break;
			MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE);
			if (rxb->size2 >= maxp)
				return (i);
			zidx = i;
		}
	} else {
		for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
			if (rxb->hwidx1 == -1)
				continue;
			if (rxb->size1 > largest_rx_cluster)
				break;
			if (rxb->size1 >= maxp)
				return (i);
			zidx = i;
		}
	}

	return (zidx);
}
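
/*
 * Hand a freelist that could not be refilled over to the adapter: mark it
 * FL_STARVING and put it on sc->sfl, where the refill_sfl callout (every
 * hz / 5 ticks) keeps trying to top it up.  A doomed freelist, i.e. one
 * whose queue is being torn down, is left alone.
 */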
static void
add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	if ((fl->flags & FL_DOOMED) == 0) {
		fl->flags |= FL_STARVING;
		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
	}
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);
}

static void
handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq)
{
	struct sge_wrq *wrq = (void *)eq;

	atomic_readandclear_int(&eq->equiq);
	taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task);
}

static void
handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq)
{
	struct sge_txq *txq = (void *)eq;

	MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH);

	atomic_readandclear_int(&eq->equiq);
	if (mp_ring_is_idle(txq->r))
		taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task);
	else
		mp_ring_check_drainage(txq->r, 64);
}
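
/*
 * A CPL_SGE_EGR_UPDATE message carries the id of an egress queue that needs
 * attention.  Look the queue up in eqmap and dispatch on its type via the
 * handler table h[] below: Ethernet tx queues get their mp_ring drained or a
 * descriptor reclaim scheduled, and the other queue types are serviced as
 * work request queues.
 */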
static int
handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
	struct adapter *sc = iq->adapter;
	struct sge *s = &sc->sge;
	struct sge_eq *eq;
	static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
	    &handle_wrq_egr_update, &handle_eth_egr_update,
	    &handle_wrq_egr_update};

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	eq = s->eqmap[qid - s->eq_start - s->eq_base];
	(*h[eq->flags & EQ_TYPEMASK])(sc, eq);

	return (0);
}

/* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */
CTASSERT(offsetof(struct cpl_fw4_msg, data) == \
    offsetof(struct cpl_fw6_msg, data));

static int
handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
		const struct rss_header *rss2;

		rss2 = (const struct rss_header *)&cpl->data[0];
		return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
	}

	return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
}

/**
 *	t4_handle_wrerr_rpl - process a FW work request error message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 */
static int
t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_error_cmd *e = (const void *)rpl;
	unsigned int i;

	if (opcode != FW_ERROR_CMD) {
		log(LOG_ERR,
		    "%s: Received WRERR_RPL message with opcode %#x\n",
		    device_get_nameunit(adap->dev), opcode);
		return (EINVAL);
	}
	log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev),
	    G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" :
	    "non-fatal");
	switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) {
	case FW_ERROR_TYPE_EXCEPTION:
		log(LOG_ERR, "exception info:\n");
		for (i = 0; i < nitems(e->u.exception.info); i++)
			log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ",
			    be32toh(e->u.exception.info[i]));
		log(LOG_ERR, "\n");
		break;
	case FW_ERROR_TYPE_HWMODULE:
		log(LOG_ERR, "HW module regaddr %08x regval %08x\n",
		    be32toh(e->u.hwmodule.regaddr),
		    be32toh(e->u.hwmodule.regval));
		break;
	case FW_ERROR_TYPE_WR:
		log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n",
		    be16toh(e->u.wr.cidx),
		    G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)),
		    G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)),
		    be32toh(e->u.wr.eqid));
		for (i = 0; i < nitems(e->u.wr.wrhdr); i++)
			log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ",
			    e->u.wr.wrhdr[i]);
		log(LOG_ERR, "\n");
		break;
	case FW_ERROR_TYPE_ACL:
		log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s",
		    be16toh(e->u.acl.cidx),
		    G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)),
		    G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)),
		    be32toh(e->u.acl.eqid),
		    G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" :
		    "MAC");
		for (i = 0; i < nitems(e->u.acl.val); i++)
			log(LOG_ERR, " %02x", e->u.acl.val[i]);
		log(LOG_ERR, "\n");
		break;
	default:
		log(LOG_ERR, "type %#x\n",
		    G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type)));
		return (EINVAL);
	}
	return (0);
}

static int
sysctl_uint16(SYSCTL_HANDLER_ARGS)
{
	uint16_t *id = arg1;
	int i = *id;

	return sysctl_handle_int(oidp, &i, 0, req);
}

static inline bool
bufidx_used(struct adapter *sc, int idx)
{
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0];
	int i;

	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
		if (rxb->size1 > largest_rx_cluster)
			continue;
		if (rxb->hwidx1 == idx || rxb->hwidx2 == idx)
			return (true);
	}

	return (false);
}

static int
sysctl_bufsizes(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sge_params *sp = &sc->params.sge;
	int i, rc;
	struct sbuf sb;
	char c;

	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);
	for (i = 0; i < SGE_FLBUF_SIZES; i++) {
		if (bufidx_used(sc, i))
			c = '*';
		else
			c = '\0';

		sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c);
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (rc);
}

#ifdef RATELIMIT
/*
 * len16 for a txpkt WR with a GL.  Includes the firmware work request
 * header.  Each segment after the first needs 12B in the SGL, rounded up to
 * whole 8B flits.
 */
static inline u_int
txpkt_eo_len16(u_int nsegs, u_int immhdrs, u_int tso)
{
	u_int n;

	MPASS(immhdrs > 0);

	n = roundup2(sizeof(struct fw_eth_tx_eo_wr) +
	    sizeof(struct cpl_tx_pkt_core) + immhdrs, 16);
	if (__predict_false(nsegs == 0))
		goto done;

	nsegs--; /* first segment is part of ulptx_sgl */
	n += sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	if (tso)
		n += sizeof(struct cpl_tx_pkt_lso_core);

done:
	return (howmany(n, 16));
}

#define ETID_FLOWC_NPARAMS 6
#define ETID_FLOWC_LEN (roundup2((sizeof(struct fw_flowc_wr) + \
    ETID_FLOWC_NPARAMS * sizeof(struct fw_flowc_mnemval)), 16))
#define ETID_FLOWC_LEN16 (howmany(ETID_FLOWC_LEN, 16))
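
/*
 * Send the FLOWC work request that initializes the state of an ethofld flow
 * in the firmware.  It is always the very first WR on the etid (note the
 * tx_credits assertion below) and carries the six parameters defined above:
 * PF/VF, channel, port, response iq, flow state, and scheduling class.
 */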
static int
send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
    struct vi_info *vi)
{
	struct wrq_cookie cookie;
	u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN;
	struct fw_flowc_wr *flowc;

	mtx_assert(&cst->lock, MA_OWNED);
	MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) ==
	    EO_FLOWC_PENDING);

	flowc = start_wrq_wr(cst->eo_txq, ETID_FLOWC_LEN16, &cookie);
	if (__predict_false(flowc == NULL))
		return (ENOMEM);

	bzero(flowc, ETID_FLOWC_LEN);
	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) |
	    V_FW_WR_FLOWID(cst->etid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(cst->iqid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
	flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
	flowc->mnemval[5].val = htobe32(cst->schedcl);

	commit_wrq_wr(cst->eo_txq, flowc, &cookie);

	cst->flags &= ~EO_FLOWC_PENDING;
	cst->flags |= EO_FLOWC_RPL_PENDING;
	MPASS(cst->tx_credits >= ETID_FLOWC_LEN16);	/* flowc is first WR. */
	cst->tx_credits -= ETID_FLOWC_LEN16;

	return (0);
}
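
/*
 * A parameterless FLOWC with F_FW_WR_COMPL set is used to flush an etid: the
 * fw4_ack that the firmware sends back for it indicates that every preceding
 * work request on the flow has been processed, at which point the tag can be
 * released (see the final-flush handling in ethofld_fw4_ack).
 */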
#define ETID_FLUSH_LEN16 (howmany(sizeof (struct fw_flowc_wr), 16))

void
send_etid_flush_wr(struct cxgbe_rate_tag *cst)
{
	struct fw_flowc_wr *flowc;
	struct wrq_cookie cookie;

	mtx_assert(&cst->lock, MA_OWNED);

	flowc = start_wrq_wr(cst->eo_txq, ETID_FLUSH_LEN16, &cookie);
	if (__predict_false(flowc == NULL))
		CXGBE_UNIMPLEMENTED(__func__);

	bzero(flowc, ETID_FLUSH_LEN16 * 16);
	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(0) | F_FW_WR_COMPL);
	flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) |
	    V_FW_WR_FLOWID(cst->etid));

	commit_wrq_wr(cst->eo_txq, flowc, &cookie);

	cst->flags |= EO_FLUSH_RPL_PENDING;
	MPASS(cst->tx_credits >= ETID_FLUSH_LEN16);
	cst->tx_credits -= ETID_FLUSH_LEN16;
	cst->ncompl++;
}
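
/*
 * Fill in a FW_ETH_TX_EO_WR for mbuf m0 in the descriptor space at 'wr'.
 * The WR is laid out as: work request header, udpseg/tcpseg sub-header, LSO
 * CPL (TSO only), CPL_TX_PKT_CORE, the packet's L2/L3/L4 headers as
 * immediate data, and finally a ULPTX DSGL covering the rest of the payload.
 */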
static void
write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr,
    struct mbuf *m0, int compl)
{
	struct cpl_tx_pkt_core *cpl;
	uint64_t ctrl1;
	uint32_t ctrl;		/* used in many unrelated places */
	int len16, pktlen, nsegs, immhdrs;
	caddr_t dst;
	uintptr_t p;
	struct ulptx_sgl *usgl;
	struct sglist sg;
	struct sglist_seg segs[38];	/* XXX: find real limit.  XXX: get off the stack */

	mtx_assert(&cst->lock, MA_OWNED);
	M_ASSERTPKTHDR(m0);
	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
	    m0->m_pkthdr.l4hlen > 0,
	    ("%s: ethofld mbuf %p is missing header lengths", __func__, m0));

	len16 = mbuf_eo_len16(m0);
	nsegs = mbuf_eo_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen +
	    m0->m_pkthdr.l4hlen;
	ctrl += immhdrs;

	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) |
	    V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl));
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) |
	    V_FW_WR_FLOWID(cst->etid));
	wr->r3 = 0;
	if (needs_udp_csum(m0)) {
		wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
		wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen;
		wr->u.udpseg.rtplen = 0;
		wr->u.udpseg.r4 = 0;
		wr->u.udpseg.mss = htobe16(pktlen - immhdrs);
		wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
		wr->u.udpseg.plen = htobe32(pktlen - immhdrs);
		cpl = (void *)(wr + 1);
	} else {
		MPASS(needs_tcp_csum(m0));
		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
		wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen;
		wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0);
		wr->u.tcpseg.r4 = 0;
		wr->u.tcpseg.r5 = 0;
		wr->u.tcpseg.plen = htobe32(pktlen - immhdrs);

		if (needs_tso(m0)) {
			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

			wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz);

			ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
			    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
			    V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
			    ETHER_HDR_LEN) >> 2) |
			    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
			    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
			if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
				ctrl |= F_LSO_IPV6;
			lso->lso_ctrl = htobe32(ctrl);
			lso->ipid_ofst = htobe16(0);
			lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
			lso->seqno_offset = htobe32(0);
			lso->len = htobe32(pktlen);

			cpl = (void *)(lso + 1);
		} else {
			wr->u.tcpseg.mss = htobe16(0xffff);
			cpl = (void *)(wr + 1);
		}
	}

	/* Checksum offload must be requested for ethofld. */
	MPASS(needs_l4_csum(m0));
	ctrl1 = csum_to_ctrl(cst->adapter, m0);

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
	}

	/* CPL header */
	cpl->ctrl0 = cst->ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* Copy Ethernet, IP & TCP/UDP hdrs as immediate data */
	p = (uintptr_t)(cpl + 1);
	m_copydata(m0, 0, immhdrs, (void *)p);

	/* SGL */
	dst = (void *)(cpl + 1);
	if (nsegs > 0) {
		int i, pad;

		/* zero-pad up to the next 16B boundary, if not already 16B aligned */
		p += immhdrs;
		pad = 16 - (immhdrs & 0xf);
		bzero((void *)p, pad);

		usgl = (void *)(p + pad);
		usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
		    V_ULPTX_NSGE(nsegs));

		sglist_init(&sg, nitems(segs), segs);
		for (; m0 != NULL; m0 = m0->m_next) {
			if (__predict_false(m0->m_len == 0))
				continue;
			if (immhdrs >= m0->m_len) {
				immhdrs -= m0->m_len;
				continue;
			}
			if (m0->m_flags & M_EXTPG)
				sglist_append_mbuf_epg(&sg, m0,
				    mtod(m0, vm_offset_t), m0->m_len);
			else
				sglist_append(&sg, mtod(m0, char *) + immhdrs,
				    m0->m_len - immhdrs);
			immhdrs = 0;
		}
		MPASS(sg.sg_nseg == nsegs);

		/*
		 * Zero pad last 8B in case the WR doesn't end on a 16B
		 * boundary.
		 */
		*(uint64_t *)((char *)wr + len16 * 16 - 8) = 0;

		usgl->len0 = htobe32(segs[0].ss_len);
		usgl->addr0 = htobe64(segs[0].ss_paddr);
		for (i = 0; i < nsegs - 1; i++) {
			usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len);
			usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr);
		}
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
	}
}
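
/*
 * Drain the tag's pending_tx queue, as far as tx credits allow.  A work
 * request asks for a completion if none are outstanding or once half of
 * tx_total credits have gone out without one; transmitted mbufs sit on
 * pending_fwack until the fw4_ack that returns their credits arrives.
 */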
static void
ethofld_tx(struct cxgbe_rate_tag *cst)
{
	struct mbuf *m;
	struct wrq_cookie cookie;
	int next_credits, compl;
	struct fw_eth_tx_eo_wr *wr;

	mtx_assert(&cst->lock, MA_OWNED);

	while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
		M_ASSERTPKTHDR(m);

		/* How many len16 credits do we need to send this mbuf? */
		next_credits = mbuf_eo_len16(m);
		MPASS(next_credits > 0);
		if (next_credits > cst->tx_credits) {
			/*
			 * Tx will make progress eventually because there is at
			 * least one outstanding fw4_ack that will return
			 * credits and kick the tx.
			 */
			MPASS(cst->ncompl > 0);
			return;
		}
		wr = start_wrq_wr(cst->eo_txq, next_credits, &cookie);
		if (__predict_false(wr == NULL)) {
			/* XXX: wishful thinking, not a real assertion. */
			MPASS(cst->ncompl > 0);
			return;
		}
		cst->tx_credits -= next_credits;
		cst->tx_nocompl += next_credits;
		compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
		ETHER_BPF_MTAP(cst->com.com.ifp, m);
		write_ethofld_wr(cst, wr, m, compl);
		commit_wrq_wr(cst->eo_txq, wr, &cookie);
		if (compl) {
			cst->ncompl++;
			cst->tx_nocompl = 0;
		}
		(void) mbufq_dequeue(&cst->pending_tx);

		/*
		 * Drop the mbuf's reference on the tag now rather than
		 * waiting until m_freem().  This ensures that
		 * cxgbe_rate_tag_free gets called when the inp drops its
		 * reference on the tag and there are no more mbufs in the
		 * pending_tx queue, and can then flush any pending requests.
		 * Otherwise, if the last mbuf doesn't request a completion,
		 * the etid will never be released.
		 */
		m->m_pkthdr.snd_tag = NULL;
		m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
		m_snd_tag_rele(&cst->com.com);

		mbufq_enqueue(&cst->pending_fwack, m);
	}
}
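
/*
 * Queue m0 on its rate-limit tag and try to transmit.  The first packet on a
 * tag also selects the offload txq and response iq (from the packet's flowid
 * if it has one, or a random RSS hash) and sends the FLOWC.  The backlog per
 * tag is capped at eo_max_backlog bytes.
 */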
int
ethofld_transmit(struct ifnet *ifp, struct mbuf *m0)
{
	struct cxgbe_rate_tag *cst;
	int rc;

	MPASS(m0->m_nextpkt == NULL);
	MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG);
	MPASS(m0->m_pkthdr.snd_tag != NULL);
	cst = mst_to_crt(m0->m_pkthdr.snd_tag);

	mtx_lock(&cst->lock);
	MPASS(cst->flags & EO_SND_TAG_REF);

	if (__predict_false(cst->flags & EO_FLOWC_PENDING)) {
		struct vi_info *vi = ifp->if_softc;
		struct port_info *pi = vi->pi;
		struct adapter *sc = pi->adapter;
		const uint32_t rss_mask = vi->rss_size - 1;
		uint32_t rss_hash;

		cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq];
		if (M_HASHTYPE_ISHASH(m0))
			rss_hash = m0->m_pkthdr.flowid;
		else
			rss_hash = arc4random();
		/* We assume RSS hashing */
		cst->iqid = vi->rss[rss_hash & rss_mask];
		cst->eo_txq += rss_hash % vi->nofldtxq;
		rc = send_etid_flowc_wr(cst, pi, vi);
		if (rc != 0)
			goto done;
	}

	if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) {
		rc = ENOBUFS;
		goto done;
	}

	mbufq_enqueue(&cst->pending_tx, m0);
	cst->plen += m0->m_pkthdr.len;

	/*
	 * Hold an extra reference on the tag while generating work requests
	 * to ensure that we don't try to free the tag during ethofld_tx() in
	 * case we are sending the final mbuf after the inp was freed.
	 */
	m_snd_tag_ref(&cst->com.com);
	ethofld_tx(cst);
	mtx_unlock(&cst->lock);
	m_snd_tag_rele(&cst->com.com);
	return (0);

done:
	mtx_unlock(&cst->lock);
	if (__predict_false(rc != 0))
		m_freem(m0);
	return (rc);
}
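
/*
 * Process a CPL_FW4_ACK for an ethofld etid.  The returned credits cover
 * whole work requests: free the mbufs they correspond to, give the credits
 * back to the tag, and restart the tx if the head of pending_tx now fits.
 * Credits that arrive after pending_fwack is empty belong to the final
 * flush, which means the tag itself can be freed.
 */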
static int
ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	struct mbuf *m;
	u_int etid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct cxgbe_rate_tag *cst;
	uint8_t credits = cpl->credits;

	cst = lookup_etid(sc, etid);
	mtx_lock(&cst->lock);
	if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) {
		MPASS(credits >= ETID_FLOWC_LEN16);
		credits -= ETID_FLOWC_LEN16;
		cst->flags &= ~EO_FLOWC_RPL_PENDING;
	}

	KASSERT(cst->ncompl > 0,
	    ("%s: etid %u (%p) wasn't expecting completion.",
	    __func__, etid, cst));
	cst->ncompl--;

	while (credits > 0) {
		m = mbufq_dequeue(&cst->pending_fwack);
		if (__predict_false(m == NULL)) {
			/*
			 * The remaining credits are for the final flush that
			 * was issued when the tag was freed by the kernel.
			 */
			MPASS((cst->flags &
			    (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) ==
			    EO_FLUSH_RPL_PENDING);
			MPASS(credits == ETID_FLUSH_LEN16);
			MPASS(cst->tx_credits + cpl->credits == cst->tx_total);
			MPASS(cst->ncompl == 0);

			cst->flags &= ~EO_FLUSH_RPL_PENDING;
			cst->tx_credits += cpl->credits;
			cxgbe_rate_tag_free_locked(cst);
			return (0);	/* cst is gone. */
		}
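		/*
		 * Each acked WR maps to one mbuf on pending_fwack and returns
		 * exactly the credits that mbuf's WR consumed.
		 */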
		KASSERT(m != NULL,
		    ("%s: too many credits (%u, %u)", __func__, cpl->credits,
		    credits));
		KASSERT(credits >= mbuf_eo_len16(m),
		    ("%s: too few credits (%u, %u, %u)", __func__,
		    cpl->credits, credits, mbuf_eo_len16(m)));
		credits -= mbuf_eo_len16(m);
		cst->plen -= m->m_pkthdr.len;
		m_freem(m);
	}

	cst->tx_credits += cpl->credits;
	MPASS(cst->tx_credits <= cst->tx_total);

	if (cst->flags & EO_SND_TAG_REF) {
		/*
		 * As with ethofld_transmit(), hold an extra reference so that
		 * the tag is stable across ethofld_tx().
		 */
		m_snd_tag_ref(&cst->com.com);
		m = mbufq_first(&cst->pending_tx);
		if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
			ethofld_tx(cst);
		mtx_unlock(&cst->lock);
		m_snd_tag_rele(&cst->com.com);
	} else {
		/*
		 * There shouldn't be any pending packets if the tag was freed
		 * by the kernel since any pending packet should hold a
		 * reference to the tag.
		 */
		MPASS(mbufq_first(&cst->pending_tx) == NULL);
		mtx_unlock(&cst->lock);
	}

	return (0);
}
#endif