154e4ee71SNavdeep Parhar /*- 2718cf2ccSPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3718cf2ccSPedro F. Giffuni * 454e4ee71SNavdeep Parhar * Copyright (c) 2011 Chelsio Communications, Inc. 554e4ee71SNavdeep Parhar * All rights reserved. 654e4ee71SNavdeep Parhar * Written by: Navdeep Parhar <np@FreeBSD.org> 754e4ee71SNavdeep Parhar * 854e4ee71SNavdeep Parhar * Redistribution and use in source and binary forms, with or without 954e4ee71SNavdeep Parhar * modification, are permitted provided that the following conditions 1054e4ee71SNavdeep Parhar * are met: 1154e4ee71SNavdeep Parhar * 1. Redistributions of source code must retain the above copyright 1254e4ee71SNavdeep Parhar * notice, this list of conditions and the following disclaimer. 1354e4ee71SNavdeep Parhar * 2. Redistributions in binary form must reproduce the above copyright 1454e4ee71SNavdeep Parhar * notice, this list of conditions and the following disclaimer in the 1554e4ee71SNavdeep Parhar * documentation and/or other materials provided with the distribution. 1654e4ee71SNavdeep Parhar * 1754e4ee71SNavdeep Parhar * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 1854e4ee71SNavdeep Parhar * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1954e4ee71SNavdeep Parhar * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2054e4ee71SNavdeep Parhar * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 2154e4ee71SNavdeep Parhar * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2254e4ee71SNavdeep Parhar * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2354e4ee71SNavdeep Parhar * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2454e4ee71SNavdeep Parhar * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2554e4ee71SNavdeep Parhar * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2654e4ee71SNavdeep Parhar * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2754e4ee71SNavdeep Parhar * SUCH DAMAGE. 2854e4ee71SNavdeep Parhar */ 2954e4ee71SNavdeep Parhar 3054e4ee71SNavdeep Parhar #include <sys/cdefs.h> 3154e4ee71SNavdeep Parhar __FBSDID("$FreeBSD$"); 3254e4ee71SNavdeep Parhar 3354e4ee71SNavdeep Parhar #include "opt_inet.h" 34a1ea9a82SNavdeep Parhar #include "opt_inet6.h" 35bddf7343SJohn Baldwin #include "opt_kern_tls.h" 36eff62dbaSNavdeep Parhar #include "opt_ratelimit.h" 3754e4ee71SNavdeep Parhar 3854e4ee71SNavdeep Parhar #include <sys/types.h> 39c3322cb9SGleb Smirnoff #include <sys/eventhandler.h> 4054e4ee71SNavdeep Parhar #include <sys/mbuf.h> 4154e4ee71SNavdeep Parhar #include <sys/socket.h> 4254e4ee71SNavdeep Parhar #include <sys/kernel.h> 43bddf7343SJohn Baldwin #include <sys/ktls.h> 44ecb79ca4SNavdeep Parhar #include <sys/malloc.h> 45ecb79ca4SNavdeep Parhar #include <sys/queue.h> 4638035ed6SNavdeep Parhar #include <sys/sbuf.h> 47ecb79ca4SNavdeep Parhar #include <sys/taskqueue.h> 48480e603cSNavdeep Parhar #include <sys/time.h> 497951040fSNavdeep Parhar #include <sys/sglist.h> 5054e4ee71SNavdeep Parhar #include <sys/sysctl.h> 51733b9277SNavdeep Parhar #include <sys/smp.h> 52bddf7343SJohn Baldwin #include <sys/socketvar.h> 5382eff304SNavdeep Parhar #include <sys/counter.h> 5454e4ee71SNavdeep Parhar #include <net/bpf.h> 5554e4ee71SNavdeep Parhar #include <net/ethernet.h> 
5654e4ee71SNavdeep Parhar #include <net/if.h> 5754e4ee71SNavdeep Parhar #include <net/if_vlan_var.h> 58a4a4ad2dSNavdeep Parhar #include <net/if_vxlan.h> 5954e4ee71SNavdeep Parhar #include <netinet/in.h> 6054e4ee71SNavdeep Parhar #include <netinet/ip.h> 61a1ea9a82SNavdeep Parhar #include <netinet/ip6.h> 6254e4ee71SNavdeep Parhar #include <netinet/tcp.h> 63786099deSNavdeep Parhar #include <netinet/udp.h> 646af45170SJohn Baldwin #include <machine/in_cksum.h> 6564db8966SDimitry Andric #include <machine/md_var.h> 6638035ed6SNavdeep Parhar #include <vm/vm.h> 6738035ed6SNavdeep Parhar #include <vm/pmap.h> 68298d969cSNavdeep Parhar #ifdef DEV_NETMAP 69298d969cSNavdeep Parhar #include <machine/bus.h> 70298d969cSNavdeep Parhar #include <sys/selinfo.h> 71298d969cSNavdeep Parhar #include <net/if_var.h> 72298d969cSNavdeep Parhar #include <net/netmap.h> 73298d969cSNavdeep Parhar #include <dev/netmap/netmap_kern.h> 74298d969cSNavdeep Parhar #endif 7554e4ee71SNavdeep Parhar 7654e4ee71SNavdeep Parhar #include "common/common.h" 7754e4ee71SNavdeep Parhar #include "common/t4_regs.h" 7854e4ee71SNavdeep Parhar #include "common/t4_regs_values.h" 7954e4ee71SNavdeep Parhar #include "common/t4_msg.h" 80671bf2b8SNavdeep Parhar #include "t4_l2t.h" 817951040fSNavdeep Parhar #include "t4_mp_ring.h" 8254e4ee71SNavdeep Parhar 83d14b0ac1SNavdeep Parhar #ifdef T4_PKT_TIMESTAMP 84d14b0ac1SNavdeep Parhar #define RX_COPY_THRESHOLD (MINCLSIZE - 8) 85d14b0ac1SNavdeep Parhar #else 86d14b0ac1SNavdeep Parhar #define RX_COPY_THRESHOLD MINCLSIZE 87d14b0ac1SNavdeep Parhar #endif 88d14b0ac1SNavdeep Parhar 895cdaef71SJohn Baldwin /* Internal mbuf flags stored in PH_loc.eight[1]. */ 90d76bbe17SJohn Baldwin #define MC_NOMAP 0x01 915cdaef71SJohn Baldwin #define MC_RAW_WR 0x02 92bddf7343SJohn Baldwin #define MC_TLS 0x04 935cdaef71SJohn Baldwin 949fb8886bSNavdeep Parhar /* 959fb8886bSNavdeep Parhar * Ethernet frames are DMA'd at this byte offset into the freelist buffer. 
969fb8886bSNavdeep Parhar * 0-7 are valid values. 979fb8886bSNavdeep Parhar */ 98518bca2cSNavdeep Parhar static int fl_pktshift = 0; 992d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0, 1002d714dbcSJohn Baldwin "payload DMA offset in rx buffer (bytes)"); 10154e4ee71SNavdeep Parhar 1029fb8886bSNavdeep Parhar /* 1039fb8886bSNavdeep Parhar * Pad ethernet payload up to this boundary. 1049fb8886bSNavdeep Parhar * -1: driver should figure out a good value. 1051458bff9SNavdeep Parhar * 0: disable padding. 1061458bff9SNavdeep Parhar * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value. 1079fb8886bSNavdeep Parhar */ 108298d969cSNavdeep Parhar int fl_pad = -1; 1092d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0, 1102d714dbcSJohn Baldwin "payload pad boundary (bytes)"); 1119fb8886bSNavdeep Parhar 1129fb8886bSNavdeep Parhar /* 1139fb8886bSNavdeep Parhar * Status page length. 1149fb8886bSNavdeep Parhar * -1: driver should figure out a good value. 1159fb8886bSNavdeep Parhar * 64 or 128 are the only other valid values. 1169fb8886bSNavdeep Parhar */ 11729c229e9SJohn Baldwin static int spg_len = -1; 1182d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0, 1192d714dbcSJohn Baldwin "status page size (bytes)"); 1209fb8886bSNavdeep Parhar 1219fb8886bSNavdeep Parhar /* 1229fb8886bSNavdeep Parhar * Congestion drops. 1239fb8886bSNavdeep Parhar * -1: no congestion feedback (not recommended). 1249fb8886bSNavdeep Parhar * 0: backpressure the channel instead of dropping packets right away. 1259fb8886bSNavdeep Parhar * 1: no backpressure, drop packets for the congested queue immediately. 
1269fb8886bSNavdeep Parhar */ 1279fb8886bSNavdeep Parhar static int cong_drop = 0; 1282d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0, 1292d714dbcSJohn Baldwin "Congestion control for RX queues (0 = backpressure, 1 = drop"); 13054e4ee71SNavdeep Parhar 1311458bff9SNavdeep Parhar /* 1321458bff9SNavdeep Parhar * Deliver multiple frames in the same free list buffer if they fit. 1331458bff9SNavdeep Parhar * -1: let the driver decide whether to enable buffer packing or not. 1341458bff9SNavdeep Parhar * 0: disable buffer packing. 1351458bff9SNavdeep Parhar * 1: enable buffer packing. 1361458bff9SNavdeep Parhar */ 1371458bff9SNavdeep Parhar static int buffer_packing = -1; 1382d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing, 1392d714dbcSJohn Baldwin 0, "Enable buffer packing"); 1401458bff9SNavdeep Parhar 1411458bff9SNavdeep Parhar /* 1421458bff9SNavdeep Parhar * Start next frame in a packed buffer at this boundary. 1431458bff9SNavdeep Parhar * -1: driver should figure out a good value. 144e3207e19SNavdeep Parhar * T4: driver will ignore this and use the same value as fl_pad above. 145e3207e19SNavdeep Parhar * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value. 1461458bff9SNavdeep Parhar */ 1471458bff9SNavdeep Parhar static int fl_pack = -1; 1482d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0, 1492d714dbcSJohn Baldwin "payload pack boundary (bytes)"); 1501458bff9SNavdeep Parhar 15138035ed6SNavdeep Parhar /* 15238035ed6SNavdeep Parhar * Largest rx cluster size that the driver is allowed to allocate. 
15338035ed6SNavdeep Parhar */ 15438035ed6SNavdeep Parhar static int largest_rx_cluster = MJUM16BYTES; 1552d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN, 1562d714dbcSJohn Baldwin &largest_rx_cluster, 0, "Largest rx cluster (bytes)"); 15738035ed6SNavdeep Parhar 15838035ed6SNavdeep Parhar /* 15938035ed6SNavdeep Parhar * Size of cluster allocation that's most likely to succeed. The driver will 16038035ed6SNavdeep Parhar * fall back to this size if it fails to allocate clusters larger than this. 16138035ed6SNavdeep Parhar */ 16238035ed6SNavdeep Parhar static int safest_rx_cluster = PAGE_SIZE; 1632d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN, 1642d714dbcSJohn Baldwin &safest_rx_cluster, 0, "Safe rx cluster (bytes)"); 16538035ed6SNavdeep Parhar 166786099deSNavdeep Parhar #ifdef RATELIMIT 167786099deSNavdeep Parhar /* 168786099deSNavdeep Parhar * Knob to control TCP timestamp rewriting, and the granularity of the tick used 169786099deSNavdeep Parhar * for rewriting. -1 and 0-3 are all valid values. 170786099deSNavdeep Parhar * -1: hardware should leave the TCP timestamps alone. 
171786099deSNavdeep Parhar * 0: 1ms 172786099deSNavdeep Parhar * 1: 100us 173786099deSNavdeep Parhar * 2: 10us 174786099deSNavdeep Parhar * 3: 1us 175786099deSNavdeep Parhar */ 176786099deSNavdeep Parhar static int tsclk = -1; 1772d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, tsclk, CTLFLAG_RDTUN, &tsclk, 0, 1782d714dbcSJohn Baldwin "Control TCP timestamp rewriting when using pacing"); 179786099deSNavdeep Parhar 180786099deSNavdeep Parhar static int eo_max_backlog = 1024 * 1024; 1812d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, eo_max_backlog, CTLFLAG_RDTUN, &eo_max_backlog, 1822d714dbcSJohn Baldwin 0, "Maximum backlog of ratelimited data per flow"); 183786099deSNavdeep Parhar #endif 184786099deSNavdeep Parhar 185d491f8caSNavdeep Parhar /* 186d491f8caSNavdeep Parhar * The interrupt holdoff timers are multiplied by this value on T6+. 187d491f8caSNavdeep Parhar * 1 and 3-17 (both inclusive) are legal values. 188d491f8caSNavdeep Parhar */ 189d491f8caSNavdeep Parhar static int tscale = 1; 1902d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0, 1912d714dbcSJohn Baldwin "Interrupt holdoff timer scale on T6+"); 192d491f8caSNavdeep Parhar 19346f48ee5SNavdeep Parhar /* 19446f48ee5SNavdeep Parhar * Number of LRO entries in the lro_ctrl structure per rx queue. 19546f48ee5SNavdeep Parhar */ 19646f48ee5SNavdeep Parhar static int lro_entries = TCP_LRO_ENTRIES; 1972d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0, 1982d714dbcSJohn Baldwin "Number of LRO entries per RX queue"); 19946f48ee5SNavdeep Parhar 20046f48ee5SNavdeep Parhar /* 20146f48ee5SNavdeep Parhar * This enables presorting of frames before they're fed into tcp_lro_rx. 
20246f48ee5SNavdeep Parhar */ 20346f48ee5SNavdeep Parhar static int lro_mbufs = 0; 2042d714dbcSJohn Baldwin SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0, 2052d714dbcSJohn Baldwin "Enable presorting of LRO frames"); 20646f48ee5SNavdeep Parhar 2077054f6ecSNavdeep Parhar static counter_u64_t pullups; 2087054f6ecSNavdeep Parhar SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, pullups, CTLFLAG_RD, &pullups, 2097054f6ecSNavdeep Parhar "Number of mbuf pullups performed"); 2107054f6ecSNavdeep Parhar 2117054f6ecSNavdeep Parhar static counter_u64_t defrags; 2127054f6ecSNavdeep Parhar SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, defrags, CTLFLAG_RD, &defrags, 2137054f6ecSNavdeep Parhar "Number of mbuf defrags performed"); 2147054f6ecSNavdeep Parhar 2157054f6ecSNavdeep Parhar 216733b9277SNavdeep Parhar static int service_iq(struct sge_iq *, int); 2173098bcfcSNavdeep Parhar static int service_iq_fl(struct sge_iq *, int); 2184d6db4e0SNavdeep Parhar static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t); 2191486d2deSNavdeep Parhar static int eth_rx(struct adapter *, struct sge_rxq *, const struct iq_desc *, 2201486d2deSNavdeep Parhar u_int); 221b2daa9a9SNavdeep Parhar static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int); 222e3207e19SNavdeep Parhar static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *); 22390e7434aSNavdeep Parhar static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t, 22490e7434aSNavdeep Parhar uint16_t, char *); 225fe2ebb76SJohn Baldwin static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *, 226bc14b14dSNavdeep Parhar int, int); 227fe2ebb76SJohn Baldwin static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *); 228348694daSNavdeep Parhar static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 229348694daSNavdeep Parhar struct sge_iq *); 230aa93b99aSNavdeep Parhar static void add_fl_sysctls(struct 
adapter *, struct sysctl_ctx_list *, 231aa93b99aSNavdeep Parhar struct sysctl_oid *, struct sge_fl *); 232733b9277SNavdeep Parhar static int alloc_fwq(struct adapter *); 233733b9277SNavdeep Parhar static int free_fwq(struct adapter *); 23437310a98SNavdeep Parhar static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int, 23537310a98SNavdeep Parhar struct sysctl_oid *); 236fe2ebb76SJohn Baldwin static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int, 237733b9277SNavdeep Parhar struct sysctl_oid *); 238fe2ebb76SJohn Baldwin static int free_rxq(struct vi_info *, struct sge_rxq *); 23909fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD 240fe2ebb76SJohn Baldwin static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int, 241733b9277SNavdeep Parhar struct sysctl_oid *); 242fe2ebb76SJohn Baldwin static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *); 243733b9277SNavdeep Parhar #endif 244733b9277SNavdeep Parhar static int ctrl_eq_alloc(struct adapter *, struct sge_eq *); 245fe2ebb76SJohn Baldwin static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); 246eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 247fe2ebb76SJohn Baldwin static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); 248733b9277SNavdeep Parhar #endif 249fe2ebb76SJohn Baldwin static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *); 250733b9277SNavdeep Parhar static int free_eq(struct adapter *, struct sge_eq *); 251fe2ebb76SJohn Baldwin static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *, 252733b9277SNavdeep Parhar struct sysctl_oid *); 253733b9277SNavdeep Parhar static int free_wrq(struct adapter *, struct sge_wrq *); 254fe2ebb76SJohn Baldwin static int alloc_txq(struct vi_info *, struct sge_txq *, int, 255733b9277SNavdeep Parhar struct sysctl_oid *); 256fe2ebb76SJohn Baldwin static int free_txq(struct vi_info *, struct sge_txq *); 25754e4ee71SNavdeep Parhar static void 
oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); 25854e4ee71SNavdeep Parhar static inline void ring_fl_db(struct adapter *, struct sge_fl *); 259733b9277SNavdeep Parhar static int refill_fl(struct adapter *, struct sge_fl *, int); 260733b9277SNavdeep Parhar static void refill_sfl(void *); 26154e4ee71SNavdeep Parhar static int alloc_fl_sdesc(struct sge_fl *); 2621458bff9SNavdeep Parhar static void free_fl_sdesc(struct adapter *, struct sge_fl *); 26346e1e307SNavdeep Parhar static int find_refill_source(struct adapter *, int, bool); 264733b9277SNavdeep Parhar static void add_fl_to_sfl(struct adapter *, struct sge_fl *); 26554e4ee71SNavdeep Parhar 2667951040fSNavdeep Parhar static inline void get_pkt_gl(struct mbuf *, struct sglist *); 267a4a4ad2dSNavdeep Parhar static inline u_int txpkt_len16(u_int, const u_int); 268a4a4ad2dSNavdeep Parhar static inline u_int txpkt_vm_len16(u_int, const u_int); 26930e3f2b4SNavdeep Parhar static inline void calculate_mbuf_len16(struct mbuf *, bool); 2707951040fSNavdeep Parhar static inline u_int txpkts0_len16(u_int); 2717951040fSNavdeep Parhar static inline u_int txpkts1_len16(void); 2725cdaef71SJohn Baldwin static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int); 273d735920dSNavdeep Parhar static u_int write_txpkt_wr(struct adapter *, struct sge_txq *, struct mbuf *, 274d735920dSNavdeep Parhar u_int); 275472a6004SNavdeep Parhar static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *, 276d735920dSNavdeep Parhar struct mbuf *); 277d735920dSNavdeep Parhar static int add_to_txpkts_vf(struct adapter *, struct sge_txq *, struct mbuf *, 278d735920dSNavdeep Parhar int, bool *); 279d735920dSNavdeep Parhar static int add_to_txpkts_pf(struct adapter *, struct sge_txq *, struct mbuf *, 280d735920dSNavdeep Parhar int, bool *); 281d735920dSNavdeep Parhar static u_int write_txpkts_wr(struct adapter *, struct sge_txq *); 282d735920dSNavdeep Parhar static u_int write_txpkts_vm_wr(struct adapter *, struct sge_txq 
*); 2837951040fSNavdeep Parhar static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int); 28454e4ee71SNavdeep Parhar static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); 2857951040fSNavdeep Parhar static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int); 2867951040fSNavdeep Parhar static inline uint16_t read_hw_cidx(struct sge_eq *); 2877951040fSNavdeep Parhar static inline u_int reclaimable_tx_desc(struct sge_eq *); 2887951040fSNavdeep Parhar static inline u_int total_available_tx_desc(struct sge_eq *); 2897951040fSNavdeep Parhar static u_int reclaim_tx_descs(struct sge_txq *, u_int); 2907951040fSNavdeep Parhar static void tx_reclaim(void *, int); 2917951040fSNavdeep Parhar static __be64 get_flit(struct sglist_seg *, int, int); 292733b9277SNavdeep Parhar static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *, 293733b9277SNavdeep Parhar struct mbuf *); 2941b4cc91fSNavdeep Parhar static int handle_fw_msg(struct sge_iq *, const struct rss_header *, 295733b9277SNavdeep Parhar struct mbuf *); 296069af0ebSJohn Baldwin static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *); 2977951040fSNavdeep Parhar static void wrq_tx_drain(void *, int); 2987951040fSNavdeep Parhar static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *); 29954e4ee71SNavdeep Parhar 30038035ed6SNavdeep Parhar static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS); 301786099deSNavdeep Parhar #ifdef RATELIMIT 302786099deSNavdeep Parhar static inline u_int txpkt_eo_len16(u_int, u_int, u_int); 303786099deSNavdeep Parhar static int ethofld_fw4_ack(struct sge_iq *, const struct rss_header *, 304786099deSNavdeep Parhar struct mbuf *); 305786099deSNavdeep Parhar #endif 306f7dfe243SNavdeep Parhar 30782eff304SNavdeep Parhar static counter_u64_t extfree_refs; 30882eff304SNavdeep Parhar static counter_u64_t extfree_rels; 30982eff304SNavdeep Parhar 310671bf2b8SNavdeep Parhar an_handler_t t4_an_handler; 311671bf2b8SNavdeep Parhar 
fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];

/*
 * Install (or, with h == NULL, remove) the async-notification handler.
 * Publication is a single release-store so concurrent readers see either the
 * old or the new pointer, never a torn value.
 */
void
t4_register_an_handler(an_handler_t h)
{
	uintptr_t *loc;

	/* Only NULL -> handler or handler -> NULL transitions are legal. */
	MPASS(h == NULL || t4_an_handler == NULL);

	loc = (uintptr_t *)&t4_an_handler;
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

/*
 * Install the handler for firmware messages of the given FW6 type.
 */
void
t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
{
	uintptr_t *loc;

	MPASS(type < nitems(t4_fw_msg_handler));
	MPASS(h == NULL || t4_fw_msg_handler[type] == NULL);
	/*
	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
	 * handler dispatch table.  Reject any attempt to install a handler for
	 * this subtype.
	 */
	MPASS(type != FW_TYPE_RSSCPL);
	MPASS(type != FW6_TYPE_RSSCPL);

	loc = (uintptr_t *)&t4_fw_msg_handler[type];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

/*
 * Install the handler for the given CPL opcode.
 */
void
t4_register_cpl_handler(int opcode, cpl_handler_t h)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(h == NULL || t4_cpl_handler[opcode] == NULL);

	loc = (uintptr_t *)&t4_cpl_handler[opcode];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

/*
 * Shared CPL_SET_TCB_RPL handler: picks the per-consumer handler based on the
 * cookie carried in the reply (or on the tid range, for filter replies).
 */
static int
set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	u_int tid;
	int cookie;

	MPASS(m == NULL);

	tid = GET_TID(cpl);
	if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) {
		/*
		 * The return code for filter-write is put in the CPL cookie so
		 * we have to rely on the hardware tid (is_ftid) to determine
		 * that this is a response to a filter.
		 */
		cookie = CPL_COOKIE_FILTER;
	} else {
		cookie = G_COOKIE(cpl->cookie);
	}
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < nitems(set_tcb_rpl_handlers));

	return (set_tcb_rpl_handlers[cookie](iq, rss, m));
}

/*
 * Shared CPL_L2T_WRITE_RPL handler: a synchronous write reply goes to TOM,
 * anything else to the filter code.
 */
static int
l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	unsigned int cookie;

	MPASS(m == NULL);

	cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER;
	return (l2t_write_rpl_handlers[cookie](iq, rss, m));
}

/*
 * Shared CPL_ACT_OPEN_RPL handler: dispatch on the cookie embedded in the
 * atid of the reply.
 */
static int
act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));

	MPASS(m == NULL);
	MPASS(cookie != CPL_COOKIE_RESERVED);

	return (act_open_rpl_handlers[cookie](iq, rss, m));
}

/*
 * Shared CPL_ABORT_RPL_RSS handler: hashfilter adapters route to the
 * hashfilter code, everything else to TOM.
 */
static int
abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	u_int cookie;

	MPASS(m == NULL);
	if (is_hashfilter(sc))
		cookie = CPL_COOKIE_HASHFILTER;
	else
		cookie = CPL_COOKIE_TOM;

	return (abort_rpl_rss_handlers[cookie](iq, rss, m));
}

/*
 * Shared CPL_FW4_ACK handler: ethofld tids go to the rate-limit code,
 * everything else to TOM.
 */
static int
fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	u_int cookie;

	MPASS(m == NULL);
	if (is_etid(sc, tid))
		cookie = CPL_COOKIE_ETHOFLD;
	else
		cookie = CPL_COOKIE_TOM;

	return (fw4_ack_handlers[cookie](iq, rss, m));
}

/*
 * Install the demultiplexing handlers for the CPL opcodes that are shared
 * between multiple consumers (filter, TOM, hashfilter, ethofld).
 */
static void
t4_init_shared_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler);
	t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler);
	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
	t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
}

/*
 * Install a per-cookie handler for one of the shared CPL opcodes above.
 * The top-level handler for the opcode must already be installed.
 */
void
t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < NUM_CPL_COOKIES);
	MPASS(t4_cpl_handler[opcode] != NULL);

	switch (opcode) {
	case CPL_SET_TCB_RPL:
		loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie];
		break;
	case CPL_L2T_WRITE_RPL:
		loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie];
		break;
	case CPL_ACT_OPEN_RPL:
		loc = (uintptr_t *)&act_open_rpl_handlers[cookie];
		break;
	case CPL_ABORT_RPL_RSS:
		loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie];
		break;
	case CPL_FW4_ACK:
		loc = (uintptr_t *)&fw4_ack_handlers[cookie];
		break;
	default:
		MPASS(0);
		return;
	}
	MPASS(h == NULL || *loc == (uintptr_t)NULL);
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

/*
 * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
 */
void
t4_sge_modload(void)
{

	if (fl_pktshift < 0 || fl_pktshift > 7) {
		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
		    " using 0 instead.\n", fl_pktshift);
		fl_pktshift = 0;
	}

	if (spg_len != 64 && spg_len != 128) {
		int len;

#if defined(__i386__) || defined(__amd64__)
		/* Match the status page to the CPU cache line size. */
		len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
		len = 64;
#endif
		if (spg_len != -1) {
			printf("Invalid hw.cxgbe.spg_len value (%d),"
			    " using %d instead.\n", spg_len, len);
		}
		spg_len = len;
	}

	if (cong_drop < -1 || cong_drop > 1) {
		printf("Invalid hw.cxgbe.cong_drop value (%d),"
		    " using 0 instead.\n", cong_drop);
		cong_drop = 0;
	}

	/* Legal values are 1 and 3-17 (see tscale's comment above). */
	if (tscale != 1 && (tscale < 3 || tscale > 17)) {
		printf("Invalid hw.cxgbe.tscale value (%d),"
		    " using 1 instead.\n", tscale);
		tscale = 1;
	}

	if (largest_rx_cluster != MCLBYTES &&
#if MJUMPAGESIZE != MCLBYTES
	    largest_rx_cluster != MJUMPAGESIZE &&
#endif
	    largest_rx_cluster != MJUM9BYTES &&
	    largest_rx_cluster != MJUM16BYTES) {
		printf("Invalid hw.cxgbe.largest_rx_cluster value (%d),"
		    " using %d instead.\n", largest_rx_cluster, MJUM16BYTES);
		largest_rx_cluster = MJUM16BYTES;
	}

	if (safest_rx_cluster != MCLBYTES &&
#if MJUMPAGESIZE != MCLBYTES
	    safest_rx_cluster != MJUMPAGESIZE &&
#endif
	    safest_rx_cluster != MJUM9BYTES &&
	    safest_rx_cluster != MJUM16BYTES) {
		printf("Invalid hw.cxgbe.safest_rx_cluster value (%d),"
		    " using %d instead.\n", safest_rx_cluster, MJUMPAGESIZE);
		safest_rx_cluster = MJUMPAGESIZE;
	}

	extfree_refs = counter_u64_alloc(M_WAITOK);
	extfree_rels = counter_u64_alloc(M_WAITOK);
	pullups = counter_u64_alloc(M_WAITOK);
	defrags = counter_u64_alloc(M_WAITOK);
	counter_u64_zero(extfree_refs);
	counter_u64_zero(extfree_rels);
	counter_u64_zero(pullups);
	counter_u64_zero(defrags);

	t4_init_shared_cpl_handlers();
	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
#ifdef RATELIMIT
	t4_register_shared_cpl_handler(CPL_FW4_ACK, ethofld_fw4_ack,
	    CPL_COOKIE_ETHOFLD);
#endif
	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
}

/*
 * Called on MOD_UNLOAD.  Releases the counters allocated in t4_sge_modload.
 */
void
t4_sge_modunload(void)
{

	counter_u64_free(extfree_refs);
	counter_u64_free(extfree_rels);
	counter_u64_free(pullups);
	counter_u64_free(defrags);
}

/*
 * Net count of outstanding references on rx buffer clusters lent out to the
 * stack: refs taken minus refs released.  Note that the two counter(9)s are
 * fetched separately, so the result is only approximate while traffic is
 * flowing.
 */
uint64_t
t4_sge_extfree_refs(void)
{
	uint64_t refs, rels;

	rels = counter_u64_fetch(extfree_rels);
	refs = counter_u64_fetch(extfree_refs);

	return (refs - rels);
}

/* max 4096 */
#define MAX_PACK_BOUNDARY 512

/*
 * Validate the fl_pad and fl_pack tunables and program the SGE pad boundary
 * (SGE_CONTROL) and, on T5 and later, the pack boundary (SGE_CONTROL2).
 * Invalid tunable values are reported and replaced with safe defaults.
 */
static inline void
setup_pad_and_pack_boundaries(struct adapter *sc)
{
	uint32_t v, m;
	int pad, pack, pad_shift;

	/* T6 and later use a different shift for the pad boundary field. */
	pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
	    X_INGPADBOUNDARY_SHIFT;
	pad = fl_pad;
	if (fl_pad < (1 << pad_shift) ||
	    fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
	    !powerof2(fl_pad)) {
		/*
		 * If there is any chance that we might use buffer packing and
		 * the chip is a T4, then pick 64 as the pad/pack boundary.  Set
		 * it to the minimum allowed in all other cases.
		 */
		pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;

		/*
		 * For fl_pad = 0 we'll still write a reasonable value to the
		 * register but all the freelists will opt out of padding.
		 * We'll complain here only if the user tried to set it to a
		 * value greater than 0 that was invalid.
		 */
		if (fl_pad > 0) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
			    " (%d), using %d instead.\n", fl_pad, pad);
		}
	}
	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
	v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	if (is_t4(sc)) {
		/* T4 has a single pad/pack boundary; fl_pack cannot differ. */
		if (fl_pack != -1 && fl_pack != pad) {
			/* Complain but carry on. */
			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
			    " using %d instead.\n", fl_pack, pad);
		}
		return;
	}

	pack = fl_pack;
	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
	    !powerof2(fl_pack)) {
		/* Derive a sane pack boundary from the PCIe MPS, clamped. */
		if (sc->params.pci.mps > MAX_PACK_BOUNDARY)
			pack = MAX_PACK_BOUNDARY;
		else
			pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
		MPASS(powerof2(pack));
		if (pack < 16)
			pack = 16;
		if (pack == 32)
			pack = 64;	/* 32 is not a valid pack boundary */
		if (pack > 4096)
			pack = 4096;
		if (fl_pack != -1) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
			    " (%d), using %d instead.\n", fl_pack, pack);
		}
	}
	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
	if (pack == 16)
		v = V_INGPACKBOUNDARY(0);
	else
		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);

	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
}

/*
 * Program the global SGE/ULP/TP settings that the driver relies on.  Must run
 * on the master PF only (asserted below).
 *
 * adap->params.vpd.cclk must be set up before this is called.
 */
void
t4_tweak_chip_settings(struct adapter *sc)
{
	int i, reg;
	uint32_t v, m;
	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32};	/* 63 max */
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sw_buf_sizes[] = {
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};

	KASSERT(sc->flags & MASTER_PF,
	    ("%s: trying to change chip settings when not master.", __func__));

	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
	    V_EGRSTATUSPAGESIZE(spg_len == 128);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	setup_pad_and_pack_boundaries(sc);

	/* Same host page size for every PF. */
	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);

	/*
	 * Freelist buffer sizes: two fixed entries, then a pair of entries
	 * per software cluster size (full size and size less cluster
	 * metadata, for buffer packing).
	 */
	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096);
	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536);
	reg = A_SGE_FL_BUFFER_SIZE2;
	for (i = 0; i < nitems(sw_buf_sizes); i++) {
		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
		t4_write_reg(sc, reg, sw_buf_sizes[i]);
		reg += 4;
		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
		t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE);
		reg += 4;
	}

	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);

	/*
	 * Clip the interrupt holdoff timers (microseconds) to what the chip
	 * can represent at its core clock, while keeping them in increasing
	 * order.
	 */
	KASSERT(intr_timer[0] <= timer_max,
	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
	    timer_max));
	for (i = 1; i < nitems(intr_timer); i++) {
		KASSERT(intr_timer[i] >= intr_timer[i - 1],
		    ("%s: timers not listed in increasing order (%d)",
		    __func__, i));

		while (intr_timer[i] > timer_max) {
			if (i == nitems(intr_timer) - 1) {
				intr_timer[i] = timer_max;
				break;
			}
			/* Pull this timer toward its predecessor. */
			intr_timer[i] += intr_timer[i - 1];
			intr_timer[i] /= 2;
		}
	}

	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);

	if (chip_id(sc) >= CHELSIO_T6) {
		/* tscale == 1 means no scaling (field written as 0). */
		m = V_TSCALE(M_TSCALE);
		if (tscale == 1)
			v = 0;
		else
			v = V_TSCALE(tscale - 2);
		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);

		if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
			m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(M_WRTHRTHRESH);
			t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
			v &= ~m;
			v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(16);
			t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
		}
	}

	/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);

	/*
	 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP.  These have been
	 * chosen with MAXPHYS = 128K in mind.  The largest DDP buffer that we
	 * may have to deal with is MAXPHYS + 1 page.
	 */
	v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);

	/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
	m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
}

/*
 * SGE wants the buffer to be at least 64B and then a multiple of 16.
Its
 * address must be 16B aligned.  If padding is in use the buffer's start and end
 * need to be aligned to the pad boundary as well.  We'll just make sure that
 * the size is a multiple of the pad boundary here, it is up to the buffer
 * allocation code to make sure the start of the buffer is aligned.
 */
static inline int
hwsz_ok(struct adapter *sc, int hwsz)
{
	/* With padding disabled only the base 16B alignment is required. */
	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;

	return (hwsz >= 64 && (hwsz & mask) == 0);
}

/*
 * Verify that the chip's SGE/ULP/TP configuration matches what this driver
 * expects, and build the rx buffer (cluster zone -> hw freelist buffer size)
 * lookup table.  Returns 0 on success, EINVAL if any register check fails.
 *
 * XXX: driver really should be able to deal with unexpected settings.
 */
int
t4_read_chip_settings(struct adapter *sc)
{
	struct sge *s = &sc->sge;
	struct sge_params *sp = &sc->params.sge;
	int i, j, n, rc = 0;
	uint32_t m, v, r;
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sw_buf_sizes[] = {	/* Sorted by size */
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};
	struct rx_buf_info *rxb;

	m = F_RXPKTCPLMODE;
	v = F_RXPKTCPLMODE;
	r = sc->params.sge.sge_control;
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
		rc = EINVAL;
	}

	/*
	 * If this changes then every single use of PAGE_SHIFT in the driver
	 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
	 */
	if (sp->page_shift != PAGE_SHIFT) {
		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
		rc = EINVAL;
	}

	/*
	 * For each software cluster size find the hw freelist buffer size
	 * index that matches it exactly (hwidx1) and the largest usable
	 * payload size when the cluster also carries its metadata, for
	 * buffer packing (hwidx2/size2).
	 */
	s->safe_zidx = -1;
	rxb = &s->rx_buf_info[0];
	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
		rxb->size1 = sw_buf_sizes[i];
		rxb->zone = m_getzone(rxb->size1);
		rxb->type = m_gettype(rxb->size1);
		rxb->size2 = 0;
		rxb->hwidx1 = -1;
		rxb->hwidx2 = -1;
		for (j = 0; j < SGE_FLBUF_SIZES; j++) {
			int hwsize = sp->sge_fl_buffer_size[j];

			if (!hwsz_ok(sc, hwsize))
				continue;

			/* hwidx for size1 */
			if (rxb->hwidx1 == -1 && rxb->size1 == hwsize)
				rxb->hwidx1 = j;

			/* hwidx for size2 (buffer packing) */
			if (rxb->size1 - CL_METADATA_SIZE < hwsize)
				continue;
			/* n is the leftover space in the cluster */
			n = rxb->size1 - hwsize - CL_METADATA_SIZE;
			if (n == 0) {
				rxb->hwidx2 = j;
				rxb->size2 = hwsize;
				break;	/* stop looking */
			}
			if (rxb->hwidx2 != -1) {
				/* Prefer the candidate that wastes less. */
				if (n < sp->sge_fl_buffer_size[rxb->hwidx2] -
				    hwsize - CL_METADATA_SIZE) {
					rxb->hwidx2 = j;
					rxb->size2 = hwsize;
				}
			} else if (n <= 2 * CL_METADATA_SIZE) {
				rxb->hwidx2 = j;
				rxb->size2 = hwsize;
			}
		}
		if (rxb->hwidx2 != -1)
			sc->flags |= BUF_PACKING_OK;
		if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster)
			s->safe_zidx = i;
	}

	/* The remaining checks are for registers a VF cannot access. */
	if (sc->flags & IS_VF)
		return (0);

	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
	if (r != v) {
		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
		rc = EINVAL;
	}

	m = v = F_TDDPTAGTCB;
	r = t4_read_reg(sc, A_ULP_RX_CTL);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
		rc = EINVAL;
	}

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	r = t4_read_reg(sc, A_TP_PARA_REG5);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
		rc = EINVAL;
	}

	t4_init_tp_params(sc, 1);

	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);

	return (rc);
}

/*
 * Create the adapter's main DMA tag, parent of all other tags.  Returns 0 or
 * an error from bus_dma_tag_create().
 */
int
t4_create_dma_tag(struct adapter *sc)
{
	int rc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
	    NULL, &sc->dmat);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create main DMA tag: %d\n", rc);
	}

	return (rc);
}

/*
 * Register read-only sysctls that report the SGE parameters in use.
 */
void
t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{
	struct sge_params *sp = &sc->params.sge;

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    sysctl_bufsizes, "A", "freelist buffer sizes");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
	    NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
	    NULL, sp->pad_boundary, "payload pad boundary (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
	    NULL, sp->spg_len, "status page size (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
	    NULL, cong_drop, "congestion drop setting");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
	    NULL, sp->pack_boundary, "payload pack boundary (bytes)");
}

/*
 * Destroy the main DMA tag.  Safe to call even if the tag was never created.
 */
int
t4_destroy_dma_tag(struct adapter *sc)
{
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);

	return (0);
}

/*
 * Allocate and initialize the firmware event queue, control queues, and special
 * purpose rx queues owned by the adapter.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	sysctl_ctx_init(&sc->ctx);
	sc->flags |= ADAP_SYSCTL_CTX;

	/*
	 * Firmware event queue
	 */
	rc = alloc_fwq(sc);
	if (rc != 0)
		return (rc);

	/*
	 * That's all for the VF driver.
	 */
	if (sc->flags & IS_VF)
		return (rc);

	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	/*
	 * XXX: General purpose rx queues, one per port.
	 */

	/*
	 * Control queues, one per port.
	 */
	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues");
	for_each_port(sc, i) {
		struct sge_wrq *ctrlq = &sc->sge.ctrlq[i];

		rc = alloc_ctrlq(sc, ctrlq, i, oid);
		if (rc != 0)
			return (rc);
	}

	return (rc);
}

/*
 * Free the adapter-owned queues set up above (fwq and, for a PF, the per-port
 * control queues) and their sysctl context.
 *
 * Idempotent
 */
int
t4_teardown_adapter_queues(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* Do this before freeing the queue */
	if (sc->flags & ADAP_SYSCTL_CTX) {
		sysctl_ctx_free(&sc->ctx);
		sc->flags &= ~ADAP_SYSCTL_CTX;
	}

	if (!(sc->flags & IS_VF)) {
		for_each_port(sc, i)
			free_wrq(sc, &sc->sge.ctrlq[i]);
	}
	free_fwq(sc);

	return (0);
}

/* Maximum payload that could arrive with a single iq descriptor.
*/ 10628340ece5SNavdeep Parhar static inline int 10636a59b994SNavdeep Parhar max_rx_payload(struct adapter *sc, struct ifnet *ifp, const bool ofld) 10648340ece5SNavdeep Parhar { 10656a59b994SNavdeep Parhar int maxp; 10668340ece5SNavdeep Parhar 106738035ed6SNavdeep Parhar /* large enough even when hw VLAN extraction is disabled */ 10686a59b994SNavdeep Parhar maxp = sc->params.sge.fl_pktshift + ETHER_HDR_LEN + 10696a59b994SNavdeep Parhar ETHER_VLAN_ENCAP_LEN + ifp->if_mtu; 10706a59b994SNavdeep Parhar if (ofld && sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS && 10716a59b994SNavdeep Parhar maxp < sc->params.tp.max_rx_pdu) 10726a59b994SNavdeep Parhar maxp = sc->params.tp.max_rx_pdu; 10736a59b994SNavdeep Parhar return (maxp); 107438035ed6SNavdeep Parhar } 10756eb3180fSNavdeep Parhar 1076733b9277SNavdeep Parhar int 1077fe2ebb76SJohn Baldwin t4_setup_vi_queues(struct vi_info *vi) 1078733b9277SNavdeep Parhar { 1079f549e352SNavdeep Parhar int rc = 0, i, intr_idx, iqidx; 1080733b9277SNavdeep Parhar struct sge_rxq *rxq; 1081733b9277SNavdeep Parhar struct sge_txq *txq; 108209fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD 1083733b9277SNavdeep Parhar struct sge_ofld_rxq *ofld_rxq; 1084eff62dbaSNavdeep Parhar #endif 1085eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 1086733b9277SNavdeep Parhar struct sge_wrq *ofld_txq; 1087298d969cSNavdeep Parhar #endif 1088298d969cSNavdeep Parhar #ifdef DEV_NETMAP 108962291463SNavdeep Parhar int saved_idx; 1090298d969cSNavdeep Parhar struct sge_nm_rxq *nm_rxq; 1091298d969cSNavdeep Parhar struct sge_nm_txq *nm_txq; 1092733b9277SNavdeep Parhar #endif 1093733b9277SNavdeep Parhar char name[16]; 1094fe2ebb76SJohn Baldwin struct port_info *pi = vi->pi; 1095733b9277SNavdeep Parhar struct adapter *sc = pi->adapter; 1096fe2ebb76SJohn Baldwin struct ifnet *ifp = vi->ifp; 1097fe2ebb76SJohn Baldwin struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev); 1098733b9277SNavdeep Parhar struct sysctl_oid_list *children = 
SYSCTL_CHILDREN(oid); 10996a59b994SNavdeep Parhar int maxp; 1100733b9277SNavdeep Parhar 1101733b9277SNavdeep Parhar /* Interrupt vector to start from (when using multiple vectors) */ 1102f549e352SNavdeep Parhar intr_idx = vi->first_intr; 1103fe2ebb76SJohn Baldwin 1104fe2ebb76SJohn Baldwin #ifdef DEV_NETMAP 110562291463SNavdeep Parhar saved_idx = intr_idx; 110662291463SNavdeep Parhar if (ifp->if_capabilities & IFCAP_NETMAP) { 110762291463SNavdeep Parhar 110862291463SNavdeep Parhar /* netmap is supported with direct interrupts only. */ 1109f549e352SNavdeep Parhar MPASS(!forwarding_intr_to_fwq(sc)); 111062291463SNavdeep Parhar 1111fe2ebb76SJohn Baldwin /* 1112fe2ebb76SJohn Baldwin * We don't have buffers to back the netmap rx queues 1113fe2ebb76SJohn Baldwin * right now so we create the queues in a way that 1114fe2ebb76SJohn Baldwin * doesn't set off any congestion signal in the chip. 1115fe2ebb76SJohn Baldwin */ 111662291463SNavdeep Parhar oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq", 11177029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues"); 1118fe2ebb76SJohn Baldwin for_each_nm_rxq(vi, i, nm_rxq) { 1119fe2ebb76SJohn Baldwin rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid); 1120fe2ebb76SJohn Baldwin if (rc != 0) 1121fe2ebb76SJohn Baldwin goto done; 1122fe2ebb76SJohn Baldwin intr_idx++; 1123fe2ebb76SJohn Baldwin } 1124fe2ebb76SJohn Baldwin 112562291463SNavdeep Parhar oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq", 11267029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues"); 1127fe2ebb76SJohn Baldwin for_each_nm_txq(vi, i, nm_txq) { 1128f549e352SNavdeep Parhar iqidx = vi->first_nm_rxq + (i % vi->nnmrxq); 1129f549e352SNavdeep Parhar rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid); 1130fe2ebb76SJohn Baldwin if (rc != 0) 1131fe2ebb76SJohn Baldwin goto done; 1132fe2ebb76SJohn Baldwin } 1133fe2ebb76SJohn Baldwin } 113462291463SNavdeep Parhar 113562291463SNavdeep Parhar /* Normal rx queues and netmap rx queues 
share the same interrupts. */ 113662291463SNavdeep Parhar intr_idx = saved_idx; 1137fe2ebb76SJohn Baldwin #endif 1138733b9277SNavdeep Parhar 1139733b9277SNavdeep Parhar /* 1140f549e352SNavdeep Parhar * Allocate rx queues first because a default iqid is required when 1141f549e352SNavdeep Parhar * creating a tx queue. 1142733b9277SNavdeep Parhar */ 11436a59b994SNavdeep Parhar maxp = max_rx_payload(sc, ifp, false); 1144fe2ebb76SJohn Baldwin oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq", 11457029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues"); 1146fe2ebb76SJohn Baldwin for_each_rxq(vi, i, rxq) { 114754e4ee71SNavdeep Parhar 1148fe2ebb76SJohn Baldwin init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq); 114954e4ee71SNavdeep Parhar 115054e4ee71SNavdeep Parhar snprintf(name, sizeof(name), "%s rxq%d-fl", 1151fe2ebb76SJohn Baldwin device_get_nameunit(vi->dev), i); 1152fe2ebb76SJohn Baldwin init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name); 115354e4ee71SNavdeep Parhar 1154f549e352SNavdeep Parhar rc = alloc_rxq(vi, rxq, 1155f549e352SNavdeep Parhar forwarding_intr_to_fwq(sc) ? 
-1 : intr_idx, i, oid); 115654e4ee71SNavdeep Parhar if (rc != 0) 115754e4ee71SNavdeep Parhar goto done; 1158733b9277SNavdeep Parhar intr_idx++; 1159733b9277SNavdeep Parhar } 116062291463SNavdeep Parhar #ifdef DEV_NETMAP 116162291463SNavdeep Parhar if (ifp->if_capabilities & IFCAP_NETMAP) 116262291463SNavdeep Parhar intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq); 116362291463SNavdeep Parhar #endif 116409fe6320SNavdeep Parhar #ifdef TCP_OFFLOAD 11656a59b994SNavdeep Parhar maxp = max_rx_payload(sc, ifp, true); 1166fe2ebb76SJohn Baldwin oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq", 11677029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queues for offloaded TCP connections"); 1168fe2ebb76SJohn Baldwin for_each_ofld_rxq(vi, i, ofld_rxq) { 1169733b9277SNavdeep Parhar 117008cd1f11SNavdeep Parhar init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx, 1171fe2ebb76SJohn Baldwin vi->qsize_rxq); 1172733b9277SNavdeep Parhar 1173733b9277SNavdeep Parhar snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", 1174fe2ebb76SJohn Baldwin device_get_nameunit(vi->dev), i); 1175fe2ebb76SJohn Baldwin init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name); 1176733b9277SNavdeep Parhar 1177f549e352SNavdeep Parhar rc = alloc_ofld_rxq(vi, ofld_rxq, 1178f549e352SNavdeep Parhar forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid); 1179733b9277SNavdeep Parhar if (rc != 0) 1180733b9277SNavdeep Parhar goto done; 1181733b9277SNavdeep Parhar intr_idx++; 1182733b9277SNavdeep Parhar } 1183733b9277SNavdeep Parhar #endif 1184733b9277SNavdeep Parhar 1185733b9277SNavdeep Parhar /* 1186f549e352SNavdeep Parhar * Now the tx queues. 
1187733b9277SNavdeep Parhar */ 11887029da5cSPawel Biernacki oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", 11897029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues"); 1190fe2ebb76SJohn Baldwin for_each_txq(vi, i, txq) { 1191f549e352SNavdeep Parhar iqidx = vi->first_rxq + (i % vi->nrxq); 119254e4ee71SNavdeep Parhar snprintf(name, sizeof(name), "%s txq%d", 1193fe2ebb76SJohn Baldwin device_get_nameunit(vi->dev), i); 1194f549e352SNavdeep Parhar init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, 1195f549e352SNavdeep Parhar sc->sge.rxq[iqidx].iq.cntxt_id, name); 119654e4ee71SNavdeep Parhar 1197fe2ebb76SJohn Baldwin rc = alloc_txq(vi, txq, i, oid); 119854e4ee71SNavdeep Parhar if (rc != 0) 119954e4ee71SNavdeep Parhar goto done; 120054e4ee71SNavdeep Parhar } 1201eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 1202fe2ebb76SJohn Baldwin oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq", 12037029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues for TOE/ETHOFLD"); 1204fe2ebb76SJohn Baldwin for_each_ofld_txq(vi, i, ofld_txq) { 1205298d969cSNavdeep Parhar struct sysctl_oid *oid2; 1206733b9277SNavdeep Parhar 1207733b9277SNavdeep Parhar snprintf(name, sizeof(name), "%s ofld_txq%d", 1208fe2ebb76SJohn Baldwin device_get_nameunit(vi->dev), i); 1209c3a88be4SNavdeep Parhar if (vi->nofldrxq > 0) { 1210eff62dbaSNavdeep Parhar iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq); 1211c3a88be4SNavdeep Parhar init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, 1212c3a88be4SNavdeep Parhar pi->tx_chan, sc->sge.ofld_rxq[iqidx].iq.cntxt_id, 1213c3a88be4SNavdeep Parhar name); 1214c3a88be4SNavdeep Parhar } else { 1215eff62dbaSNavdeep Parhar iqidx = vi->first_rxq + (i % vi->nrxq); 1216c3a88be4SNavdeep Parhar init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, 1217c3a88be4SNavdeep Parhar pi->tx_chan, sc->sge.rxq[iqidx].iq.cntxt_id, name); 1218c3a88be4SNavdeep Parhar } 1219733b9277SNavdeep Parhar 1220733b9277SNavdeep 
		snprintf(name, sizeof(name), "%d", i);
		oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload tx queue");

		rc = alloc_wrq(sc, vi, ofld_txq, oid2);
		if (rc != 0)
			goto done;
	}
#endif
done:
	/* Partial setup is undone here; the caller sees an all-or-nothing result. */
	if (rc)
		t4_teardown_vi_queues(vi);

	return (rc);
}

/*
 * Idempotent.
 *
 * Frees every queue that t4_setup_vi_queues allocated for this VI: the sysctl
 * context first (so no sysctl handler can touch a queue mid-teardown), then
 * netmap queues, then tx queues, and finally rx queues.
 */
int
t4_teardown_vi_queues(struct vi_info *vi)
{
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_wrq *ofld_txq;
#endif
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#ifdef DEV_NETMAP
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif

	/* Do this before freeing the queues */
	if (vi->flags & VI_SYSCTL_CTX) {
		sysctl_ctx_free(&vi->ctx);
		vi->flags &= ~VI_SYSCTL_CTX;
	}

#ifdef DEV_NETMAP
	if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
		for_each_nm_txq(vi, i, nm_txq) {
			free_nm_txq(vi, nm_txq);
		}

		for_each_nm_rxq(vi, i, nm_rxq) {
			free_nm_rxq(vi, nm_rxq);
		}
	}
#endif

	/*
	 * Take down all the tx queues first, as they reference the rx queues
	 * (for egress updates, etc.).
	 */

	for_each_txq(vi, i, txq) {
		free_txq(vi, txq);
	}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	for_each_ofld_txq(vi, i, ofld_txq) {
		free_wrq(sc, ofld_txq);
	}
#endif

	/*
	 * Then take down the rx queues.
	 */

	for_each_rxq(vi, i, rxq) {
		free_rxq(vi, rxq);
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		free_ofld_rxq(vi, ofld_rxq);
	}
#endif

	return (0);
}

/*
 * Interrupt handler when the driver is using only 1 interrupt.  This is a very
This is a very 13083098bcfcSNavdeep Parhar * unusual scenario. 13093098bcfcSNavdeep Parhar * 13103098bcfcSNavdeep Parhar * a) Deals with errors, if any. 13113098bcfcSNavdeep Parhar * b) Services firmware event queue, which is taking interrupts for all other 13123098bcfcSNavdeep Parhar * queues. 1313733b9277SNavdeep Parhar */ 131454e4ee71SNavdeep Parhar void 131554e4ee71SNavdeep Parhar t4_intr_all(void *arg) 131654e4ee71SNavdeep Parhar { 131754e4ee71SNavdeep Parhar struct adapter *sc = arg; 1318733b9277SNavdeep Parhar struct sge_iq *fwq = &sc->sge.fwq; 131954e4ee71SNavdeep Parhar 13203098bcfcSNavdeep Parhar MPASS(sc->intr_count == 1); 13213098bcfcSNavdeep Parhar 13221dca7005SNavdeep Parhar if (sc->intr_type == INTR_INTX) 13231dca7005SNavdeep Parhar t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 13241dca7005SNavdeep Parhar 132554e4ee71SNavdeep Parhar t4_intr_err(arg); 13263098bcfcSNavdeep Parhar t4_intr_evt(fwq); 132754e4ee71SNavdeep Parhar } 132854e4ee71SNavdeep Parhar 13293098bcfcSNavdeep Parhar /* 13303098bcfcSNavdeep Parhar * Interrupt handler for errors (installed directly when multiple interrupts are 13313098bcfcSNavdeep Parhar * being used, or called by t4_intr_all). 
13323098bcfcSNavdeep Parhar */ 133354e4ee71SNavdeep Parhar void 133454e4ee71SNavdeep Parhar t4_intr_err(void *arg) 133554e4ee71SNavdeep Parhar { 133654e4ee71SNavdeep Parhar struct adapter *sc = arg; 1337dd3b96ecSNavdeep Parhar uint32_t v; 1338cb7c3f12SNavdeep Parhar const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; 133954e4ee71SNavdeep Parhar 1340cb7c3f12SNavdeep Parhar if (sc->flags & ADAP_ERR) 1341cb7c3f12SNavdeep Parhar return; 1342cb7c3f12SNavdeep Parhar 1343dd3b96ecSNavdeep Parhar v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE)); 1344dd3b96ecSNavdeep Parhar if (v & F_PFSW) { 1345dd3b96ecSNavdeep Parhar sc->swintr++; 1346dd3b96ecSNavdeep Parhar t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v); 1347dd3b96ecSNavdeep Parhar } 1348dd3b96ecSNavdeep Parhar 1349cb7c3f12SNavdeep Parhar t4_slow_intr_handler(sc, verbose); 135054e4ee71SNavdeep Parhar } 135154e4ee71SNavdeep Parhar 13523098bcfcSNavdeep Parhar /* 13533098bcfcSNavdeep Parhar * Interrupt handler for iq-only queues. The firmware event queue is the only 13543098bcfcSNavdeep Parhar * such queue right now. 13553098bcfcSNavdeep Parhar */ 135654e4ee71SNavdeep Parhar void 135754e4ee71SNavdeep Parhar t4_intr_evt(void *arg) 135854e4ee71SNavdeep Parhar { 135954e4ee71SNavdeep Parhar struct sge_iq *iq = arg; 13602be67d29SNavdeep Parhar 1361733b9277SNavdeep Parhar if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1362733b9277SNavdeep Parhar service_iq(iq, 0); 1363da6e3387SNavdeep Parhar (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 13642be67d29SNavdeep Parhar } 13652be67d29SNavdeep Parhar } 13662be67d29SNavdeep Parhar 13673098bcfcSNavdeep Parhar /* 13683098bcfcSNavdeep Parhar * Interrupt handler for iq+fl queues. 
13693098bcfcSNavdeep Parhar */ 1370733b9277SNavdeep Parhar void 1371733b9277SNavdeep Parhar t4_intr(void *arg) 13722be67d29SNavdeep Parhar { 13732be67d29SNavdeep Parhar struct sge_iq *iq = arg; 1374733b9277SNavdeep Parhar 1375733b9277SNavdeep Parhar if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 13763098bcfcSNavdeep Parhar service_iq_fl(iq, 0); 1377da6e3387SNavdeep Parhar (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1378733b9277SNavdeep Parhar } 1379733b9277SNavdeep Parhar } 1380733b9277SNavdeep Parhar 13813098bcfcSNavdeep Parhar #ifdef DEV_NETMAP 13823098bcfcSNavdeep Parhar /* 13833098bcfcSNavdeep Parhar * Interrupt handler for netmap rx queues. 13843098bcfcSNavdeep Parhar */ 13853098bcfcSNavdeep Parhar void 13863098bcfcSNavdeep Parhar t4_nm_intr(void *arg) 13873098bcfcSNavdeep Parhar { 13883098bcfcSNavdeep Parhar struct sge_nm_rxq *nm_rxq = arg; 13893098bcfcSNavdeep Parhar 13903098bcfcSNavdeep Parhar if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) { 13913098bcfcSNavdeep Parhar service_nm_rxq(nm_rxq); 1392da6e3387SNavdeep Parhar (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON); 13933098bcfcSNavdeep Parhar } 13943098bcfcSNavdeep Parhar } 13953098bcfcSNavdeep Parhar 13963098bcfcSNavdeep Parhar /* 13973098bcfcSNavdeep Parhar * Interrupt handler for vectors shared between NIC and netmap rx queues. 
 */
void
t4_vi_intr(void *arg)
{
	struct irq *irq = arg;

	/* Service the netmap rxq first, then the NIC rxq on the same vector. */
	MPASS(irq->nm_rxq != NULL);
	t4_nm_intr(irq->nm_rxq);

	MPASS(irq->rxq != NULL);
	t4_intr(irq->rxq);
}
#endif

/*
 * Deals with interrupts on an iq-only (no freelist) queue.
 *
 * Returns 0 when the queue has been drained, or EINPROGRESS when a non-zero
 * budget was exhausted before the queue ran dry (caller should reschedule).
 */
static int
service_iq(struct sge_iq *iq, int budget)
{
	struct sge_iq *q;
	struct adapter *sc = iq->adapter;
	struct iq_desc *d = &iq->desc[iq->cidx];
	int ndescs = 0, limit;
	int rsp_type;
	uint32_t lq;
	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);

	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
	KASSERT((iq->flags & IQ_HAS_FL) == 0,
	    ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq,
	    iq->flags));
	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
	MPASS((iq->flags & IQ_LRO_ENABLED) == 0);

	limit = budget ? budget : iq->qsize / 16;

	/*
	 * We always come back and check the descriptor ring for new indirect
	 * interrupts and other responses after running a single handler.
	 */
	for (;;) {
		while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {

			/* Order the gen-bit check before reads of the payload. */
			rmb();

			rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
			lq = be32toh(d->rsp.pldbuflen_qid);

			switch (rsp_type) {
			case X_RSPD_TYPE_FLBUF:
				/* This queue has no freelist; fw misconfig. */
				panic("%s: data for an iq (%p) with no freelist",
				    __func__, iq);

				/* NOTREACHED */

			case X_RSPD_TYPE_CPL:
				KASSERT(d->rss.opcode < NUM_CPL_CMDS,
				    ("%s: bad opcode %02x.", __func__,
				    d->rss.opcode));
				t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL);
				break;

			case X_RSPD_TYPE_INTR:
				/*
				 * There are 1K interrupt-capable queues (qids 0
				 * through 1023). A response type indicating a
				 * forwarded interrupt with a qid >= 1K is an
				 * iWARP async notification.
				 */
				if (__predict_true(lq >= 1024)) {
					t4_an_handler(iq, &d->rsp);
					break;
				}

				q = sc->sge.iqmap[lq - sc->sge.iq_start -
				    sc->sge.iq_base];
				if (atomic_cmpset_int(&q->state, IQS_IDLE,
				    IQS_BUSY)) {
					if (service_iq_fl(q, q->qsize / 16) == 0) {
						(void) atomic_cmpset_int(&q->state,
						    IQS_BUSY, IQS_IDLE);
					} else {
						/* Not done; revisit it later. */
						STAILQ_INSERT_TAIL(&iql, q,
						    link);
					}
				}
				break;

			default:
				KASSERT(0,
				    ("%s: illegal response type %d on iq %p",
				    __func__, rsp_type, iq));
				log(LOG_ERR,
				    "%s: illegal response type %d on iq %p",
				    device_get_nameunit(sc->dev), rsp_type, iq);
				break;
			}

			/* Advance cidx; flip the gen bit on ring wrap. */
			d++;
			if (__predict_false(++iq->cidx == iq->sidx)) {
				iq->cidx = 0;
				iq->gen ^= F_RSPD_GEN;
				d = &iq->desc[0];
			}
			if (__predict_false(++ndescs == limit)) {
				/* Return credits but do not rearm the iq. */
				t4_write_reg(sc, sc->sge_gts_reg,
				    V_CIDXINC(ndescs) |
				    V_INGRESSQID(iq->cntxt_id) |
				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
				ndescs = 0;

				if (budget) {
					return (EINPROGRESS);
				}
			}
		}

		if (STAILQ_EMPTY(&iql))
			break;

		/*
		 * Process the head only, and send it to the back of the list if
		 * it's still not done.
		 */
		q = STAILQ_FIRST(&iql);
		STAILQ_REMOVE_HEAD(&iql, link);
		if (service_iq_fl(q, q->qsize / 8) == 0)
			(void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
		else
			STAILQ_INSERT_TAIL(&iql, q, link);
	}

	/* Final credit return; rearm with the queue's interrupt parameters. */
	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	return (0);
}

/*
 * True if mbufs are queued up for sorting before being handed to LRO.
 */
static inline int
sort_before_lro(struct lro_ctrl *lro)
{

	return (lro->lro_mbuf_max != 0);
}

/*
 * Convert a 60-bit last-flit timestamp to nanoseconds using the core clock.
 */
static inline uint64_t
last_flit_to_ns(struct adapter *sc, uint64_t lf)
{
	uint64_t n = be64toh(lf) & 0xfffffffffffffff;	/* 60b, not 64b. */

	/* Divide first when n * 1000000 would overflow uint64_t. */
	if (n > UINT64_MAX / 1000000)
		return (n / sc->params.vpd.cclk * 1000000);
	else
		return (n * 1000000 / sc->params.vpd.cclk);
}

static inline void
move_to_next_rxbuf(struct sge_fl *fl)
{

	fl->rx_offset = 0;
	/* hw_cidx (in units of 8-buffer descriptors) advances every 8 bufs. */
	if (__predict_false((++fl->cidx & 7) == 0)) {
		uint16_t cidx = fl->cidx >> 3;

		if (__predict_false(cidx == fl->sidx))
			fl->cidx = cidx = 0;
		fl->hw_cidx = cidx;
	}
}

/*
 * Deals with interrupts on an iq+fl queue.
 *
 * Returns 0 when the queue has been drained, or EINPROGRESS when a non-zero
 * budget was exhausted before the queue ran dry (caller should reschedule).
 */
static int
service_iq_fl(struct sge_iq *iq, int budget)
{
	struct sge_rxq *rxq = iq_to_rxq(iq);
	struct sge_fl *fl;
	struct adapter *sc = iq->adapter;
	struct iq_desc *d = &iq->desc[iq->cidx];
	int ndescs, limit;
	int rsp_type, starved;
	uint32_t lq;
	uint16_t fl_hw_cidx;
	struct mbuf *m0;
#if defined(INET) || defined(INET6)
	const struct timeval lro_timeout = {0, sc->lro_timeout};
	struct lro_ctrl *lro = &rxq->lro;
#endif

	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
	MPASS(iq->flags & IQ_HAS_FL);

	ndescs = 0;
#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_ADJ_CREDIT) {
		/* A credit was held back last time to delay the LRO flush. */
		MPASS(sort_before_lro(lro));
		iq->flags &= ~IQ_ADJ_CREDIT;
		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
			/* Nothing new arrived: flush LRO, return the credit. */
			tcp_lro_flush_all(lro);
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
			    V_INGRESSQID((u32)iq->cntxt_id) |
			    V_SEINTARM(iq->intr_params));
			return (0);
		}
		ndescs = 1;
	}
#else
	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
#endif

	limit = budget ? budget : iq->qsize / 16;
	fl = &rxq->fl;
	fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
	while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {

		/* Order the gen-bit check before reads of the payload. */
		rmb();

		m0 = NULL;
		rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
		lq = be32toh(d->rsp.pldbuflen_qid);

		switch (rsp_type) {
		case X_RSPD_TYPE_FLBUF:
			if (lq & F_RSPD_NEWBUF) {
				/* Payload begins in a fresh hw buffer. */
				if (fl->rx_offset > 0)
					move_to_next_rxbuf(fl);
				lq = G_RSPD_LEN(lq);
			}
			/* Top up the freelist once enough bufs are consumed. */
			if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) {
				FL_LOCK(fl);
				refill_fl(sc, fl, 64);
				FL_UNLOCK(fl);
				fl_hw_cidx = fl->hw_cidx;
			}

			if (d->rss.opcode == CPL_RX_PKT) {
				if (__predict_true(eth_rx(sc, rxq, d, lq) == 0))
					break;
				goto out;
			}
			m0 = get_fl_payload(sc, fl, lq);
			if (__predict_false(m0 == NULL))
				goto out;

			/* fall through */

		case X_RSPD_TYPE_CPL:
			KASSERT(d->rss.opcode < NUM_CPL_CMDS,
			    ("%s: bad opcode %02x.", __func__, d->rss.opcode));
			t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
			break;

		case X_RSPD_TYPE_INTR:

			/*
			 * There are 1K interrupt-capable queues (qids 0
			 * through 1023). A response type indicating a
			 * forwarded interrupt with a qid >= 1K is an
			 * iWARP async notification. That is the only
			 * acceptable indirect interrupt on this queue.
			 */
			if (__predict_false(lq < 1024)) {
				panic("%s: indirect interrupt on iq_fl %p "
				    "with qid %u", __func__, iq, lq);
			}

			t4_an_handler(iq, &d->rsp);
			break;

		default:
			KASSERT(0, ("%s: illegal response type %d on iq %p",
			    __func__, rsp_type, iq));
			log(LOG_ERR, "%s: illegal response type %d on iq %p",
			    device_get_nameunit(sc->dev), rsp_type, iq);
			break;
		}

		/* Advance cidx; flip the gen bit on ring wrap. */
		d++;
		if (__predict_false(++iq->cidx == iq->sidx)) {
			iq->cidx = 0;
			iq->gen ^= F_RSPD_GEN;
			d = &iq->desc[0];
		}
		if (__predict_false(++ndescs == limit)) {
			/* Return credits but do not rearm the iq. */
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));

#if defined(INET) || defined(INET6)
			if (iq->flags & IQ_LRO_ENABLED &&
			    !sort_before_lro(lro) &&
			    sc->lro_timeout != 0) {
				tcp_lro_flush_inactive(lro, &lro_timeout);
			}
#endif
			if (budget)
				return (EINPROGRESS);
			ndescs = 0;
		}
	}
out:
#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
			MPASS(sort_before_lro(lro));
			/* hold back one credit and don't flush LRO state */
			iq->flags |= IQ_ADJ_CREDIT;
			ndescs--;
		} else {
			tcp_lro_flush_all(lro);
		}
	}
#endif

	/* Final credit return; rearm with the queue's interrupt parameters. */
	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	FL_LOCK(fl);
	starved = refill_fl(sc, fl, 64);
	FL_UNLOCK(fl);
	if (__predict_false(starved != 0))
		add_fl_to_sfl(sc, fl);

	return (0);
}

/* Metadata lives inside the cluster itself, at offset moff. */
static inline struct cluster_metadata *
cl_metadata(struct fl_sdesc *sd)
{

	return ((void *)(sd->cl + sd->moff));
}

/* ext_free callback for clusters handed out via m_extaddref. */
static void
rxb_free(struct mbuf *m)
{
	struct cluster_metadata *clm = m->m_ext.ext_arg1;

	uma_zfree(clm->zone, clm->cl);
	counter_u64_add(extfree_rels, 1);
}

/*
 * The mbuf returned comes from zone_mbuf and carries the payload in one of
 * these ways
 * a) complete frame inside the mbuf
 * b) m_cljset (for clusters without metadata)
 * c) m_extaddref (cluster with metadata)
 */
static struct mbuf *
get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
    int remaining)
{
	struct mbuf *m;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
	struct cluster_metadata *clm;
	int len, blen;
	caddr_t payload;

	if (fl->flags & FL_BUF_PACKING) {
		u_int l, pad;

		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
		len = min(remaining, blen);
		payload = sd->cl + fl->rx_offset;

		/* Pad so the next frame starts on a buf_boundary boundary. */
		l = fr_offset + len;
		pad = roundup2(l, fl->buf_boundary) - l;
		if (fl->rx_offset + len + pad < rxb->size2)
			blen = len + pad;
		MPASS(fl->rx_offset + blen <= rxb->size2);
	} else {
		MPASS(fl->rx_offset == 0);	/* not packing */
		blen = rxb->size1;
		len = min(remaining, blen);
		payload = sd->cl;
	}

	/* Only the first segment of a frame gets a pkthdr. */
	if (fr_offset == 0) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (__predict_false(m == NULL))
			return (NULL);
		m->m_pkthdr.len = remaining;
	} else {
		m = m_get(M_NOWAIT, MT_DATA);
		if (__predict_false(m == NULL))
			return (NULL);
	}
	m->m_len = len;

	if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {
		/* copy data to mbuf */
		bcopy(payload, mtod(m, caddr_t), len);
		if (fl->flags & FL_BUF_PACKING) {
			fl->rx_offset += blen;
			MPASS(fl->rx_offset <= rxb->size2);
			if (fl->rx_offset < rxb->size2)
				return (m);	/* without advancing the cidx */
		}
	} else if (fl->flags & FL_BUF_PACKING) {
		clm = cl_metadata(sd);
		/* First mbuf carved from this cluster sets up the refcount. */
		if (sd->nmbuf++ == 0) {
			clm->refcount = 1;
			clm->zone = rxb->zone;
			clm->cl = sd->cl;
			counter_u64_add(extfree_refs, 1);
		}
		m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm,
		    NULL);

		fl->rx_offset += blen;
		MPASS(fl->rx_offset <= rxb->size2);
		if (fl->rx_offset < rxb->size2)
			return (m);	/* without advancing the cidx */
	} else {
		m_cljset(m, sd->cl, rxb->type);
		sd->cl = NULL;	/* consumed, not a recycle candidate */
	}

	move_to_next_rxbuf(fl);

	return (m);
}

/*
 * Assemble the mbuf chain for a frame of plen bytes, possibly spanning several
 * hw buffers.  On transient mbuf shortage the partial chain is parked on the
 * fl (FL_BUF_RESUME) and NULL is returned; a later call resumes the chain.
 */
static struct mbuf *
get_fl_payload(struct adapter *sc, struct sge_fl *fl, const u_int plen)
{
	struct mbuf *m0, *m, **pnext;
	u_int remaining;

	if (__predict_false(fl->flags & FL_BUF_RESUME)) {
		M_ASSERTPKTHDR(fl->m0);
		MPASS(fl->m0->m_pkthdr.len == plen);
		MPASS(fl->remaining < plen);

		m0 = fl->m0;
		pnext = fl->pnext;
		remaining = fl->remaining;
		fl->flags &= ~FL_BUF_RESUME;
		goto get_segment;
	}

	/*
	 * Payload starts at rx_offset in the current hw buffer.  Its length is
	 * 'len' and it may span multiple hw buffers.
	 */

	m0 = get_scatter_segment(sc, fl, 0, plen);
	if (m0 == NULL)
		return (NULL);
	remaining = plen - m0->m_len;
	pnext = &m0->m_next;
	while (remaining > 0) {
get_segment:
		MPASS(fl->rx_offset == 0);
		m = get_scatter_segment(sc, fl, plen - remaining, remaining);
		if (__predict_false(m == NULL)) {
			/* Save progress so a later call can resume. */
			fl->m0 = m0;
			fl->pnext = pnext;
			fl->remaining = remaining;
			fl->flags |= FL_BUF_RESUME;
			return (NULL);
		}
		*pnext = m;
		pnext = &m->m_next;
		remaining -= m->m_len;
	}
	*pnext = NULL;

	M_ASSERTPKTHDR(m0);
	return (m0);
}

/*
 * Like get_scatter_segment but consumes the buffer space without building an
 * mbuf.  Returns the number of payload bytes skipped.
 */
static int
skip_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
    int remaining)
{
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
	int len, blen;

	if (fl->flags & FL_BUF_PACKING) {
		u_int l, pad;

		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
		len = min(remaining, blen);

		/* Same padding rule as get_scatter_segment. */
		l = fr_offset + len;
		pad = roundup2(l, fl->buf_boundary) - l;
		if (fl->rx_offset + len + pad < rxb->size2)
			blen = len + pad;
		fl->rx_offset += blen;
		MPASS(fl->rx_offset <= rxb->size2);
		if (fl->rx_offset < rxb->size2)
			return (len);	/* without advancing the cidx */
	} else {
		MPASS(fl->rx_offset == 0);	/* not packing */
		blen = rxb->size1;
		len = min(remaining, blen);
	}
	move_to_next_rxbuf(fl);
	return (len);
}

/* Discard a frame of plen bytes from the freelist without building mbufs. */
static inline void
skip_fl_payload(struct adapter *sc, struct sge_fl *fl, int plen)
{
	int remaining, fr_offset, len;

	fr_offset = 0;
	remaining = plen;
	while (remaining > 0) {
		len = skip_scatter_segment(sc, fl, fr_offset, remaining);
		fr_offset += len;
		remaining -= len;
	}
}

static inline int
get_segment_len(struct adapter *sc, struct sge_fl *fl, int plen)
{
	int len;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct rx_buf_info
	*rxb = &sc->sge.rx_buf_info[sd->zidx];

	if (fl->flags & FL_BUF_PACKING)
		len = rxb->size2 - fl->rx_offset;
	else
		len = rxb->size1;

	return (min(plen, len));
}

/*
 * Process one ingress ethernet frame of wire length 'plen': run pfil
 * hooks on the (still in-cluster) frame, assemble the mbuf chain, fill
 * in RSS hash/flowid, hw checksum results (including T6 compressed
 * error vectors and VXLAN inner checksums), VLAN tag, and rx timestamp,
 * then hand the packet to LRO or ifp->if_input.  Returns 0 on success
 * or ENOMEM if the payload could not be assembled (caller retries).
 */
static int
eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d,
    u_int plen)
{
	struct mbuf *m0;
	struct ifnet *ifp = rxq->ifp;
	struct sge_fl *fl = &rxq->fl;
	struct vi_info *vi = ifp->if_softc;
	const struct cpl_rx_pkt *cpl;
#if defined(INET) || defined(INET6)
	struct lro_ctrl *lro = &rxq->lro;
#endif
	uint16_t err_vec, tnl_type, tnlhdr_len;
	/* Map (hw hash_type, ipv6 flag) -> mbuf M_HASHTYPE_* value. */
	static const int sw_hashtype[4][2] = {
		{M_HASHTYPE_NONE, M_HASHTYPE_NONE},
		{M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6},
		{M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6},
		{M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6},
	};
	/* csum_flags for a verified VXLAN frame: [outer_ipv6][inner_ipv6]. */
	static const int sw_csum_flags[2][2] = {
		{
			/* IP, inner IP */
			CSUM_ENCAP_VXLAN |
			    CSUM_L3_CALC | CSUM_L3_VALID |
			    CSUM_L4_CALC | CSUM_L4_VALID |
			    CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
			    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,

			/* IP, inner IP6 */
			CSUM_ENCAP_VXLAN |
			    CSUM_L3_CALC | CSUM_L3_VALID |
			    CSUM_L4_CALC | CSUM_L4_VALID |
			    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
		},
		{
			/* IP6, inner IP */
			CSUM_ENCAP_VXLAN |
			    CSUM_L4_CALC | CSUM_L4_VALID |
			    CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
			    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
		},
		{
			/* IP6, inner IP6 */
			CSUM_ENCAP_VXLAN |
			    CSUM_L4_CALC | CSUM_L4_VALID |
			    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
		},
	};

	MPASS(plen > sc->params.sge.fl_pktshift);
	/*
	 * Run pfil on the frame while it is still contiguous in the
	 * cluster.  Only possible when the payload is not a suspended
	 * multi-buffer chain (FL_BUF_RESUME).
	 */
	if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) &&
	    __predict_true((fl->flags & FL_BUF_RESUME) == 0)) {
		struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
		caddr_t frame;
		int rc, slen;

		slen = get_segment_len(sc, fl, plen) -
		    sc->params.sge.fl_pktshift;
		frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift;
		CURVNET_SET_QUIET(ifp->if_vnet);
		rc = pfil_run_hooks(vi->pfil, frame, ifp,
		    slen | PFIL_MEMPTR | PFIL_IN, NULL);
		CURVNET_RESTORE();
		if (rc == PFIL_DROPPED || rc == PFIL_CONSUMED) {
			skip_fl_payload(sc, fl, plen);
			return (0);
		}
		if (rc == PFIL_REALLOCED) {
			/* pfil copied the frame into its own mbuf. */
			skip_fl_payload(sc, fl, plen);
			m0 = pfil_mem2mbuf(frame);
			goto have_mbuf;
		}
	}

	m0 = get_fl_payload(sc, fl, plen);
	if (__predict_false(m0 == NULL))
		return (ENOMEM);

	/* Strip the hw-inserted pad in front of the ethernet header. */
	m0->m_pkthdr.len -= sc->params.sge.fl_pktshift;
	m0->m_len -= sc->params.sge.fl_pktshift;
	m0->m_data += sc->params.sge.fl_pktshift;

have_mbuf:
	m0->m_pkthdr.rcvif = ifp;
	M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]);
	m0->m_pkthdr.flowid = be32toh(d->rss.hash_val);

	cpl = (const void *)(&d->rss + 1);
	if (sc->params.tp.rx_pkt_encap) {
		/* T6+: err_vec is compressed, with tunnel info packed in. */
		const uint16_t ev = be16toh(cpl->err_vec);

		err_vec = G_T6_COMPR_RXERR_VEC(ev);
		tnl_type = G_T6_RX_TNL_TYPE(ev);
		tnlhdr_len = G_T6_RX_TNLHDR_LEN(ev);
	} else {
		err_vec = be16toh(cpl->err_vec);
		tnl_type = 0;
		tnlhdr_len = 0;
	}
	if (cpl->csum_calc && err_vec == 0) {
		int ipv6 = !!(cpl->l2info & htobe32(F_RXF_IP6));

		/* checksum(s) calculated and found to be correct. */

		MPASS((cpl->l2info & htobe32(F_RXF_IP)) ^
		    (cpl->l2info & htobe32(F_RXF_IP6)));
		m0->m_pkthdr.csum_data = be16toh(cpl->csum);
		if (tnl_type == 0) {
			if (!ipv6 && ifp->if_capenable & IFCAP_RXCSUM) {
				m0->m_pkthdr.csum_flags = CSUM_L3_CALC |
				    CSUM_L3_VALID | CSUM_L4_CALC |
				    CSUM_L4_VALID;
			} else if (ipv6 && ifp->if_capenable & IFCAP_RXCSUM_IPV6) {
				m0->m_pkthdr.csum_flags = CSUM_L4_CALC |
				    CSUM_L4_VALID;
			}
			rxq->rxcsum++;
		} else {
			MPASS(tnl_type == RX_PKT_TNL_TYPE_VXLAN);
			if (__predict_false(cpl->ip_frag)) {
				/*
				 * csum_data is for the inner frame (which is an
				 * IP fragment) and is not 0xffff.  There is no
				 * way to pass the inner csum_data to the stack.
				 * We don't want the stack to use the inner
				 * csum_data to validate the outer frame or it
				 * will get rejected.  So we fix csum_data here
				 * and let sw do the checksum of inner IP
				 * fragments.
				 *
				 * XXX: Need 32b for csum_data2 in an rx mbuf.
				 * Maybe stuff it into rcv_tstmp?
				 */
				m0->m_pkthdr.csum_data = 0xffff;
				if (ipv6) {
					m0->m_pkthdr.csum_flags = CSUM_L4_CALC |
					    CSUM_L4_VALID;
				} else {
					m0->m_pkthdr.csum_flags = CSUM_L3_CALC |
					    CSUM_L3_VALID | CSUM_L4_CALC |
					    CSUM_L4_VALID;
				}
			} else {
				int outer_ipv6;

				MPASS(m0->m_pkthdr.csum_data == 0xffff);

				outer_ipv6 = tnlhdr_len >=
				    sizeof(struct ether_header) +
				    sizeof(struct ip6_hdr);
				m0->m_pkthdr.csum_flags =
				    sw_csum_flags[outer_ipv6][ipv6];
			}
			rxq->vxlan_rxcsum++;
		}
	}

	if (cpl->vlan_ex) {
		m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
		m0->m_flags |= M_VLANTAG;
		rxq->vlan_extraction++;
	}

	if (rxq->iq.flags & IQ_RX_TIMESTAMP) {
		/*
		 * Fill up rcv_tstmp but do not set M_TSTMP.
		 * rcv_tstmp is not in the format that the
		 * kernel expects and we don't want to mislead
		 * it.  For now this is only for custom code
		 * that knows how to interpret cxgbe's stamp.
		 */
		m0->m_pkthdr.rcv_tstmp =
		    last_flit_to_ns(sc, d->rsp.u.last_flit);
#ifdef notyet
		m0->m_flags |= M_TSTMP;
#endif
	}

#ifdef NUMA
	m0->m_pkthdr.numa_domain = ifp->if_numa_domain;
#endif
#if defined(INET) || defined(INET6)
	/* TCP frames may be aggregated by LRO; VXLAN frames are not. */
	if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 &&
	    (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 ||
	    M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) {
		if (sort_before_lro(lro)) {
			tcp_lro_queue_mbuf(lro, m0);
			return (0);	/* queued for sort, then LRO */
		}
		if (tcp_lro_rx(lro, m0, 0) == 0)
			return (0);	/* queued for LRO */
	}
#endif
	ifp->if_input(ifp, m0);

	return (0);
}

/*
 * Must drain the wrq or make sure that someone else will.
 */
/*
 * Taskqueue handler: drains queued work requests for a wrq, unless an
 * in-progress (incomplete) WR owns the queue, in which case its
 * completion path will drain the list instead.
 */
static void
wrq_tx_drain(void *arg, int n)
{
	struct sge_wrq *wrq = arg;
	struct sge_eq *eq = &wrq->eq;

	EQ_LOCK(eq);
	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
		drain_wrq_wr_list(wrq->adapter, wrq);
	EQ_UNLOCK(eq);
}

/*
 * Copy as many queued work requests as will fit into the hardware
 * descriptor ring, handling ring wrap, requesting an egress update when
 * the ring is getting full, and ringing the doorbell in batches.  Stops
 * early if the next WR does not fit.  Called with the eq lock held and
 * at least one WR on wr_list.
 */
static void
drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq)
{
	struct sge_eq *eq = &wrq->eq;
	u_int available, dbdiff;	/* # of hardware descriptors */
	u_int n;
	struct wrqe *wr;
	struct fw_eth_tx_pkt_wr *dst;	/* any fw WR struct will do */

	EQ_LOCK_ASSERT_OWNED(eq);
	MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
	wr = STAILQ_FIRST(&wrq->wr_list);
	MPASS(wr != NULL);	/* Must be called with something useful to do */
	MPASS(eq->pidx == eq->dbidx);
	dbdiff = 0;

	do {
		/* Refresh free space; keep one descriptor in reserve. */
		eq->cidx = read_hw_cidx(eq);
		if (eq->pidx == eq->cidx)
			available = eq->sidx - 1;
		else
			available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;

		MPASS(wr->wrq == wrq);
		n = howmany(wr->wr_len, EQ_ESIZE);
		if (available < n)
			break;

		dst = (void *)&eq->desc[eq->pidx];
		if (__predict_true(eq->sidx - eq->pidx > n)) {
			/* Won't wrap, won't end exactly at the status page. */
			bcopy(&wr->wr[0], dst, wr->wr_len);
			eq->pidx += n;
		} else {
			/* Copy in two parts: up to the end, then from 0. */
			int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE;

			bcopy(&wr->wr[0], dst, first_portion);
			if (wr->wr_len > first_portion) {
				bcopy(&wr->wr[first_portion], &eq->desc[0],
				    wr->wr_len - first_portion);
			}
			eq->pidx = n - (eq->sidx - eq->pidx);
		}
		wrq->tx_wrs_copied++;

		if (available < eq->sidx / 4 &&
		    atomic_cmpset_int(&eq->equiq, 0, 1)) {
			/*
			 * XXX: This is not 100% reliable with some
			 * types of WRs.  But this is a very unusual
			 * situation for an ofld/ctrl queue anyway.
			 */
			dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
			    F_FW_WR_EQUEQ);
		}

		/* Batch doorbell rings: ring once every 16 descriptors. */
		dbdiff += n;
		if (dbdiff >= 16) {
			ring_eq_db(sc, eq, dbdiff);
			dbdiff = 0;
		}

		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
		free_wrqe(wr);
		MPASS(wrq->nwr_pending > 0);
		wrq->nwr_pending--;
		MPASS(wrq->ndesc_needed >= n);
		wrq->ndesc_needed -= n;
	} while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL);

	if (dbdiff)
		ring_eq_db(sc, eq, dbdiff);
}

/*
 * Doesn't fail.  Holds on to work requests it can't send right away.
 */
void
t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
{
#ifdef INVARIANTS
	/* 'eq' is only referenced by assertions below. */
	struct sge_eq *eq = &wrq->eq;
#endif

	EQ_LOCK_ASSERT_OWNED(eq);
	MPASS(wr != NULL);
	MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN);
	MPASS((wr->wr_len & 0x7) == 0);	/* WR length is a multiple of 8 */

	STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);
	wrq->nwr_pending++;
	wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE);

	if (!TAILQ_EMPTY(&wrq->incomplete_wrs))
		return;	/* commit_wrq_wr will drain wr_list as well. */

	drain_wrq_wr_list(sc, wrq);

	/* Doorbell must have caught up to the pidx. */
	MPASS(eq->pidx == eq->dbidx);
}

/*
 * Recompute the refill source (buffer zone index) for every freelist of
 * the interface, e.g. after an MTU change alters the max rx payload.
 */
void
t4_update_fl_bufsize(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->adapter;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	struct sge_fl *fl;
	int i, maxp;

	maxp = max_rx_payload(sc, ifp, false);
	for_each_rxq(vi, i, rxq) {
		fl = &rxq->fl;

		FL_LOCK(fl);
		fl->zidx = find_refill_source(sc, maxp,
		    fl->flags & FL_BUF_PACKING);
		FL_UNLOCK(fl);
	}
#ifdef TCP_OFFLOAD
	/* Offload queues may allow a different max payload. */
	maxp = max_rx_payload(sc, ifp, true);
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		fl = &ofld_rxq->fl;

		FL_LOCK(fl);
		fl->zidx = find_refill_source(sc, maxp,
		    fl->flags & FL_BUF_PACKING);
		FL_UNLOCK(fl);
	}
#endif
}

/*
 * The tx mbuf helpers below stash metadata in otherwise-unused pkthdr
 * fields (inner_l5hlen, PH_loc.eight[]) between parse and transmit.
 */
static inline int
mbuf_nsegs(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	KASSERT(m->m_pkthdr.inner_l5hlen > 0,
	    ("%s:
 mbuf %p missing information on # of segments.", __func__, m));

	return (m->m_pkthdr.inner_l5hlen);
}

/* Record the DMA segment count in pkthdr.inner_l5hlen (repurposed). */
static inline void
set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.inner_l5hlen = nsegs;
}

/* Driver-private mbuf flags (MC_*) stashed in PH_loc.eight[4]. */
static inline int
mbuf_cflags(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_loc.eight[4]);
}

static inline void
set_mbuf_cflags(struct mbuf *m, uint8_t flags)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_loc.eight[4] = flags;
}

/*
 * Work request length in 16-byte units, stashed in PH_loc.eight[0].
 * TLS work requests (MC_TLS) are exempt from the range check.
 */
static inline int
mbuf_len16(struct mbuf *m)
{
	int n;

	M_ASSERTPKTHDR(m);
	n = m->m_pkthdr.PH_loc.eight[0];
	if (!(mbuf_cflags(m) & MC_TLS))
		MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);

	return (n);
}

static inline void
set_mbuf_len16(struct mbuf *m, uint8_t len16)
{

	M_ASSERTPKTHDR(m);
	if (!(mbuf_cflags(m) & MC_TLS))
		MPASS(len16 > 0 && len16 <= SGE_MAX_WR_LEN / 16);
	m->m_pkthdr.PH_loc.eight[0] = len16;
}

#ifdef RATELIMIT
/*
 * Ethernet-offload (rate limited tx) variants of the accessors above;
 * they use PH_loc.eight[1..3].
 */
static inline int
mbuf_eo_nsegs(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_loc.eight[1]);
}

static inline void
set_mbuf_eo_nsegs(struct mbuf *m, uint8_t nsegs)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_loc.eight[1] = nsegs;
}

static inline int
mbuf_eo_len16(struct mbuf *m)
{
	int n;

	M_ASSERTPKTHDR(m);
	n = m->m_pkthdr.PH_loc.eight[2];
	MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);

	return (n);
}

static inline void
set_mbuf_eo_len16(struct mbuf *m, uint8_t len16)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_loc.eight[2] = len16;
}

static inline int
mbuf_eo_tsclk_tsoff(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_loc.eight[3]);
}

static inline void
set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_tsoff)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff;
}

/* True if the send tag requires the ethernet-offload (rate limit) path. */
static inline int
needs_eo(struct m_snd_tag *mst)
{

	return (mst != NULL && mst->type == IF_SND_TAG_TYPE_RATE_LIMIT);
}
#endif

/*
 * Try to allocate an mbuf to contain a raw work request.  To make it
 * easy to construct the work request, don't allocate a chain but a
 * single mbuf.
 */
struct mbuf *
alloc_wr_mbuf(int len, int how)
{
	struct mbuf *m;

	/* A raw WR larger than one cluster cannot be accommodated. */
	if (len <= MHLEN)
		m = m_gethdr(how, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m = NULL;
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.len = len;
	m->m_len = len;
	set_mbuf_cflags(m, MC_RAW_WR);
	set_mbuf_len16(m, howmany(len, 16));
	return (m);
}

/*
 * The needs_*() predicates below classify an outbound packet by its
 * csum_flags so the tx path can pick the right work request and
 * offload control bits.  Each returns nonzero if any relevant flag is
 * set.
 */
static inline bool
needs_hwcsum(struct mbuf *m)
{
	const uint32_t csum_flags = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP |
	    CSUM_IP_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP |
	    CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_IP6_UDP |
	    CSUM_IP6_TCP | CSUM_IP6_TSO | CSUM_INNER_IP6_UDP |
	    CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO;

	M_ASSERTPKTHDR(m);

	return (m->m_pkthdr.csum_flags & csum_flags);
}

static inline bool
needs_tso(struct mbuf *m)
{
	const uint32_t csum_flags = CSUM_IP_TSO | CSUM_IP6_TSO |
	    CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO;

	M_ASSERTPKTHDR(m);

	return (m->m_pkthdr.csum_flags & csum_flags);
}

static inline bool
needs_vxlan_csum(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	return (m->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN);
}

static inline bool
needs_vxlan_tso(struct mbuf *m)
{
	const uint32_t csum_flags = CSUM_ENCAP_VXLAN | CSUM_INNER_IP_TSO |
	    CSUM_INNER_IP6_TSO;

	M_ASSERTPKTHDR(m);

	/* VXLAN encap together with an inner TSO request. */
	return ((m->m_pkthdr.csum_flags & csum_flags) != 0 &&
	    (m->m_pkthdr.csum_flags & csum_flags) != CSUM_ENCAP_VXLAN);
}

static inline bool
needs_inner_tcp_csum(struct mbuf *m)
{
	const uint32_t csum_flags = CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO;

	M_ASSERTPKTHDR(m);

	return (m->m_pkthdr.csum_flags & csum_flags);
}

static inline bool
needs_l3_csum(struct mbuf *m)
{
	const uint32_t csum_flags = CSUM_IP | CSUM_IP_TSO | CSUM_INNER_IP |
	    CSUM_INNER_IP_TSO;

	M_ASSERTPKTHDR(m);

	return (m->m_pkthdr.csum_flags & csum_flags);
}

static inline bool
needs_outer_tcp_csum(struct mbuf *m)
{
	const uint32_t csum_flags = CSUM_IP_TCP | CSUM_IP_TSO | CSUM_IP6_TCP |
	    CSUM_IP6_TSO;

	M_ASSERTPKTHDR(m);

	return (m->m_pkthdr.csum_flags & csum_flags);
}

#ifdef RATELIMIT
static inline bool
needs_outer_l4_csum(struct mbuf *m)
{
	const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_TSO |
	    CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_TSO;

	M_ASSERTPKTHDR(m);

	return (m->m_pkthdr.csum_flags & csum_flags);
}

static inline bool
needs_outer_udp_csum(struct mbuf *m)
{
	const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP6_UDP;

	M_ASSERTPKTHDR(m);

	return (m->m_pkthdr.csum_flags & csum_flags);
}
#endif

static inline bool
needs_vlan_insertion(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
25327951040fSNavdeep Parhar 2533a6a8ff35SNavdeep Parhar return (m->m_flags & M_VLANTAG); 25347951040fSNavdeep Parhar } 25357951040fSNavdeep Parhar 25367951040fSNavdeep Parhar static void * 25377951040fSNavdeep Parhar m_advance(struct mbuf **pm, int *poffset, int len) 25387951040fSNavdeep Parhar { 25397951040fSNavdeep Parhar struct mbuf *m = *pm; 25407951040fSNavdeep Parhar int offset = *poffset; 25417951040fSNavdeep Parhar uintptr_t p = 0; 25427951040fSNavdeep Parhar 25437951040fSNavdeep Parhar MPASS(len > 0); 25447951040fSNavdeep Parhar 2545e06ab612SJohn Baldwin for (;;) { 25467951040fSNavdeep Parhar if (offset + len < m->m_len) { 25477951040fSNavdeep Parhar offset += len; 25487951040fSNavdeep Parhar p = mtod(m, uintptr_t) + offset; 25497951040fSNavdeep Parhar break; 25507951040fSNavdeep Parhar } 25517951040fSNavdeep Parhar len -= m->m_len - offset; 25527951040fSNavdeep Parhar m = m->m_next; 25537951040fSNavdeep Parhar offset = 0; 25547951040fSNavdeep Parhar MPASS(m != NULL); 25557951040fSNavdeep Parhar } 25567951040fSNavdeep Parhar *poffset = offset; 25577951040fSNavdeep Parhar *pm = m; 25587951040fSNavdeep Parhar return ((void *)p); 25597951040fSNavdeep Parhar } 25607951040fSNavdeep Parhar 2561d76bbe17SJohn Baldwin static inline int 2562d76bbe17SJohn Baldwin count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr) 2563d76bbe17SJohn Baldwin { 2564d76bbe17SJohn Baldwin vm_paddr_t paddr; 2565d76bbe17SJohn Baldwin int i, len, off, pglen, pgoff, seglen, segoff; 2566d76bbe17SJohn Baldwin int nsegs = 0; 2567d76bbe17SJohn Baldwin 2568365e8da4SGleb Smirnoff M_ASSERTEXTPG(m); 2569d76bbe17SJohn Baldwin off = mtod(m, vm_offset_t); 2570d76bbe17SJohn Baldwin len = m->m_len; 2571d76bbe17SJohn Baldwin off += skip; 2572d76bbe17SJohn Baldwin len -= skip; 2573d76bbe17SJohn Baldwin 25747b6c99d0SGleb Smirnoff if (m->m_epg_hdrlen != 0) { 25757b6c99d0SGleb Smirnoff if (off >= m->m_epg_hdrlen) { 25767b6c99d0SGleb Smirnoff off -= m->m_epg_hdrlen; 2577d76bbe17SJohn Baldwin } 
else { 25787b6c99d0SGleb Smirnoff seglen = m->m_epg_hdrlen - off; 2579d76bbe17SJohn Baldwin segoff = off; 2580d76bbe17SJohn Baldwin seglen = min(seglen, len); 2581d76bbe17SJohn Baldwin off = 0; 2582d76bbe17SJohn Baldwin len -= seglen; 2583d76bbe17SJohn Baldwin paddr = pmap_kextract( 25840c103266SGleb Smirnoff (vm_offset_t)&m->m_epg_hdr[segoff]); 2585d76bbe17SJohn Baldwin if (*nextaddr != paddr) 2586d76bbe17SJohn Baldwin nsegs++; 2587d76bbe17SJohn Baldwin *nextaddr = paddr + seglen; 2588d76bbe17SJohn Baldwin } 2589d76bbe17SJohn Baldwin } 25907b6c99d0SGleb Smirnoff pgoff = m->m_epg_1st_off; 25917b6c99d0SGleb Smirnoff for (i = 0; i < m->m_epg_npgs && len > 0; i++) { 2592c4ee38f8SGleb Smirnoff pglen = m_epg_pagelen(m, i, pgoff); 2593d76bbe17SJohn Baldwin if (off >= pglen) { 2594d76bbe17SJohn Baldwin off -= pglen; 2595d76bbe17SJohn Baldwin pgoff = 0; 2596d76bbe17SJohn Baldwin continue; 2597d76bbe17SJohn Baldwin } 2598d76bbe17SJohn Baldwin seglen = pglen - off; 2599d76bbe17SJohn Baldwin segoff = pgoff + off; 2600d76bbe17SJohn Baldwin off = 0; 2601d76bbe17SJohn Baldwin seglen = min(seglen, len); 2602d76bbe17SJohn Baldwin len -= seglen; 26030c103266SGleb Smirnoff paddr = m->m_epg_pa[i] + segoff; 2604d76bbe17SJohn Baldwin if (*nextaddr != paddr) 2605d76bbe17SJohn Baldwin nsegs++; 2606d76bbe17SJohn Baldwin *nextaddr = paddr + seglen; 2607d76bbe17SJohn Baldwin pgoff = 0; 2608d76bbe17SJohn Baldwin }; 2609d76bbe17SJohn Baldwin if (len != 0) { 26107b6c99d0SGleb Smirnoff seglen = min(len, m->m_epg_trllen - off); 2611d76bbe17SJohn Baldwin len -= seglen; 26120c103266SGleb Smirnoff paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]); 2613d76bbe17SJohn Baldwin if (*nextaddr != paddr) 2614d76bbe17SJohn Baldwin nsegs++; 2615d76bbe17SJohn Baldwin *nextaddr = paddr + seglen; 2616d76bbe17SJohn Baldwin } 2617d76bbe17SJohn Baldwin 2618d76bbe17SJohn Baldwin return (nsegs); 2619d76bbe17SJohn Baldwin } 2620d76bbe17SJohn Baldwin 2621d76bbe17SJohn Baldwin 26227951040fSNavdeep Parhar /* 
26237951040fSNavdeep Parhar * Can deal with empty mbufs in the chain that have m_len = 0, but the chain 2624786099deSNavdeep Parhar * must have at least one mbuf that's not empty. It is possible for this 2625786099deSNavdeep Parhar * routine to return 0 if skip accounts for all the contents of the mbuf chain. 26267951040fSNavdeep Parhar */ 26277951040fSNavdeep Parhar static inline int 2628d76bbe17SJohn Baldwin count_mbuf_nsegs(struct mbuf *m, int skip, uint8_t *cflags) 26297951040fSNavdeep Parhar { 2630d76bbe17SJohn Baldwin vm_paddr_t nextaddr, paddr; 263177e9044cSNavdeep Parhar vm_offset_t va; 26327951040fSNavdeep Parhar int len, nsegs; 26337951040fSNavdeep Parhar 2634786099deSNavdeep Parhar M_ASSERTPKTHDR(m); 2635786099deSNavdeep Parhar MPASS(m->m_pkthdr.len > 0); 2636786099deSNavdeep Parhar MPASS(m->m_pkthdr.len >= skip); 26377951040fSNavdeep Parhar 26387951040fSNavdeep Parhar nsegs = 0; 2639d76bbe17SJohn Baldwin nextaddr = 0; 26407951040fSNavdeep Parhar for (; m; m = m->m_next) { 26417951040fSNavdeep Parhar len = m->m_len; 26427951040fSNavdeep Parhar if (__predict_false(len == 0)) 26437951040fSNavdeep Parhar continue; 2644786099deSNavdeep Parhar if (skip >= len) { 2645786099deSNavdeep Parhar skip -= len; 2646786099deSNavdeep Parhar continue; 2647786099deSNavdeep Parhar } 26486edfd179SGleb Smirnoff if ((m->m_flags & M_EXTPG) != 0) { 2649d76bbe17SJohn Baldwin *cflags |= MC_NOMAP; 2650d76bbe17SJohn Baldwin nsegs += count_mbuf_ext_pgs(m, skip, &nextaddr); 2651d76bbe17SJohn Baldwin skip = 0; 2652d76bbe17SJohn Baldwin continue; 2653d76bbe17SJohn Baldwin } 2654786099deSNavdeep Parhar va = mtod(m, vm_offset_t) + skip; 2655786099deSNavdeep Parhar len -= skip; 2656786099deSNavdeep Parhar skip = 0; 2657d76bbe17SJohn Baldwin paddr = pmap_kextract(va); 2658786099deSNavdeep Parhar nsegs += sglist_count((void *)(uintptr_t)va, len); 2659d76bbe17SJohn Baldwin if (paddr == nextaddr) 26607951040fSNavdeep Parhar nsegs--; 2661d76bbe17SJohn Baldwin nextaddr = pmap_kextract(va + len 
- 1) + 1; 26627951040fSNavdeep Parhar } 26637951040fSNavdeep Parhar 26647951040fSNavdeep Parhar return (nsegs); 26657951040fSNavdeep Parhar } 26667951040fSNavdeep Parhar 26677951040fSNavdeep Parhar /* 2668a4a4ad2dSNavdeep Parhar * The maximum number of segments that can fit in a WR. 2669a4a4ad2dSNavdeep Parhar */ 2670a4a4ad2dSNavdeep Parhar static int 267130e3f2b4SNavdeep Parhar max_nsegs_allowed(struct mbuf *m, bool vm_wr) 2672a4a4ad2dSNavdeep Parhar { 2673a4a4ad2dSNavdeep Parhar 267430e3f2b4SNavdeep Parhar if (vm_wr) { 267530e3f2b4SNavdeep Parhar if (needs_tso(m)) 267630e3f2b4SNavdeep Parhar return (TX_SGL_SEGS_VM_TSO); 267730e3f2b4SNavdeep Parhar return (TX_SGL_SEGS_VM); 267830e3f2b4SNavdeep Parhar } 267930e3f2b4SNavdeep Parhar 2680a4a4ad2dSNavdeep Parhar if (needs_tso(m)) { 2681a4a4ad2dSNavdeep Parhar if (needs_vxlan_tso(m)) 2682a4a4ad2dSNavdeep Parhar return (TX_SGL_SEGS_VXLAN_TSO); 2683a4a4ad2dSNavdeep Parhar else 2684a4a4ad2dSNavdeep Parhar return (TX_SGL_SEGS_TSO); 2685a4a4ad2dSNavdeep Parhar } 2686a4a4ad2dSNavdeep Parhar 2687a4a4ad2dSNavdeep Parhar return (TX_SGL_SEGS); 2688a4a4ad2dSNavdeep Parhar } 2689a4a4ad2dSNavdeep Parhar 2690a4a4ad2dSNavdeep Parhar /* 26917951040fSNavdeep Parhar * Analyze the mbuf to determine its tx needs. The mbuf passed in may change: 26927951040fSNavdeep Parhar * a) caller can assume it's been freed if this function returns with an error. 26937951040fSNavdeep Parhar * b) it may get defragged up if the gather list is too long for the hardware. 
26947951040fSNavdeep Parhar */ 26957951040fSNavdeep Parhar int 269630e3f2b4SNavdeep Parhar parse_pkt(struct mbuf **mp, bool vm_wr) 26977951040fSNavdeep Parhar { 26987951040fSNavdeep Parhar struct mbuf *m0 = *mp, *m; 26997951040fSNavdeep Parhar int rc, nsegs, defragged = 0, offset; 27007951040fSNavdeep Parhar struct ether_header *eh; 27017951040fSNavdeep Parhar void *l3hdr; 27027951040fSNavdeep Parhar #if defined(INET) || defined(INET6) 27037951040fSNavdeep Parhar struct tcphdr *tcp; 27047951040fSNavdeep Parhar #endif 2705bddf7343SJohn Baldwin #if defined(KERN_TLS) || defined(RATELIMIT) 270656fb710fSJohn Baldwin struct m_snd_tag *mst; 2707e38a50e8SJohn Baldwin #endif 27087951040fSNavdeep Parhar uint16_t eh_type; 2709d76bbe17SJohn Baldwin uint8_t cflags; 27107951040fSNavdeep Parhar 2711d76bbe17SJohn Baldwin cflags = 0; 27127951040fSNavdeep Parhar M_ASSERTPKTHDR(m0); 27137951040fSNavdeep Parhar if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { 27147951040fSNavdeep Parhar rc = EINVAL; 27157951040fSNavdeep Parhar fail: 27167951040fSNavdeep Parhar m_freem(m0); 27177951040fSNavdeep Parhar *mp = NULL; 27187951040fSNavdeep Parhar return (rc); 27197951040fSNavdeep Parhar } 27207951040fSNavdeep Parhar restart: 27217951040fSNavdeep Parhar /* 27227951040fSNavdeep Parhar * First count the number of gather list segments in the payload. 27237951040fSNavdeep Parhar * Defrag the mbuf if nsegs exceeds the hardware limit. 
27247951040fSNavdeep Parhar */ 27257951040fSNavdeep Parhar M_ASSERTPKTHDR(m0); 27267951040fSNavdeep Parhar MPASS(m0->m_pkthdr.len > 0); 2727d76bbe17SJohn Baldwin nsegs = count_mbuf_nsegs(m0, 0, &cflags); 2728bddf7343SJohn Baldwin #if defined(KERN_TLS) || defined(RATELIMIT) 2729e38a50e8SJohn Baldwin if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) 273056fb710fSJohn Baldwin mst = m0->m_pkthdr.snd_tag; 2731e38a50e8SJohn Baldwin else 273256fb710fSJohn Baldwin mst = NULL; 2733e38a50e8SJohn Baldwin #endif 2734bddf7343SJohn Baldwin #ifdef KERN_TLS 273556fb710fSJohn Baldwin if (mst != NULL && mst->type == IF_SND_TAG_TYPE_TLS) { 2736bddf7343SJohn Baldwin int len16; 2737bddf7343SJohn Baldwin 2738bddf7343SJohn Baldwin cflags |= MC_TLS; 2739bddf7343SJohn Baldwin set_mbuf_cflags(m0, cflags); 2740bddf7343SJohn Baldwin rc = t6_ktls_parse_pkt(m0, &nsegs, &len16); 2741bddf7343SJohn Baldwin if (rc != 0) 2742bddf7343SJohn Baldwin goto fail; 2743bddf7343SJohn Baldwin set_mbuf_nsegs(m0, nsegs); 2744bddf7343SJohn Baldwin set_mbuf_len16(m0, len16); 2745bddf7343SJohn Baldwin return (0); 2746bddf7343SJohn Baldwin } 2747bddf7343SJohn Baldwin #endif 274830e3f2b4SNavdeep Parhar if (nsegs > max_nsegs_allowed(m0, vm_wr)) { 27497054f6ecSNavdeep Parhar if (defragged++ > 0) { 27507951040fSNavdeep Parhar rc = EFBIG; 27517951040fSNavdeep Parhar goto fail; 27527951040fSNavdeep Parhar } 27537054f6ecSNavdeep Parhar counter_u64_add(defrags, 1); 27547054f6ecSNavdeep Parhar if ((m = m_defrag(m0, M_NOWAIT)) == NULL) { 27557054f6ecSNavdeep Parhar rc = ENOMEM; 27567054f6ecSNavdeep Parhar goto fail; 27577054f6ecSNavdeep Parhar } 27587951040fSNavdeep Parhar *mp = m0 = m; /* update caller's copy after defrag */ 27597951040fSNavdeep Parhar goto restart; 27607951040fSNavdeep Parhar } 27617951040fSNavdeep Parhar 2762d76bbe17SJohn Baldwin if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && 2763d76bbe17SJohn Baldwin !(cflags & MC_NOMAP))) { 27647054f6ecSNavdeep Parhar counter_u64_add(pullups, 1); 
27657951040fSNavdeep Parhar m0 = m_pullup(m0, m0->m_pkthdr.len); 27667951040fSNavdeep Parhar if (m0 == NULL) { 27677951040fSNavdeep Parhar /* Should have left well enough alone. */ 27687951040fSNavdeep Parhar rc = EFBIG; 27697951040fSNavdeep Parhar goto fail; 27707951040fSNavdeep Parhar } 27717951040fSNavdeep Parhar *mp = m0; /* update caller's copy after pullup */ 27727951040fSNavdeep Parhar goto restart; 27737951040fSNavdeep Parhar } 27747951040fSNavdeep Parhar set_mbuf_nsegs(m0, nsegs); 2775d76bbe17SJohn Baldwin set_mbuf_cflags(m0, cflags); 277630e3f2b4SNavdeep Parhar calculate_mbuf_len16(m0, vm_wr); 27777951040fSNavdeep Parhar 2778786099deSNavdeep Parhar #ifdef RATELIMIT 2779786099deSNavdeep Parhar /* 2780786099deSNavdeep Parhar * Ethofld is limited to TCP and UDP for now, and only when L4 hw 2781a4a4ad2dSNavdeep Parhar * checksumming is enabled. needs_outer_l4_csum happens to check for 2782a4a4ad2dSNavdeep Parhar * all the right things. 2783786099deSNavdeep Parhar */ 278456fb710fSJohn Baldwin if (__predict_false(needs_eo(mst) && !needs_outer_l4_csum(m0))) { 2785fb3bc596SJohn Baldwin m_snd_tag_rele(m0->m_pkthdr.snd_tag); 2786786099deSNavdeep Parhar m0->m_pkthdr.snd_tag = NULL; 2787fb3bc596SJohn Baldwin m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; 278856fb710fSJohn Baldwin mst = NULL; 2789fb3bc596SJohn Baldwin } 2790786099deSNavdeep Parhar #endif 2791786099deSNavdeep Parhar 2792c0236bd9SNavdeep Parhar if (!needs_hwcsum(m0) 2793786099deSNavdeep Parhar #ifdef RATELIMIT 279456fb710fSJohn Baldwin && !needs_eo(mst) 2795786099deSNavdeep Parhar #endif 2796c0236bd9SNavdeep Parhar ) 27977951040fSNavdeep Parhar return (0); 27987951040fSNavdeep Parhar 27997951040fSNavdeep Parhar m = m0; 28007951040fSNavdeep Parhar eh = mtod(m, struct ether_header *); 28017951040fSNavdeep Parhar eh_type = ntohs(eh->ether_type); 28027951040fSNavdeep Parhar if (eh_type == ETHERTYPE_VLAN) { 28037951040fSNavdeep Parhar struct ether_vlan_header *evh = (void *)eh; 28047951040fSNavdeep Parhar 
28057951040fSNavdeep Parhar eh_type = ntohs(evh->evl_proto); 28067951040fSNavdeep Parhar m0->m_pkthdr.l2hlen = sizeof(*evh); 28077951040fSNavdeep Parhar } else 28087951040fSNavdeep Parhar m0->m_pkthdr.l2hlen = sizeof(*eh); 28097951040fSNavdeep Parhar 28107951040fSNavdeep Parhar offset = 0; 28117951040fSNavdeep Parhar l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 28127951040fSNavdeep Parhar 28137951040fSNavdeep Parhar switch (eh_type) { 28147951040fSNavdeep Parhar #ifdef INET6 28157951040fSNavdeep Parhar case ETHERTYPE_IPV6: 2816a4a4ad2dSNavdeep Parhar m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr); 28177951040fSNavdeep Parhar break; 28187951040fSNavdeep Parhar #endif 28197951040fSNavdeep Parhar #ifdef INET 28207951040fSNavdeep Parhar case ETHERTYPE_IP: 28217951040fSNavdeep Parhar { 28227951040fSNavdeep Parhar struct ip *ip = l3hdr; 28237951040fSNavdeep Parhar 2824a4a4ad2dSNavdeep Parhar if (needs_vxlan_csum(m0)) { 2825a4a4ad2dSNavdeep Parhar /* Driver will do the outer IP hdr checksum. */ 2826a4a4ad2dSNavdeep Parhar ip->ip_sum = 0; 2827a4a4ad2dSNavdeep Parhar if (needs_vxlan_tso(m0)) { 2828a4a4ad2dSNavdeep Parhar const uint16_t ipl = ip->ip_len; 2829a4a4ad2dSNavdeep Parhar 2830a4a4ad2dSNavdeep Parhar ip->ip_len = 0; 2831a4a4ad2dSNavdeep Parhar ip->ip_sum = ~in_cksum_hdr(ip); 2832a4a4ad2dSNavdeep Parhar ip->ip_len = ipl; 2833a4a4ad2dSNavdeep Parhar } else 2834a4a4ad2dSNavdeep Parhar ip->ip_sum = in_cksum_hdr(ip); 2835a4a4ad2dSNavdeep Parhar } 2836a4a4ad2dSNavdeep Parhar m0->m_pkthdr.l3hlen = ip->ip_hl << 2; 28377951040fSNavdeep Parhar break; 28387951040fSNavdeep Parhar } 28397951040fSNavdeep Parhar #endif 28407951040fSNavdeep Parhar default: 28417951040fSNavdeep Parhar panic("%s: ethertype 0x%04x unknown. 
if_cxgbe must be compiled" 28427951040fSNavdeep Parhar " with the same INET/INET6 options as the kernel.", 28437951040fSNavdeep Parhar __func__, eh_type); 28447951040fSNavdeep Parhar } 28457951040fSNavdeep Parhar 2846a4a4ad2dSNavdeep Parhar if (needs_vxlan_csum(m0)) { 2847a4a4ad2dSNavdeep Parhar m0->m_pkthdr.l4hlen = sizeof(struct udphdr); 2848a4a4ad2dSNavdeep Parhar m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header); 2849a4a4ad2dSNavdeep Parhar 2850a4a4ad2dSNavdeep Parhar /* Inner headers. */ 2851a4a4ad2dSNavdeep Parhar eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen + 2852a4a4ad2dSNavdeep Parhar sizeof(struct udphdr) + sizeof(struct vxlan_header)); 2853a4a4ad2dSNavdeep Parhar eh_type = ntohs(eh->ether_type); 2854a4a4ad2dSNavdeep Parhar if (eh_type == ETHERTYPE_VLAN) { 2855a4a4ad2dSNavdeep Parhar struct ether_vlan_header *evh = (void *)eh; 2856a4a4ad2dSNavdeep Parhar 2857a4a4ad2dSNavdeep Parhar eh_type = ntohs(evh->evl_proto); 2858a4a4ad2dSNavdeep Parhar m0->m_pkthdr.inner_l2hlen = sizeof(*evh); 2859a4a4ad2dSNavdeep Parhar } else 2860a4a4ad2dSNavdeep Parhar m0->m_pkthdr.inner_l2hlen = sizeof(*eh); 2861a4a4ad2dSNavdeep Parhar l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); 2862a4a4ad2dSNavdeep Parhar 2863a4a4ad2dSNavdeep Parhar switch (eh_type) { 2864a4a4ad2dSNavdeep Parhar #ifdef INET6 2865a4a4ad2dSNavdeep Parhar case ETHERTYPE_IPV6: 2866a4a4ad2dSNavdeep Parhar m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr); 2867a4a4ad2dSNavdeep Parhar break; 2868a4a4ad2dSNavdeep Parhar #endif 2869a4a4ad2dSNavdeep Parhar #ifdef INET 2870a4a4ad2dSNavdeep Parhar case ETHERTYPE_IP: 2871a4a4ad2dSNavdeep Parhar { 2872a4a4ad2dSNavdeep Parhar struct ip *ip = l3hdr; 2873a4a4ad2dSNavdeep Parhar 2874a4a4ad2dSNavdeep Parhar m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2; 2875a4a4ad2dSNavdeep Parhar break; 2876a4a4ad2dSNavdeep Parhar } 2877a4a4ad2dSNavdeep Parhar #endif 2878a4a4ad2dSNavdeep Parhar default: 2879a4a4ad2dSNavdeep Parhar panic("%s: VXLAN hw offload requested with 
unknown " 2880a4a4ad2dSNavdeep Parhar "ethertype 0x%04x. if_cxgbe must be compiled" 2881a4a4ad2dSNavdeep Parhar " with the same INET/INET6 options as the kernel.", 2882a4a4ad2dSNavdeep Parhar __func__, eh_type); 2883a4a4ad2dSNavdeep Parhar } 28847951040fSNavdeep Parhar #if defined(INET) || defined(INET6) 2885a4a4ad2dSNavdeep Parhar if (needs_inner_tcp_csum(m0)) { 2886a4a4ad2dSNavdeep Parhar tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen); 2887a4a4ad2dSNavdeep Parhar m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4; 2888a4a4ad2dSNavdeep Parhar } 2889a4a4ad2dSNavdeep Parhar #endif 2890a4a4ad2dSNavdeep Parhar MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); 2891a4a4ad2dSNavdeep Parhar m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP | 2892a4a4ad2dSNavdeep Parhar CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO | CSUM_INNER_IP | 2893a4a4ad2dSNavdeep Parhar CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | 2894a4a4ad2dSNavdeep Parhar CSUM_ENCAP_VXLAN; 2895a4a4ad2dSNavdeep Parhar } 2896a4a4ad2dSNavdeep Parhar 2897a4a4ad2dSNavdeep Parhar #if defined(INET) || defined(INET6) 2898a4a4ad2dSNavdeep Parhar if (needs_outer_tcp_csum(m0)) { 28997951040fSNavdeep Parhar tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); 29007951040fSNavdeep Parhar m0->m_pkthdr.l4hlen = tcp->th_off * 4; 2901786099deSNavdeep Parhar #ifdef RATELIMIT 2902786099deSNavdeep Parhar if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) { 2903786099deSNavdeep Parhar set_mbuf_eo_tsclk_tsoff(m0, 2904786099deSNavdeep Parhar V_FW_ETH_TX_EO_WR_TSCLK(tsclk) | 2905786099deSNavdeep Parhar V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1)); 2906786099deSNavdeep Parhar } else 2907786099deSNavdeep Parhar set_mbuf_eo_tsclk_tsoff(m0, 0); 2908a4a4ad2dSNavdeep Parhar } else if (needs_outer_udp_csum(m0)) { 2909786099deSNavdeep Parhar m0->m_pkthdr.l4hlen = sizeof(struct udphdr); 2910786099deSNavdeep Parhar #endif 29116af45170SJohn Baldwin } 2912786099deSNavdeep Parhar #ifdef RATELIMIT 291356fb710fSJohn Baldwin if 
(needs_eo(mst)) { 2914786099deSNavdeep Parhar u_int immhdrs; 2915786099deSNavdeep Parhar 2916786099deSNavdeep Parhar /* EO WRs have the headers in the WR and not the GL. */ 2917786099deSNavdeep Parhar immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + 2918786099deSNavdeep Parhar m0->m_pkthdr.l4hlen; 2919d76bbe17SJohn Baldwin cflags = 0; 2920d76bbe17SJohn Baldwin nsegs = count_mbuf_nsegs(m0, immhdrs, &cflags); 2921d76bbe17SJohn Baldwin MPASS(cflags == mbuf_cflags(m0)); 2922786099deSNavdeep Parhar set_mbuf_eo_nsegs(m0, nsegs); 2923786099deSNavdeep Parhar set_mbuf_eo_len16(m0, 2924786099deSNavdeep Parhar txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0))); 2925786099deSNavdeep Parhar } 2926786099deSNavdeep Parhar #endif 29277951040fSNavdeep Parhar #endif 29287951040fSNavdeep Parhar MPASS(m0 == *mp); 29297951040fSNavdeep Parhar return (0); 29307951040fSNavdeep Parhar } 29317951040fSNavdeep Parhar 29327951040fSNavdeep Parhar void * 29337951040fSNavdeep Parhar start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) 29347951040fSNavdeep Parhar { 29357951040fSNavdeep Parhar struct sge_eq *eq = &wrq->eq; 29367951040fSNavdeep Parhar struct adapter *sc = wrq->adapter; 29377951040fSNavdeep Parhar int ndesc, available; 29387951040fSNavdeep Parhar struct wrqe *wr; 29397951040fSNavdeep Parhar void *w; 29407951040fSNavdeep Parhar 29417951040fSNavdeep Parhar MPASS(len16 > 0); 29420cadedfcSNavdeep Parhar ndesc = tx_len16_to_desc(len16); 29437951040fSNavdeep Parhar MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); 29447951040fSNavdeep Parhar 29457951040fSNavdeep Parhar EQ_LOCK(eq); 29467951040fSNavdeep Parhar 29478d6ae10aSNavdeep Parhar if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 29487951040fSNavdeep Parhar drain_wrq_wr_list(sc, wrq); 29497951040fSNavdeep Parhar 29507951040fSNavdeep Parhar if (!STAILQ_EMPTY(&wrq->wr_list)) { 29517951040fSNavdeep Parhar slowpath: 29527951040fSNavdeep Parhar EQ_UNLOCK(eq); 29537951040fSNavdeep Parhar wr = 
alloc_wrqe(len16 * 16, wrq); 29547951040fSNavdeep Parhar if (__predict_false(wr == NULL)) 29557951040fSNavdeep Parhar return (NULL); 29567951040fSNavdeep Parhar cookie->pidx = -1; 29577951040fSNavdeep Parhar cookie->ndesc = ndesc; 29587951040fSNavdeep Parhar return (&wr->wr); 29597951040fSNavdeep Parhar } 29607951040fSNavdeep Parhar 29617951040fSNavdeep Parhar eq->cidx = read_hw_cidx(eq); 29627951040fSNavdeep Parhar if (eq->pidx == eq->cidx) 29637951040fSNavdeep Parhar available = eq->sidx - 1; 29647951040fSNavdeep Parhar else 29657951040fSNavdeep Parhar available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 29667951040fSNavdeep Parhar if (available < ndesc) 29677951040fSNavdeep Parhar goto slowpath; 29687951040fSNavdeep Parhar 29697951040fSNavdeep Parhar cookie->pidx = eq->pidx; 29707951040fSNavdeep Parhar cookie->ndesc = ndesc; 29717951040fSNavdeep Parhar TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); 29727951040fSNavdeep Parhar 29737951040fSNavdeep Parhar w = &eq->desc[eq->pidx]; 29747951040fSNavdeep Parhar IDXINCR(eq->pidx, ndesc, eq->sidx); 2975f50c49ccSNavdeep Parhar if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { 29767951040fSNavdeep Parhar w = &wrq->ss[0]; 29777951040fSNavdeep Parhar wrq->ss_pidx = cookie->pidx; 29787951040fSNavdeep Parhar wrq->ss_len = len16 * 16; 29797951040fSNavdeep Parhar } 29807951040fSNavdeep Parhar 29817951040fSNavdeep Parhar EQ_UNLOCK(eq); 29827951040fSNavdeep Parhar 29837951040fSNavdeep Parhar return (w); 29847951040fSNavdeep Parhar } 29857951040fSNavdeep Parhar 29867951040fSNavdeep Parhar void 29877951040fSNavdeep Parhar commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) 29887951040fSNavdeep Parhar { 29897951040fSNavdeep Parhar struct sge_eq *eq = &wrq->eq; 29907951040fSNavdeep Parhar struct adapter *sc = wrq->adapter; 29917951040fSNavdeep Parhar int ndesc, pidx; 29927951040fSNavdeep Parhar struct wrq_cookie *prev, *next; 29937951040fSNavdeep Parhar 29947951040fSNavdeep Parhar if (cookie->pidx 
== -1) { 29957951040fSNavdeep Parhar struct wrqe *wr = __containerof(w, struct wrqe, wr); 29967951040fSNavdeep Parhar 29977951040fSNavdeep Parhar t4_wrq_tx(sc, wr); 29987951040fSNavdeep Parhar return; 29997951040fSNavdeep Parhar } 30007951040fSNavdeep Parhar 30017951040fSNavdeep Parhar if (__predict_false(w == &wrq->ss[0])) { 30027951040fSNavdeep Parhar int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; 30037951040fSNavdeep Parhar 30047951040fSNavdeep Parhar MPASS(wrq->ss_len > n); /* WR had better wrap around. */ 30057951040fSNavdeep Parhar bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); 30067951040fSNavdeep Parhar bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); 30077951040fSNavdeep Parhar wrq->tx_wrs_ss++; 30087951040fSNavdeep Parhar } else 30097951040fSNavdeep Parhar wrq->tx_wrs_direct++; 30107951040fSNavdeep Parhar 30117951040fSNavdeep Parhar EQ_LOCK(eq); 30128d6ae10aSNavdeep Parhar ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ 30138d6ae10aSNavdeep Parhar pidx = cookie->pidx; 30148d6ae10aSNavdeep Parhar MPASS(pidx >= 0 && pidx < eq->sidx); 30157951040fSNavdeep Parhar prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); 30167951040fSNavdeep Parhar next = TAILQ_NEXT(cookie, link); 30177951040fSNavdeep Parhar if (prev == NULL) { 30187951040fSNavdeep Parhar MPASS(pidx == eq->dbidx); 30192e09fe91SNavdeep Parhar if (next == NULL || ndesc >= 16) { 30202e09fe91SNavdeep Parhar int available; 30212e09fe91SNavdeep Parhar struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 30222e09fe91SNavdeep Parhar 30232e09fe91SNavdeep Parhar /* 30242e09fe91SNavdeep Parhar * Note that the WR via which we'll request tx updates 30252e09fe91SNavdeep Parhar * is at pidx and not eq->pidx, which has moved on 30262e09fe91SNavdeep Parhar * already. 
30272e09fe91SNavdeep Parhar */ 30282e09fe91SNavdeep Parhar dst = (void *)&eq->desc[pidx]; 30292e09fe91SNavdeep Parhar available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 30302e09fe91SNavdeep Parhar if (available < eq->sidx / 4 && 30312e09fe91SNavdeep Parhar atomic_cmpset_int(&eq->equiq, 0, 1)) { 3032ddf09ad6SNavdeep Parhar /* 3033ddf09ad6SNavdeep Parhar * XXX: This is not 100% reliable with some 3034ddf09ad6SNavdeep Parhar * types of WRs. But this is a very unusual 3035ddf09ad6SNavdeep Parhar * situation for an ofld/ctrl queue anyway. 3036ddf09ad6SNavdeep Parhar */ 30372e09fe91SNavdeep Parhar dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 30382e09fe91SNavdeep Parhar F_FW_WR_EQUEQ); 30392e09fe91SNavdeep Parhar } 30402e09fe91SNavdeep Parhar 30417951040fSNavdeep Parhar ring_eq_db(wrq->adapter, eq, ndesc); 30422e09fe91SNavdeep Parhar } else { 30437951040fSNavdeep Parhar MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); 30447951040fSNavdeep Parhar next->pidx = pidx; 30457951040fSNavdeep Parhar next->ndesc += ndesc; 30467951040fSNavdeep Parhar } 30477951040fSNavdeep Parhar } else { 30487951040fSNavdeep Parhar MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); 30497951040fSNavdeep Parhar prev->ndesc += ndesc; 30507951040fSNavdeep Parhar } 30517951040fSNavdeep Parhar TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); 30527951040fSNavdeep Parhar 30537951040fSNavdeep Parhar if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 30547951040fSNavdeep Parhar drain_wrq_wr_list(sc, wrq); 30557951040fSNavdeep Parhar 30567951040fSNavdeep Parhar #ifdef INVARIANTS 30577951040fSNavdeep Parhar if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { 30587951040fSNavdeep Parhar /* Doorbell must have caught up to the pidx. 
*/ 30597951040fSNavdeep Parhar MPASS(wrq->eq.pidx == wrq->eq.dbidx); 30607951040fSNavdeep Parhar } 30617951040fSNavdeep Parhar #endif 30627951040fSNavdeep Parhar EQ_UNLOCK(eq); 30637951040fSNavdeep Parhar } 30647951040fSNavdeep Parhar 30657951040fSNavdeep Parhar static u_int 30667951040fSNavdeep Parhar can_resume_eth_tx(struct mp_ring *r) 30677951040fSNavdeep Parhar { 30687951040fSNavdeep Parhar struct sge_eq *eq = r->cookie; 30697951040fSNavdeep Parhar 30707951040fSNavdeep Parhar return (total_available_tx_desc(eq) > eq->sidx / 8); 30717951040fSNavdeep Parhar } 30727951040fSNavdeep Parhar 3073d735920dSNavdeep Parhar static inline bool 30747951040fSNavdeep Parhar cannot_use_txpkts(struct mbuf *m) 30757951040fSNavdeep Parhar { 30767951040fSNavdeep Parhar /* maybe put a GL limit too, to avoid silliness? */ 30777951040fSNavdeep Parhar 3078bddf7343SJohn Baldwin return (needs_tso(m) || (mbuf_cflags(m) & (MC_RAW_WR | MC_TLS)) != 0); 30797951040fSNavdeep Parhar } 30807951040fSNavdeep Parhar 30811404daa7SNavdeep Parhar static inline int 30821404daa7SNavdeep Parhar discard_tx(struct sge_eq *eq) 30831404daa7SNavdeep Parhar { 30841404daa7SNavdeep Parhar 30851404daa7SNavdeep Parhar return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); 30861404daa7SNavdeep Parhar } 30871404daa7SNavdeep Parhar 30885cdaef71SJohn Baldwin static inline int 3089d735920dSNavdeep Parhar wr_can_update_eq(void *p) 30905cdaef71SJohn Baldwin { 3091d735920dSNavdeep Parhar struct fw_eth_tx_pkts_wr *wr = p; 30925cdaef71SJohn Baldwin 30935cdaef71SJohn Baldwin switch (G_FW_WR_OP(be32toh(wr->op_pkd))) { 30945cdaef71SJohn Baldwin case FW_ULPTX_WR: 30955cdaef71SJohn Baldwin case FW_ETH_TX_PKT_WR: 30965cdaef71SJohn Baldwin case FW_ETH_TX_PKTS_WR: 3097693a9dfcSNavdeep Parhar case FW_ETH_TX_PKTS2_WR: 30985cdaef71SJohn Baldwin case FW_ETH_TX_PKT_VM_WR: 3099d735920dSNavdeep Parhar case FW_ETH_TX_PKTS_VM_WR: 31005cdaef71SJohn Baldwin return (1); 31015cdaef71SJohn Baldwin default: 31025cdaef71SJohn Baldwin 
return (0); 31035cdaef71SJohn Baldwin } 31045cdaef71SJohn Baldwin } 31055cdaef71SJohn Baldwin 3106d735920dSNavdeep Parhar static inline void 3107d735920dSNavdeep Parhar set_txupdate_flags(struct sge_txq *txq, u_int avail, 3108d735920dSNavdeep Parhar struct fw_eth_tx_pkt_wr *wr) 3109d735920dSNavdeep Parhar { 3110d735920dSNavdeep Parhar struct sge_eq *eq = &txq->eq; 3111d735920dSNavdeep Parhar struct txpkts *txp = &txq->txp; 3112d735920dSNavdeep Parhar 3113d735920dSNavdeep Parhar if ((txp->npkt > 0 || avail < eq->sidx / 2) && 3114d735920dSNavdeep Parhar atomic_cmpset_int(&eq->equiq, 0, 1)) { 3115d735920dSNavdeep Parhar wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); 3116d735920dSNavdeep Parhar eq->equeqidx = eq->pidx; 3117d735920dSNavdeep Parhar } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 3118d735920dSNavdeep Parhar wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 3119d735920dSNavdeep Parhar eq->equeqidx = eq->pidx; 3120d735920dSNavdeep Parhar } 3121d735920dSNavdeep Parhar } 3122d735920dSNavdeep Parhar 31237951040fSNavdeep Parhar /* 31247951040fSNavdeep Parhar * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to 31257951040fSNavdeep Parhar * be consumed. Return the actual number consumed. 0 indicates a stall. 
31267951040fSNavdeep Parhar */ 31277951040fSNavdeep Parhar static u_int 3128d735920dSNavdeep Parhar eth_tx(struct mp_ring *r, u_int cidx, u_int pidx, bool *coalescing) 31297951040fSNavdeep Parhar { 31307951040fSNavdeep Parhar struct sge_txq *txq = r->cookie; 31317951040fSNavdeep Parhar struct ifnet *ifp = txq->ifp; 3132d735920dSNavdeep Parhar struct sge_eq *eq = &txq->eq; 3133d735920dSNavdeep Parhar struct txpkts *txp = &txq->txp; 3134fe2ebb76SJohn Baldwin struct vi_info *vi = ifp->if_softc; 31357c228be3SNavdeep Parhar struct adapter *sc = vi->adapter; 31367951040fSNavdeep Parhar u_int total, remaining; /* # of packets */ 3137d735920dSNavdeep Parhar u_int n, avail, dbdiff; /* # of hardware descriptors */ 3138d735920dSNavdeep Parhar int i, rc; 3139d735920dSNavdeep Parhar struct mbuf *m0; 3140d735920dSNavdeep Parhar bool snd; 3141d735920dSNavdeep Parhar void *wr; /* start of the last WR written to the ring */ 3142d735920dSNavdeep Parhar 3143d735920dSNavdeep Parhar TXQ_LOCK_ASSERT_OWNED(txq); 31447951040fSNavdeep Parhar 31457951040fSNavdeep Parhar remaining = IDXDIFF(pidx, cidx, r->size); 31461404daa7SNavdeep Parhar if (__predict_false(discard_tx(eq))) { 3147d735920dSNavdeep Parhar for (i = 0; i < txp->npkt; i++) 3148d735920dSNavdeep Parhar m_freem(txp->mb[i]); 3149d735920dSNavdeep Parhar txp->npkt = 0; 31507951040fSNavdeep Parhar while (cidx != pidx) { 31517951040fSNavdeep Parhar m0 = r->items[cidx]; 31527951040fSNavdeep Parhar m_freem(m0); 31537951040fSNavdeep Parhar if (++cidx == r->size) 31547951040fSNavdeep Parhar cidx = 0; 31557951040fSNavdeep Parhar } 3156d735920dSNavdeep Parhar reclaim_tx_descs(txq, eq->sidx); 3157d735920dSNavdeep Parhar *coalescing = false; 3158d735920dSNavdeep Parhar return (remaining); /* emptied */ 31597951040fSNavdeep Parhar } 31607951040fSNavdeep Parhar 31617951040fSNavdeep Parhar /* How many hardware descriptors do we have readily available. 
*/ 3162d735920dSNavdeep Parhar if (eq->pidx == eq->cidx) { 3163d735920dSNavdeep Parhar avail = eq->sidx - 1; 3164d735920dSNavdeep Parhar if (txp->score++ >= 5) 3165d735920dSNavdeep Parhar txp->score = 5; /* tx is completely idle, reset. */ 3166d735920dSNavdeep Parhar } else 3167d735920dSNavdeep Parhar avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 31687951040fSNavdeep Parhar 3169d735920dSNavdeep Parhar total = 0; 3170d735920dSNavdeep Parhar if (remaining == 0) { 3171d735920dSNavdeep Parhar if (txp->score-- == 1) /* egr_update had to drain txpkts */ 3172d735920dSNavdeep Parhar txp->score = 1; 3173d735920dSNavdeep Parhar goto send_txpkts; 3174d735920dSNavdeep Parhar } 3175d735920dSNavdeep Parhar 3176d735920dSNavdeep Parhar dbdiff = 0; 3177d735920dSNavdeep Parhar MPASS(remaining > 0); 31787951040fSNavdeep Parhar while (remaining > 0) { 31797951040fSNavdeep Parhar m0 = r->items[cidx]; 31807951040fSNavdeep Parhar M_ASSERTPKTHDR(m0); 31817951040fSNavdeep Parhar MPASS(m0->m_nextpkt == NULL); 31827951040fSNavdeep Parhar 3183d735920dSNavdeep Parhar if (avail < 2 * SGE_MAX_WR_NDESC) 3184d735920dSNavdeep Parhar avail += reclaim_tx_descs(txq, 64); 3185d735920dSNavdeep Parhar 3186d735920dSNavdeep Parhar if (txp->npkt > 0 || remaining > 1 || txp->score > 3 || 3187d735920dSNavdeep Parhar atomic_load_int(&txq->eq.equiq) != 0) { 318830e3f2b4SNavdeep Parhar if (vi->flags & TX_USES_VM_WR) 3189d735920dSNavdeep Parhar rc = add_to_txpkts_vf(sc, txq, m0, avail, &snd); 3190d735920dSNavdeep Parhar else 3191d735920dSNavdeep Parhar rc = add_to_txpkts_pf(sc, txq, m0, avail, &snd); 3192d735920dSNavdeep Parhar } else { 3193d735920dSNavdeep Parhar snd = false; 3194d735920dSNavdeep Parhar rc = EINVAL; 3195d735920dSNavdeep Parhar } 3196d735920dSNavdeep Parhar if (snd) { 3197d735920dSNavdeep Parhar MPASS(txp->npkt > 0); 3198d735920dSNavdeep Parhar for (i = 0; i < txp->npkt; i++) 3199d735920dSNavdeep Parhar ETHER_BPF_MTAP(ifp, txp->mb[i]); 3200d735920dSNavdeep Parhar if (txp->npkt > 1) { 
3201d735920dSNavdeep Parhar if (txp->score++ >= 10) 3202d735920dSNavdeep Parhar txp->score = 10; 3203d735920dSNavdeep Parhar MPASS(avail >= tx_len16_to_desc(txp->len16)); 320430e3f2b4SNavdeep Parhar if (vi->flags & TX_USES_VM_WR) 3205d735920dSNavdeep Parhar n = write_txpkts_vm_wr(sc, txq); 3206d735920dSNavdeep Parhar else 3207d735920dSNavdeep Parhar n = write_txpkts_wr(sc, txq); 3208d735920dSNavdeep Parhar } else { 3209d735920dSNavdeep Parhar MPASS(avail >= 3210d735920dSNavdeep Parhar tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 321130e3f2b4SNavdeep Parhar if (vi->flags & TX_USES_VM_WR) 3212d735920dSNavdeep Parhar n = write_txpkt_vm_wr(sc, txq, 3213d735920dSNavdeep Parhar txp->mb[0]); 3214d735920dSNavdeep Parhar else 3215d735920dSNavdeep Parhar n = write_txpkt_wr(sc, txq, txp->mb[0], 3216d735920dSNavdeep Parhar avail); 3217d735920dSNavdeep Parhar } 3218d735920dSNavdeep Parhar MPASS(n <= SGE_MAX_WR_NDESC); 3219d735920dSNavdeep Parhar avail -= n; 3220d735920dSNavdeep Parhar dbdiff += n; 3221d735920dSNavdeep Parhar wr = &eq->desc[eq->pidx]; 3222d735920dSNavdeep Parhar IDXINCR(eq->pidx, n, eq->sidx); 3223d735920dSNavdeep Parhar txp->npkt = 0; /* emptied */ 3224d735920dSNavdeep Parhar } 3225d735920dSNavdeep Parhar if (rc == 0) { 3226d735920dSNavdeep Parhar /* m0 was coalesced into txq->txpkts. */ 3227d735920dSNavdeep Parhar goto next_mbuf; 3228d735920dSNavdeep Parhar } 3229d735920dSNavdeep Parhar if (rc == EAGAIN) { 3230d735920dSNavdeep Parhar /* 3231d735920dSNavdeep Parhar * m0 is suitable for tx coalescing but could not be 3232d735920dSNavdeep Parhar * combined with the existing txq->txpkts, which has now 3233d735920dSNavdeep Parhar * been transmitted. Start a new txpkts with m0. 
3234d735920dSNavdeep Parhar */ 3235d735920dSNavdeep Parhar MPASS(snd); 3236d735920dSNavdeep Parhar MPASS(txp->npkt == 0); 3237d735920dSNavdeep Parhar continue; 32387951040fSNavdeep Parhar } 32397951040fSNavdeep Parhar 3240d735920dSNavdeep Parhar MPASS(rc != 0 && rc != EAGAIN); 3241d735920dSNavdeep Parhar MPASS(txp->npkt == 0); 3242565b8fceSNavdeep Parhar 3243565b8fceSNavdeep Parhar n = tx_len16_to_desc(mbuf_len16(m0)); 3244565b8fceSNavdeep Parhar if (__predict_false(avail < n)) { 3245565b8fceSNavdeep Parhar avail += reclaim_tx_descs(txq, min(n, 32)); 3246565b8fceSNavdeep Parhar if (avail < n) 3247565b8fceSNavdeep Parhar break; /* out of descriptors */ 3248565b8fceSNavdeep Parhar } 3249565b8fceSNavdeep Parhar 3250d735920dSNavdeep Parhar wr = &eq->desc[eq->pidx]; 3251bddf7343SJohn Baldwin if (mbuf_cflags(m0) & MC_RAW_WR) { 3252d735920dSNavdeep Parhar n = write_raw_wr(txq, wr, m0, avail); 3253bddf7343SJohn Baldwin #ifdef KERN_TLS 3254bddf7343SJohn Baldwin } else if (mbuf_cflags(m0) & MC_TLS) { 3255bddf7343SJohn Baldwin ETHER_BPF_MTAP(ifp, m0); 3256d735920dSNavdeep Parhar n = t6_ktls_write_wr(txq, wr, m0, mbuf_nsegs(m0), 3257d735920dSNavdeep Parhar avail); 3258bddf7343SJohn Baldwin #endif 32597951040fSNavdeep Parhar } else { 32603bbb68f0SNavdeep Parhar ETHER_BPF_MTAP(ifp, m0); 326130e3f2b4SNavdeep Parhar if (vi->flags & TX_USES_VM_WR) 3262d735920dSNavdeep Parhar n = write_txpkt_vm_wr(sc, txq, m0); 3263d735920dSNavdeep Parhar else 3264d735920dSNavdeep Parhar n = write_txpkt_wr(sc, txq, m0, avail); 3265d735920dSNavdeep Parhar } 3266d735920dSNavdeep Parhar MPASS(n >= 1 && n <= avail); 3267bddf7343SJohn Baldwin if (!(mbuf_cflags(m0) & MC_TLS)) 3268bddf7343SJohn Baldwin MPASS(n <= SGE_MAX_WR_NDESC); 32697951040fSNavdeep Parhar 3270d735920dSNavdeep Parhar avail -= n; 32717951040fSNavdeep Parhar dbdiff += n; 32727951040fSNavdeep Parhar IDXINCR(eq->pidx, n, eq->sidx); 32737951040fSNavdeep Parhar 3274d735920dSNavdeep Parhar if (dbdiff >= 512 / EQ_ESIZE) { /* 
X_FETCHBURSTMAX_512B */ 3275d735920dSNavdeep Parhar if (wr_can_update_eq(wr)) 3276d735920dSNavdeep Parhar set_txupdate_flags(txq, avail, wr); 32777951040fSNavdeep Parhar ring_eq_db(sc, eq, dbdiff); 3278d735920dSNavdeep Parhar avail += reclaim_tx_descs(txq, 32); 32797951040fSNavdeep Parhar dbdiff = 0; 32807951040fSNavdeep Parhar } 3281d735920dSNavdeep Parhar next_mbuf: 3282d735920dSNavdeep Parhar total++; 3283d735920dSNavdeep Parhar remaining--; 3284d735920dSNavdeep Parhar if (__predict_false(++cidx == r->size)) 3285d735920dSNavdeep Parhar cidx = 0; 32867951040fSNavdeep Parhar } 32877951040fSNavdeep Parhar if (dbdiff != 0) { 3288d735920dSNavdeep Parhar if (wr_can_update_eq(wr)) 3289d735920dSNavdeep Parhar set_txupdate_flags(txq, avail, wr); 32907951040fSNavdeep Parhar ring_eq_db(sc, eq, dbdiff); 32917951040fSNavdeep Parhar reclaim_tx_descs(txq, 32); 3292d735920dSNavdeep Parhar } else if (eq->pidx == eq->cidx && txp->npkt > 0 && 3293d735920dSNavdeep Parhar atomic_load_int(&txq->eq.equiq) == 0) { 3294d735920dSNavdeep Parhar /* 3295d735920dSNavdeep Parhar * If nothing was submitted to the chip for tx (it was coalesced 3296d735920dSNavdeep Parhar * into txpkts instead) and there is no tx update outstanding 3297d735920dSNavdeep Parhar * then we need to send txpkts now. 
3298d735920dSNavdeep Parhar */ 3299d735920dSNavdeep Parhar send_txpkts: 3300d735920dSNavdeep Parhar MPASS(txp->npkt > 0); 3301d735920dSNavdeep Parhar for (i = 0; i < txp->npkt; i++) 3302d735920dSNavdeep Parhar ETHER_BPF_MTAP(ifp, txp->mb[i]); 3303d735920dSNavdeep Parhar if (txp->npkt > 1) { 3304d735920dSNavdeep Parhar MPASS(avail >= tx_len16_to_desc(txp->len16)); 330530e3f2b4SNavdeep Parhar if (vi->flags & TX_USES_VM_WR) 3306d735920dSNavdeep Parhar n = write_txpkts_vm_wr(sc, txq); 3307d735920dSNavdeep Parhar else 3308d735920dSNavdeep Parhar n = write_txpkts_wr(sc, txq); 3309d735920dSNavdeep Parhar } else { 3310d735920dSNavdeep Parhar MPASS(avail >= 3311d735920dSNavdeep Parhar tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 331230e3f2b4SNavdeep Parhar if (vi->flags & TX_USES_VM_WR) 3313d735920dSNavdeep Parhar n = write_txpkt_vm_wr(sc, txq, txp->mb[0]); 3314d735920dSNavdeep Parhar else 3315d735920dSNavdeep Parhar n = write_txpkt_wr(sc, txq, txp->mb[0], avail); 33167951040fSNavdeep Parhar } 3317d735920dSNavdeep Parhar MPASS(n <= SGE_MAX_WR_NDESC); 3318d735920dSNavdeep Parhar wr = &eq->desc[eq->pidx]; 3319d735920dSNavdeep Parhar IDXINCR(eq->pidx, n, eq->sidx); 3320d735920dSNavdeep Parhar txp->npkt = 0; /* emptied */ 3321d735920dSNavdeep Parhar 3322d735920dSNavdeep Parhar MPASS(wr_can_update_eq(wr)); 3323d735920dSNavdeep Parhar set_txupdate_flags(txq, avail - n, wr); 3324d735920dSNavdeep Parhar ring_eq_db(sc, eq, n); 3325d735920dSNavdeep Parhar reclaim_tx_descs(txq, 32); 3326d735920dSNavdeep Parhar } 3327d735920dSNavdeep Parhar *coalescing = txp->npkt > 0; 33287951040fSNavdeep Parhar 33297951040fSNavdeep Parhar return (total); 3330733b9277SNavdeep Parhar } 3331733b9277SNavdeep Parhar 333254e4ee71SNavdeep Parhar static inline void 333354e4ee71SNavdeep Parhar init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 3334b2daa9a9SNavdeep Parhar int qsize) 333554e4ee71SNavdeep Parhar { 3336b2daa9a9SNavdeep Parhar 333754e4ee71SNavdeep Parhar KASSERT(tmr_idx >= 0 
	    && tmr_idx < SGE_NTIMERS,
	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
	    ("%s: bad pktc_idx %d", __func__, pktc_idx));

	iq->flags = 0;
	iq->adapter = sc;
	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
	/* Default to the last counter; overridden below if pktc_idx given. */
	iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
	if (pktc_idx >= 0) {
		iq->intr_params |= F_QINTR_CNT_EN;
		iq->intr_pktc_idx = pktc_idx;
	}
	iq->qsize = roundup2(qsize, 16);	/* See FW_IQ_CMD/iqsize */
	/* Usable entries exclude the status page at the end of the ring. */
	iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE;
}

/*
 * Software state initialization for a freelist (no hardware interaction).
 * Picks the buffer refill source (zone) based on maxp and whether buffer
 * packing is in use for this freelist.
 */
static inline void
init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name)
{

	fl->qsize = qsize;
	/* Usable entries exclude the status page at the end of the ring. */
	fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
	strlcpy(fl->lockname, name, sizeof(fl->lockname));
	if (sc->flags & BUF_PACKING_OK &&
	    ((!is_t4(sc) && buffer_packing) ||	/* T5+: enabled unless 0 */
	    (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */
		fl->flags |= FL_BUF_PACKING;
	fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING);
	fl->safe_zidx = sc->sge.safe_zidx;
}

/*
 * Software state initialization for an egress queue (no hardware
 * interaction).  iqid identifies the ingress queue that receives the
 * egress updates for this eq.
 */
static inline void
init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
    uint8_t tx_chan, uint16_t iqid, char *name)
{
	KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype));

	eq->flags = eqtype & EQ_TYPEMASK;
	eq->tx_chan = tx_chan;
	eq->iqid = iqid;
	/* Usable entries exclude the status page at the end of the ring. */
	eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
	strlcpy(eq->lockname, name, sizeof(eq->lockname));
}

/*
 * Allocate a physically contiguous, coherent, zeroed DMA ring of 'len'
 * bytes.  On success *tag/*map/*pa/*va describe the ring; on failure any
 * partially allocated resources are released here before returning errno.
 */
int
alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_addr_t *pa, void **va)
{
	int rc;

	/* 512B alignment, single segment: hardware rings must not be split. */
	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
		goto done;
	}

	rc = bus_dmamem_alloc(*tag, va,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
		goto done;
	}

	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
	if (rc != 0) {
		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
		goto done;
	}
done:
	if (rc)
		free_ring(sc, *tag, *map, *pa, *va);

	return (rc);
}

/*
 * Release a DMA ring allocated by alloc_ring.  Safe to call with partially
 * initialized state (NULL tag/va, zero pa) as alloc_ring's error path does.
 */
int
free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
    bus_addr_t pa, void *va)
{
	if (pa)
		bus_dmamap_unload(tag, map);
	if (va)
		bus_dmamem_free(tag, va, map);
	if (tag)
		bus_dma_tag_destroy(tag);

	return (0);
}

/*
 * Allocates the ring for an ingress queue and an optional freelist.  If the
 * freelist is specified it will be allocated and then associated with the
 * ingress queue.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 *
 * If the ingress queue will take interrupts directly then the intr_idx
 * specifies the vector, starting from 0.  -1 means the interrupts for this
 * queue should be forwarded to the fwq.
 */
static int
alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
    int intr_idx, int cong)
{
	int rc, i, cntxt_id;
	size_t len;
	struct fw_iq_cmd c;
	struct port_info *pi = vi->pi;
	struct adapter *sc = iq->adapter;
	struct sge_params *sp = &sc->params.sge;
	__be32 v = 0;

	len = iq->qsize * IQ_ESIZE;
	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
	    (void **)&iq->desc);
	if (rc != 0)
		return (rc);

	/* Build the FW_IQ_CMD that creates (and starts) the queue. */
	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));

	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));

	/* Special handling for firmware event queue */
	if (iq == &sc->sge.fwq)
		v |= F_FW_IQ_CMD_IQASYNCH;

	if (intr_idx < 0) {
		/* Forwarded interrupts, all headed to fwq */
		v |= F_FW_IQ_CMD_IQANDST;
		v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id);
	} else {
		KASSERT(intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
		v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
	}

	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(vi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(iq->qsize);
	c.iqaddr = htobe64(iq->ba);
	if (cong >= 0)
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);

	if (fl) {
		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);

		len = fl->qsize * EQ_ESIZE;
		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
		    &fl->ba, (void **)&fl->desc);
		if (rc)
			return (rc);

		/* Allocate space for one software descriptor per buffer. */
		rc = alloc_fl_sdesc(fl);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to setup fl software descriptors: %d\n",
			    rc);
			return (rc);
		}

		if (fl->flags & FL_BUF_PACKING) {
			fl->lowat = roundup2(sp->fl_starve_threshold2, 8);
			fl->buf_boundary = sp->pack_boundary;
		} else {
			fl->lowat = roundup2(sp->fl_starve_threshold, 8);
			fl->buf_boundary = 16;
		}
		/* Padding boundary may be stricter than the packing one. */
		if (fl_pad && fl->buf_boundary < sp->pad_boundary)
			fl->buf_boundary = sp->pad_boundary;

		c.iqns_to_fl0congen |=
		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
			(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
			(fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN :
			0));
		if (cong >= 0) {
			c.iqns_to_fl0congen |=
			    htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
				F_FW_IQ_CMD_FL0CONGCIF |
				F_FW_IQ_CMD_FL0CONGEN);
		}
		/* Fetch burst min/max differ between T4/T5 and T6+. */
		c.fl0dcaen_to_fl0cidxfthresh =
		    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
			X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) |
			V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
			X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
		c.fl0size = htobe16(fl->qsize);
		c.fl0addr = htobe64(fl->ba);
	}

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create ingress queue: %d\n", rc);
		return (rc);
	}

	/* Firmware reply (in c) carries the queue ids assigned by the chip. */
	iq->cidx = 0;
	iq->gen = F_RSPD_GEN;
	iq->intr_next = iq->intr_params;
	iq->cntxt_id = be16toh(c.iqid);
	iq->abs_id = be16toh(c.physiqid);
	iq->flags |= IQ_ALLOCATED;

	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.iqmap_sz) {
		panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.iqmap_sz - 1);
	}
	sc->sge.iqmap[cntxt_id] = iq;

	if (fl) {
		u_int qid;

		iq->flags |= IQ_HAS_FL;
		fl->cntxt_id = be16toh(c.fl0id);
		fl->pidx = fl->cidx = 0;

		/* Freelists live in the eq context-id space. */
		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
		if (cntxt_id >= sc->sge.eqmap_sz) {
			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
			    __func__, cntxt_id, sc->sge.eqmap_sz - 1);
		}
		sc->sge.eqmap[cntxt_id] = (void *)fl;

		/*
		 * Work out the user-doorbell address for this fl, if the
		 * doorbells are mapped (s_qpp = queues per page, log2).
		 */
		qid = fl->cntxt_id;
		if (isset(&sc->doorbells, DOORBELL_UDB)) {
			uint32_t s_qpp = sc->params.sge.eq_s_qpp;
			uint32_t mask = (1 << s_qpp) - 1;
			volatile uint8_t *udb;

			udb = sc->udbs_base + UDBS_DB_OFFSET;
			udb += (qid >> s_qpp) << PAGE_SHIFT;
			qid &= mask;
			if (qid < PAGE_SIZE / UDBS_SEG_SIZE) {
				udb += qid << UDBS_SEG_SHIFT;
				qid = 0;	/* id folded into the address */
			}
			fl->udb = (volatile void *)udb;
		}
		fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db;

		FL_LOCK(fl);
		/* Enough to make sure the SGE doesn't think it's starved */
		refill_fl(sc, fl, fl->lowat);
		FL_UNLOCK(fl);
	}

	/* T5+ PF only: program the congestion manager context for this iq. */
	if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			/* cong is a bitmap of channels to enable. */
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", iq->cntxt_id, rc);
		}
	}

	/* Enable IQ interrupts */
	atomic_store_rel_int(&iq->state, IQS_IDLE);
	t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
	    V_INGRESSQID(iq->cntxt_id));

	return (0);
}

/*
 * Free an ingress queue (and its freelist, if any) previously allocated by
 * alloc_iq_fl.  Idempotent with respect to partially constructed state.
 */
static int
free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
{
	int rc;
	struct adapter *sc = iq->adapter;
	device_t dev;

	if (sc == NULL)
		return (0);	/* nothing to do */

	dev = vi ? vi->dev : sc->dev;

	if (iq->flags & IQ_ALLOCATED) {
		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
		    fl ?
		    fl->cntxt_id : 0xffff, 0xffff);
		if (rc != 0) {
			device_printf(dev,
			    "failed to free queue %p: %d\n", iq, rc);
			return (rc);
		}
		iq->flags &= ~IQ_ALLOCATED;
	}

	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);

	bzero(iq, sizeof(*iq));

	if (fl) {
		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
		    fl->desc);

		if (fl->sdesc)
			free_fl_sdesc(sc, fl);

		if (mtx_initialized(&fl->fl_lock))
			mtx_destroy(&fl->fl_lock);

		bzero(fl, sizeof(*fl));
	}

	return (0);
}

/*
 * Attach the standard set of sysctl nodes for an ingress queue under 'oid'.
 */
static void
add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
    struct sge_iq *iq)
{
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba,
	    "bus address of descriptor ring");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    iq->qsize * IQ_ESIZE, "descriptor ring size in bytes");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &iq->abs_id, 0,
	    sysctl_uint16, "I", "absolute id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &iq->cntxt_id, 0,
	    sysctl_uint16, "I", "SGE context id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &iq->cidx, 0,
	    sysctl_uint16, "I", "consumer index");
}

/*
 * Attach a "fl" sysctl subtree describing the given freelist under 'oid'.
 */
static void
add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *oid, struct sge_fl *fl)
{
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &fl->ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    fl->sidx * EQ_ESIZE + sc->params.sge.spg_len,
	    "desc ring size in bytes");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &fl->cntxt_id, 0,
	    sysctl_uint16, "I", "SGE context id of the freelist");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL,
	    fl_pad ? 1 : 0, "padding enabled");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL,
	    fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx,
	    0, "consumer index");
	if (fl->flags & FL_BUF_PACKING) {
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset",
		    CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset");
	}
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx,
	    0, "producer index");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated",
	    CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled",
	    CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled",
	    CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)");
}

/*
 * Allocate the firmware event queue.  It takes interrupts directly: vector 0
 * on a VF, otherwise vector 1 when more than one vector is available (vector
 * 0 is left for the error interrupt).
 */
static int
alloc_fwq(struct adapter *sc)
{
	int rc, intr_idx;
	struct sge_iq *fwq = &sc->sge.fwq;
	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
	if (sc->flags & IS_VF)
		intr_idx = 0;
	else
		intr_idx = sc->intr_count > 1 ? 1 : 0;
	rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create firmware event queue: %d\n", rc);
		return (rc);
	}

	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue");
	add_iq_sysctls(&sc->ctx, oid, fwq);

	return (0);
}

/*
 * Free the firmware event queue.
 */
static int
free_fwq(struct adapter *sc)
{
	return free_iq_fl(NULL, &sc->sge.fwq, NULL);
}

/*
 * Allocate a control queue for port 'idx'.  Its egress updates are directed
 * at the firmware event queue.
 */
static int
alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	char name[16];
	struct sysctl_oid_list *children;

	snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev),
	    idx);
	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan,
	    sc->sge.fwq.cntxt_id, name);

	children = SYSCTL_CHILDREN(oid);
	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&sc->ctx,
	    children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ctrl queue");
	rc = alloc_wrq(sc, NULL, ctrlq, oid);

	return (rc);
}

/*
 * Map the tunnel congestion-drop tunable to the value passed to alloc_iq_fl:
 * -1 = no congestion feedback, 0 (drop==1) = drop on congestion, otherwise
 * the port's rx channel map (backpressure).
 */
int
tnl_cong(struct port_info *pi, int drop)
{

	if (drop == -1)
		return (-1);
	else if (drop == 1)
		return (0);
	else
		return (pi->rx_e_chan_map);
}

/*
 * Allocate NIC rx queue 'idx' for the given vi: ingress queue + freelist,
 * optional LRO state, and its sysctl subtree.  Caller cleans up on failure.
 */
static int
alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	struct adapter *sc = vi->adapter;
	struct sysctl_oid_list *children;
	char name[16];

	rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx,
	    tnl_cong(vi->pi, cong_drop));
	if (rc != 0)
		return (rc);

	/* abs_id - cntxt_id offset is fixed; learn it from queue 0. */
	if (idx == 0)
		sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id;
	else
		KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id,
		    ("iq_base mismatch"));
	KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF,
	    ("PF with non-zero iq_base"));

	/*
	 * The freelist is just barely above the starvation threshold right now,
	 * fill it up a bit more.
	 */
	FL_LOCK(&rxq->fl);
	refill_fl(sc, &rxq->fl, 128);
	FL_UNLOCK(&rxq->fl);

#if defined(INET) || defined(INET6)
	rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs);
	if (rc != 0)
		return (rc);
	MPASS(rxq->lro.ifp == vi->ifp); /* also indicates LRO init'ed */

	if (vi->ifp->if_capenable & IFCAP_LRO)
		rxq->iq.flags |= IQ_LRO_ENABLED;
#endif
	if (vi->ifp->if_capenable & IFCAP_HWRXTSTMP)
		rxq->iq.flags |= IQ_RX_TIMESTAMP;
	rxq->ifp = vi->ifp;

	children = SYSCTL_CHILDREN(oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue");
	children = SYSCTL_CHILDREN(oid);

	add_iq_sysctls(&vi->ctx, oid, &rxq->iq);
#if defined(INET) || defined(INET6)
	SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
	    &rxq->lro.lro_queued, 0, NULL);
	SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
	    &rxq->lro.lro_flushed, 0, NULL);
#endif
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
	    &rxq->rxcsum, "# of times hardware assisted with checksum");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction",
	    CTLFLAG_RD, &rxq->vlan_extraction,
	    "# of times hardware extracted 802.1Q tag");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vxlan_rxcsum",
	    CTLFLAG_RD, &rxq->vxlan_rxcsum,
	    "# of times hardware assisted with inner checksum (VXLAN) ");

	add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl);

	return (rc);
}

/*
 * Free an rx queue allocated by alloc_rxq (LRO state, iq, and fl).
 */
static int
free_rxq(struct vi_info *vi, struct sge_rxq *rxq)
{
	int rc;

#if defined(INET) || defined(INET6)
	if (rxq->lro.ifp) {
		tcp_lro_free(&rxq->lro);
		rxq->lro.ifp = NULL;
	}
#endif

	rc = free_iq_fl(vi, &rxq->iq, &rxq->fl);
	if (rc == 0)
		bzero(rxq, sizeof(*rxq));

	return (rc);
}

#ifdef TCP_OFFLOAD
static int
alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq,
    int intr_idx, int idx, struct sysctl_oid *oid)
{
	struct port_info *pi =
vi->pi; 3899733b9277SNavdeep Parhar int rc; 3900f7dfe243SNavdeep Parhar struct sysctl_oid_list *children; 3901733b9277SNavdeep Parhar char name[16]; 3902f7dfe243SNavdeep Parhar 39035bcae8ddSNavdeep Parhar rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 0); 3904733b9277SNavdeep Parhar if (rc != 0) 3905f7dfe243SNavdeep Parhar return (rc); 3906f7dfe243SNavdeep Parhar 3907733b9277SNavdeep Parhar children = SYSCTL_CHILDREN(oid); 3908733b9277SNavdeep Parhar 3909733b9277SNavdeep Parhar snprintf(name, sizeof(name), "%d", idx); 39107029da5cSPawel Biernacki oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 39117029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue"); 3912348694daSNavdeep Parhar add_iq_sysctls(&vi->ctx, oid, &ofld_rxq->iq); 3913aa93b99aSNavdeep Parhar add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl); 3914733b9277SNavdeep Parhar 3915733b9277SNavdeep Parhar return (rc); 3916733b9277SNavdeep Parhar } 3917733b9277SNavdeep Parhar 3918733b9277SNavdeep Parhar static int 3919fe2ebb76SJohn Baldwin free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq) 3920733b9277SNavdeep Parhar { 3921733b9277SNavdeep Parhar int rc; 3922733b9277SNavdeep Parhar 3923fe2ebb76SJohn Baldwin rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl); 3924733b9277SNavdeep Parhar if (rc == 0) 3925733b9277SNavdeep Parhar bzero(ofld_rxq, sizeof(*ofld_rxq)); 3926733b9277SNavdeep Parhar 3927733b9277SNavdeep Parhar return (rc); 3928733b9277SNavdeep Parhar } 3929733b9277SNavdeep Parhar #endif 3930733b9277SNavdeep Parhar 3931ddf09ad6SNavdeep Parhar /* 3932ddf09ad6SNavdeep Parhar * Returns a reasonable automatic cidx flush threshold for a given queue size. 
3933ddf09ad6SNavdeep Parhar */ 3934ddf09ad6SNavdeep Parhar static u_int 3935ddf09ad6SNavdeep Parhar qsize_to_fthresh(int qsize) 3936ddf09ad6SNavdeep Parhar { 3937ddf09ad6SNavdeep Parhar u_int fthresh; 3938ddf09ad6SNavdeep Parhar 3939ddf09ad6SNavdeep Parhar while (!powerof2(qsize)) 3940ddf09ad6SNavdeep Parhar qsize++; 3941ddf09ad6SNavdeep Parhar fthresh = ilog2(qsize); 3942ddf09ad6SNavdeep Parhar if (fthresh > X_CIDXFLUSHTHRESH_128) 3943ddf09ad6SNavdeep Parhar fthresh = X_CIDXFLUSHTHRESH_128; 3944ddf09ad6SNavdeep Parhar 3945ddf09ad6SNavdeep Parhar return (fthresh); 3946ddf09ad6SNavdeep Parhar } 3947ddf09ad6SNavdeep Parhar 3948733b9277SNavdeep Parhar static int 3949733b9277SNavdeep Parhar ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 3950733b9277SNavdeep Parhar { 3951733b9277SNavdeep Parhar int rc, cntxt_id; 3952733b9277SNavdeep Parhar struct fw_eq_ctrl_cmd c; 395390e7434aSNavdeep Parhar int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3954f7dfe243SNavdeep Parhar 3955f7dfe243SNavdeep Parhar bzero(&c, sizeof(c)); 3956f7dfe243SNavdeep Parhar 3957f7dfe243SNavdeep Parhar c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 3958f7dfe243SNavdeep Parhar F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 3959f7dfe243SNavdeep Parhar V_FW_EQ_CTRL_CMD_VFN(0)); 3960f7dfe243SNavdeep Parhar c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 3961f7dfe243SNavdeep Parhar F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 39627951040fSNavdeep Parhar c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); 3963f7dfe243SNavdeep Parhar c.physeqid_pkd = htobe32(0); 3964f7dfe243SNavdeep Parhar c.fetchszm_to_iqid = 396587b027baSNavdeep Parhar htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 3966733b9277SNavdeep Parhar V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 396756599263SNavdeep Parhar F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 3968f7dfe243SNavdeep Parhar c.dcaen_to_eqsize = 3969adb0cd84SNavdeep Parhar 
htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 3970adb0cd84SNavdeep Parhar X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 3971f7dfe243SNavdeep Parhar V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3972ddf09ad6SNavdeep Parhar V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 39737951040fSNavdeep Parhar V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); 3974f7dfe243SNavdeep Parhar c.eqaddr = htobe64(eq->ba); 3975f7dfe243SNavdeep Parhar 3976f7dfe243SNavdeep Parhar rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3977f7dfe243SNavdeep Parhar if (rc != 0) { 3978f7dfe243SNavdeep Parhar device_printf(sc->dev, 3979733b9277SNavdeep Parhar "failed to create control queue %d: %d\n", eq->tx_chan, rc); 3980f7dfe243SNavdeep Parhar return (rc); 3981f7dfe243SNavdeep Parhar } 3982733b9277SNavdeep Parhar eq->flags |= EQ_ALLOCATED; 3983f7dfe243SNavdeep Parhar 3984f7dfe243SNavdeep Parhar eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 3985f7dfe243SNavdeep Parhar cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3986b20b25e7SNavdeep Parhar if (cntxt_id >= sc->sge.eqmap_sz) 3987733b9277SNavdeep Parhar panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3988b20b25e7SNavdeep Parhar cntxt_id, sc->sge.eqmap_sz - 1); 3989f7dfe243SNavdeep Parhar sc->sge.eqmap[cntxt_id] = eq; 3990f7dfe243SNavdeep Parhar 3991f7dfe243SNavdeep Parhar return (rc); 3992f7dfe243SNavdeep Parhar } 3993f7dfe243SNavdeep Parhar 3994f7dfe243SNavdeep Parhar static int 3995fe2ebb76SJohn Baldwin eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 399654e4ee71SNavdeep Parhar { 399754e4ee71SNavdeep Parhar int rc, cntxt_id; 399854e4ee71SNavdeep Parhar struct fw_eq_eth_cmd c; 399990e7434aSNavdeep Parhar int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 400054e4ee71SNavdeep Parhar 400154e4ee71SNavdeep Parhar bzero(&c, sizeof(c)); 400254e4ee71SNavdeep Parhar 400354e4ee71SNavdeep Parhar c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 400454e4ee71SNavdeep 
Parhar F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 400554e4ee71SNavdeep Parhar V_FW_EQ_ETH_CMD_VFN(0)); 400654e4ee71SNavdeep Parhar c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 400754e4ee71SNavdeep Parhar F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 40087951040fSNavdeep Parhar c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 4009fe2ebb76SJohn Baldwin F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 401054e4ee71SNavdeep Parhar c.fetchszm_to_iqid = 40117951040fSNavdeep Parhar htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 4012733b9277SNavdeep Parhar V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 4013aa2457e1SNavdeep Parhar V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 4014adb0cd84SNavdeep Parhar c.dcaen_to_eqsize = 4015adb0cd84SNavdeep Parhar htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 4016adb0cd84SNavdeep Parhar X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 401754e4ee71SNavdeep Parhar V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 40187951040fSNavdeep Parhar V_FW_EQ_ETH_CMD_EQSIZE(qsize)); 401954e4ee71SNavdeep Parhar c.eqaddr = htobe64(eq->ba); 402054e4ee71SNavdeep Parhar 402154e4ee71SNavdeep Parhar rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 402254e4ee71SNavdeep Parhar if (rc != 0) { 4023fe2ebb76SJohn Baldwin device_printf(vi->dev, 4024733b9277SNavdeep Parhar "failed to create Ethernet egress queue: %d\n", rc); 4025733b9277SNavdeep Parhar return (rc); 4026733b9277SNavdeep Parhar } 4027733b9277SNavdeep Parhar eq->flags |= EQ_ALLOCATED; 4028733b9277SNavdeep Parhar 4029733b9277SNavdeep Parhar eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 4030ec55567cSJohn Baldwin eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); 4031733b9277SNavdeep Parhar cntxt_id = eq->cntxt_id - sc->sge.eq_start; 4032b20b25e7SNavdeep Parhar if (cntxt_id >= sc->sge.eqmap_sz) 4033733b9277SNavdeep Parhar panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 4034b20b25e7SNavdeep Parhar 
cntxt_id, sc->sge.eqmap_sz - 1); 4035733b9277SNavdeep Parhar sc->sge.eqmap[cntxt_id] = eq; 4036733b9277SNavdeep Parhar 403754e4ee71SNavdeep Parhar return (rc); 403854e4ee71SNavdeep Parhar } 403954e4ee71SNavdeep Parhar 4040eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4041733b9277SNavdeep Parhar static int 4042fe2ebb76SJohn Baldwin ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 4043733b9277SNavdeep Parhar { 4044733b9277SNavdeep Parhar int rc, cntxt_id; 4045733b9277SNavdeep Parhar struct fw_eq_ofld_cmd c; 404690e7434aSNavdeep Parhar int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 404754e4ee71SNavdeep Parhar 4048733b9277SNavdeep Parhar bzero(&c, sizeof(c)); 4049733b9277SNavdeep Parhar 4050733b9277SNavdeep Parhar c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 4051733b9277SNavdeep Parhar F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 4052733b9277SNavdeep Parhar V_FW_EQ_OFLD_CMD_VFN(0)); 4053733b9277SNavdeep Parhar c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 4054733b9277SNavdeep Parhar F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 4055733b9277SNavdeep Parhar c.fetchszm_to_iqid = 4056ddf09ad6SNavdeep Parhar htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 4057733b9277SNavdeep Parhar V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 4058733b9277SNavdeep Parhar F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 4059733b9277SNavdeep Parhar c.dcaen_to_eqsize = 4060adb0cd84SNavdeep Parhar htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 
4061adb0cd84SNavdeep Parhar X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 4062733b9277SNavdeep Parhar V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 4063ddf09ad6SNavdeep Parhar V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 40647951040fSNavdeep Parhar V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); 4065733b9277SNavdeep Parhar c.eqaddr = htobe64(eq->ba); 4066733b9277SNavdeep Parhar 4067733b9277SNavdeep Parhar rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 4068733b9277SNavdeep Parhar if (rc != 0) { 4069fe2ebb76SJohn Baldwin device_printf(vi->dev, 4070733b9277SNavdeep Parhar "failed to create egress queue for TCP offload: %d\n", rc); 4071733b9277SNavdeep Parhar return (rc); 4072733b9277SNavdeep Parhar } 4073733b9277SNavdeep Parhar eq->flags |= EQ_ALLOCATED; 4074733b9277SNavdeep Parhar 4075733b9277SNavdeep Parhar eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 407654e4ee71SNavdeep Parhar cntxt_id = eq->cntxt_id - sc->sge.eq_start; 4077b20b25e7SNavdeep Parhar if (cntxt_id >= sc->sge.eqmap_sz) 4078733b9277SNavdeep Parhar panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 4079b20b25e7SNavdeep Parhar cntxt_id, sc->sge.eqmap_sz - 1); 408054e4ee71SNavdeep Parhar sc->sge.eqmap[cntxt_id] = eq; 408154e4ee71SNavdeep Parhar 4082733b9277SNavdeep Parhar return (rc); 4083733b9277SNavdeep Parhar } 4084733b9277SNavdeep Parhar #endif 4085733b9277SNavdeep Parhar 4086733b9277SNavdeep Parhar static int 4087fe2ebb76SJohn Baldwin alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 4088733b9277SNavdeep Parhar { 40897951040fSNavdeep Parhar int rc, qsize; 4090733b9277SNavdeep Parhar size_t len; 4091733b9277SNavdeep Parhar 4092733b9277SNavdeep Parhar mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 4093733b9277SNavdeep Parhar 409490e7434aSNavdeep Parhar qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 40957951040fSNavdeep Parhar len = qsize * EQ_ESIZE; 4096733b9277SNavdeep Parhar rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 
4097733b9277SNavdeep Parhar &eq->ba, (void **)&eq->desc); 4098733b9277SNavdeep Parhar if (rc) 4099733b9277SNavdeep Parhar return (rc); 4100733b9277SNavdeep Parhar 4101ddf09ad6SNavdeep Parhar eq->pidx = eq->cidx = eq->dbidx = 0; 4102ddf09ad6SNavdeep Parhar /* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. */ 4103ddf09ad6SNavdeep Parhar eq->equeqidx = 0; 4104d14b0ac1SNavdeep Parhar eq->doorbells = sc->doorbells; 4105733b9277SNavdeep Parhar 4106733b9277SNavdeep Parhar switch (eq->flags & EQ_TYPEMASK) { 4107733b9277SNavdeep Parhar case EQ_CTRL: 4108733b9277SNavdeep Parhar rc = ctrl_eq_alloc(sc, eq); 4109733b9277SNavdeep Parhar break; 4110733b9277SNavdeep Parhar 4111733b9277SNavdeep Parhar case EQ_ETH: 4112fe2ebb76SJohn Baldwin rc = eth_eq_alloc(sc, vi, eq); 4113733b9277SNavdeep Parhar break; 4114733b9277SNavdeep Parhar 4115eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4116733b9277SNavdeep Parhar case EQ_OFLD: 4117fe2ebb76SJohn Baldwin rc = ofld_eq_alloc(sc, vi, eq); 4118733b9277SNavdeep Parhar break; 4119733b9277SNavdeep Parhar #endif 4120733b9277SNavdeep Parhar 4121733b9277SNavdeep Parhar default: 4122733b9277SNavdeep Parhar panic("%s: invalid eq type %d.", __func__, 4123733b9277SNavdeep Parhar eq->flags & EQ_TYPEMASK); 4124733b9277SNavdeep Parhar } 4125733b9277SNavdeep Parhar if (rc != 0) { 4126733b9277SNavdeep Parhar device_printf(sc->dev, 4127c086e3d1SNavdeep Parhar "failed to allocate egress queue(%d): %d\n", 4128733b9277SNavdeep Parhar eq->flags & EQ_TYPEMASK, rc); 4129733b9277SNavdeep Parhar } 4130733b9277SNavdeep Parhar 4131d14b0ac1SNavdeep Parhar if (isset(&eq->doorbells, DOORBELL_UDB) || 4132d14b0ac1SNavdeep Parhar isset(&eq->doorbells, DOORBELL_UDBWC) || 413377ad3c41SNavdeep Parhar isset(&eq->doorbells, DOORBELL_WCWR)) { 413490e7434aSNavdeep Parhar uint32_t s_qpp = sc->params.sge.eq_s_qpp; 4135d14b0ac1SNavdeep Parhar uint32_t mask = (1 << s_qpp) - 1; 4136d14b0ac1SNavdeep Parhar volatile uint8_t *udb; 
4137d14b0ac1SNavdeep Parhar 4138d14b0ac1SNavdeep Parhar udb = sc->udbs_base + UDBS_DB_OFFSET; 4139d14b0ac1SNavdeep Parhar udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 4140d14b0ac1SNavdeep Parhar eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 4141f10405b3SNavdeep Parhar if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 414277ad3c41SNavdeep Parhar clrbit(&eq->doorbells, DOORBELL_WCWR); 4143d14b0ac1SNavdeep Parhar else { 4144d14b0ac1SNavdeep Parhar udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 4145d14b0ac1SNavdeep Parhar eq->udb_qid = 0; 4146d14b0ac1SNavdeep Parhar } 4147d14b0ac1SNavdeep Parhar eq->udb = (volatile void *)udb; 4148d14b0ac1SNavdeep Parhar } 4149d14b0ac1SNavdeep Parhar 4150733b9277SNavdeep Parhar return (rc); 4151733b9277SNavdeep Parhar } 4152733b9277SNavdeep Parhar 4153733b9277SNavdeep Parhar static int 4154733b9277SNavdeep Parhar free_eq(struct adapter *sc, struct sge_eq *eq) 4155733b9277SNavdeep Parhar { 4156733b9277SNavdeep Parhar int rc; 4157733b9277SNavdeep Parhar 4158733b9277SNavdeep Parhar if (eq->flags & EQ_ALLOCATED) { 4159733b9277SNavdeep Parhar switch (eq->flags & EQ_TYPEMASK) { 4160733b9277SNavdeep Parhar case EQ_CTRL: 4161733b9277SNavdeep Parhar rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 4162733b9277SNavdeep Parhar eq->cntxt_id); 4163733b9277SNavdeep Parhar break; 4164733b9277SNavdeep Parhar 4165733b9277SNavdeep Parhar case EQ_ETH: 4166733b9277SNavdeep Parhar rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 4167733b9277SNavdeep Parhar eq->cntxt_id); 4168733b9277SNavdeep Parhar break; 4169733b9277SNavdeep Parhar 4170eff62dbaSNavdeep Parhar #if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4171733b9277SNavdeep Parhar case EQ_OFLD: 4172733b9277SNavdeep Parhar rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 4173733b9277SNavdeep Parhar eq->cntxt_id); 4174733b9277SNavdeep Parhar break; 4175733b9277SNavdeep Parhar #endif 4176733b9277SNavdeep Parhar 4177733b9277SNavdeep Parhar default: 4178733b9277SNavdeep Parhar panic("%s: 
invalid eq type %d.", __func__, 4179733b9277SNavdeep Parhar eq->flags & EQ_TYPEMASK); 4180733b9277SNavdeep Parhar } 4181733b9277SNavdeep Parhar if (rc != 0) { 4182733b9277SNavdeep Parhar device_printf(sc->dev, 4183733b9277SNavdeep Parhar "failed to free egress queue (%d): %d\n", 4184733b9277SNavdeep Parhar eq->flags & EQ_TYPEMASK, rc); 4185733b9277SNavdeep Parhar return (rc); 4186733b9277SNavdeep Parhar } 4187733b9277SNavdeep Parhar eq->flags &= ~EQ_ALLOCATED; 4188733b9277SNavdeep Parhar } 4189733b9277SNavdeep Parhar 4190733b9277SNavdeep Parhar free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 4191733b9277SNavdeep Parhar 4192733b9277SNavdeep Parhar if (mtx_initialized(&eq->eq_lock)) 4193733b9277SNavdeep Parhar mtx_destroy(&eq->eq_lock); 4194733b9277SNavdeep Parhar 4195733b9277SNavdeep Parhar bzero(eq, sizeof(*eq)); 4196733b9277SNavdeep Parhar return (0); 4197733b9277SNavdeep Parhar } 4198733b9277SNavdeep Parhar 4199733b9277SNavdeep Parhar static int 4200fe2ebb76SJohn Baldwin alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq, 4201733b9277SNavdeep Parhar struct sysctl_oid *oid) 4202733b9277SNavdeep Parhar { 4203733b9277SNavdeep Parhar int rc; 4204fe2ebb76SJohn Baldwin struct sysctl_ctx_list *ctx = vi ? 
&vi->ctx : &sc->ctx; 4205733b9277SNavdeep Parhar struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 4206733b9277SNavdeep Parhar 4207fe2ebb76SJohn Baldwin rc = alloc_eq(sc, vi, &wrq->eq); 4208733b9277SNavdeep Parhar if (rc) 4209733b9277SNavdeep Parhar return (rc); 4210733b9277SNavdeep Parhar 4211733b9277SNavdeep Parhar wrq->adapter = sc; 42127951040fSNavdeep Parhar TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); 42137951040fSNavdeep Parhar TAILQ_INIT(&wrq->incomplete_wrs); 421409fe6320SNavdeep Parhar STAILQ_INIT(&wrq->wr_list); 42157951040fSNavdeep Parhar wrq->nwr_pending = 0; 42167951040fSNavdeep Parhar wrq->ndesc_needed = 0; 4217733b9277SNavdeep Parhar 4218aa93b99aSNavdeep Parhar SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 4219aa93b99aSNavdeep Parhar &wrq->eq.ba, "bus address of descriptor ring"); 4220aa93b99aSNavdeep Parhar SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 4221aa93b99aSNavdeep Parhar wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len, 4222aa93b99aSNavdeep Parhar "desc ring size in bytes"); 4223733b9277SNavdeep Parhar SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 4224733b9277SNavdeep Parhar &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 4225733b9277SNavdeep Parhar SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 42268741306bSNavdeep Parhar CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &wrq->eq.cidx, 0, 42277029da5cSPawel Biernacki sysctl_uint16, "I", "consumer index"); 4228733b9277SNavdeep Parhar SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 42298741306bSNavdeep Parhar CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &wrq->eq.pidx, 0, 42307029da5cSPawel Biernacki sysctl_uint16, "I", "producer index"); 4231aa93b99aSNavdeep Parhar SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 4232aa93b99aSNavdeep Parhar wrq->eq.sidx, "status page index"); 42337951040fSNavdeep Parhar SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, 42347951040fSNavdeep Parhar 
&wrq->tx_wrs_direct, "# of work requests (direct)"); 42357951040fSNavdeep Parhar SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, 42367951040fSNavdeep Parhar &wrq->tx_wrs_copied, "# of work requests (copied)"); 42370459a175SNavdeep Parhar SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD, 42380459a175SNavdeep Parhar &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); 4239733b9277SNavdeep Parhar 4240733b9277SNavdeep Parhar return (rc); 4241733b9277SNavdeep Parhar } 4242733b9277SNavdeep Parhar 4243733b9277SNavdeep Parhar static int 4244733b9277SNavdeep Parhar free_wrq(struct adapter *sc, struct sge_wrq *wrq) 4245733b9277SNavdeep Parhar { 4246733b9277SNavdeep Parhar int rc; 4247733b9277SNavdeep Parhar 4248733b9277SNavdeep Parhar rc = free_eq(sc, &wrq->eq); 4249733b9277SNavdeep Parhar if (rc) 4250733b9277SNavdeep Parhar return (rc); 4251733b9277SNavdeep Parhar 4252733b9277SNavdeep Parhar bzero(wrq, sizeof(*wrq)); 4253733b9277SNavdeep Parhar return (0); 4254733b9277SNavdeep Parhar } 4255733b9277SNavdeep Parhar 4256733b9277SNavdeep Parhar static int 4257fe2ebb76SJohn Baldwin alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx, 4258733b9277SNavdeep Parhar struct sysctl_oid *oid) 4259733b9277SNavdeep Parhar { 4260733b9277SNavdeep Parhar int rc; 4261fe2ebb76SJohn Baldwin struct port_info *pi = vi->pi; 4262733b9277SNavdeep Parhar struct adapter *sc = pi->adapter; 4263733b9277SNavdeep Parhar struct sge_eq *eq = &txq->eq; 4264d735920dSNavdeep Parhar struct txpkts *txp; 4265733b9277SNavdeep Parhar char name[16]; 4266733b9277SNavdeep Parhar struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 4267733b9277SNavdeep Parhar 42687951040fSNavdeep Parhar rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx, 4269d735920dSNavdeep Parhar M_CXGBE, &eq->eq_lock, M_WAITOK); 42707951040fSNavdeep Parhar if (rc != 0) { 42717951040fSNavdeep Parhar device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc); 
42727951040fSNavdeep Parhar return (rc); 42737951040fSNavdeep Parhar } 42747951040fSNavdeep Parhar 4275fe2ebb76SJohn Baldwin rc = alloc_eq(sc, vi, eq); 42767951040fSNavdeep Parhar if (rc != 0) { 42777951040fSNavdeep Parhar mp_ring_free(txq->r); 42787951040fSNavdeep Parhar txq->r = NULL; 4279733b9277SNavdeep Parhar return (rc); 42807951040fSNavdeep Parhar } 4281733b9277SNavdeep Parhar 42827951040fSNavdeep Parhar /* Can't fail after this point. */ 42837951040fSNavdeep Parhar 4284ec55567cSJohn Baldwin if (idx == 0) 4285ec55567cSJohn Baldwin sc->sge.eq_base = eq->abs_id - eq->cntxt_id; 4286ec55567cSJohn Baldwin else 4287ec55567cSJohn Baldwin KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, 4288ec55567cSJohn Baldwin ("eq_base mismatch")); 4289ec55567cSJohn Baldwin KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, 4290ec55567cSJohn Baldwin ("PF with non-zero eq_base")); 4291ec55567cSJohn Baldwin 42927951040fSNavdeep Parhar TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); 4293fe2ebb76SJohn Baldwin txq->ifp = vi->ifp; 42947951040fSNavdeep Parhar txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 429530e3f2b4SNavdeep Parhar if (vi->flags & TX_USES_VM_WR) 42966af45170SJohn Baldwin txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 42976af45170SJohn Baldwin V_TXPKT_INTF(pi->tx_chan)); 42986af45170SJohn Baldwin else 4299c0236bd9SNavdeep Parhar txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 4300edb518f4SNavdeep Parhar V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 4301edb518f4SNavdeep Parhar V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 430202f972e8SNavdeep Parhar txq->tc_idx = -1; 43037951040fSNavdeep Parhar txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, 4304733b9277SNavdeep Parhar M_ZERO | M_WAITOK); 430554e4ee71SNavdeep Parhar 4306d735920dSNavdeep Parhar txp = &txq->txp; 4307d735920dSNavdeep Parhar txp->score = 5; 4308d735920dSNavdeep Parhar MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr); 4309d735920dSNavdeep 
Parhar txq->txp.max_npkt = min(nitems(txp->mb), 4310d735920dSNavdeep Parhar sc->params.max_pkts_per_eth_tx_pkts_wr); 431130e3f2b4SNavdeep Parhar if (vi->flags & TX_USES_VM_WR && !(sc->flags & IS_VF)) 431230e3f2b4SNavdeep Parhar txq->txp.max_npkt--; 4313d735920dSNavdeep Parhar 431454e4ee71SNavdeep Parhar snprintf(name, sizeof(name), "%d", idx); 43157029da5cSPawel Biernacki oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 43167029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queue"); 431754e4ee71SNavdeep Parhar children = SYSCTL_CHILDREN(oid); 431854e4ee71SNavdeep Parhar 4319aa93b99aSNavdeep Parhar SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 4320aa93b99aSNavdeep Parhar &eq->ba, "bus address of descriptor ring"); 4321aa93b99aSNavdeep Parhar SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 4322aa93b99aSNavdeep Parhar eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, 4323aa93b99aSNavdeep Parhar "desc ring size in bytes"); 4324ec55567cSJohn Baldwin SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 4325ec55567cSJohn Baldwin &eq->abs_id, 0, "absolute id of the queue"); 4326fe2ebb76SJohn Baldwin SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 432759bc8ce0SNavdeep Parhar &eq->cntxt_id, 0, "SGE context id of the queue"); 4328fe2ebb76SJohn Baldwin SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", 43298741306bSNavdeep Parhar CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &eq->cidx, 0, 43307029da5cSPawel Biernacki sysctl_uint16, "I", "consumer index"); 4331fe2ebb76SJohn Baldwin SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", 43328741306bSNavdeep Parhar CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, &eq->pidx, 0, 43337029da5cSPawel Biernacki sysctl_uint16, "I", "producer index"); 4334aa93b99aSNavdeep Parhar SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 4335aa93b99aSNavdeep Parhar eq->sidx, "status page index"); 433659bc8ce0SNavdeep Parhar 
433702f972e8SNavdeep Parhar SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc", 43388741306bSNavdeep Parhar CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, idx, sysctl_tc, 43397029da5cSPawel Biernacki "I", "traffic class (-1 means none)"); 434002f972e8SNavdeep Parhar 4341fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 434254e4ee71SNavdeep Parhar &txq->txcsum, "# of times hardware assisted with checksum"); 4343fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion", 434454e4ee71SNavdeep Parhar CTLFLAG_RD, &txq->vlan_insertion, 434554e4ee71SNavdeep Parhar "# of times hardware inserted 802.1Q tag"); 4346fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 4347a1ea9a82SNavdeep Parhar &txq->tso_wrs, "# of TSO work requests"); 4348fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 434954e4ee71SNavdeep Parhar &txq->imm_wrs, "# of work requests with immediate data"); 4350fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 435154e4ee71SNavdeep Parhar &txq->sgl_wrs, "# of work requests with direct SGL"); 4352fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 435354e4ee71SNavdeep Parhar &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 4354fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs", 43557951040fSNavdeep Parhar CTLFLAG_RD, &txq->txpkts0_wrs, 43567951040fSNavdeep Parhar "# of txpkts (type 0) work requests"); 4357fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs", 43587951040fSNavdeep Parhar CTLFLAG_RD, &txq->txpkts1_wrs, 43597951040fSNavdeep Parhar "# of txpkts (type 1) work requests"); 4360fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts", 43617951040fSNavdeep Parhar CTLFLAG_RD, &txq->txpkts0_pkts, 43627951040fSNavdeep Parhar "# of 
frames tx'd using type0 txpkts work requests"); 4363fe2ebb76SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts", 43647951040fSNavdeep Parhar CTLFLAG_RD, &txq->txpkts1_pkts, 43657951040fSNavdeep Parhar "# of frames tx'd using type1 txpkts work requests"); 43665cdaef71SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD, 43675cdaef71SJohn Baldwin &txq->raw_wrs, "# of raw work requests (non-packets)"); 4368a4a4ad2dSNavdeep Parhar SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vxlan_tso_wrs", 4369a4a4ad2dSNavdeep Parhar CTLFLAG_RD, &txq->vxlan_tso_wrs, "# of VXLAN TSO work requests"); 4370a4a4ad2dSNavdeep Parhar SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vxlan_txcsum", 4371a4a4ad2dSNavdeep Parhar CTLFLAG_RD, &txq->vxlan_txcsum, 4372a4a4ad2dSNavdeep Parhar "# of times hardware assisted with inner checksums (VXLAN)"); 4373bddf7343SJohn Baldwin 4374bddf7343SJohn Baldwin #ifdef KERN_TLS 4375bddf7343SJohn Baldwin if (sc->flags & KERN_TLS_OK) { 4376bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4377bddf7343SJohn Baldwin "kern_tls_records", CTLFLAG_RD, &txq->kern_tls_records, 4378bddf7343SJohn Baldwin "# of NIC TLS records transmitted"); 4379bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4380bddf7343SJohn Baldwin "kern_tls_short", CTLFLAG_RD, &txq->kern_tls_short, 4381bddf7343SJohn Baldwin "# of short NIC TLS records transmitted"); 4382bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4383bddf7343SJohn Baldwin "kern_tls_partial", CTLFLAG_RD, &txq->kern_tls_partial, 4384bddf7343SJohn Baldwin "# of partial NIC TLS records transmitted"); 4385bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4386bddf7343SJohn Baldwin "kern_tls_full", CTLFLAG_RD, &txq->kern_tls_full, 4387bddf7343SJohn Baldwin "# of full NIC TLS records transmitted"); 4388bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4389bddf7343SJohn Baldwin 
"kern_tls_octets", CTLFLAG_RD, &txq->kern_tls_octets, 4390bddf7343SJohn Baldwin "# of payload octets in transmitted NIC TLS records"); 4391bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4392bddf7343SJohn Baldwin "kern_tls_waste", CTLFLAG_RD, &txq->kern_tls_waste, 4393bddf7343SJohn Baldwin "# of octets DMAd but not transmitted in NIC TLS records"); 4394bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4395bddf7343SJohn Baldwin "kern_tls_options", CTLFLAG_RD, &txq->kern_tls_options, 4396bddf7343SJohn Baldwin "# of NIC TLS options-only packets transmitted"); 4397bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4398bddf7343SJohn Baldwin "kern_tls_header", CTLFLAG_RD, &txq->kern_tls_header, 4399bddf7343SJohn Baldwin "# of NIC TLS header-only packets transmitted"); 4400bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4401bddf7343SJohn Baldwin "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin, 4402bddf7343SJohn Baldwin "# of NIC TLS FIN-only packets transmitted"); 4403bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4404bddf7343SJohn Baldwin "kern_tls_fin_short", CTLFLAG_RD, &txq->kern_tls_fin_short, 4405bddf7343SJohn Baldwin "# of NIC TLS padded FIN packets on short TLS records"); 4406bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4407bddf7343SJohn Baldwin "kern_tls_cbc", CTLFLAG_RD, &txq->kern_tls_cbc, 4408bddf7343SJohn Baldwin "# of NIC TLS sessions using AES-CBC"); 4409bddf7343SJohn Baldwin SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, 4410bddf7343SJohn Baldwin "kern_tls_gcm", CTLFLAG_RD, &txq->kern_tls_gcm, 4411bddf7343SJohn Baldwin "# of NIC TLS sessions using AES-GCM"); 4412bddf7343SJohn Baldwin } 4413bddf7343SJohn Baldwin #endif 4414d735920dSNavdeep Parhar mp_ring_sysctls(txq->r, &vi->ctx, children); 441554e4ee71SNavdeep Parhar 44167951040fSNavdeep Parhar return (0); 441754e4ee71SNavdeep Parhar } 441854e4ee71SNavdeep Parhar 441954e4ee71SNavdeep 
/*
 * Release all resources attached to a tx queue: the hardware egress queue,
 * the sgl scratch area, the software descriptor array, and the mp_ring.
 * On success the txq structure itself is zeroed so it can be reused.
 *
 * Returns 0 on success, or the errno from free_eq (txq left intact).
 */
static int
free_txq(struct vi_info *vi, struct sge_txq *txq)
{
	int rc;
	struct adapter *sc = vi->adapter;
	struct sge_eq *eq = &txq->eq;

	/* Free the hardware queue first; bail out if that fails. */
	rc = free_eq(sc, eq);
	if (rc)
		return (rc);

	sglist_free(txq->gl);
	free(txq->sdesc, M_CXGBE);
	mp_ring_free(txq->r);

	bzero(txq, sizeof(*txq));
	return (0);
}

/*
 * bus_dmamap_load(9) callback for single-segment mappings: stores the bus
 * address of the one segment into *arg, or 0 if the load failed.
 */
static void
oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *ba = arg;

	KASSERT(nseg == 1,
	    ("%s meant for single segment mappings only.", __func__));

	*ba = error ? 0 : segs->ds_addr;
}

/*
 * Ring the freelist doorbell to tell the chip about the freelist descriptors
 * written since the last ring (fl->dbidx .. fl->pidx/8).  The freelist lock
 * must be held (pidx/dbidx are read here) and there must be at least one new
 * descriptor to announce.
 */
static inline void
ring_fl_db(struct adapter *sc, struct sge_fl *fl)
{
	uint32_t n, v;

	/* pidx counts buffers (8 per hw descriptor), dbidx counts hw desc. */
	n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx);
	MPASS(n > 0);

	/* Make sure the descriptor writes are visible before the doorbell. */
	wmb();
	v = fl->dbval | V_PIDX(n);
	if (fl->udb)
		*fl->udb = htole32(v);	/* user doorbell (BAR2) */
	else
		t4_write_reg(sc, sc->sge_kdoorbell_reg, v);
	IDXINCR(fl->dbidx, n, fl->sidx);
}

/*
 * Fills up the freelist by allocating up to 'n' buffers.  Buffers that are
 * recycled do not count towards this allocation budget.
 *
 * Returns non-zero to indicate that this freelist should be added to the list
 * of starving freelists.
 */
static int
refill_fl(struct adapter *sc, struct sge_fl *fl, int n)
{
	__be64 *d;			/* hw descriptor being written */
	struct fl_sdesc *sd;		/* matching software descriptor */
	uintptr_t pa;
	caddr_t cl;
	struct rx_buf_info *rxb;
	struct cluster_metadata *clm;
	uint16_t max_pidx;
	uint16_t hw_cidx = fl->hw_cidx;	/* stable snapshot */

	FL_LOCK_ASSERT_OWNED(fl);

	/*
	 * We always stop at the beginning of the hardware descriptor that's just
	 * before the one with the hw cidx.  This is to avoid hw pidx = hw cidx,
	 * which would mean an empty freelist to the chip.
	 */
	max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1;
	if (fl->pidx == max_pidx * 8)
		return (0);	/* already full, nothing to do */

	d = &fl->desc[fl->pidx];
	sd = &fl->sdesc[fl->pidx];

	while (n > 0) {

		if (sd->cl != NULL) {

			if (sd->nmbuf == 0) {
				/*
				 * Fast recycle without involving any atomics on
				 * the cluster's metadata (if the cluster has
				 * metadata).  This happens when all frames
				 * received in the cluster were small enough to
				 * fit within a single mbuf each.
				 */
				fl->cl_fast_recycled++;
				goto recycled;
			}

			/*
			 * Cluster is guaranteed to have metadata.  Clusters
			 * without metadata always take the fast recycle path
			 * when they're recycled.
			 */
			clm = cl_metadata(sd);
			MPASS(clm != NULL);

			/* Drop our reference; recycle if it was the last one. */
			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
				fl->cl_recycled++;
				counter_u64_add(extfree_rels, 1);
				goto recycled;
			}
			sd->cl = NULL;	/* gave up my reference */
		}
		MPASS(sd->cl == NULL);

		/* Allocate a new cluster, falling back to the safe zone. */
		rxb = &sc->sge.rx_buf_info[fl->zidx];
		cl = uma_zalloc(rxb->zone, M_NOWAIT);
		if (__predict_false(cl == NULL)) {
			if (fl->zidx != fl->safe_zidx) {
				rxb = &sc->sge.rx_buf_info[fl->safe_zidx];
				cl = uma_zalloc(rxb->zone, M_NOWAIT);
			}
			if (cl == NULL)
				break;	/* out of memory; stop refilling */
		}
		fl->cl_allocated++;
		n--;	/* only fresh allocations count against the budget */

		pa = pmap_kextract((vm_offset_t)cl);
		sd->cl = cl;
		sd->zidx = fl->zidx;

		/* hwidx2/size2 are the packing variants of hwidx1/size 0. */
		if (fl->flags & FL_BUF_PACKING) {
			*d = htobe64(pa | rxb->hwidx2);
			sd->moff = rxb->size2;
		} else {
			*d = htobe64(pa | rxb->hwidx1);
			sd->moff = 0;
		}
recycled:
		sd->nmbuf = 0;
		d++;
		sd++;
		/* Every 8 buffers completes one hardware descriptor. */
		if (__predict_false((++fl->pidx & 7) == 0)) {
			uint16_t pidx = fl->pidx >> 3;

			if (__predict_false(pidx == fl->sidx)) {
				/* Wrap around to the start of the ring. */
				fl->pidx = 0;
				pidx = 0;
				sd = fl->sdesc;
				d = fl->desc;
			}
			if (n < 8 || pidx == max_pidx)
				break;

			/* Ring the doorbell every 4 full hw descriptors. */
			if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4)
				ring_fl_db(sc, fl);
		}
	}

	/* Announce any remaining complete descriptors. */
	if ((fl->pidx >> 3) != fl->dbidx)
		ring_fl_db(sc, fl);

	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
}

/*
 * Attempt to refill all starving freelists.
 */
static void
refill_sfl(void *arg)
{
	struct adapter *sc = arg;
	struct sge_fl *fl, *fl_temp;

	mtx_assert(&sc->sfl_lock, MA_OWNED);
	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
		FL_LOCK(fl);
		refill_fl(sc, fl, 64);
		/* No longer starving (or being torn down): drop from list. */
		if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
			TAILQ_REMOVE(&sc->sfl, fl, link);
			fl->flags &= ~FL_STARVING;
		}
		FL_UNLOCK(fl);
	}

	/* Retry any freelists that are still starving in 1/5 s. */
	if (!TAILQ_EMPTY(&sc->sfl))
		callout_schedule(&sc->sfl_callout, hz / 5);
}

/*
 * Allocate the software descriptor array for a freelist: one entry per
 * buffer, 8 buffers per hardware descriptor (fl->sidx hw descriptors).
 * Always succeeds (M_WAITOK).
 */
static int
alloc_fl_sdesc(struct sge_fl *fl)
{

	fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE,
	    M_ZERO | M_WAITOK);

	return (0);
}

/*
 * Free every cluster still held by the freelist's software descriptors and
 * then the descriptor array itself.  Mirrors the reference rules used by
 * refill_fl: a cluster with nmbuf == 0 has no outstanding mbuf references
 * and is freed outright; otherwise (packing freelists only) the freelist's
 * own metadata reference is dropped and the cluster freed if it was last.
 */
static void
free_fl_sdesc(struct adapter *sc, struct sge_fl *fl)
{
	struct fl_sdesc *sd;
	struct cluster_metadata *clm;
	int i;

	sd = fl->sdesc;
	for (i = 0; i < fl->sidx * 8; i++, sd++) {
		if (sd->cl == NULL)
			continue;

		if (sd->nmbuf == 0)
			uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl);
		else if (fl->flags & FL_BUF_PACKING) {
			clm = cl_metadata(sd);
			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
				uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone,
				    sd->cl);
				counter_u64_add(extfree_rels, 1);
			}
		}
		/* NOTE(review): nmbuf != 0 on a non-packing fl leaks here —
		 * presumably that combination cannot occur; confirm. */
		sd->cl = NULL;
	}

	free(fl->sdesc, M_CXGBE);
	fl->sdesc = NULL;
}

/*
 * Build the scatter/gather list for mbuf chain 'm' in 'gl'.  The mbuf must
 * already have been vetted (its segment count is cached in the pkthdr), so
 * failure here is a programming error and panics.
 */
static inline void
get_pkt_gl(struct mbuf *m, struct sglist *gl)
{
	int rc;

	M_ASSERTPKTHDR(m);

	sglist_reset(gl);
	rc = sglist_append_mbuf(gl, m);
	if (__predict_false(rc != 0)) {
		panic("%s: mbuf %p (%d segs) was vetted earlier but now fails "
		    "with %d.", __func__, m, mbuf_nsegs(m), rc);
	}

	KASSERT(gl->sg_nseg == mbuf_nsegs(m),
	    ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m,
	    mbuf_nsegs(m), gl->sg_nseg));
#if 0	/* vm_wr not readily available here. */
	KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= max_nsegs_allowed(m, vm_wr),
	    ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__,
	    gl->sg_nseg, max_nsegs_allowed(m, vm_wr)));
#endif
}

/*
 * len16 for a txpkt WR with a GL.  Includes the firmware work request header.
 * 'extra' is additional CPL payload (e.g. an LSO CPL) in bytes.
 */
static inline u_int
txpkt_len16(u_int nsegs, const u_int extra)
{
	u_int n;

	MPASS(nsegs > 0);

	nsegs--; /* first segment is part of ulptx_sgl */
	n = extra + sizeof(struct fw_eth_tx_pkt_wr) +
	    sizeof(struct cpl_tx_pkt_core) +
	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));

	return (howmany(n, 16));
}

/*
 * len16 for a txpkt_vm WR with a GL.  Includes the firmware work
 * request header.
 */
static inline u_int
txpkt_vm_len16(u_int nsegs, const u_int extra)
{
	u_int n;

	MPASS(nsegs > 0);

	nsegs--; /* first segment is part of ulptx_sgl */
	n = extra + sizeof(struct fw_eth_tx_pkt_vm_wr) +
	    sizeof(struct cpl_tx_pkt_core) +
	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));

	return (howmany(n, 16));
}

/*
 * Compute and cache the WR length (in 16B units) for this mbuf, choosing the
 * txpkt vs. txpkt_vm WR format and accounting for an LSO or tunnel-LSO CPL
 * when TSO / VXLAN TSO is needed.
 */
static inline void
calculate_mbuf_len16(struct mbuf *m, bool vm_wr)
{
	const int lso = sizeof(struct cpl_tx_pkt_lso_core);
	const int tnl_lso = sizeof(struct cpl_tx_tnl_lso);

	if (vm_wr) {
		/* VM WRs use the plain LSO CPL only (no VXLAN TSO here). */
		if (needs_tso(m))
			set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), lso));
		else
			set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), 0));
		return;
	}

	if (needs_tso(m)) {
		if (needs_vxlan_tso(m))
			set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), tnl_lso));
		else
			set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), lso));
	} else
		set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), 0));
}

/*
 * len16 for a
 * txpkts type 0 WR with a GL.  Does not include the firmware work
 * request header.
 */
static inline u_int
txpkts0_len16(u_int nsegs)
{
	u_int n;

	MPASS(nsegs > 0);

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) +
	    sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) +
	    8 * ((3 * nsegs) / 2 + (nsegs & 1));

	return (howmany(n, 16));
}

/*
 * len16 for a txpkts type 1 WR with a GL.  Does not include the firmware work
 * request header.
 */
static inline u_int
txpkts1_len16(void)
{
	u_int n;

	/* Type 1 packets share one SGL layout: CPL + ulptx_sgl, no GL fanout. */
	n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl);

	return (howmany(n, 16));
}

/*
 * Number of payload bytes that fit as immediate data in a txpkt WR that
 * occupies 'ndesc' hardware descriptors.
 */
static inline u_int
imm_payload(u_int ndesc)
{
	u_int n;

	n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) -
	    sizeof(struct cpl_tx_pkt_core);

	return (n);
}

/*
 * Translate the mbuf's csum_flags into the ctrl1 checksum bits of a
 * cpl_tx_pkt_core.  Returns both-checksums-disabled if no hw assist is
 * needed.  For VXLAN packets the header lengths given to the chip cover the
 * outer headers up to the inner L3 header (inner checksums are offloaded).
 */
static inline uint64_t
csum_to_ctrl(struct adapter *sc, struct mbuf *m)
{
	uint64_t ctrl;
	int csum_type, l2hlen, l3hlen;
	int x, y;
	/* Indexed by [TCP/UDP/IP-only][IPv4/IPv6]; 0 = invalid combination. */
	static const int csum_types[3][2] = {
		{TX_CSUM_TCPIP, TX_CSUM_TCPIP6},
		{TX_CSUM_UDPIP, TX_CSUM_UDPIP6},
		{TX_CSUM_IP, 0}
	};

	M_ASSERTPKTHDR(m);

	if (!needs_hwcsum(m))
		return (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

	MPASS(m->m_pkthdr.l2hlen >= ETHER_HDR_LEN);
	MPASS(m->m_pkthdr.l3hlen >= sizeof(struct ip));

	if (needs_vxlan_csum(m)) {
		MPASS(m->m_pkthdr.l4hlen > 0);
		MPASS(m->m_pkthdr.l5hlen > 0);
		MPASS(m->m_pkthdr.inner_l2hlen >= ETHER_HDR_LEN);
		MPASS(m->m_pkthdr.inner_l3hlen >= sizeof(struct ip));

		/* "l2" as seen by the chip = everything up to inner L3. */
		l2hlen = m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
		    m->m_pkthdr.l4hlen + m->m_pkthdr.l5hlen +
		    m->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN;
		l3hlen = m->m_pkthdr.inner_l3hlen;
	} else {
		l2hlen = m->m_pkthdr.l2hlen - ETHER_HDR_LEN;
		l3hlen = m->m_pkthdr.l3hlen;
	}

	ctrl = 0;
	if (!needs_l3_csum(m))
		ctrl |= F_TXPKT_IPCSUM_DIS;

	if (m->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_INNER_IP_TCP |
	    CSUM_IP6_TCP | CSUM_INNER_IP6_TCP))
		x = 0;	/* TCP */
	else if (m->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_INNER_IP_UDP |
	    CSUM_IP6_UDP | CSUM_INNER_IP6_UDP))
		x = 1;	/* UDP */
	else
		x = 2;	/* IP checksum only */

	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP |
	    CSUM_INNER_IP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_UDP))
		y = 0;	/* IPv4 */
	else {
		MPASS(m->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP |
		    CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_UDP));
		y = 1;	/* IPv6 */
	}
	/*
	 * needs_hwcsum returned true earlier so there must be some kind of
	 * checksum to calculate.
	 */
	csum_type = csum_types[x][y];
	MPASS(csum_type != 0);
	if (csum_type == TX_CSUM_IP)
		ctrl |= F_TXPKT_L4CSUM_DIS;
	ctrl |= V_TXPKT_CSUM_TYPE(csum_type) | V_TXPKT_IPHDR_LEN(l3hlen);
	/* The ethernet-header-length field moved on T6. */
	if (chip_id(sc) <= CHELSIO_T5)
		ctrl |= V_TXPKT_ETHHDR_LEN(l2hlen);
	else
		ctrl |= V_T6_TXPKT_ETHHDR_LEN(l2hlen);

	return (ctrl);
}

/*
 * Fill in a cpl_tx_pkt_lso_core at 'cpl' for TSO on mbuf 'm0' and return a
 * pointer just past it (where the next CPL goes).
 */
static inline void *
write_lso_cpl(void *cpl, struct mbuf *m0)
{
	struct cpl_tx_pkt_lso_core *lso;
	uint32_t ctrl;

	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
	    m0->m_pkthdr.l4hlen > 0,
	    ("%s: mbuf %p needs TSO but missing header lengths",
	    __func__, m0));

	ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
	    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
	    V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
	    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
	    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
	if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
		ctrl |= F_LSO_IPV6;

	lso = cpl;
	lso->lso_ctrl = htobe32(ctrl);
	lso->ipid_ofst = htobe16(0);
	lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
	lso->seqno_offset = htobe32(0);
	lso->len = htobe32(m0->m_pkthdr.len);

	return (lso + 1);
}

/*
 * Fill in a cpl_tx_tnl_lso at 'cpl' for VXLAN TSO on mbuf 'm0' (outer and
 * inner header descriptions) and return a pointer just past it.
 */
static void *
write_tnl_lso_cpl(void *cpl, struct mbuf *m0)
{
	struct cpl_tx_tnl_lso *tnl_lso = cpl;
	uint32_t ctrl;

	KASSERT(m0->m_pkthdr.inner_l2hlen > 0 &&
	    m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 &&
	    m0->m_pkthdr.inner_l5hlen > 0,
	    ("%s: mbuf %p needs VXLAN_TSO but missing inner header lengths",
	    __func__, m0));
	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
	    m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0,
	    ("%s: mbuf %p needs VXLAN_TSO but missing outer header lengths",
	    __func__, m0));

	/* Outer headers. */
	ctrl = V_CPL_TX_TNL_LSO_OPCODE(CPL_TX_TNL_LSO) |
	    F_CPL_TX_TNL_LSO_FIRST | F_CPL_TX_TNL_LSO_LAST |
	    V_CPL_TX_TNL_LSO_ETHHDRLENOUT(
		(m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
	    V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) |
	    F_CPL_TX_TNL_LSO_IPLENSETOUT;
	if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
		ctrl |= F_CPL_TX_TNL_LSO_IPV6OUT;
	else {
		/* Outer IPv4: have hw fix up the header checksum and IP id. */
		ctrl |= F_CPL_TX_TNL_LSO_IPHDRCHKOUT |
		    F_CPL_TX_TNL_LSO_IPIDINCOUT;
	}
	tnl_lso->op_to_IpIdSplitOut = htobe32(ctrl);
	tnl_lso->IpIdOffsetOut = 0;
	tnl_lso->UdpLenSetOut_to_TnlHdrLen =
	    htobe16(F_CPL_TX_TNL_LSO_UDPCHKCLROUT |
		F_CPL_TX_TNL_LSO_UDPLENSETOUT |
		V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen +
		    m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen +
		    m0->m_pkthdr.l5hlen) |
		V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN));
	tnl_lso->r1 = 0;

	/* Inner headers. */
	ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN(
	    (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) |
	    V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) |
	    V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2);
	if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr))
		ctrl |= F_CPL_TX_TNL_LSO_IPV6;
	tnl_lso->Flow_to_TcpHdrLen = htobe32(ctrl);
	tnl_lso->IpIdOffset = 0;
	tnl_lso->IpIdSplit_to_Mss =
	    htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz));
	tnl_lso->TCPSeqOffset = 0;
	tnl_lso->EthLenOffset_Size =
	    htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len));

	return (tnl_lso + 1);
}

#define VM_TX_L2HDR_LEN	16	/* ethmacdst to vlantci */

/*
 * Write a VM txpkt WR for this packet to the hardware descriptors, update the
 * software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0)
{
	struct sge_eq *eq;
	struct fw_eth_tx_pkt_vm_wr *wr;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;		/* used in many unrelated places */
	uint64_t ctrl1;
	int len16, ndesc, pktlen, nsegs;
	caddr_t dst;

	TXQ_LOCK_ASSERT_OWNED(txq);
	M_ASSERTPKTHDR(m0);

	len16 = mbuf_len16(m0);
	nsegs = mbuf_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	/* ctrl = immediate CPL payload length for the WR header. */
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	ndesc = tx_len16_to_desc(len16);

	/* Firmware work request header */
	eq = &txq->eq;
	wr = (void *)&eq->desc[eq->pidx];
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3[0] = 0;
	wr->r3[1] = 0;

	/*
	 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci.
	 * vlantci is ignored unless the ethtype is 0x8100, so it's
	 * simpler to always copy it rather than making it
	 * conditional.  Also, it seems that we do not have to set
	 * vlantci or fake the ethtype when doing VLAN tag insertion.
	 */
	m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst);

	if (needs_tso(m0)) {
		cpl = write_lso_cpl(wr + 1, m0);
		txq->tso_wrs++;
	} else
		cpl = (void *)(wr + 1);

	/* Checksum offload */
	ctrl1 = csum_to_ctrl(sc, m0);
	if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS))
		txq->txcsum++;	/* some hardware assistance provided */

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
		txq->vlan_insertion++;
	}

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* SGL */
	dst = (void *)(cpl + 1);

	/*
	 * A packet using TSO will use up an entire descriptor for the
	 * firmware work request header, LSO CPL, and TX_PKT_XT CPL.
	 * If this descriptor is the last descriptor in the ring, wrap
	 * around to the front of the ring explicitly for the start of
	 * the sgl.
	 */
	if (dst == (void *)&eq->desc[eq->sidx]) {
		dst = (void *)&eq->desc[0];
		write_gl_to_txd(txq, m0, &dst, 0);
	} else
		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
	txq->sgl_wrs++;
	txq->txpkt_wrs++;

	/* Remember the mbuf so it can be freed on egress completion. */
	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}

/*
 * Write a raw WR to the hardware descriptors, update the software
 * descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.  The mbuf chain itself holds a fully-formed
 * work request and is copied verbatim into the descriptor ring.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct mbuf *m;
	caddr_t dst;
	int len16, ndesc;

	len16 = mbuf_len16(m0);
	ndesc = tx_len16_to_desc(len16);
	MPASS(ndesc <= available);

	/* Copy the chain into the ring; copy_to_txd handles ring wrap. */
	dst = wr;
	for (m = m0; m != NULL; m = m->m_next)
		copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);

	txq->raw_wrs++;

	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}

/*
 * Write a txpkt WR for this packet to the hardware descriptors, update the
 * software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkt_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0,
    u_int available)
{
	struct sge_eq *eq;
	struct fw_eth_tx_pkt_wr *wr;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;	/* used in many unrelated places */
	uint64_t ctrl1;
	int len16, ndesc, pktlen, nsegs;
	caddr_t dst;

	TXQ_LOCK_ASSERT_OWNED(txq);
	M_ASSERTPKTHDR(m0);

	/* len16/nsegs were stashed in the mbuf earlier by the tx path. */
	len16 = mbuf_len16(m0);
	nsegs = mbuf_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	/* ctrl is the immediate-data length reported in the WR header. */
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0)) {
		if (needs_vxlan_tso(m0))
			ctrl += sizeof(struct cpl_tx_tnl_lso);
		else
			ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	} else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) &&
	    available >= 2) {
		/* Immediate data.  Recalculate len16 and set nsegs to 0. */
		ctrl += pktlen;
		len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) +
		    sizeof(struct cpl_tx_pkt_core) + pktlen, 16);
		nsegs = 0;
	}
	ndesc = tx_len16_to_desc(len16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	eq = &txq->eq;
	wr = (void *)&eq->desc[eq->pidx];
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3 = 0;

	/* LSO/tunnel-LSO CPL (if needed) is written right after the WR. */
	if (needs_tso(m0)) {
		if (needs_vxlan_tso(m0)) {
			cpl = write_tnl_lso_cpl(wr + 1, m0);
			txq->vxlan_tso_wrs++;
		} else {
			cpl = write_lso_cpl(wr + 1, m0);
			txq->tso_wrs++;
		}
	} else
		cpl = (void *)(wr + 1);

	/* Checksum offload */
	ctrl1 = csum_to_ctrl(sc, m0);
	if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) {
		/* some hardware assistance provided */
		if (needs_vxlan_csum(m0))
			txq->vxlan_txcsum++;
		else
			txq->txcsum++;
	}

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
		txq->vlan_insertion++;
	}

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* SGL (or immediate payload when nsegs was set to 0 above). */
	dst = (void *)(cpl + 1);
	if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx]))
		dst = (caddr_t)&eq->desc[0];
	if (nsegs > 0) {

		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
		txq->sgl_wrs++;
	} else {
		struct mbuf *m;

		for (m = m0; m != NULL; m = m->m_next) {
			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
#ifdef INVARIANTS
			pktlen -= m->m_len;
#endif
		}
#ifdef INVARIANTS
		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
#endif
		txq->imm_wrs++;
	}

	txq->txpkt_wrs++;

	/* Record the mbuf so it can be freed once the hw is done with it. */
	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}

/*
 * Returns true if m's L2 header differs from the one saved in the
 * coalescing state (see save_l2hdr).  Used to decide whether m can join
 * the txpkts WR being built up.
 */
static inline bool
cmp_l2hdr(struct txpkts *txp, struct mbuf *m)
{
	int len;

	MPASS(txp->npkt > 0);
	MPASS(m->m_len >= VM_TX_L2HDR_LEN);

	/* Compare the vlantci too only when the saved ethtype is 802.1Q. */
	if (txp->ethtype == be16toh(ETHERTYPE_VLAN))
		len = VM_TX_L2HDR_LEN;
	else
		len = sizeof(struct ether_header);

	return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0);
}

/*
 * Save m's L2 header (dst/src MAC, ethtype, and vlantci) in the coalescing
 * state for later comparison by cmp_l2hdr.
 */
static inline void
save_l2hdr(struct txpkts *txp, struct mbuf *m)
{

	MPASS(m->m_len >= VM_TX_L2HDR_LEN);

	memcpy(&txp->ethmacdst[0], mtod(m, const void *), VM_TX_L2HDR_LEN);
}

/*
 * Try to add m to the txpkts WR being assembled in txq->txp (VF variant).
 * Returns 0 if m was added, EINVAL if m can never be coalesced (caller
 * should transmit it on its own), or EAGAIN if the WR built so far must be
 * sent first.  *send is set when the accumulated WR should be written out.
 */
static int
add_to_txpkts_vf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m,
    int avail, bool *send)
{
	struct txpkts *txp = &txq->txp;

	/* Cannot have TSO and coalesce at the same time. */
	if (cannot_use_txpkts(m)) {
cannot_coalesce:
		*send = txp->npkt > 0;
		return (EINVAL);
	}

	/* VF allows coalescing of type 1 (1 GL) only */
	if (mbuf_nsegs(m) > 1)
		goto cannot_coalesce;

	*send = false;
	if (txp->npkt > 0) {
		MPASS(tx_len16_to_desc(txp->len16) <= avail);
		MPASS(txp->npkt < txp->max_npkt);
		MPASS(txp->wr_type == 1);	/* VF supports type 1 only */

		if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) {
retry_after_send:
			*send = true;
			return (EAGAIN);
		}
		/* The WR's total payload length field is 16 bits wide. */
		if (m->m_pkthdr.len + txp->plen > 65535)
			goto retry_after_send;
		/* All packets in a VM txpkts WR share one L2 header. */
		if (cmp_l2hdr(txp, m))
			goto retry_after_send;

		txp->len16 += txpkts1_len16();
		txp->plen += m->m_pkthdr.len;
		txp->mb[txp->npkt++] = m;
		if (txp->npkt == txp->max_npkt)
			*send = true;
	} else {
		/* First packet: start a new WR. */
		txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) +
		    txpkts1_len16();
		if (tx_len16_to_desc(txp->len16) > avail)
			goto cannot_coalesce;
		txp->npkt = 1;
		txp->wr_type = 1;
		txp->plen = m->m_pkthdr.len;
		txp->mb[0] = m;
		save_l2hdr(txp, m);
	}
	return (0);
}

/*
 * Try to add m to the txpkts WR being assembled in txq->txp (PF variant).
 * Same return convention as add_to_txpkts_vf.  The PF supports both WR
 * types: type 0 (per-packet ULP_TX_PKT framing, multiple GLs) and type 1
 * (single GL per packet).
 */
static int
add_to_txpkts_pf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m,
    int avail, bool *send)
{
	struct txpkts *txp = &txq->txp;
	int nsegs;

	MPASS(!(sc->flags & IS_VF));

	/* Cannot have TSO and coalesce at the same time. */
	if (cannot_use_txpkts(m)) {
cannot_coalesce:
		*send = txp->npkt > 0;
		return (EINVAL);
	}

	*send = false;
	nsegs = mbuf_nsegs(m);
	if (txp->npkt == 0) {
		/* The WR's total payload length field is 16 bits wide. */
		if (m->m_pkthdr.len > 65535)
			goto cannot_coalesce;
		/* Pick the WR type based on the first packet's GL. */
		if (nsegs > 1) {
			txp->wr_type = 0;
			txp->len16 =
			    howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) +
			    txpkts0_len16(nsegs);
		} else {
			txp->wr_type = 1;
			txp->len16 =
			    howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) +
			    txpkts1_len16();
		}
		if (tx_len16_to_desc(txp->len16) > avail)
			goto cannot_coalesce;
		txp->npkt = 1;
		txp->plen = m->m_pkthdr.len;
		txp->mb[0] = m;
	} else {
		MPASS(tx_len16_to_desc(txp->len16) <= avail);
		MPASS(txp->npkt < txp->max_npkt);

		if (m->m_pkthdr.len + txp->plen > 65535) {
retry_after_send:
			*send = true;
			return (EAGAIN);
		}

		MPASS(txp->wr_type == 0 || txp->wr_type == 1);
		if (txp->wr_type == 0) {
			if (tx_len16_to_desc(txp->len16 +
			    txpkts0_len16(nsegs)) > min(avail, SGE_MAX_WR_NDESC))
				goto retry_after_send;
			txp->len16 += txpkts0_len16(nsegs);
		} else {
			/* Type 1 holds single-GL packets only. */
			if (nsegs != 1)
				goto retry_after_send;
			if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) >
			    avail)
				goto retry_after_send;
			txp->len16 += txpkts1_len16();
		}

		txp->plen += m->m_pkthdr.len;
		txp->mb[txp->npkt++] = m;
		if (txp->npkt == txp->max_npkt)
			*send = true;
	}
	return (0);
}

/*
 * Write a txpkts WR for the packets in txp to the hardware descriptors, update
 * the software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkts_wr(struct adapter *sc, struct sge_txq *txq)
{
	const struct txpkts *txp = &txq->txp;
	struct sge_eq *eq = &txq->eq;
	struct fw_eth_tx_pkts_wr *wr;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint64_t ctrl1;
	int ndesc, i, checkwrap;
	struct mbuf *m, *last;
	void *flitp;

	TXQ_LOCK_ASSERT_OWNED(txq);
	MPASS(txp->npkt > 0);
	MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));

	/* Common WR header for all coalesced packets. */
	wr = (void *)&eq->desc[eq->pidx];
	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16));
	wr->plen = htobe16(txp->plen);
	wr->npkt = txp->npkt;
	wr->r3 = 0;
	wr->type = txp->wr_type;
	flitp = wr + 1;

	/*
	 * At this point we are 16B into a hardware descriptor.  If checkwrap is
	 * set then we know the WR is going to wrap around somewhere.  We'll
	 * check for that at appropriate points.
	 */
	ndesc = tx_len16_to_desc(txp->len16);
	last = NULL;
	checkwrap = eq->sidx - ndesc < eq->pidx;
	for (i = 0; i < txp->npkt; i++) {
		m = txp->mb[i];
		if (txp->wr_type == 0) {
			struct ulp_txpkt *ulpmc;
			struct ulptx_idata *ulpsc;

			/* ULP master command */
			ulpmc = flitp;
			ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
			    V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid));
			ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m)));

			/* ULP subcommand */
			ulpsc = (void *)(ulpmc + 1);
			ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
			    F_ULP_TX_SC_MORE);
			ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));

			cpl = (void *)(ulpsc + 1);
			if (checkwrap &&
			    (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx])
				cpl = (void *)&eq->desc[0];
		} else {
			/* Type 1: the CPL follows immediately. */
			cpl = flitp;
		}

		/* Checksum offload */
		ctrl1 = csum_to_ctrl(sc, m);
		if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) {
			/* some hardware assistance provided */
			if (needs_vxlan_csum(m))
				txq->vxlan_txcsum++;
			else
				txq->txcsum++;
		}

		/* VLAN tag insertion */
		if (needs_vlan_insertion(m)) {
			ctrl1 |= F_TXPKT_VLAN_VLD |
			    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
			txq->vlan_insertion++;
		}

		/* CPL header */
		cpl->ctrl0 = txq->cpl_ctrl0;
		cpl->pack = 0;
		cpl->len = htobe16(m->m_pkthdr.len);
		cpl->ctrl1 = htobe64(ctrl1);

		flitp = cpl + 1;
		if (checkwrap &&
		    (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
			flitp = (void *)&eq->desc[0];

		write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap);

		/* Chain the mbufs via m_nextpkt so one txsd covers them all. */
		if (last != NULL)
			last->m_nextpkt = m;
		last = m;
	}

	txq->sgl_wrs++;
	if (txp->wr_type == 0) {
		txq->txpkts0_pkts += txp->npkt;
		txq->txpkts0_wrs++;
	} else {
		txq->txpkts1_pkts += txp->npkt;
		txq->txpkts1_wrs++;
	}

	txsd = &txq->sdesc[eq->pidx];
	txsd->m = txp->mb[0];
	txsd->desc_used = ndesc;

	return (ndesc);
}

/*
 * VM variant of write_txpkts_wr: write the coalesced packets in txq->txp as
 * a single FW_ETH_TX_PKTS_VM_WR.  Only wr_type 1 (one GL per packet) is
 * supported and all packets share the L2 header saved in txp->ethmacdst.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkts_vm_wr(struct adapter *sc, struct sge_txq *txq)
{
	const struct txpkts *txp = &txq->txp;
	struct sge_eq *eq = &txq->eq;
	struct fw_eth_tx_pkts_vm_wr *wr;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint64_t ctrl1;
	int ndesc, i;
	struct mbuf *m, *last;
	void *flitp;

	TXQ_LOCK_ASSERT_OWNED(txq);
	MPASS(txp->npkt > 0);
	MPASS(txp->wr_type == 1);	/* VF supports type 1 only */
	MPASS(txp->mb[0] != NULL);
	MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));

	/* Common WR header, including the shared L2 header. */
	wr = (void *)&eq->desc[eq->pidx];
	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16));
	wr->r3 = 0;
	wr->plen = htobe16(txp->plen);
	wr->npkt = txp->npkt;
	wr->r4 = 0;
	memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16);
	flitp = wr + 1;

	/*
	 * At this point we are 32B into a hardware descriptor.  Each mbuf in
	 * the WR will take 32B so we check for the end of the descriptor ring
	 * before writing odd mbufs (mb[1], 3, 5, ..)
	 */
	ndesc = tx_len16_to_desc(txp->len16);
	last = NULL;
	for (i = 0; i < txp->npkt; i++) {
		m = txp->mb[i];
		if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
			flitp = &eq->desc[0];
		cpl = flitp;

		/* Checksum offload */
		ctrl1 = csum_to_ctrl(sc, m);
		if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS))
			txq->txcsum++;	/* some hardware assistance provided */

		/* VLAN tag insertion */
		if (needs_vlan_insertion(m)) {
			ctrl1 |= F_TXPKT_VLAN_VLD |
			    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
			txq->vlan_insertion++;
		}

		/* CPL header */
		cpl->ctrl0 = txq->cpl_ctrl0;
		cpl->pack = 0;
		cpl->len = htobe16(m->m_pkthdr.len);
		cpl->ctrl1 = htobe64(ctrl1);

		flitp = cpl + 1;
		MPASS(mbuf_nsegs(m) == 1);
		write_gl_to_txd(txq, m, (caddr_t *)(&flitp), 0);

		/* Chain the mbufs via m_nextpkt so one txsd covers them all. */
		if (last != NULL)
			last->m_nextpkt = m;
		last = m;
	}

	txq->sgl_wrs++;
	txq->txpkts1_pkts += txp->npkt;
	txq->txpkts1_wrs++;

	txsd = &txq->sdesc[eq->pidx];
	txsd->m = txp->mb[0];
	txsd->desc_used = ndesc;

	return (ndesc);
}

/*
 * If the SGL ends on an address that is not 16 byte aligned, this function will
 * add a 0 filled flit at the end.
 *
 * Writes m's gather list as a ULPTX SGL at *to and advances *to past it.
 * checkwrap must be set if the SGL might run off the end of the ring, in
 * which case the write continues at the front of the ring.
 */
static void
write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap)
{
	struct sge_eq *eq = &txq->eq;
	struct sglist *gl = txq->gl;
	struct sglist_seg *seg;
	__be64 *flitp, *wrap;
	struct ulptx_sgl *usgl;
	int i, nflits, nsegs;

	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);

	get_pkt_gl(m, gl);
	nsegs = gl->sg_nseg;
	MPASS(nsegs > 0);

	/* 2 flits for the header+seg0, then 3 flits per 2 additional segs. */
	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
	flitp = (__be64 *)(*to);
	wrap = (__be64 *)(&eq->desc[eq->sidx]);
	seg = &gl->sg_segs[0];
	usgl = (void *)flitp;

	/*
	 * We start at a 16 byte boundary somewhere inside the tx descriptor
	 * ring, so we're at least 16 bytes away from the status page.  There is
	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
	 */

	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(seg->ss_len);
	usgl->addr0 = htobe64(seg->ss_paddr);
	seg++;

	if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) {

		/* Won't wrap around at all */

		for (i = 0; i < nsegs - 1; i++, seg++) {
			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
		}
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
		flitp += nflits;
	} else {

		/* Will wrap somewhere in the rest of the SGL */

		/* 2 flits already written, write the rest flit by flit */
		flitp = (void *)(usgl + 1);
		for (i = 0; i < nflits - 2; i++) {
			if (flitp == wrap)
				flitp = (void *)eq->desc;
			*flitp++ = get_flit(seg, nsegs - 1, i);
		}
	}

	/* Pad to a 16B boundary with a zero flit, as promised above. */
	if (nflits & 1) {
		MPASS(((uintptr_t)flitp) & 0xf);
		*flitp++ = 0;
	}

	MPASS((((uintptr_t)flitp) & 0xf) == 0);
	if (__predict_false(flitp == wrap))
		*to = (void *)eq->desc;
	else
		*to = (void *)flitp;
}

/*
 * Copy len bytes from 'from' into the descriptor ring at *to, wrapping to
 * the start of the ring if the copy would run past eq->desc[eq->sidx].
 * *to is advanced past the copied bytes.
 */
static inline void
copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
{

	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);

	if (__predict_true((uintptr_t)(*to) + len <=
	    (uintptr_t)&eq->desc[eq->sidx])) {
		bcopy(from, *to, len);
		(*to) += len;
	} else {
		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);

		bcopy(from, *to, portion);
		from += portion;
		portion = len - portion;	/* remaining */
		bcopy(from, (void *)eq->desc, portion);
		(*to) = (caddr_t)eq->desc + portion;
	}
}

/*
 * Notify the hardware of n new descriptors using the best doorbell
 * mechanism enabled for this eq, then advance the sw doorbell index.
 */
static inline void
ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n)
{
	u_int db;

	MPASS(n > 0);

	db = eq->doorbells;
	/* A WCWR doorbell can only push a single descriptor. */
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	/* Descriptor writes must be visible before the doorbell. */
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;
		int i;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(eq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p",
		    __func__, eq->doorbells, n, eq->dbidx, eq));

		/* Copy the whole descriptor through the WC window. */
		dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET -
		    UDBS_DB_OFFSET);
		i = eq->dbidx;
		src = (void *)&eq->desc[i];
		while (src != (void *)&eq->desc[i + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(eq->cntxt_id) | V_PIDX(n));
		break;
	}

	IDXINCR(eq->dbidx, n, eq->sidx);
}

/*
 * # of tx descriptors the hardware has consumed that software has not yet
 * reclaimed.
 */
static inline u_int
reclaimable_tx_desc(struct sge_eq *eq)
{
	uint16_t hw_cidx;

	hw_cidx = read_hw_cidx(eq);
	return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx));
}

/*
 * Total # of descriptors that could be written right now.  One slot is
 * always kept empty so that a full ring can be told apart from an empty one.
 */
static inline u_int
total_available_tx_desc(struct sge_eq *eq)
{
	uint16_t hw_cidx, pidx;

	hw_cidx = read_hw_cidx(eq);
	pidx = eq->pidx;

	if (pidx == hw_cidx)
		return (eq->sidx - 1);
	else
		return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1);
}

/*
 * Read the consumer index that the hardware posts in the status page
 * located just past the last descriptor in the ring.
 */
static inline uint16_t
read_hw_cidx(struct sge_eq *eq)
{
	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
	uint16_t cidx = spg->cidx;	/* stable snapshot */

	return (be16toh(cidx));
}

/*
 * Reclaim 'n' descriptors approximately.
5717e874ff7aSNavdeep Parhar */ 57187951040fSNavdeep Parhar static u_int 57197951040fSNavdeep Parhar reclaim_tx_descs(struct sge_txq *txq, u_int n) 5720e874ff7aSNavdeep Parhar { 5721e874ff7aSNavdeep Parhar struct tx_sdesc *txsd; 5722f7dfe243SNavdeep Parhar struct sge_eq *eq = &txq->eq; 57237951040fSNavdeep Parhar u_int can_reclaim, reclaimed; 572454e4ee71SNavdeep Parhar 5725733b9277SNavdeep Parhar TXQ_LOCK_ASSERT_OWNED(txq); 57267951040fSNavdeep Parhar MPASS(n > 0); 5727e874ff7aSNavdeep Parhar 57287951040fSNavdeep Parhar reclaimed = 0; 57297951040fSNavdeep Parhar can_reclaim = reclaimable_tx_desc(eq); 57307951040fSNavdeep Parhar while (can_reclaim && reclaimed < n) { 573154e4ee71SNavdeep Parhar int ndesc; 57327951040fSNavdeep Parhar struct mbuf *m, *nextpkt; 573354e4ee71SNavdeep Parhar 5734f7dfe243SNavdeep Parhar txsd = &txq->sdesc[eq->cidx]; 573554e4ee71SNavdeep Parhar ndesc = txsd->desc_used; 573654e4ee71SNavdeep Parhar 573754e4ee71SNavdeep Parhar /* Firmware doesn't return "partial" credits. 
*/ 573854e4ee71SNavdeep Parhar KASSERT(can_reclaim >= ndesc, 573954e4ee71SNavdeep Parhar ("%s: unexpected number of credits: %d, %d", 574054e4ee71SNavdeep Parhar __func__, can_reclaim, ndesc)); 5741dcd50a20SJohn Baldwin KASSERT(ndesc != 0, 5742dcd50a20SJohn Baldwin ("%s: descriptor with no credits: cidx %d", 5743dcd50a20SJohn Baldwin __func__, eq->cidx)); 574454e4ee71SNavdeep Parhar 57457951040fSNavdeep Parhar for (m = txsd->m; m != NULL; m = nextpkt) { 57467951040fSNavdeep Parhar nextpkt = m->m_nextpkt; 57477951040fSNavdeep Parhar m->m_nextpkt = NULL; 57487951040fSNavdeep Parhar m_freem(m); 57497951040fSNavdeep Parhar } 575054e4ee71SNavdeep Parhar reclaimed += ndesc; 575154e4ee71SNavdeep Parhar can_reclaim -= ndesc; 57527951040fSNavdeep Parhar IDXINCR(eq->cidx, ndesc, eq->sidx); 575354e4ee71SNavdeep Parhar } 575454e4ee71SNavdeep Parhar 575554e4ee71SNavdeep Parhar return (reclaimed); 575654e4ee71SNavdeep Parhar } 575754e4ee71SNavdeep Parhar 575854e4ee71SNavdeep Parhar static void 57597951040fSNavdeep Parhar tx_reclaim(void *arg, int n) 576054e4ee71SNavdeep Parhar { 57617951040fSNavdeep Parhar struct sge_txq *txq = arg; 57627951040fSNavdeep Parhar struct sge_eq *eq = &txq->eq; 576354e4ee71SNavdeep Parhar 57647951040fSNavdeep Parhar do { 57657951040fSNavdeep Parhar if (TXQ_TRYLOCK(txq) == 0) 57667951040fSNavdeep Parhar break; 57677951040fSNavdeep Parhar n = reclaim_tx_descs(txq, 32); 57687951040fSNavdeep Parhar if (eq->cidx == eq->pidx) 57697951040fSNavdeep Parhar eq->equeqidx = eq->pidx; 57707951040fSNavdeep Parhar TXQ_UNLOCK(txq); 57717951040fSNavdeep Parhar } while (n > 0); 577254e4ee71SNavdeep Parhar } 577354e4ee71SNavdeep Parhar 577454e4ee71SNavdeep Parhar static __be64 57757951040fSNavdeep Parhar get_flit(struct sglist_seg *segs, int nsegs, int idx) 577654e4ee71SNavdeep Parhar { 577754e4ee71SNavdeep Parhar int i = (idx / 3) * 2; 577854e4ee71SNavdeep Parhar 577954e4ee71SNavdeep Parhar switch (idx % 3) { 578054e4ee71SNavdeep Parhar case 0: { 5781f078ecf6SWojciech 
		uint64_t rc;

		rc = (uint64_t)segs[i].ss_len << 32;
		if (i + 1 < nsegs)
			rc |= (uint64_t)(segs[i + 1].ss_len);

		return (htobe64(rc));
	}
	case 1:
		return (htobe64(segs[i].ss_paddr));
	case 2:
		return (htobe64(segs[i + 1].ss_paddr));
	}

	return (0);
}

/*
 * Pick the rx buffer zone to refill a freelist from.  Returns the index of
 * the smallest eligible zone whose usable size covers 'maxp', otherwise the
 * largest eligible zone seen (zidx), or -1 if none qualifies.  Zones larger
 * than largest_rx_cluster are never used.
 */
static int
find_refill_source(struct adapter *sc, int maxp, bool packing)
{
	int i, zidx = -1;
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0];

	if (packing) {
		for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
			if (rxb->hwidx2 == -1)
				continue;
			if (rxb->size1 < PAGE_SIZE &&
			    rxb->size1 < largest_rx_cluster)
				continue;
			if (rxb->size1 > largest_rx_cluster)
				break;
			MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE);
			if (rxb->size2 >= maxp)
				return (i);
			zidx = i;
		}
	} else {
		for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
			if (rxb->hwidx1 == -1)
				continue;
			if (rxb->size1 > largest_rx_cluster)
				break;
			if (rxb->size1 >= maxp)
				return (i);
			zidx = i;
		}
	}

	return (zidx);
}

/*
 * Put a starving freelist on the adapter's starving-freelist queue and make
 * sure the refill callout is scheduled.  Doomed freelists are left alone.
 */
static void
add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	if ((fl->flags & FL_DOOMED) == 0) {
		fl->flags |= FL_STARVING;
		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
	}
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);
}

/* Egress-update handler for a work request queue: kick its tx task. */
static void
handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq)
{
	struct sge_wrq *wrq = (void *)eq;

	atomic_readandclear_int(&eq->equiq);
	taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task);
}

/*
 * Egress-update handler for an ethernet tx queue: reclaim via task if the
 * ring is idle, otherwise let the drainage check do the work inline.
 */
static void
handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq)
{
	struct sge_txq *txq = (void *)eq;

	MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH);

	atomic_readandclear_int(&eq->equiq);
	if (mp_ring_is_idle(txq->r))
		taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task);
	else
		mp_ring_check_drainage(txq->r, 64);
}

/*
 * CPL_SGE_EGR_UPDATE handler: look up the eq by qid and dispatch to the
 * handler matching its type (indexed by the EQ_TYPEMASK bits; slot 0 is
 * unused).
 */
static int
handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
	struct adapter *sc = iq->adapter;
	struct sge *s = &sc->sge;
	struct sge_eq *eq;
	static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
	    &handle_wrq_egr_update, &handle_eth_egr_update,
	    &handle_wrq_egr_update};

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	eq = s->eqmap[qid - s->eq_start - s->eq_base];
	(*h[eq->flags & EQ_TYPEMASK])(sc, eq);

	return (0);
}

/* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */
CTASSERT(offsetof(struct cpl_fw4_msg, data) == \
    offsetof(struct cpl_fw6_msg, data));

/*
 * Firmware message handler.  An encapsulated RSS CPL is re-dispatched
 * through t4_cpl_handler; everything else goes to the registered fw message
 * handler for its type.
 */
static int
handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
		const struct rss_header *rss2;

		rss2 = (const struct rss_header *)&cpl->data[0];
		return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
	}

	return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
}

/**
 *	t4_handle_wrerr_rpl - process a FW work request error message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 */
static int
t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_error_cmd *e = (const void *)rpl;
	unsigned int i;

	if (opcode != FW_ERROR_CMD) {
		log(LOG_ERR,
		    "%s: Received WRERR_RPL message with opcode %#x\n",
		    device_get_nameunit(adap->dev), opcode);
		return (EINVAL);
	}
	log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev),
	    G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" :
"fatal" : 5935069af0ebSJohn Baldwin "non-fatal"); 5936069af0ebSJohn Baldwin switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { 5937069af0ebSJohn Baldwin case FW_ERROR_TYPE_EXCEPTION: 5938069af0ebSJohn Baldwin log(LOG_ERR, "exception info:\n"); 5939069af0ebSJohn Baldwin for (i = 0; i < nitems(e->u.exception.info); i++) 5940069af0ebSJohn Baldwin log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ", 5941069af0ebSJohn Baldwin be32toh(e->u.exception.info[i])); 5942069af0ebSJohn Baldwin log(LOG_ERR, "\n"); 5943069af0ebSJohn Baldwin break; 5944069af0ebSJohn Baldwin case FW_ERROR_TYPE_HWMODULE: 5945069af0ebSJohn Baldwin log(LOG_ERR, "HW module regaddr %08x regval %08x\n", 5946069af0ebSJohn Baldwin be32toh(e->u.hwmodule.regaddr), 5947069af0ebSJohn Baldwin be32toh(e->u.hwmodule.regval)); 5948069af0ebSJohn Baldwin break; 5949069af0ebSJohn Baldwin case FW_ERROR_TYPE_WR: 5950069af0ebSJohn Baldwin log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n", 5951069af0ebSJohn Baldwin be16toh(e->u.wr.cidx), 5952069af0ebSJohn Baldwin G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), 5953069af0ebSJohn Baldwin G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), 5954069af0ebSJohn Baldwin be32toh(e->u.wr.eqid)); 5955069af0ebSJohn Baldwin for (i = 0; i < nitems(e->u.wr.wrhdr); i++) 5956069af0ebSJohn Baldwin log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ", 5957069af0ebSJohn Baldwin e->u.wr.wrhdr[i]); 5958069af0ebSJohn Baldwin log(LOG_ERR, "\n"); 5959069af0ebSJohn Baldwin break; 5960069af0ebSJohn Baldwin case FW_ERROR_TYPE_ACL: 5961069af0ebSJohn Baldwin log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s", 5962069af0ebSJohn Baldwin be16toh(e->u.acl.cidx), 5963069af0ebSJohn Baldwin G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), 5964069af0ebSJohn Baldwin G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), 5965069af0ebSJohn Baldwin be32toh(e->u.acl.eqid), 5966069af0ebSJohn Baldwin G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? 
"vlanid" : 5967069af0ebSJohn Baldwin "MAC"); 5968069af0ebSJohn Baldwin for (i = 0; i < nitems(e->u.acl.val); i++) 5969069af0ebSJohn Baldwin log(LOG_ERR, " %02x", e->u.acl.val[i]); 5970069af0ebSJohn Baldwin log(LOG_ERR, "\n"); 5971069af0ebSJohn Baldwin break; 5972069af0ebSJohn Baldwin default: 5973069af0ebSJohn Baldwin log(LOG_ERR, "type %#x\n", 5974069af0ebSJohn Baldwin G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); 5975069af0ebSJohn Baldwin return (EINVAL); 5976069af0ebSJohn Baldwin } 5977069af0ebSJohn Baldwin return (0); 5978069af0ebSJohn Baldwin } 5979069af0ebSJohn Baldwin 5980*8eba75edSNavdeep Parhar int 598156599263SNavdeep Parhar sysctl_uint16(SYSCTL_HANDLER_ARGS) 5982af49c942SNavdeep Parhar { 5983af49c942SNavdeep Parhar uint16_t *id = arg1; 5984af49c942SNavdeep Parhar int i = *id; 5985af49c942SNavdeep Parhar 5986af49c942SNavdeep Parhar return sysctl_handle_int(oidp, &i, 0, req); 5987af49c942SNavdeep Parhar } 598838035ed6SNavdeep Parhar 598946e1e307SNavdeep Parhar static inline bool 599046e1e307SNavdeep Parhar bufidx_used(struct adapter *sc, int idx) 599146e1e307SNavdeep Parhar { 599246e1e307SNavdeep Parhar struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; 599346e1e307SNavdeep Parhar int i; 599446e1e307SNavdeep Parhar 599546e1e307SNavdeep Parhar for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 599646e1e307SNavdeep Parhar if (rxb->size1 > largest_rx_cluster) 599746e1e307SNavdeep Parhar continue; 599846e1e307SNavdeep Parhar if (rxb->hwidx1 == idx || rxb->hwidx2 == idx) 599946e1e307SNavdeep Parhar return (true); 600046e1e307SNavdeep Parhar } 600146e1e307SNavdeep Parhar 600246e1e307SNavdeep Parhar return (false); 600346e1e307SNavdeep Parhar } 600446e1e307SNavdeep Parhar 600538035ed6SNavdeep Parhar static int 600638035ed6SNavdeep Parhar sysctl_bufsizes(SYSCTL_HANDLER_ARGS) 600738035ed6SNavdeep Parhar { 600846e1e307SNavdeep Parhar struct adapter *sc = arg1; 600946e1e307SNavdeep Parhar struct sge_params *sp = &sc->params.sge; 601038035ed6SNavdeep Parhar int i, rc; 
601138035ed6SNavdeep Parhar struct sbuf sb; 601238035ed6SNavdeep Parhar char c; 601338035ed6SNavdeep Parhar 601446e1e307SNavdeep Parhar sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND); 601546e1e307SNavdeep Parhar for (i = 0; i < SGE_FLBUF_SIZES; i++) { 601646e1e307SNavdeep Parhar if (bufidx_used(sc, i)) 601738035ed6SNavdeep Parhar c = '*'; 601838035ed6SNavdeep Parhar else 601938035ed6SNavdeep Parhar c = '\0'; 602038035ed6SNavdeep Parhar 602146e1e307SNavdeep Parhar sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c); 602238035ed6SNavdeep Parhar } 602338035ed6SNavdeep Parhar sbuf_trim(&sb); 602438035ed6SNavdeep Parhar sbuf_finish(&sb); 602538035ed6SNavdeep Parhar rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 602638035ed6SNavdeep Parhar sbuf_delete(&sb); 602738035ed6SNavdeep Parhar return (rc); 602838035ed6SNavdeep Parhar } 602902f972e8SNavdeep Parhar 6030786099deSNavdeep Parhar #ifdef RATELIMIT 6031786099deSNavdeep Parhar /* 6032786099deSNavdeep Parhar * len16 for a txpkt WR with a GL. Includes the firmware work request header. 
 */
static inline u_int
txpkt_eo_len16(u_int nsegs, u_int immhdrs, u_int tso)
{
	u_int n;

	MPASS(immhdrs > 0);

	/* WR header + CPL + immediate headers, rounded to a 16B boundary. */
	n = roundup2(sizeof(struct fw_eth_tx_eo_wr) +
	    sizeof(struct cpl_tx_pkt_core) + immhdrs, 16);
	if (__predict_false(nsegs == 0))
		goto done;

	nsegs--; /* first segment is part of ulptx_sgl */
	n += sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	if (tso)
		n += sizeof(struct cpl_tx_pkt_lso_core);

done:
	return (howmany(n, 16));
}

#define ETID_FLOWC_NPARAMS 6
#define ETID_FLOWC_LEN (roundup2((sizeof(struct fw_flowc_wr) + \
    ETID_FLOWC_NPARAMS * sizeof(struct fw_flowc_mnemval)), 16))
#define ETID_FLOWC_LEN16 (howmany(ETID_FLOWC_LEN, 16))

/*
 * Send the FLOWC work request that establishes the firmware state for this
 * etid.  Must be the first WR on the tag; moves the tag from FLOWC_PENDING
 * to FLOWC_RPL_PENDING and charges ETID_FLOWC_LEN16 tx credits.  Returns
 * ENOMEM if no space is available on the work request queue.
 */
static int
send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
    struct vi_info *vi)
{
	struct wrq_cookie cookie;
	u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN;
	struct fw_flowc_wr *flowc;

	mtx_assert(&cst->lock, MA_OWNED);
	MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) ==
	    EO_FLOWC_PENDING);

	flowc = start_wrq_wr(cst->eo_txq, ETID_FLOWC_LEN16, &cookie);
	if (__predict_false(flowc == NULL))
		return (ENOMEM);

	bzero(flowc, ETID_FLOWC_LEN);
	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) |
	    V_FW_WR_FLOWID(cst->etid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(cst->iqid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
	flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
	flowc->mnemval[5].val = htobe32(cst->schedcl);

	commit_wrq_wr(cst->eo_txq, flowc, &cookie);

	cst->flags &= ~EO_FLOWC_PENDING;
	cst->flags |= EO_FLOWC_RPL_PENDING;
	MPASS(cst->tx_credits >= ETID_FLOWC_LEN16);	/* flowc is first WR. */
	cst->tx_credits -= ETID_FLOWC_LEN16;

	return (0);
}

#define ETID_FLUSH_LEN16 (howmany(sizeof (struct fw_flowc_wr), 16))

/*
 * Send a zero-parameter FLOWC with a completion request to flush the etid.
 * Unlike send_etid_flowc_wr this cannot fail gracefully (start_wrq_wr
 * returning NULL hits CXGBE_UNIMPLEMENTED).  Charges ETID_FLUSH_LEN16
 * credits and counts an outstanding completion.
 */
void
send_etid_flush_wr(struct cxgbe_rate_tag *cst)
{
	struct fw_flowc_wr *flowc;
	struct wrq_cookie cookie;

	mtx_assert(&cst->lock, MA_OWNED);

	flowc = start_wrq_wr(cst->eo_txq, ETID_FLUSH_LEN16, &cookie);
	if (__predict_false(flowc == NULL))
		CXGBE_UNIMPLEMENTED(__func__);

	bzero(flowc, ETID_FLUSH_LEN16 * 16);
	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(0) | F_FW_WR_COMPL);
	flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) |
	    V_FW_WR_FLOWID(cst->etid));

	commit_wrq_wr(cst->eo_txq, flowc, &cookie);

	cst->flags |= EO_FLUSH_RPL_PENDING;
	MPASS(cst->tx_credits >= ETID_FLUSH_LEN16);
	cst->tx_credits -= ETID_FLUSH_LEN16;
	cst->ncompl++;
}

/*
 * Fill in an FW_ETH_TX_EO_WR for mbuf m0: WR header, UDPSEG or TCPSEG
 * sub-header (with optional LSO), CPL, immediate L2/L3/L4 headers, and the
 * SGL for the payload.
 */
static void
write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr,
    struct mbuf *m0, int compl)
{
	struct cpl_tx_pkt_core
	    *cpl;
	uint64_t ctrl1;
	uint32_t ctrl;		/* used in many unrelated places */
	int len16, pktlen, nsegs, immhdrs;
	caddr_t dst;
	uintptr_t p;
	struct ulptx_sgl *usgl;
	struct sglist sg;
	struct sglist_seg segs[38];	/* XXX: find real limit.  XXX: get off the stack */

	mtx_assert(&cst->lock, MA_OWNED);
	M_ASSERTPKTHDR(m0);
	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
	    m0->m_pkthdr.l4hlen > 0,
	    ("%s: ethofld mbuf %p is missing header lengths", __func__, m0));

	len16 = mbuf_eo_len16(m0);
	nsegs = mbuf_eo_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen;
	ctrl += immhdrs;

	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) |
	    V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl));
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) |
	    V_FW_WR_FLOWID(cst->etid));
	wr->r3 = 0;
	if (needs_outer_udp_csum(m0)) {
		wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
		wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen;
		wr->u.udpseg.rtplen = 0;
		wr->u.udpseg.r4 = 0;
		wr->u.udpseg.mss = htobe16(pktlen - immhdrs);
		wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
		wr->u.udpseg.plen = htobe32(pktlen - immhdrs);
		cpl = (void *)(wr + 1);
	} else {
		MPASS(needs_outer_tcp_csum(m0));
		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
		wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen;
		wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0);
		wr->u.tcpseg.r4 = 0;
		wr->u.tcpseg.r5 = 0;
		wr->u.tcpseg.plen = htobe32(pktlen - immhdrs);

		if (needs_tso(m0)) {
			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

			wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz);

			ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
			    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
			    V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
			    ETHER_HDR_LEN) >> 2) |
			    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
			    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
			if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
				ctrl |= F_LSO_IPV6;
			lso->lso_ctrl = htobe32(ctrl);
			lso->ipid_ofst = htobe16(0);
			lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
			lso->seqno_offset = htobe32(0);
			lso->len = htobe32(pktlen);

			cpl = (void *)(lso + 1);
		} else {
			wr->u.tcpseg.mss = htobe16(0xffff);
			cpl = (void *)(wr + 1);
		}
	}

	/* Checksum offload must be requested for ethofld. */
	MPASS(needs_outer_l4_csum(m0));
	ctrl1 = csum_to_ctrl(cst->adapter, m0);

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
	}

	/* CPL header */
	cpl->ctrl0 = cst->ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* Copy Ethernet, IP & TCP/UDP hdrs as immediate data */
	p = (uintptr_t)(cpl + 1);
	m_copydata(m0, 0, immhdrs, (void *)p);

	/* SGL */
	dst = (void *)(cpl + 1);
	if (nsegs > 0) {
		int i, pad;

		/* zero-pad upto next 16Byte boundary, if not 16Byte aligned */
		p += immhdrs;
		/*
		 * NOTE(review): when immhdrs is already 16B aligned this
		 * yields pad == 16 (a full extra flit of zeros) rather than
		 * 0 — confirm this matches the len16 accounting in
		 * txpkt_eo_len16, which uses roundup2.
		 */
		pad = 16 - (immhdrs & 0xf);
		bzero((void *)p, pad);

		usgl = (void *)(p + pad);
		usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
		    V_ULPTX_NSGE(nsegs));

		/* Build the SGL, skipping the immhdrs bytes already copied. */
		sglist_init(&sg, nitems(segs), segs);
		for (; m0 != NULL; m0 = m0->m_next) {
			if (__predict_false(m0->m_len == 0))
				continue;
			if (immhdrs >= m0->m_len) {
				immhdrs -= m0->m_len;
				continue;
			}
			if (m0->m_flags & M_EXTPG)
				sglist_append_mbuf_epg(&sg, m0,
				    mtod(m0, vm_offset_t), m0->m_len);
			else
				sglist_append(&sg, mtod(m0, char *) + immhdrs,
				    m0->m_len - immhdrs);
			immhdrs = 0;
		}
		MPASS(sg.sg_nseg == nsegs);

		/*
		 * Zero pad last 8B in case the WR doesn't end on a 16B
		 * boundary.
		 */
		*(uint64_t *)((char *)wr + len16 * 16 - 8) = 0;

		usgl->len0 = htobe32(segs[0].ss_len);
		usgl->addr0 = htobe64(segs[0].ss_paddr);
		for (i = 0; i < nsegs - 1; i++) {
			usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len);
			usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr);
		}
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
	}

}

/*
 * Drain the tag's pending_tx queue: for each mbuf with enough tx credits,
 * write and commit an ethofld WR, then move the mbuf to pending_fwack to
 * await the firmware ack that returns its credits.
 */
static void
ethofld_tx(struct cxgbe_rate_tag *cst)
{
	struct mbuf *m;
	struct wrq_cookie cookie;
	int next_credits, compl;
	struct fw_eth_tx_eo_wr *wr;

	mtx_assert(&cst->lock, MA_OWNED);

	while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
		M_ASSERTPKTHDR(m);

		/* How many len16 credits do we need to send this mbuf. */
		next_credits = mbuf_eo_len16(m);
		MPASS(next_credits > 0);
		if (next_credits > cst->tx_credits) {
			/*
			 * Tx will make progress eventually because there is at
			 * least one outstanding fw4_ack that will return
			 * credits and kick the tx.
6305786099deSNavdeep Parhar */ 6306786099deSNavdeep Parhar MPASS(cst->ncompl > 0); 6307786099deSNavdeep Parhar return; 6308786099deSNavdeep Parhar } 6309786099deSNavdeep Parhar wr = start_wrq_wr(cst->eo_txq, next_credits, &cookie); 6310786099deSNavdeep Parhar if (__predict_false(wr == NULL)) { 6311786099deSNavdeep Parhar /* XXX: wishful thinking, not a real assertion. */ 6312786099deSNavdeep Parhar MPASS(cst->ncompl > 0); 6313786099deSNavdeep Parhar return; 6314786099deSNavdeep Parhar } 6315786099deSNavdeep Parhar cst->tx_credits -= next_credits; 6316786099deSNavdeep Parhar cst->tx_nocompl += next_credits; 6317786099deSNavdeep Parhar compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2; 631856fb710fSJohn Baldwin ETHER_BPF_MTAP(cst->com.ifp, m); 6319786099deSNavdeep Parhar write_ethofld_wr(cst, wr, m, compl); 6320786099deSNavdeep Parhar commit_wrq_wr(cst->eo_txq, wr, &cookie); 6321786099deSNavdeep Parhar if (compl) { 6322786099deSNavdeep Parhar cst->ncompl++; 6323786099deSNavdeep Parhar cst->tx_nocompl = 0; 6324786099deSNavdeep Parhar } 6325786099deSNavdeep Parhar (void) mbufq_dequeue(&cst->pending_tx); 6326fb3bc596SJohn Baldwin 6327fb3bc596SJohn Baldwin /* 6328fb3bc596SJohn Baldwin * Drop the mbuf's reference on the tag now rather 6329fb3bc596SJohn Baldwin * than waiting until m_freem(). This ensures that 6330e38a50e8SJohn Baldwin * cxgbe_rate_tag_free gets called when the inp drops 6331fb3bc596SJohn Baldwin * its reference on the tag and there are no more 6332fb3bc596SJohn Baldwin * mbufs in the pending_tx queue and can flush any 6333fb3bc596SJohn Baldwin * pending requests. Otherwise if the last mbuf 6334fb3bc596SJohn Baldwin * doesn't request a completion the etid will never be 6335fb3bc596SJohn Baldwin * released. 
6336fb3bc596SJohn Baldwin */ 6337fb3bc596SJohn Baldwin m->m_pkthdr.snd_tag = NULL; 6338fb3bc596SJohn Baldwin m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; 633956fb710fSJohn Baldwin m_snd_tag_rele(&cst->com); 6340fb3bc596SJohn Baldwin 6341786099deSNavdeep Parhar mbufq_enqueue(&cst->pending_fwack, m); 6342786099deSNavdeep Parhar } 6343786099deSNavdeep Parhar } 6344786099deSNavdeep Parhar 6345786099deSNavdeep Parhar int 6346786099deSNavdeep Parhar ethofld_transmit(struct ifnet *ifp, struct mbuf *m0) 6347786099deSNavdeep Parhar { 6348e38a50e8SJohn Baldwin struct cxgbe_rate_tag *cst; 6349786099deSNavdeep Parhar int rc; 6350786099deSNavdeep Parhar 6351786099deSNavdeep Parhar MPASS(m0->m_nextpkt == NULL); 6352fb3bc596SJohn Baldwin MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG); 6353786099deSNavdeep Parhar MPASS(m0->m_pkthdr.snd_tag != NULL); 6354e38a50e8SJohn Baldwin cst = mst_to_crt(m0->m_pkthdr.snd_tag); 6355786099deSNavdeep Parhar 6356786099deSNavdeep Parhar mtx_lock(&cst->lock); 6357786099deSNavdeep Parhar MPASS(cst->flags & EO_SND_TAG_REF); 6358786099deSNavdeep Parhar 6359786099deSNavdeep Parhar if (__predict_false(cst->flags & EO_FLOWC_PENDING)) { 6360786099deSNavdeep Parhar struct vi_info *vi = ifp->if_softc; 6361786099deSNavdeep Parhar struct port_info *pi = vi->pi; 6362786099deSNavdeep Parhar struct adapter *sc = pi->adapter; 6363786099deSNavdeep Parhar const uint32_t rss_mask = vi->rss_size - 1; 6364786099deSNavdeep Parhar uint32_t rss_hash; 6365786099deSNavdeep Parhar 6366786099deSNavdeep Parhar cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq]; 6367786099deSNavdeep Parhar if (M_HASHTYPE_ISHASH(m0)) 6368786099deSNavdeep Parhar rss_hash = m0->m_pkthdr.flowid; 6369786099deSNavdeep Parhar else 6370786099deSNavdeep Parhar rss_hash = arc4random(); 6371786099deSNavdeep Parhar /* We assume RSS hashing */ 6372786099deSNavdeep Parhar cst->iqid = vi->rss[rss_hash & rss_mask]; 6373786099deSNavdeep Parhar cst->eo_txq += rss_hash % vi->nofldtxq; 6374786099deSNavdeep Parhar rc = 
send_etid_flowc_wr(cst, pi, vi); 6375786099deSNavdeep Parhar if (rc != 0) 6376786099deSNavdeep Parhar goto done; 6377786099deSNavdeep Parhar } 6378786099deSNavdeep Parhar 6379786099deSNavdeep Parhar if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) { 6380786099deSNavdeep Parhar rc = ENOBUFS; 6381786099deSNavdeep Parhar goto done; 6382786099deSNavdeep Parhar } 6383786099deSNavdeep Parhar 6384786099deSNavdeep Parhar mbufq_enqueue(&cst->pending_tx, m0); 6385786099deSNavdeep Parhar cst->plen += m0->m_pkthdr.len; 6386786099deSNavdeep Parhar 6387fb3bc596SJohn Baldwin /* 6388fb3bc596SJohn Baldwin * Hold an extra reference on the tag while generating work 6389fb3bc596SJohn Baldwin * requests to ensure that we don't try to free the tag during 6390fb3bc596SJohn Baldwin * ethofld_tx() in case we are sending the final mbuf after 6391fb3bc596SJohn Baldwin * the inp was freed. 6392fb3bc596SJohn Baldwin */ 639356fb710fSJohn Baldwin m_snd_tag_ref(&cst->com); 6394786099deSNavdeep Parhar ethofld_tx(cst); 6395fb3bc596SJohn Baldwin mtx_unlock(&cst->lock); 639656fb710fSJohn Baldwin m_snd_tag_rele(&cst->com); 6397fb3bc596SJohn Baldwin return (0); 6398fb3bc596SJohn Baldwin 6399786099deSNavdeep Parhar done: 6400786099deSNavdeep Parhar mtx_unlock(&cst->lock); 6401786099deSNavdeep Parhar if (__predict_false(rc != 0)) 6402786099deSNavdeep Parhar m_freem(m0); 6403786099deSNavdeep Parhar return (rc); 6404786099deSNavdeep Parhar } 6405786099deSNavdeep Parhar 6406786099deSNavdeep Parhar static int 6407786099deSNavdeep Parhar ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) 6408786099deSNavdeep Parhar { 6409786099deSNavdeep Parhar struct adapter *sc = iq->adapter; 6410786099deSNavdeep Parhar const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 6411786099deSNavdeep Parhar struct mbuf *m; 6412786099deSNavdeep Parhar u_int etid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 6413e38a50e8SJohn Baldwin struct cxgbe_rate_tag *cst; 
6414786099deSNavdeep Parhar uint8_t credits = cpl->credits; 6415786099deSNavdeep Parhar 6416786099deSNavdeep Parhar cst = lookup_etid(sc, etid); 6417786099deSNavdeep Parhar mtx_lock(&cst->lock); 6418786099deSNavdeep Parhar if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) { 6419786099deSNavdeep Parhar MPASS(credits >= ETID_FLOWC_LEN16); 6420786099deSNavdeep Parhar credits -= ETID_FLOWC_LEN16; 6421786099deSNavdeep Parhar cst->flags &= ~EO_FLOWC_RPL_PENDING; 6422786099deSNavdeep Parhar } 6423786099deSNavdeep Parhar 6424786099deSNavdeep Parhar KASSERT(cst->ncompl > 0, 6425786099deSNavdeep Parhar ("%s: etid %u (%p) wasn't expecting completion.", 6426786099deSNavdeep Parhar __func__, etid, cst)); 6427786099deSNavdeep Parhar cst->ncompl--; 6428786099deSNavdeep Parhar 6429786099deSNavdeep Parhar while (credits > 0) { 6430786099deSNavdeep Parhar m = mbufq_dequeue(&cst->pending_fwack); 6431786099deSNavdeep Parhar if (__predict_false(m == NULL)) { 6432786099deSNavdeep Parhar /* 6433786099deSNavdeep Parhar * The remaining credits are for the final flush that 6434786099deSNavdeep Parhar * was issued when the tag was freed by the kernel. 6435786099deSNavdeep Parhar */ 6436786099deSNavdeep Parhar MPASS((cst->flags & 6437786099deSNavdeep Parhar (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) == 6438786099deSNavdeep Parhar EO_FLUSH_RPL_PENDING); 6439786099deSNavdeep Parhar MPASS(credits == ETID_FLUSH_LEN16); 6440786099deSNavdeep Parhar MPASS(cst->tx_credits + cpl->credits == cst->tx_total); 6441786099deSNavdeep Parhar MPASS(cst->ncompl == 0); 6442786099deSNavdeep Parhar 6443786099deSNavdeep Parhar cst->flags &= ~EO_FLUSH_RPL_PENDING; 6444786099deSNavdeep Parhar cst->tx_credits += cpl->credits; 6445e38a50e8SJohn Baldwin cxgbe_rate_tag_free_locked(cst); 6446786099deSNavdeep Parhar return (0); /* cst is gone. 
*/ 6447786099deSNavdeep Parhar } 6448786099deSNavdeep Parhar KASSERT(m != NULL, 6449786099deSNavdeep Parhar ("%s: too many credits (%u, %u)", __func__, cpl->credits, 6450786099deSNavdeep Parhar credits)); 6451786099deSNavdeep Parhar KASSERT(credits >= mbuf_eo_len16(m), 6452786099deSNavdeep Parhar ("%s: too few credits (%u, %u, %u)", __func__, 6453786099deSNavdeep Parhar cpl->credits, credits, mbuf_eo_len16(m))); 6454786099deSNavdeep Parhar credits -= mbuf_eo_len16(m); 6455786099deSNavdeep Parhar cst->plen -= m->m_pkthdr.len; 6456786099deSNavdeep Parhar m_freem(m); 6457786099deSNavdeep Parhar } 6458786099deSNavdeep Parhar 6459786099deSNavdeep Parhar cst->tx_credits += cpl->credits; 6460786099deSNavdeep Parhar MPASS(cst->tx_credits <= cst->tx_total); 6461786099deSNavdeep Parhar 6462fb3bc596SJohn Baldwin if (cst->flags & EO_SND_TAG_REF) { 6463fb3bc596SJohn Baldwin /* 6464fb3bc596SJohn Baldwin * As with ethofld_transmit(), hold an extra reference 6465fb3bc596SJohn Baldwin * so that the tag is stable across ethold_tx(). 6466fb3bc596SJohn Baldwin */ 646756fb710fSJohn Baldwin m_snd_tag_ref(&cst->com); 6468786099deSNavdeep Parhar m = mbufq_first(&cst->pending_tx); 6469786099deSNavdeep Parhar if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m)) 6470786099deSNavdeep Parhar ethofld_tx(cst); 6471786099deSNavdeep Parhar mtx_unlock(&cst->lock); 647256fb710fSJohn Baldwin m_snd_tag_rele(&cst->com); 6473fb3bc596SJohn Baldwin } else { 6474fb3bc596SJohn Baldwin /* 6475fb3bc596SJohn Baldwin * There shouldn't be any pending packets if the tag 6476fb3bc596SJohn Baldwin * was freed by the kernel since any pending packet 6477fb3bc596SJohn Baldwin * should hold a reference to the tag. 
6478fb3bc596SJohn Baldwin */ 6479fb3bc596SJohn Baldwin MPASS(mbufq_first(&cst->pending_tx) == NULL); 6480fb3bc596SJohn Baldwin mtx_unlock(&cst->lock); 6481fb3bc596SJohn Baldwin } 6482786099deSNavdeep Parhar 6483786099deSNavdeep Parhar return (0); 6484786099deSNavdeep Parhar } 6485786099deSNavdeep Parhar #endif 6486