/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Transmit (TX) path declarations for the sfxge driver: TSO and DMA
 * mapping limits, descriptor buffer-mapping bookkeeping, the deferred
 * packet list, per-queue locking macros, and the TX queue structure.
 */

#ifndef _SFXGE_TX_H
#define _SFXGE_TX_H

/*
 * NOTE(review): presumably pulled in for the IP/TCP header structures used
 * by the TSO code in sfxge_tx.c -- confirm against that file.
 */
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

/* Maximum size of TSO packet */
#define	SFXGE_TSO_MAX_SIZE		(65535)

/*
 * Maximum number of segments to be created for a TSO packet.
 * Allow for a reasonable minimum MSS of 512.
 */
#define	SFXGE_TSO_MAX_SEGS		howmany(SFXGE_TSO_MAX_SIZE, 512)

/*
 * Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters.  (The chain could be longer than this initially, but can
 * be shortened with m_collapse().)
 */
#define	SFXGE_TX_MAPPING_MAX_SEG \
	(1 + howmany(SFXGE_TSO_MAX_SIZE, MCLBYTES))

/*
 * Buffer mapping flags.
 *
 * Buffers and DMA mappings must be freed when the last descriptor
 * referring to them is completed.  Set the TX_BUF_UNMAP and
 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
 * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
 * a heap buffer.
 */
enum sfxge_tx_buf_flags {
	TX_BUF_UNMAP = 1,	/* Unmap the DMA mapping on completion. */
	TX_BUF_MBUF = 2,	/* The mapped buffer is an mbuf chain
				 * (u.mbuf) rather than a heap buffer. */
};

/*
 * Buffer mapping information for descriptors in flight.
 */
struct sfxge_tx_mapping {
	union {
		struct mbuf	*mbuf;		/* Valid when TX_BUF_MBUF is set. */
		caddr_t		heap_buf;	/* Valid otherwise. */
	} u;
	bus_dmamap_t		map;		/* DMA map to unload/destroy. */
	enum sfxge_tx_buf_flags	flags;		/* See above. */
};

/* Default limits for the deferred packet list (see struct sfxge_tx_dpl). */
#define	SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT		(64 * 1024)
#define	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT	1024
#define	SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT		1024

/*
 * Deferred packet list.
 *
 * NOTE(review): the put-list head is stored as a uintptr_t rather than an
 * mbuf pointer, which suggests lock-free (atomic) linkage from contexts
 * that do not hold the queue lock -- confirm against sfxge_tx.c.
 */
struct sfxge_tx_dpl {
	unsigned int	std_get_max;		/* Maximum number of packets
						 * in get list */
	unsigned int	std_get_non_tcp_max;	/* Maximum number
						 * of non-TCP packets
						 * in get list */
	unsigned int	std_put_max;		/* Maximum number of packets
						 * in put list */
	uintptr_t	std_put;		/* Head of put list. */
	struct mbuf	*std_get;		/* Head of get list. */
	struct mbuf	**std_getp;		/* Tail of get list. */
	unsigned int	std_get_count;		/* Packets in get list. */
	unsigned int	std_get_non_tcp_count;	/* Non-TCP packets
						 * in get list */
	unsigned int	std_get_hiwat;		/* Packets in get list
						 * high watermark */
	unsigned int	std_put_hiwat;		/* Packets in put list
						 * high watermark */
};


/*
 * Per-queue buffer sizes and the copy-vs-map threshold.
 * NOTE(review): exact usage (e.g. whether COPY_THRESHOLD is compared
 * against mbuf length before DMA mapping) lives in sfxge_tx.c -- confirm.
 */
#define	SFXGE_TX_BUFFER_SIZE	0x400
#define	SFXGE_TX_HEADER_SIZE	0x100
#define	SFXGE_TX_COPY_THRESHOLD	0x200

/* TX queue lifecycle states. */
enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,
	SFXGE_TXQ_INITIALIZED,
	SFXGE_TXQ_STARTED
};

/* TX queue types, distinguished by the checksum offload they provide. */
enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,
	SFXGE_TXQ_IP_CKSUM,
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,
	SFXGE_TXQ_NTYPES	/* Number of types; not a real queue type. */
};

/*
 * Threshold (one quarter of the queue's descriptor limit) used when
 * deciding to unblock a blocked queue -- see the users in sfxge_tx.c.
 */
#define	SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)

/* Batch size used by the TX processing path (see sfxge_tx.c). */
#define	SFXGE_TX_BATCH	64

/*
 * With multiqueue (SFXGE_HAVE_MQ) each TX queue carries its own mutex;
 * otherwise all queues share a single per-softc TX lock.
 */
#ifdef SFXGE_HAVE_MQ
#define	SFXGE_TX_LOCK(txq)		(&(txq)->lock)
#else
#define	SFXGE_TX_LOCK(txq)		(&(txq)->sc->tx_lock)
#endif

/*
 * Initialise a queue's mutex, naming it "<ifname>:txq<index>" so it is
 * identifiable in lock diagnostics.  (do { } while (B_FALSE) is the usual
 * multi-statement macro guard; B_FALSE is the efx common-code boolean 0.)
 */
#define	SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index)			\
	do {								\
		struct sfxge_txq *__txq = (_txq);			\
									\
		snprintf((__txq)->lock_name,				\
			 sizeof((__txq)->lock_name),			\
			 "%s:txq%u", (_ifname), (_txq_index));		\
		mtx_init(&(__txq)->lock, (__txq)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_TXQ_LOCK_DESTROY(_txq)					\
	mtx_destroy(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK(_txq)						\
	mtx_lock(SFXGE_TX_LOCK(_txq))
#define	SFXGE_TXQ_TRYLOCK(_txq)						\
	mtx_trylock(SFXGE_TX_LOCK(_txq))
#define	SFXGE_TXQ_UNLOCK(_txq)						\
	mtx_unlock(SFXGE_TX_LOCK(_txq))
#define	SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq)				\
	mtx_assert(SFXGE_TX_LOCK(_txq), MA_OWNED)


/*
 * Per-queue TX state.  Fields are grouped and cache-line aligned by how
 * often they change and by which path (initiation vs completion) touches
 * them, to limit cache-line ping-pong between the two paths.
 */
struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;		/* Owning softc. */
	enum sfxge_txq_state		init_state;
	enum sfxge_flush_state		flush_state;
	enum sfxge_txq_type		type;		/* Checksum-offload type. */
	unsigned int			txq_index;	/* Index of this TX queue. */
	unsigned int			evq_index;	/* Associated event queue. */
	efsys_mem_t			mem;		/* Descriptor ring memory. */
	unsigned int			buf_base_id;
	unsigned int			entries;	/* Ring size (entries). */
	unsigned int			ptr_mask;	/* Ring index mask
							 * (presumably entries - 1;
							 * confirm in sfxge_tx.c). */

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;
	efx_buffer_t			*pend_desc;	/* Pending descriptors. */
	efx_txq_t			*common;	/* efx common-code queue. */

	efsys_mem_t			*tsoh_buffer;	/* TSO header buffers. */

	char				lock_name[SFXGE_LOCK_NAME_MAX];

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
#ifdef SFXGE_HAVE_MQ
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;
#else
	unsigned int			n_pend_desc __aligned(CACHE_LINE_SIZE);
#endif
	/* NOTE(review): added/reaped (and pending/completed below) look like
	 * free-running descriptor counters -- confirm against sfxge_tx.c. */
	unsigned int			added;
	unsigned int			reaped;
	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;
	unsigned long			drops;
	unsigned long			get_overflow;
	unsigned long			get_non_tcp_overflow;
	unsigned long			put_overflow;
	unsigned long			netdown_drops;
	unsigned long			tso_pdrop_too_many;
	unsigned long			tso_pdrop_no_rsrc;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;
	struct sfxge_txq		*next;	/* Next queue in completion list
						 * (confirm usage in sfxge_tx.c). */
};

struct sfxge_evq;	/* Forward declaration; defined elsewhere. */

/* Enqueue an mbuf chain on a TX queue. */
extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *);
/* Aggregate TX drop counter across the softc's queues. */
extern uint64_t sfxge_tx_get_drops(struct sfxge_softc *sc);

/* TX subsystem lifecycle. */
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
/* Event-queue callbacks into the TX path. */
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
/* ifnet entry points; the set depends on multiqueue support. */
#ifdef SFXGE_HAVE_MQ
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
#else
extern void sfxge_if_start(struct ifnet *ifp);
#endif

#endif