/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#ifndef _SFXGE_TX_H
#define _SFXGE_TX_H

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

/* If defined, parse TX packets directly in if_transmit
 * for better cache locality and reduced time under the TX lock.
 */
#define SFXGE_TX_PARSE_EARLY 1

/* Maximum size of TSO packet */
#define	SFXGE_TSO_MAX_SIZE		(65535)

/*
 * Maximum number of segments to be created for a TSO packet.
 * Allow for a reasonable minimum MSS of 512.
 */
#define	SFXGE_TSO_MAX_SEGS		howmany(SFXGE_TSO_MAX_SIZE, 512)

/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters, taking into account that the first may not be 2K cluster
 * boundary aligned.
 * The packet header may be split into two segments because of, for example,
 * VLAN header insertion.
 * The chain could be longer than this initially, but can be shortened
 * with m_collapse().
 */
#define	SFXGE_TX_MAPPING_MAX_SEG					\
	(2 + howmany(SFXGE_TSO_MAX_SIZE, MCLBYTES) + 1)
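
/*
 * For illustration only (assuming the stock 2K MCLBYTES): the bound works
 * out to 2 segments for a header possibly split by VLAN insertion,
 * plus howmany(65535, 2048) = 32 cluster-sized payload segments,
 * plus 1 extra segment for a first cluster that is not 2K aligned,
 * i.e. 35 DMA segments in total.
 */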

/*
 * Buffer mapping flags.
 *
 * Buffers and DMA mappings must be freed when the last descriptor
 * referring to them is completed.  Set the TX_BUF_UNMAP and
 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
 * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
 * a heap buffer.
 */
enum sfxge_tx_buf_flags {
	TX_BUF_UNMAP = 1,
	TX_BUF_MBUF = 2,
};

/*
 * Buffer mapping information for descriptors in flight.
 */
struct sfxge_tx_mapping {
	union {
		struct mbuf		*mbuf;
		caddr_t			heap_buf;
	} u;
	bus_dmamap_t			map;
	enum sfxge_tx_buf_flags		flags;
};

#define	SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT		(64 * 1024)
#define	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT	1024
#define	SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT		1024

/*
 * Deferred packet list.
 */
struct sfxge_tx_dpl {
	unsigned int	std_get_max;		/* Maximum number of packets
						 * in get list */
	unsigned int	std_get_non_tcp_max;	/* Maximum number
						 * of non-TCP packets
						 * in get list */
	unsigned int	std_put_max;		/* Maximum number of packets
						 * in put list */
	uintptr_t	std_put;		/* Head of put list. */
	struct mbuf	*std_get;		/* Head of get list. */
	struct mbuf	**std_getp;		/* Tail of get list. */
	unsigned int	std_get_count;		/* Packets in get list. */
	unsigned int	std_get_non_tcp_count;	/* Non-TCP packets
						 * in get list */
	unsigned int	std_get_hiwat;		/* Packets in get list
						 * high watermark */
	unsigned int	std_put_hiwat;		/* Packets in put list
						 * high watermark */
};

#define	SFXGE_TX_BUFFER_SIZE	0x400
#define	SFXGE_TX_HEADER_SIZE	0x100
#define	SFXGE_TX_COPY_THRESHOLD	0x200

enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,
	SFXGE_TXQ_INITIALIZED,
	SFXGE_TXQ_STARTED
};

enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,
	SFXGE_TXQ_IP_CKSUM,
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,
	SFXGE_TXQ_NTYPES
};

#define	SFXGE_EVQ0_N_TXQ(_sc)						\
	((_sc)->txq_dynamic_cksum_toggle_supported ?			\
	 1 : SFXGE_TXQ_NTYPES)

#define	SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)

#define	SFXGE_TX_BATCH	64

#define	SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index)			\
	do {								\
		struct sfxge_txq *__txq = (_txq);			\
									\
		snprintf((__txq)->lock_name,				\
			 sizeof((__txq)->lock_name),			\
			 "%s:txq%u", (_ifname), (_txq_index));		\
		mtx_init(&(__txq)->lock, (__txq)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_TXQ_LOCK_DESTROY(_txq)					\
	mtx_destroy(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK(_txq)						\
	mtx_lock(&(_txq)->lock)
#define	SFXGE_TXQ_TRYLOCK(_txq)						\
	mtx_trylock(&(_txq)->lock)
#define	SFXGE_TXQ_UNLOCK(_txq)						\
	mtx_unlock(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq)				\
	mtx_assert(&(_txq)->lock, MA_OWNED)
#define	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(_txq)				\
	mtx_assert(&(_txq)->lock, MA_NOTOWNED)
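
/*
 * Illustrative use of the queue lock macros (a sketch only; the real
 * transmit and completion paths are implemented in sfxge_tx.c):
 *
 *	SFXGE_TXQ_LOCK(txq);
 *	... post pending descriptors, update txq->added ...
 *	SFXGE_TXQ_UNLOCK(txq);
 *
 * SFXGE_TXQ_TRYLOCK() wraps mtx_trylock() and so evaluates to non-zero
 * only when the lock was acquired.
 */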

struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;
	enum sfxge_txq_state		init_state;
	enum sfxge_flush_state		flush_state;
	unsigned int			tso_fw_assisted;
	enum sfxge_txq_type		type;
	unsigned int			evq_index;
	efsys_mem_t			mem;
	unsigned int			buf_base_id;
	unsigned int			entries;
	unsigned int			ptr_mask;
	unsigned int			max_pkt_desc;

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;
	efx_desc_t			*pend_desc;
	efx_txq_t			*common;

	efsys_mem_t			*tsoh_buffer;

	char				lock_name[SFXGE_LOCK_NAME_MAX];

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;
	unsigned int			added;
	unsigned int			reaped;

	/* The last (or constant) set of HW offloads requested on the queue */
	uint16_t			hw_cksum_flags;

	/* The last VLAN TCI seen on the queue if FW-assisted tagging is
	 * used */
	uint16_t			hw_vlan_tci;

	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;
	unsigned long			drops;
	unsigned long			get_overflow;
	unsigned long			get_non_tcp_overflow;
	unsigned long			put_overflow;
	unsigned long			netdown_drops;
	unsigned long			tso_pdrop_too_many;
	unsigned long			tso_pdrop_no_rsrc;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;
	struct sfxge_txq		*next;
};

struct sfxge_evq;

extern uint64_t sfxge_tx_get_drops(struct sfxge_softc *sc);

extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
extern void sfxge_if_qflush(if_t ifp);
extern int sfxge_if_transmit(if_t ifp, struct mbuf *m);

#endif