/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Transmit-path declarations for the sfxge(4) Solarflare 10GbE driver:
 * DMA-mapping bookkeeping, the deferred packet list, and the per-queue
 * transmit state (struct sfxge_txq), plus the TX entry points exported
 * to the rest of the driver.
 */

#ifndef _SFXGE_TX_H
#define _SFXGE_TX_H

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

/* Maximum number of DMA segments needed to map an mbuf chain. With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters. (The chain could be longer than this initially, but can
 * be shortened with m_collapse().)
 */
/* Units here are KB: 64KB max chain / 2KB clusters + 1 for the overlap. */
#define SFXGE_TX_MAPPING_MAX_SEG (64 / 2 + 1)

/* Maximum number of DMA segments needed to map an output packet. It
 * could overlap all mbufs in the chain and also require an extra
 * segment for a TSO header.
 */
#define SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)

/*
 * Buffer mapping flags.
 *
 * Buffers and DMA mappings must be freed when the last descriptor
 * referring to them is completed. Set the TX_BUF_UNMAP and
 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
 * chain. Set only the TX_BUF_UNMAP flag on a descriptor referring to
 * a heap buffer.
 */
enum sfxge_tx_buf_flags {
	TX_BUF_UNMAP = 1,	/* Unmap the DMA mapping on completion. */
	TX_BUF_MBUF = 2,	/* u.mbuf is valid; free the chain too. */
};

/*
 * Buffer mapping information for descriptors in flight.
 */
struct sfxge_tx_mapping {
	union {
		struct mbuf	*mbuf;		/* Valid when TX_BUF_MBUF is set. */
		caddr_t		heap_buf;	/* Valid otherwise (heap buffer). */
	} u;
	bus_dmamap_t		map;		/* DMA map to unload on completion. */
	enum sfxge_tx_buf_flags	flags;		/* See sfxge_tx_buf_flags above. */
};

/* Default caps for the deferred packet list (see struct sfxge_tx_dpl). */
#define SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT 1024
#define SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT 64

/*
 * Deferred packet list.
 *
 * Packets that cannot be handed to the hardware immediately are
 * queued here. NOTE(review): std_put is declared uintptr_t rather
 * than struct mbuf * — presumably so it can be manipulated with
 * atomic operations by concurrent producers; confirm against the
 * .c file before relying on this.
 */
struct sfxge_tx_dpl {
	unsigned int	std_get_max;		/* Maximum number of packets
						 * in get list */
	unsigned int	std_put_max;		/* Maximum number of packets
						 * in put list */
	uintptr_t	std_put;		/* Head of put list. */
	struct mbuf	*std_get;		/* Head of get list. */
	struct mbuf	**std_getp;		/* Tail of get list. */
	unsigned int	std_get_count;		/* Packets in get list. */
};


/* Sizes/thresholds for copying small packets into driver buffers. */
#define SFXGE_TX_BUFFER_SIZE	0x400
#define SFXGE_TX_HEADER_SIZE	0x100
#define SFXGE_TX_COPY_THRESHOLD	0x200

/* Lifecycle of a transmit queue (see init_state in struct sfxge_txq). */
enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,
	SFXGE_TXQ_INITIALIZED,
	SFXGE_TXQ_STARTED
};

/* One queue type per checksum-offload combination. */
enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,	/* No checksum offload. */
	SFXGE_TXQ_IP_CKSUM,		/* IP header checksum only. */
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,	/* IP and TCP/UDP checksums. */
	SFXGE_TXQ_NTYPES		/* Number of queue types. */
};

/* Free-descriptor level at which a blocked queue is restarted. */
#define SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)

/* Number of packets processed per call before pushing to hardware. */
#define SFXGE_TX_BATCH	64

#ifdef SFXGE_HAVE_MQ
/* Multiqueue: each TXQ has its own lock; scale queues by interrupt count. */
#define SFXGE_TXQ_LOCK(txq)	(&(txq)->lock)
#define SFXGE_TX_SCALE(sc)	((sc)->intr.n_alloc)
#else
/* Single queue: one shared TX lock on the softc. */
#define SFXGE_TXQ_LOCK(txq)	(&(txq)->sc->tx_lock)
#define SFXGE_TX_SCALE(sc)	1
#endif

/*
 * Per-queue transmit state.
 *
 * Fields are grouped and cache-line aligned by how often and on which
 * path (initiation vs. completion) they are accessed, to limit
 * cache-line bouncing between the two paths.
 */
struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;		/* Owning adapter softc. */
	enum sfxge_txq_state		init_state;	/* Queue lifecycle state. */
	enum sfxge_flush_state		flush_state;	/* Hardware flush progress. */
	enum sfxge_txq_type		type;		/* Checksum-offload type. */
	unsigned int			txq_index;	/* Index of this TXQ. */
	unsigned int			evq_index;	/* Associated event queue. */
	efsys_mem_t			mem;		/* Descriptor ring memory. */
	unsigned int			buf_base_id;
	unsigned int			entries;	/* Ring size (entries). */
	unsigned int			ptr_mask;	/* entries - 1, for wrapping. */

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;	/* Tag for packet DMA maps. */
	efx_buffer_t			*pend_desc;	/* Pending descriptor array. */
	efx_txq_t			*common;	/* Common-code TXQ handle. */
	struct sfxge_txq		*next;

	efsys_mem_t			*tsoh_buffer;	/* TSO header buffers. */

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
#ifdef SFXGE_HAVE_MQ
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;	/* Descriptors in pend_desc. */
#else
	unsigned int			n_pend_desc __aligned(CACHE_LINE_SIZE);
#endif
	unsigned int			added;		/* Descriptors added to ring. */
	unsigned int			reaped;		/* Mappings freed so far. */
	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;	/* m_collapse() invocations. */
	unsigned long			drops;
	unsigned long			early_drops;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;	/* Descriptors completed. */
};

/* Queue an mbuf chain for transmission on the given TXQ. */
extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *);

/* Module-level init/teardown and start/stop of all TX queues. */
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
/* Event-queue callbacks: completion processing and flush acknowledgement. */
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
#ifdef SFXGE_HAVE_MQ
/* ifnet entry points for the multiqueue transmit path. */
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
#else
/* ifnet entry point for the legacy single-queue transmit path. */
extern void sfxge_if_start(struct ifnet *ifp);
#endif

#endif