xref: /freebsd/sys/dev/sfxge/sfxge_tx.h (revision cddbc3b40812213ff00041f79174cac0be360a2a)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010-2016 Solarflare Communications Inc.
5  * All rights reserved.
6  *
7  * This software was developed in part by Philip Paeps under contract for
8  * Solarflare Communications, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright notice,
14  *    this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright notice,
16  *    this list of conditions and the following disclaimer in the documentation
17  *    and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing official
33  * policies, either expressed or implied, of the FreeBSD Project.
34  *
35  * $FreeBSD$
36  */
37 
38 #ifndef _SFXGE_TX_H
39 #define	_SFXGE_TX_H
40 
41 #include <netinet/in.h>
42 #include <netinet/ip.h>
43 #include <netinet/tcp.h>
44 
/*
 * If defined, parse TX packets directly in if_transmit
 * for better cache locality and reduced time under TX lock.
 */
#define	SFXGE_TX_PARSE_EARLY	1
49 
/* Maximum size of TSO packet: 64K - 1, the largest 16-bit IP total length */
#define	SFXGE_TSO_MAX_SIZE		(65535)

/*
 * Maximum number of segments to be created for a TSO packet.
 * Allow for a reasonable minimum MSS of 512.
 */
#define	SFXGE_TSO_MAX_SEGS		howmany(SFXGE_TSO_MAX_SIZE, 512)
58 
/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters taking into account that the first may be not 2K cluster
 * boundary aligned.
 * Packet header may be split into two segments because of, for example,
 * VLAN header insertion.
 * The chain could be longer than this initially, but can be shortened
 * with m_collapse().
 *
 * 2 (header possibly split) + clusters covering 64K + 1 (misalignment).
 */
#define	SFXGE_TX_MAPPING_MAX_SEG					\
	(2 + howmany(SFXGE_TSO_MAX_SIZE, MCLBYTES) + 1)
70 
/*
 * Buffer mapping flags.
 *
 * Buffers and DMA mappings must be freed when the last descriptor
 * referring to them is completed.  Set the TX_BUF_UNMAP and
 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
 * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
 * a heap buffer.
 */
enum sfxge_tx_buf_flags {
	TX_BUF_UNMAP = 1,	/* Unload the DMA mapping on completion */
	TX_BUF_MBUF = 2,	/* Buffer is an mbuf chain (u.mbuf valid) */
};
84 
/*
 * Buffer mapping information for descriptors in flight.
 */
struct sfxge_tx_mapping {
	union {
		struct mbuf	*mbuf;		/* If TX_BUF_MBUF is set */
		caddr_t		heap_buf;	/* Otherwise (heap buffer) */
	}			u;
	bus_dmamap_t		map;		/* DMA mapping of the buffer */
	enum sfxge_tx_buf_flags	flags;		/* What to release on completion */
};
96 
/* Default limits for the deferred packet lists (see struct sfxge_tx_dpl) */
#define	SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT		(64 * 1024)
#define	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT	1024
#define	SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT		1024
100 
/*
 * Deferred packet list.
 *
 * Packets queued for transmission are staged here before being posted
 * to the TX ring.  NOTE(review): the get/put split presumably allows
 * non-lock-owning threads to append to the put list (uintptr_t head)
 * while the lock owner drains the get list — confirm in sfxge_tx.c.
 */
struct sfxge_tx_dpl {
	unsigned int	std_get_max;		/* Maximum number of packets
						 * in get list */
	unsigned int	std_get_non_tcp_max;	/* Maximum number
						 * of non-TCP packets
						 * in get list */
	unsigned int	std_put_max;		/* Maximum number of packets
						 * in put list */
	uintptr_t	std_put;		/* Head of put list. */
	struct mbuf	*std_get;		/* Head of get list. */
	struct mbuf	**std_getp;		/* Tail of get list. */
	unsigned int	std_get_count;		/* Packets in get list. */
	unsigned int	std_get_non_tcp_count;	/* Non-TCP packets
						 * in get list */
	unsigned int	std_get_hiwat;		/* Packets in get list
						 * high watermark */
	unsigned int	std_put_hiwat;		/* Packets in put list
						 * high watermark */
};
123 
124 
/* Per-queue buffer sizes and the copy threshold, in bytes.
 * NOTE(review): exact semantics (copy-vs-map cutoff, header buffer use)
 * are defined by sfxge_tx.c — confirm there before relying on them.
 */
#define	SFXGE_TX_BUFFER_SIZE	0x400
#define	SFXGE_TX_HEADER_SIZE	0x100
#define	SFXGE_TX_COPY_THRESHOLD	0x200
128 
/* TX queue initialisation/start state */
enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,
	SFXGE_TXQ_INITIALIZED,		/* Resources allocated, not started */
	SFXGE_TXQ_STARTED		/* Queue started on the hardware */
};
134 
/* Checksum-offload flavour of a TX queue */
enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,	/* No checksum offload */
	SFXGE_TXQ_IP_CKSUM,		/* IP header checksum only */
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,	/* IP and TCP/UDP checksums */
	SFXGE_TXQ_NTYPES		/* Number of queue types */
};
141 
/* Number of TX queues on event queue 0: one queue suffices when the
 * firmware can toggle checksum offload dynamically, otherwise one
 * queue per checksum type is required.
 */
#define	SFXGE_EVQ0_N_TXQ(_sc)						\
	((_sc)->txq_dynamic_cksum_toggle_supported ?			\
	1 : SFXGE_TXQ_NTYPES)

/* Unblock a stalled queue once it drains to 1/4 of the descriptor limit */
#define	SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)

/* Batch size for TX processing; see uses in sfxge_tx.c */
#define	SFXGE_TX_BATCH	64
149 
/* Initialize the per-queue mutex, naming it "<ifname>:txq<index>" */
#define	SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index)			\
	do {								\
		struct sfxge_txq  *__txq = (_txq);			\
									\
		snprintf((__txq)->lock_name,				\
			 sizeof((__txq)->lock_name),			\
			 "%s:txq%u", (_ifname), (_txq_index));		\
		mtx_init(&(__txq)->lock, (__txq)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_TXQ_LOCK_DESTROY(_txq)					\
	mtx_destroy(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK(_txq)						\
	mtx_lock(&(_txq)->lock)
/* Returns non-zero if the lock was acquired */
#define	SFXGE_TXQ_TRYLOCK(_txq)						\
	mtx_trylock(&(_txq)->lock)
#define	SFXGE_TXQ_UNLOCK(_txq)						\
	mtx_unlock(&(_txq)->lock)
/* Debug-build assertions on lock ownership (see mtx_assert(9)) */
#define	SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq)				\
	mtx_assert(&(_txq)->lock, MA_OWNED)
#define	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(_txq)				\
	mtx_assert(&(_txq)->lock, MA_NOTOWNED)
172 
173 
/*
 * Per-queue TX state, grouped by access pattern: rarely-written
 * configuration first, then initiation-path fields, then
 * completion-path fields (note the __aligned(CACHE_LINE_SIZE)
 * annotations separating the groups to limit false sharing).
 */
struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;		/* Owning adapter softc */
	enum sfxge_txq_state		init_state;	/* See enum sfxge_txq_state */
	enum sfxge_flush_state		flush_state;
	unsigned int			tso_fw_assisted; /* FW-assisted TSO support flags */
	enum sfxge_txq_type		type;		/* Checksum offload flavour */
	unsigned int			evq_index;	/* Paired event queue index */
	efsys_mem_t			mem;		/* Descriptor ring memory */
	unsigned int			buf_base_id;
	unsigned int			entries;	/* Ring size in descriptors */
	unsigned int			ptr_mask;	/* Presumably entries - 1 (ring
							 * index mask) — confirm */
	unsigned int			max_pkt_desc;	/* Max descriptors per packet */

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;	/* Tag for mapping packets */
	efx_desc_t			*pend_desc;	/* Staged descriptors; count
							 * in n_pend_desc */
	efx_txq_t			*common;	/* Common-code TXQ handle */

	efsys_mem_t			*tsoh_buffer;	/* Buffers for TSO headers */

	char				lock_name[SFXGE_LOCK_NAME_MAX];

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;	/* Entries in pend_desc */
	/* NOTE(review): added/reaped (and pending/completed below) appear
	 * to be free-running descriptor counters — confirm in sfxge_tx.c.
	 */
	unsigned int			added;
	unsigned int			reaped;

	/* The last (or constant) set of HW offloads requested on the queue */
	uint16_t			hw_cksum_flags;

	/* The last VLAN TCI seen on the queue if FW-assisted tagging is
	 * used */
	uint16_t			hw_vlan_tci;

	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;	/* m_collapse() calls */
	unsigned long			drops;
	unsigned long			get_overflow;
	unsigned long			get_non_tcp_overflow;
	unsigned long			put_overflow;
	unsigned long			netdown_drops;
	unsigned long			tso_pdrop_too_many;
	unsigned long			tso_pdrop_no_rsrc;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;
	struct sfxge_txq		*next;	/* Next queue to service */
};
238 
struct sfxge_evq;

/* Total TX drop count for the adapter (for interface statistics) */
extern uint64_t sfxge_tx_get_drops(struct sfxge_softc *sc);

/* Lifecycle: allocate/free and start/stop the adapter's TX queues */
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
/* Event-path hooks: descriptor completion and flush-done notification */
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
/* ifnet entry points (if_qflush / if_transmit) */
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
251 
252 #endif
253