/*-
 * Copyright (c) 2010-2015 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD$
 */

#ifndef _SFXGE_TX_H
#define	_SFXGE_TX_H

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

/* Maximum size of TSO packet */
#define	SFXGE_TSO_MAX_SIZE		(65535)

/*
 * Maximum number of segments to be created for a TSO packet.
 * Allow for a reasonable minimum MSS of 512.
 */
#define	SFXGE_TSO_MAX_SEGS		howmany(SFXGE_TSO_MAX_SIZE, 512)
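
/*
 * For reference: howmany(65535, 512) == (65535 + 511) / 512 == 128,
 * so a maximal TSO packet at the assumed minimum MSS of 512 maps to
 * at most 128 segments.
 */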

/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters, taking into account that the first cluster may not be
 * aligned to a 2K boundary.
 * The packet header may be split into two segments because of, for
 * example, VLAN header insertion.
 * The chain could be longer than this initially, but can be shortened
 * with m_collapse().
 */
#define	SFXGE_TX_MAPPING_MAX_SEG					\
	(2 + howmany(SFXGE_TSO_MAX_SIZE, MCLBYTES) + 1)

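/*
 * For reference, with the common MCLBYTES value of 2048 this works out
 * to 2 + howmany(65535, 2048) + 1 == 2 + 32 + 1 == 35 DMA segments:
 * two for a possibly split packet header, 32 for the payload clusters,
 * and one extra for a first cluster that is not 2K-aligned.
 */
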
/*
 * Buffer mapping flags.
 *
 * Buffers and DMA mappings must be freed when the last descriptor
 * referring to them is completed.  Set the TX_BUF_UNMAP and
 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
 * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
 * a heap buffer.
 */
enum sfxge_tx_buf_flags {
	TX_BUF_UNMAP = 1,
	TX_BUF_MBUF = 2,
};
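
/*
 * Illustrative sketch only (not a driver interface): marking the
 * mapping that corresponds to the final descriptor of an mbuf chain,
 * so that completion processing unmaps and frees it, might look like:
 *
 *	stmp->u.mbuf = mbuf;
 *	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;
 *
 * whereas a descriptor that references a driver-allocated heap buffer
 * would set only TX_BUF_UNMAP and fill in u.heap_buf instead.
 */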

/*
 * Buffer mapping information for descriptors in flight.
 */
struct sfxge_tx_mapping {
	union {
		struct mbuf	*mbuf;
		caddr_t		heap_buf;
	}			u;
	bus_dmamap_t		map;
	enum sfxge_tx_buf_flags	flags;
};

#define	SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT		(64 * 1024)
#define	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT	1024
#define	SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT		1024

/*
 * Deferred packet list.
 */
struct sfxge_tx_dpl {
	unsigned int	std_get_max;		/* Maximum number of packets
						 * in get list */
	unsigned int	std_get_non_tcp_max;	/* Maximum number
						 * of non-TCP packets
						 * in get list */
	unsigned int	std_put_max;		/* Maximum number of packets
						 * in put list */
	uintptr_t	std_put;		/* Head of put list. */
	struct mbuf	*std_get;		/* Head of get list. */
	struct mbuf	**std_getp;		/* Tail of get list. */
	unsigned int	std_get_count;		/* Packets in get list. */
	unsigned int	std_get_non_tcp_count;	/* Non-TCP packets
						 * in get list */
	unsigned int	std_get_hiwat;		/* Packets in get list
						 * high watermark */
	unsigned int	std_put_hiwat;		/* Packets in put list
						 * high watermark */
};
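
/*
 * Illustrative sketch only: appending a packet to the tail of the get
 * list, assuming packets are linked through m_nextpkt (the usual
 * convention for packet lists) and the queue lock is held, would look
 * roughly like:
 *
 *	*stdp->std_getp = mbuf;
 *	stdp->std_getp = &mbuf->m_nextpkt;
 *	stdp->std_get_count++;
 *
 * std_put is declared as a uintptr_t rather than a struct mbuf pointer,
 * which suggests it is updated atomically by contexts that do not hold
 * the queue lock.
 */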


#define	SFXGE_TX_BUFFER_SIZE	0x400
#define	SFXGE_TX_HEADER_SIZE	0x100
#define	SFXGE_TX_COPY_THRESHOLD	0x200

enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,
	SFXGE_TXQ_INITIALIZED,
	SFXGE_TXQ_STARTED
};

enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,
	SFXGE_TXQ_IP_CKSUM,
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,
	SFXGE_TXQ_NTYPES
};

#define	SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)

#define	SFXGE_TX_BATCH	64

#define	SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index)			\
	do {								\
		struct sfxge_txq  *__txq = (_txq);			\
									\
		snprintf((__txq)->lock_name,				\
			 sizeof((__txq)->lock_name),			\
			 "%s:txq%u", (_ifname), (_txq_index));		\
		mtx_init(&(__txq)->lock, (__txq)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_TXQ_LOCK_DESTROY(_txq)					\
	mtx_destroy(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK(_txq)						\
	mtx_lock(&(_txq)->lock)
#define	SFXGE_TXQ_TRYLOCK(_txq)						\
	mtx_trylock(&(_txq)->lock)
#define	SFXGE_TXQ_UNLOCK(_txq)						\
	mtx_unlock(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq)				\
	mtx_assert(&(_txq)->lock, MA_OWNED)
#define	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(_txq)				\
	mtx_assert(&(_txq)->lock, MA_NOTOWNED)
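
/*
 * Illustrative usage sketch only: code that manipulates a transmit
 * queue would typically bracket the work with these wrappers, e.g.:
 *
 *	SFXGE_TXQ_LOCK(txq);
 *	... queue descriptors, update the deferred packet list ...
 *	SFXGE_TXQ_UNLOCK(txq);
 *
 * and functions that require the lock to be held can document that
 * with SFXGE_TXQ_LOCK_ASSERT_OWNED(txq).
 */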


struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;
	enum sfxge_txq_state		init_state;
	enum sfxge_flush_state		flush_state;
	enum sfxge_txq_type		type;
	unsigned int			txq_index;
	unsigned int			evq_index;
	efsys_mem_t			mem;
	unsigned int			buf_base_id;
	unsigned int			entries;
	unsigned int			ptr_mask;
	unsigned int			max_pkt_desc;

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;
	efx_desc_t			*pend_desc;
	efx_txq_t			*common;

	efsys_mem_t			*tsoh_buffer;

	char				lock_name[SFXGE_LOCK_NAME_MAX];

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;
	unsigned int			added;
	unsigned int			reaped;

	/* The last VLAN TCI seen on the queue if FW-assisted tagging
	 * is used
	 */
	uint16_t			hw_vlan_tci;

	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;
	unsigned long			drops;
	unsigned long			get_overflow;
	unsigned long			get_non_tcp_overflow;
	unsigned long			put_overflow;
	unsigned long			netdown_drops;
	unsigned long			tso_pdrop_too_many;
	unsigned long			tso_pdrop_no_rsrc;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;
	struct sfxge_txq		*next;
};

struct sfxge_evq;

extern uint64_t sfxge_tx_get_drops(struct sfxge_softc *sc);

extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);

#endif
239