/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#ifndef _SFXGE_TX_H
#define	_SFXGE_TX_H

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

/* If defined, parse TX packets directly in if_transmit
 * for better cache locality and reduced time under the TX lock
 */
#define	SFXGE_TX_PARSE_EARLY 1

/* Maximum size of TSO packet */
#define	SFXGE_TSO_MAX_SIZE		(65535)

/*
 * Maximum number of segments to be created for a TSO packet.
 * Allow for a reasonable minimum MSS of 512.
 */
#define	SFXGE_TSO_MAX_SEGS		howmany(SFXGE_TSO_MAX_SIZE, 512)
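/*
 * For illustration: with SFXGE_TSO_MAX_SIZE of 65535 and the assumed
 * minimum MSS of 512, howmany(65535, 512) rounds up to 128, so at most
 * 128 segments are produced for a single TSO packet.
 */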

/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters, taking into account that the first cluster may not be
 * aligned to a 2K boundary.
 * The packet header may be split into two segments because of, for
 * example, VLAN header insertion.
 * The chain could be longer than this initially, but can be shortened
 * with m_collapse().
 */
#define	SFXGE_TX_MAPPING_MAX_SEG					\
	(2 + howmany(SFXGE_TSO_MAX_SIZE, MCLBYTES) + 1)
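/*
 * For illustration (assuming the usual MCLBYTES of 2048): this works out
 * to 2 segments for a possibly split packet header, plus
 * howmany(65535, 2048) = 32 payload clusters, plus 1 to allow for a
 * misaligned first cluster, i.e. 35 DMA segments in total.
 */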

/*
 * Buffer mapping flags.
 *
 * Buffers and DMA mappings must be freed when the last descriptor
 * referring to them is completed.  Set the TX_BUF_UNMAP and
 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
 * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
 * a heap buffer.
 */
enum sfxge_tx_buf_flags {
	TX_BUF_UNMAP = 1,
	TX_BUF_MBUF = 2,
};
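
/*
 * For example, the last descriptor generated for an mbuf chain carries
 * (TX_BUF_UNMAP | TX_BUF_MBUF), while a descriptor referring to a
 * driver-allocated heap buffer carries TX_BUF_UNMAP alone.
 */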

/*
 * Buffer mapping information for descriptors in flight.
 */
struct sfxge_tx_mapping {
	union {
		struct mbuf	*mbuf;
		caddr_t		heap_buf;
	}			u;
	bus_dmamap_t		map;
	enum sfxge_tx_buf_flags	flags;
};

#define	SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT		(64 * 1024)
#define	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT	1024
#define	SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT		1024

/*
 * Deferred packet list.
 */
struct sfxge_tx_dpl {
	unsigned int	std_get_max;		/* Maximum number of packets
						 * in get list */
	unsigned int	std_get_non_tcp_max;	/* Maximum number
						 * of non-TCP packets
						 * in get list */
	unsigned int	std_put_max;		/* Maximum number of packets
						 * in put list */
	uintptr_t	std_put;		/* Head of put list. */
	struct mbuf	*std_get;		/* Head of get list. */
	struct mbuf	**std_getp;		/* Tail of get list. */
	unsigned int	std_get_count;		/* Packets in get list. */
	unsigned int	std_get_non_tcp_count;	/* Non-TCP packets
						 * in get list */
	unsigned int	std_get_hiwat;		/* Packets in get list
						 * high watermark */
	unsigned int	std_put_hiwat;		/* Packets in put list
						 * high watermark */
};
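
/*
 * Illustrative sketch (not driver code; stdp is a hypothetical pointer to
 * a struct sfxge_tx_dpl) of appending an mbuf to the tail of the get list
 * through the std_getp tail pointer:
 *
 *	*stdp->std_getp = m;
 *	stdp->std_getp = &m->m_nextpkt;
 *	stdp->std_get_count++;
 */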

#define	SFXGE_TX_BUFFER_SIZE	0x400
#define	SFXGE_TX_HEADER_SIZE	0x100
#define	SFXGE_TX_COPY_THRESHOLD	0x200

enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,
	SFXGE_TXQ_INITIALIZED,
	SFXGE_TXQ_STARTED
};

enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,
	SFXGE_TXQ_IP_CKSUM,
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,
	SFXGE_TXQ_NTYPES
};

#define	SFXGE_EVQ0_N_TXQ(_sc)						\
	((_sc)->txq_dynamic_cksum_toggle_supported ?			\
	1 : SFXGE_TXQ_NTYPES)
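/*
 * I.e. event queue 0 hosts a single TX queue when checksum offload can be
 * toggled dynamically per packet, and SFXGE_TXQ_NTYPES (3) dedicated
 * queues (no checksum, IP checksum, IP/TCP/UDP checksum) otherwise.
 */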

#define	SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)

#define	SFXGE_TX_BATCH	64

#define	SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index)			\
	do {								\
		struct sfxge_txq  *__txq = (_txq);			\
									\
		snprintf((__txq)->lock_name,				\
			 sizeof((__txq)->lock_name),			\
			 "%s:txq%u", (_ifname), (_txq_index));		\
		mtx_init(&(__txq)->lock, (__txq)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_TXQ_LOCK_DESTROY(_txq)					\
	mtx_destroy(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK(_txq)						\
	mtx_lock(&(_txq)->lock)
#define	SFXGE_TXQ_TRYLOCK(_txq)						\
	mtx_trylock(&(_txq)->lock)
#define	SFXGE_TXQ_UNLOCK(_txq)						\
	mtx_unlock(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq)				\
	mtx_assert(&(_txq)->lock, MA_OWNED)
#define	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(_txq)				\
	mtx_assert(&(_txq)->lock, MA_NOTOWNED)
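
/*
 * Illustrative usage of the lock macros (a sketch only; the interface
 * name and queue index are hypothetical):
 *
 *	SFXGE_TXQ_LOCK_INIT(txq, "sfxge0", 0);
 *	SFXGE_TXQ_LOCK(txq);
 *	... append to the deferred packet list, post descriptors ...
 *	SFXGE_TXQ_UNLOCK(txq);
 *	SFXGE_TXQ_LOCK_DESTROY(txq);
 */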

struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;
	enum sfxge_txq_state		init_state;
	enum sfxge_flush_state		flush_state;
	unsigned int			tso_fw_assisted;
	enum sfxge_txq_type		type;
	unsigned int			evq_index;
	efsys_mem_t			mem;
	unsigned int			buf_base_id;
	unsigned int			entries;
	unsigned int			ptr_mask;
	unsigned int			max_pkt_desc;

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;
	efx_desc_t			*pend_desc;
	efx_txq_t			*common;

	efsys_mem_t			*tsoh_buffer;

	char				lock_name[SFXGE_LOCK_NAME_MAX];

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;
	unsigned int			added;
	unsigned int			reaped;

	/* The last (or constant) set of HW offloads requested on the queue */
	uint16_t			hw_cksum_flags;

	/* The last VLAN TCI seen on the queue if FW-assisted tagging is
	 * used */
	uint16_t			hw_vlan_tci;

	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;
	unsigned long			drops;
	unsigned long			get_overflow;
	unsigned long			get_non_tcp_overflow;
	unsigned long			put_overflow;
	unsigned long			netdown_drops;
	unsigned long			tso_pdrop_too_many;
	unsigned long			tso_pdrop_no_rsrc;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;
	struct sfxge_txq		*next;
};

struct sfxge_evq;

extern uint64_t sfxge_tx_get_drops(struct sfxge_softc *sc);

extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
extern void sfxge_if_qflush(if_t ifp);
extern int sfxge_if_transmit(if_t ifp, struct mbuf *m);
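
/*
 * Illustrative call order (a sketch of the expected lifecycle inferred
 * from the names above, not taken from the driver sources):
 * sfxge_tx_init() at attach, sfxge_tx_start() when the interface is
 * brought up, sfxge_if_transmit() per packet, then sfxge_tx_stop() and
 * sfxge_tx_fini() on the way back down.
 */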

#endif