/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SFXGE_TX_H
#define	_SFXGE_TX_H

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters.  (The chain could be longer than this initially, but can
 * be shortened with m_collapse().)
 */
#define	SFXGE_TX_MAPPING_MAX_SEG (64 / 2 + 1)

/* Maximum number of DMA segments needed to map an output packet.  It
 * could overlap all mbufs in the chain and also require an extra
 * segment for a TSO header.
 */
#define	SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)
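
/*
 * Illustrative sketch only (not part of the driver): these limits would
 * typically bound the "nsegments" argument of the DMA tag from which
 * packet mappings are created.  Every argument value below other than
 * the segment count is a hypothetical placeholder.
 *
 *	bus_dma_tag_create(parent_tag, 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    0x11000, SFXGE_TX_MAPPING_MAX_SEG, PAGE_SIZE, 0,
 *	    NULL, NULL, &txq->packet_dma_tag);
 */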

/*
 * Buffer mapping flags.
 *
 * Buffers and DMA mappings must be freed when the last descriptor
 * referring to them is completed.  Set the TX_BUF_UNMAP and
 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
 * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
 * a heap buffer.
 */
enum sfxge_tx_buf_flags {
	TX_BUF_UNMAP = 1,
	TX_BUF_MBUF = 2,
};

/*
 * Buffer mapping information for descriptors in flight.
 */
struct sfxge_tx_mapping {
	union {
		struct mbuf	*mbuf;
		caddr_t		heap_buf;
	}			u;
	bus_dmamap_t		map;
	enum sfxge_tx_buf_flags	flags;
};
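
/*
 * Completion-path cleanup sketch (illustrative, not taken from the
 * driver sources): the flags on the last descriptor for a mapping
 * select what is released when that descriptor completes.  "M_SFXGE"
 * is an assumed malloc type; the real one is defined elsewhere.
 *
 *	if (stmp->flags & TX_BUF_UNMAP) {
 *		bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
 *		if (stmp->flags & TX_BUF_MBUF)
 *			m_freem(stmp->u.mbuf);
 *		else
 *			free(stmp->u.heap_buf, M_SFXGE);
 *	}
 */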

#define	SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT		(64 * 1024)
#define	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT	1024
#define	SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT		64

/*
 * Deferred packet list.
 */
struct sfxge_tx_dpl {
	unsigned int	std_get_max;		/* Maximum number of packets
						 * in get list */
	unsigned int	std_get_non_tcp_max;	/* Maximum number
						 * of non-TCP packets
						 * in get list */
	unsigned int	std_put_max;		/* Maximum number of packets
						 * in put list */
	uintptr_t	std_put;		/* Head of put list. */
	struct mbuf	*std_get;		/* Head of get list. */
	struct mbuf	**std_getp;		/* Tail of get list. */
	unsigned int	std_get_count;		/* Packets in get list. */
	unsigned int	std_get_non_tcp_count;	/* Non-TCP packets
						 * in get list */
	unsigned int	std_get_hiwat;		/* Packets in get list
						 * high watermark */
};
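
/*
 * Illustrative sketch (assumed, not taken verbatim from the driver):
 * the put list is a lock-free singly-linked list of mbufs chained
 * through m_nextpkt, with its head kept in std_put.  Insertion might
 * look roughly like:
 *
 *	do {
 *		old = stdp->std_put;
 *		m->m_nextpkt = (struct mbuf *)old;
 *	} while (!atomic_cmpset_ptr(&stdp->std_put, old, (uintptr_t)m));
 *
 * The get list, by contrast, is only manipulated under the queue lock,
 * so a plain head/tail pair (std_get / std_getp) suffices.
 */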

#define	SFXGE_TX_BUFFER_SIZE	0x400
#define	SFXGE_TX_HEADER_SIZE	0x100
#define	SFXGE_TX_COPY_THRESHOLD	0x200

enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,
	SFXGE_TXQ_INITIALIZED,
	SFXGE_TXQ_STARTED
};

enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,
	SFXGE_TXQ_IP_CKSUM,
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,
	SFXGE_TXQ_NTYPES
};
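
/*
 * The queue type reflects which checksum offloads a packet needs.  A
 * plausible mapping from mbuf checksum flags (illustrative sketch, not
 * necessarily the exact logic used by the transmit path):
 *
 *	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
 *		type = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
 *	else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP)
 *		type = SFXGE_TXQ_IP_CKSUM;
 *	else
 *		type = SFXGE_TXQ_NON_CKSUM;
 */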

#define	SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)

#define	SFXGE_TX_BATCH	64

#ifdef SFXGE_HAVE_MQ
#define	SFXGE_TX_LOCK(txq)		(&(txq)->lock)
#define	SFXGE_TX_SCALE(sc)		((sc)->intr.n_alloc)
#else
#define	SFXGE_TX_LOCK(txq)		(&(txq)->sc->tx_lock)
#define	SFXGE_TX_SCALE(sc)		1
#endif

#define	SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index)			\
	do {								\
		struct sfxge_txq *__txq = (_txq);			\
									\
		snprintf((__txq)->lock_name,				\
			 sizeof((__txq)->lock_name),			\
			 "%s:txq%u", (_ifname), (_txq_index));		\
		mtx_init(&(__txq)->lock, (__txq)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_TXQ_LOCK_DESTROY(_txq)					\
	mtx_destroy(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK(_txq)						\
	mtx_lock(SFXGE_TX_LOCK(_txq))
#define	SFXGE_TXQ_TRYLOCK(_txq)						\
	mtx_trylock(SFXGE_TX_LOCK(_txq))
#define	SFXGE_TXQ_UNLOCK(_txq)						\
	mtx_unlock(SFXGE_TX_LOCK(_txq))
#define	SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq)				\
	mtx_assert(SFXGE_TX_LOCK(_txq), MA_OWNED)
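
/*
 * Typical usage of the queue lock macros (illustrative sketch):
 *
 *	SFXGE_TXQ_LOCK(txq);
 *	... append descriptors, update txq->added ...
 *	SFXGE_TXQ_UNLOCK(txq);
 *
 * Code that must already hold the lock can document that with
 * SFXGE_TXQ_LOCK_ASSERT_OWNED(txq).
 */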

struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;
	enum sfxge_txq_state		init_state;
	enum sfxge_flush_state		flush_state;
	enum sfxge_txq_type		type;
	unsigned int			txq_index;
	unsigned int			evq_index;
	efsys_mem_t			mem;
	unsigned int			buf_base_id;
	unsigned int			entries;
	unsigned int			ptr_mask;

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;
	efx_buffer_t			*pend_desc;
	efx_txq_t			*common;

	efsys_mem_t			*tsoh_buffer;

	char				lock_name[SFXGE_LOCK_NAME_MAX];

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
#ifdef SFXGE_HAVE_MQ
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;
#else
	unsigned int			n_pend_desc __aligned(CACHE_LINE_SIZE);
#endif
	unsigned int			added;
	unsigned int			reaped;
	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;
	unsigned long			drops;
	unsigned long			get_overflow;
	unsigned long			get_non_tcp_overflow;
	unsigned long			put_overflow;
	unsigned long			netdown_drops;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;
	struct sfxge_txq		*next;
};

struct sfxge_evq;

extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *);

extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
#ifdef SFXGE_HAVE_MQ
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
#else
extern void sfxge_if_start(struct ifnet *ifp);
#endif
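
/*
 * Rough lifecycle sketch for the entry points above (illustrative,
 * assumed ordering, not taken verbatim from the driver):
 *
 *	sfxge_tx_init(sc);		- attach time
 *	sfxge_tx_start(sc);		- interface brought up
 *	...
 *	sfxge_if_transmit(ifp, m);	- per packet (MQ case)
 *	sfxge_tx_qcomplete(txq, evq);	- from the event queue handler
 *	...
 *	sfxge_tx_stop(sc);		- interface brought down
 *	sfxge_tx_fini(sc);		- detach time
 */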

#endif	/* _SFXGE_TX_H */