xref: /freebsd/sys/dev/sfxge/sfxge_tx.h (revision fcb560670601b2a4d87bb31d7531c8dcc37ee71b)
1 /*-
2  * Copyright (c) 2010-2011 Solarflare Communications, Inc.
3  * All rights reserved.
4  *
5  * This software was developed in part by Philip Paeps under contract for
6  * Solarflare Communications, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 #ifndef _SFXGE_TX_H
33 #define	_SFXGE_TX_H
34 
35 #include <netinet/in.h>
36 #include <netinet/ip.h>
37 #include <netinet/tcp.h>
38 
/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters.  (The chain could be longer than this initially, but can
 * be shortened with m_collapse().)
 * 64K / 2K clusters plus one extra segment = 33 segments.
 */
#define	SFXGE_TX_MAPPING_MAX_SEG (64 / 2 + 1)

/* Maximum number of DMA segments needed to map an output packet.  It
 * could overlap all mbufs in the chain and also require an extra
 * segment for a TSO header.
 */
#define	SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)
51 
52 /*
53  * Buffer mapping flags.
54  *
55  * Buffers and DMA mappings must be freed when the last descriptor
56  * referring to them is completed.  Set the TX_BUF_UNMAP and
57  * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
58  * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
59  * a heap buffer.
60  */
61 enum sfxge_tx_buf_flags {
62 	TX_BUF_UNMAP = 1,
63 	TX_BUF_MBUF = 2,
64 };
65 
66 /*
67  * Buffer mapping information for descriptors in flight.
68  */
69 struct sfxge_tx_mapping {
70 	union {
71 		struct mbuf	*mbuf;
72 		caddr_t		heap_buf;
73 	}			u;
74 	bus_dmamap_t		map;
75 	enum sfxge_tx_buf_flags	flags;
76 };
77 
/* Default limits on deferred packet list (DPL) occupancy; see
 * struct sfxge_tx_dpl below for the lists these bound.
 */
#define	SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT		(64 * 1024)
#define	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT	1024
#define	SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT		64
81 
82 /*
83  * Deferred packet list.
84  */
85 struct sfxge_tx_dpl {
86 	unsigned int	std_get_max;		/* Maximum number  of packets
87 						 * in get list */
88 	unsigned int	std_get_non_tcp_max;	/* Maximum number
89 						 * of non-TCP packets
90 						 * in get list */
91 	unsigned int	std_put_max;		/* Maximum number of packets
92 						 * in put list */
93 	uintptr_t	std_put;		/* Head of put list. */
94 	struct mbuf	*std_get;		/* Head of get list. */
95 	struct mbuf	**std_getp;		/* Tail of get list. */
96 	unsigned int	std_get_count;		/* Packets in get list. */
97 	unsigned int	std_get_non_tcp_count;	/* Non-TCP packets
98 						 * in get list */
99 	unsigned int	std_get_hiwat;		/* Packets in get list
100 						 * high watermark */
101 };
102 
103 
/* Sizes (bytes) for TX bounce buffers and the copy threshold below
 * which packet data is copied rather than DMA-mapped in place.
 */
#define	SFXGE_TX_BUFFER_SIZE	0x400
#define	SFXGE_TX_HEADER_SIZE	0x100
#define	SFXGE_TX_COPY_THRESHOLD	0x200
107 
/* Lifecycle state of a transmit queue. */
enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,	/* Not yet set up. */
	SFXGE_TXQ_INITIALIZED,		/* Resources allocated, not running. */
	SFXGE_TXQ_STARTED		/* Accepting packets. */
};
113 
/* TX queue type, selected by the checksum offload a packet needs. */
enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,	/* No checksum offload. */
	SFXGE_TXQ_IP_CKSUM,		/* IPv4 header checksum only. */
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,	/* IP + TCP/UDP checksums. */
	SFXGE_TXQ_NTYPES		/* Number of queue types. */
};
120 
/* A blocked queue is unblocked once at least a quarter of its
 * descriptor ring is free again.
 */
#define	SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)

/* Number of descriptors pushed to hardware per doorbell batch. */
#define	SFXGE_TX_BATCH	64
124 
/* With multiqueue support each TXQ has its own lock and there is one
 * queue set per interrupt; without it all TXQs share the softc-wide
 * tx_lock and a single queue set is used.
 */
#ifdef SFXGE_HAVE_MQ
#define	SFXGE_TX_LOCK(txq)		(&(txq)->lock)
#define	SFXGE_TX_SCALE(sc)		((sc)->intr.n_alloc)
#else
#define	SFXGE_TX_LOCK(txq)		(&(txq)->sc->tx_lock)
#define	SFXGE_TX_SCALE(sc)		1
#endif
132 
/* Initialise the per-TXQ mutex, naming it "<ifname>:txq<index>" so it
 * is identifiable in lock diagnostics.  do/while(B_FALSE) makes the
 * multi-statement macro usable as a single statement.
 */
#define	SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index)			\
	do {								\
		struct sfxge_txq  *__txq = (_txq);			\
									\
		snprintf((__txq)->lock_name,				\
			 sizeof((__txq)->lock_name),			\
			 "%s:txq%u", (_ifname), (_txq_index));		\
		mtx_init(&(__txq)->lock, (__txq)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_TXQ_LOCK_DESTROY(_txq)					\
	mtx_destroy(&(_txq)->lock)
/* The following operate on SFXGE_TX_LOCK(), so they act on either the
 * per-queue lock or the shared softc lock depending on SFXGE_HAVE_MQ.
 */
#define	SFXGE_TXQ_LOCK(_txq)						\
	mtx_lock(SFXGE_TX_LOCK(_txq))
#define	SFXGE_TXQ_TRYLOCK(_txq)						\
	mtx_trylock(SFXGE_TX_LOCK(_txq))
#define	SFXGE_TXQ_UNLOCK(_txq)						\
	mtx_unlock(SFXGE_TX_LOCK(_txq))
#define	SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq)				\
	mtx_assert(SFXGE_TX_LOCK(_txq), MA_OWNED)
153 
154 
/*
 * Per-queue transmit state.
 *
 * Fields are grouped by access pattern: rarely-written configuration
 * first, then initiation-path state, then completion-path state, with
 * __aligned(CACHE_LINE_SIZE) separating the hot groups to avoid false
 * sharing between the two paths.
 */
struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;		/* Owning softc. */
	enum sfxge_txq_state		init_state;	/* Lifecycle state. */
	enum sfxge_flush_state		flush_state;	/* Flush progress. */
	enum sfxge_txq_type		type;		/* Checksum-offload type. */
	unsigned int			txq_index;	/* Index of this TXQ. */
	unsigned int			evq_index;	/* Associated event queue. */
	efsys_mem_t			mem;		/* Descriptor ring memory. */
	unsigned int			buf_base_id;
	unsigned int			entries;	/* Ring size in descriptors. */
	unsigned int			ptr_mask;	/* Ring index mask
							 * (presumably entries - 1;
							 * confirm in sfxge_tx.c). */

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;
	efx_buffer_t			*pend_desc;	/* Descriptors staged but
							 * not yet pushed. */
	efx_txq_t			*common;	/* Common-code TXQ handle. */

	efsys_mem_t			*tsoh_buffer;	/* TSO header buffers. */

	char				lock_name[SFXGE_LOCK_NAME_MAX];

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
#ifdef SFXGE_HAVE_MQ
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;	/* Count in pend_desc. */
#else
	unsigned int			n_pend_desc __aligned(CACHE_LINE_SIZE);
#endif
	unsigned int			added;		/* Descriptors added. */
	unsigned int			reaped;		/* Descriptors reclaimed. */
	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;
	unsigned long			drops;
	unsigned long			get_overflow;
	unsigned long			get_non_tcp_overflow;
	unsigned long			put_overflow;
	unsigned long			netdown_drops;
	unsigned long			tso_pdrop_too_many;
	unsigned long			tso_pdrop_no_rsrc;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;	/* Descriptors completed. */
	struct sfxge_txq		*next;		/* Next TXQ needing
							 * completion processing. */
};
214 
struct sfxge_evq;

/* Enqueue an mbuf chain on a TX queue. */
extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *);

/* TX subsystem lifecycle: init/fini allocate and release state;
 * start/stop bring the queues up and down.
 */
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
/* Completion-path hooks, called from event queue processing. */
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
/* ifnet entry points: multiqueue builds use if_transmit/if_qflush,
 * non-MQ builds use the legacy if_start interface.
 */
#ifdef SFXGE_HAVE_MQ
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
#else
extern void sfxge_if_start(struct ifnet *ifp);
#endif
231 
232 #endif
233