xref: /freebsd/sys/dev/sfxge/sfxge_tx.h (revision 40a8ac8f62b535d30349faf28cf47106b7041b83)
1 /*-
2  * Copyright (c) 2010-2011 Solarflare Communications, Inc.
3  * All rights reserved.
4  *
5  * This software was developed in part by Philip Paeps under contract for
6  * Solarflare Communications, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 #ifndef _SFXGE_TX_H
33 #define _SFXGE_TX_H
34 
35 #include <netinet/in.h>
36 #include <netinet/ip.h>
37 #include <netinet/tcp.h>
38 
/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K (kilobytes), divided into
 * 2K mbuf clusters: hence 64 / 2 segments, plus one extra segment for
 * the partial cluster at the end.  (The chain could be longer than
 * this initially, but can be shortened with m_collapse().)
 */
#define	SFXGE_TX_MAPPING_MAX_SEG (64 / 2 + 1)
45 
/* Maximum number of DMA segments needed to map an output packet.  It
 * could overlap all mbufs in the chain and also require an extra
 * segment for a TSO header.
 */
#define	SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)
51 
52 /*
53  * Buffer mapping flags.
54  *
55  * Buffers and DMA mappings must be freed when the last descriptor
56  * referring to them is completed.  Set the TX_BUF_UNMAP and
57  * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
58  * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
59  * a heap buffer.
60  */
61 enum sfxge_tx_buf_flags {
62 	TX_BUF_UNMAP = 1,
63 	TX_BUF_MBUF = 2,
64 };
65 
66 /*
67  * Buffer mapping information for descriptors in flight.
68  */
69 struct sfxge_tx_mapping {
70 	union {
71 		struct mbuf	*mbuf;
72 		caddr_t		heap_buf;
73 	}			u;
74 	bus_dmamap_t		map;
75 	enum sfxge_tx_buf_flags	flags;
76 };
77 
/* Default limits on the number of packets held on the deferred packet
 * list's get and put sides (see struct sfxge_tx_dpl below).
 */
#define	SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT	64
#define	SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT	64
80 
81 /*
82  * Deferred packet list.
83  */
84 struct sfxge_tx_dpl {
85 	uintptr_t		std_put;    /* Head of put list. */
86 	struct mbuf		*std_get;   /* Head of get list. */
87 	struct mbuf		**std_getp; /* Tail of get list. */
88 	unsigned int		std_count;  /* Count of packets. */
89 };
90 
91 
/* Size of a TX bounce/heap buffer (1 KiB) and of the area reserved for
 * a packet header (256 bytes).  Packets shorter than the copy threshold
 * (512 bytes) are presumably copied rather than DMA-mapped — confirm
 * against the users of these constants in sfxge_tx.c.
 */
#define	SFXGE_TX_BUFFER_SIZE	0x400
#define	SFXGE_TX_HEADER_SIZE	0x100
#define	SFXGE_TX_COPY_THRESHOLD	0x200
95 
/* Lifecycle state of a transmit queue. */
enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,	/* No resources allocated. */
	SFXGE_TXQ_INITIALIZED,		/* Resources allocated, not started. */
	SFXGE_TXQ_STARTED		/* Queue is active. */
};
101 
/* Checksum-offload class of a transmit queue. */
enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,	/* No checksum offload. */
	SFXGE_TXQ_IP_CKSUM,		/* IPv4 header checksum offload. */
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,	/* IPv4 + TCP/UDP checksum offload. */
	SFXGE_TXQ_NTYPES		/* Number of queue types (sentinel). */
};
108 
/* Free-descriptor level (a quarter of the queue limit) at which a
 * blocked queue is unblocked.
 */
#define	SFXGE_TXQ_UNBLOCK_LEVEL		(EFX_TXQ_LIMIT(SFXGE_NDESCS) / 4)

/* Number of packets processed per batch — presumably before pushing
 * doorbells / descriptors to hardware; confirm in sfxge_tx.c.
 */
#define	SFXGE_TX_BATCH	64
112 
#ifdef SFXGE_HAVE_MQ
/* Multiqueue: each TX queue carries its own lock, and the number of
 * queues scales with the number of interrupts allocated.
 */
#define	SFXGE_TXQ_LOCK(txq)		(&(txq)->lock)
#define	SFXGE_TX_SCALE(sc)		((sc)->intr.n_alloc)
#else
/* Single queue: all TX activity is serialized by one softc-level lock. */
#define	SFXGE_TXQ_LOCK(txq)		(&(txq)->sc->tx_lock)
#define	SFXGE_TX_SCALE(sc)		1
#endif
120 
/*
 * Per-queue transmit state.
 *
 * Fields are grouped by access pattern: rarely-written configuration
 * first, then hot fields for the initiation path, then hot fields for
 * the completion path.  __aligned(CACHE_LINE_SIZE) separators keep the
 * two hot groups on distinct cache lines to avoid false sharing.
 */
struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;		/* Owning adapter softc. */
	enum sfxge_txq_state		init_state;	/* Lifecycle state. */
	enum sfxge_flush_state		flush_state;	/* Queue flush progress. */
	enum sfxge_txq_type		type;		/* Checksum-offload class. */
	unsigned int			txq_index;	/* Index of this TX queue. */
	unsigned int			evq_index;	/* Associated event queue index. */
	efsys_mem_t			mem;		/* Queue memory (presumably the descriptor ring — confirm). */
	unsigned int			buf_base_id;

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;	/* DMA tag for packet mappings. */
	efx_buffer_t			*pend_desc;	/* Pending descriptors, not yet pushed. */
	efx_txq_t			*common;	/* Common-code (efx) queue handle. */
	struct sfxge_txq		*next;		/* Next queue in a driver-private list. */

	efsys_mem_t			*tsoh_buffer;	/* Buffers for TSO headers. */

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
#ifdef SFXGE_HAVE_MQ
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;	/* Count of entries in pend_desc. */
#else
	unsigned int			n_pend_desc __aligned(CACHE_LINE_SIZE);
#endif
	unsigned int			added;		/* Descriptors added — presumably a running total; confirm. */
	unsigned int			reaped;		/* Descriptors reaped/freed — confirm against sfxge_tx.c. */
	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;	/* m_collapse() invocations. */
	unsigned long			drops;
	unsigned long			early_drops;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;	/* Descriptors completed. */
};
171 
/* Enqueue a packet on a transmit queue. */
extern int sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m);

/* Module-level init/teardown and start/stop of all TX queues. */
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);

/* Event-queue callbacks for TX completion and flush-done events. */
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);

/* ifnet entry points; the set depends on multiqueue support. */
#ifdef SFXGE_HAVE_MQ
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
#else
extern void sfxge_if_start(struct ifnet *ifp);
#endif
186 
#endif	/* _SFXGE_TX_H */
188