xref: /freebsd/sys/dev/sfxge/sfxge_tx.h (revision 9a14aa017b21c292740c00ee098195cd46642730)
1 /*-
2  * Copyright (c) 2010-2011 Solarflare Communications, Inc.
3  * All rights reserved.
4  *
5  * This software was developed in part by Philip Paeps under contract for
6  * Solarflare Communications, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 #ifndef _SFXGE_TX_H
33 #define _SFXGE_TX_H
34 
35 #include <netinet/in.h>
36 #include <netinet/ip.h>
37 #include <netinet/tcp.h>
38 
/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters.  (The chain could be longer than this initially, but can
 * be shortened with m_collapse().)
 *
 * Arithmetic below is in units of KB: 64K / 2K = 32 full clusters,
 * plus one extra segment for the "just over 64K" remainder.
 */
#define	SFXGE_TX_MAPPING_MAX_SEG (64 / 2 + 1)

/* Maximum number of DMA segments needed to map an output packet.  It
 * could overlap all mbufs in the chain and also require an extra
 * segment for a TSO header.
 */
#define SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)
51 
52 /*
53  * Buffer mapping flags.
54  *
55  * Buffers and DMA mappings must be freed when the last descriptor
56  * referring to them is completed.  Set the TX_BUF_UNMAP and
57  * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
58  * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
59  * a heap buffer.
60  */
/*
 * Buffer mapping flags (see comment above): both flags are set on the
 * last descriptor generated for an mbuf chain; TX_BUF_UNMAP alone is
 * set on a descriptor referring to a heap buffer.
 */
enum sfxge_tx_buf_flags {
	TX_BUF_UNMAP = 1,	/* Free the DMA mapping (and buffer) on completion. */
	TX_BUF_MBUF = 2,	/* The mapped buffer is an mbuf chain, not a heap buffer. */
};
65 
66 /*
67  * Buffer mapping information for descriptors in flight.
68  */
/*
 * Buffer mapping information for descriptors in flight.
 * One entry per TX descriptor; the flags select which union member
 * is valid (TX_BUF_MBUF => u.mbuf, otherwise u.heap_buf).
 */
struct sfxge_tx_mapping {
	union {
		struct mbuf	*mbuf;		/* Valid when TX_BUF_MBUF is set. */
		caddr_t		heap_buf;	/* Valid for heap-allocated buffers. */
	}			u;
	bus_dmamap_t		map;		/* DMA map to unload on completion. */
	enum sfxge_tx_buf_flags	flags;		/* See enum sfxge_tx_buf_flags. */
};
77 
/* Maximum number of packets held in the deferred packet "get" list.
 * NOTE(review): presumably the enqueue path drops or defers beyond
 * this count — confirm against sfxge_tx.c.
 */
#define SFXGE_TX_MAX_DEFERRED 64

/*
 * Deferred packet list.
 *
 * Split into a lock-free "put" list (std_put, mbufs linked through an
 * atomically-swapped head encoded as a uintptr_t) and a "get" list
 * (std_get/std_getp) consumed under the queue lock.
 * NOTE(review): the put/get protocol is inferred from the field types
 * and names here — verify against the implementation in sfxge_tx.c.
 */
struct sfxge_tx_dpl {
	uintptr_t		std_put;    /* Head of put list. */
	struct mbuf		*std_get;   /* Head of get list. */
	struct mbuf		**std_getp; /* Tail of get list. */
	unsigned int		std_count;  /* Count of packets. */
};
89 
90 
/* Heap buffer sizes and copy threshold, in bytes.
 * NOTE(review): presumably packets shorter than SFXGE_TX_COPY_THRESHOLD
 * (512) are copied into a heap buffer instead of DMA-mapped, and
 * SFXGE_TX_HEADER_SIZE (256) bounds a copied TSO header — confirm
 * against sfxge_tx.c.
 */
#define	SFXGE_TX_BUFFER_SIZE	0x400
#define	SFXGE_TX_HEADER_SIZE	0x100
#define	SFXGE_TX_COPY_THRESHOLD	0x200
94 
/* Lifecycle state of a TX queue (stored in sfxge_txq.init_state). */
enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,	/* Not yet set up (zero-initialized default). */
	SFXGE_TXQ_INITIALIZED,		/* Resources allocated, not started. */
	SFXGE_TXQ_STARTED		/* Queue is live and accepting packets. */
};
100 
/* TX queue types — per the names, one queue per checksum-offload
 * class (none / IP only / IP+TCP/UDP).  SFXGE_TXQ_NTYPES is the
 * count, not a valid type.
 */
enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,
	SFXGE_TXQ_IP_CKSUM,
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,
	SFXGE_TXQ_NTYPES
};
107 
/* Free-descriptor level at which a blocked queue is unblocked:
 * one quarter of the queue's descriptor limit.
 */
#define	SFXGE_TXQ_UNBLOCK_LEVEL		(EFX_TXQ_LIMIT(SFXGE_NDESCS) / 4)

/* Number of packets processed per doorbell/batch on the transmit path. */
#define	SFXGE_TX_BATCH	64

/* With multiqueue (SFXGE_HAVE_MQ) each queue has its own lock and the
 * number of queues scales with allocated interrupts; otherwise all
 * queues share the softc's single tx_lock and scale is fixed at 1.
 */
#ifdef SFXGE_HAVE_MQ
#define SFXGE_TXQ_LOCK(txq)		(&(txq)->lock)
#define SFXGE_TX_SCALE(sc)		((sc)->intr.n_alloc)
#else
#define SFXGE_TXQ_LOCK(txq)		(&(txq)->sc->tx_lock)
#define SFXGE_TX_SCALE(sc)		1
#endif
119 
/*
 * Per-queue transmit state.  Fields are grouped by write frequency and
 * by which path (initiation vs. completion) touches them, with
 * __aligned(CACHE_LINE_SIZE) separating the groups to avoid false
 * sharing between the two paths.
 */
struct sfxge_txq {
	/* The following fields should be written very rarely */
	struct sfxge_softc		*sc;		/* Owning softc. */
	enum sfxge_txq_state		init_state;	/* Lifecycle state. */
	enum sfxge_flush_state		flush_state;	/* Flush progress (see sfxge_tx_qflush_done). */
	enum sfxge_txq_type		type;		/* Checksum-offload class of this queue. */
	unsigned int			txq_index;	/* Index of this TX queue. */
	unsigned int			evq_index;	/* Index of the associated event queue. */
	efsys_mem_t			mem;		/* Descriptor ring memory. */
	unsigned int			buf_base_id;	/* NOTE(review): presumably base id in the
							 * NIC buffer table — confirm. */

	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
	bus_dma_tag_t			packet_dma_tag;	/* Tag for mapping packet mbufs. */
	efx_buffer_t			*pend_desc;	/* Pending descriptors (n_pend_desc of them). */
	efx_txq_t			*common;	/* Common-code (efx) queue handle. */
	struct sfxge_txq		*next;		/* Next queue in a driver-internal list. */

	efsys_mem_t			*tsoh_buffer;	/* Buffers for copied TSO headers. */

	/* This field changes more often and is read regularly on both
	 * the initiation and completion paths
	 */
	int				blocked __aligned(CACHE_LINE_SIZE);

	/* The following fields change more often, and are used mostly
	 * on the initiation path
	 */
#ifdef SFXGE_HAVE_MQ
	struct mtx			lock __aligned(CACHE_LINE_SIZE);
	struct sfxge_tx_dpl		dpl;	/* Deferred packet list. */
	unsigned int			n_pend_desc;	/* Count of descriptors in pend_desc. */
#else
	unsigned int			n_pend_desc __aligned(CACHE_LINE_SIZE);
#endif
	unsigned int			added;		/* Descriptors added to the ring. */
	unsigned int			reaped;		/* Descriptors whose mappings were freed. */
	/* Statistics */
	unsigned long			tso_bursts;
	unsigned long			tso_packets;
	unsigned long			tso_long_headers;
	unsigned long			collapses;	/* m_collapse() calls (see segment limits above). */
	unsigned long			drops;

	/* The following fields change more often, and are used mostly
	 * on the completion path
	 */
	unsigned int			pending __aligned(CACHE_LINE_SIZE);
	unsigned int			completed;	/* Descriptors completed by the NIC. */
};
169 
/* Enqueue a packet on a specific TX queue; returns 0 or an errno. */
extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *);

/* Driver-level TX setup/teardown and start/stop. */
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
/* Completion-path hooks invoked from event processing. */
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
/* ifnet entry points: if_transmit/if_qflush with multiqueue,
 * legacy if_start otherwise.
 */
#ifdef SFXGE_HAVE_MQ
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
#else
extern void sfxge_if_start(struct ifnet *ifp);
#endif
184 
185 #endif
186