xref: /titanic_52/usr/src/uts/common/sys/nxge/nxge_txdma.h (revision b9bd317cda1afb3a01f4812de73e8cec888cbbd7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #ifndef	_SYS_NXGE_NXGE_TXDMA_H
27 #define	_SYS_NXGE_NXGE_TXDMA_H
28 
29 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30 
31 #ifdef	__cplusplus
32 extern "C" {
33 #endif
34 
35 #include <sys/nxge/nxge_txdma_hw.h>
36 #include <npi_txdma.h>
37 #include <sys/nxge/nxge_serialize.h>
38 
/*
 * Bitmap of TX DMA channels bound to this port, read from the
 * port's configuration.  The macro argument is parenthesized so
 * that any pointer-valued expression may be passed safely.
 */
#define	TXDMA_PORT_BITMAP(nxgep)		((nxgep)->pt_config.tx_dma_map)
40 
/*
 * Default count of pending TX descriptors that triggers a reclaim
 * pass (NOTE(review): exact use is in the .c file — confirm there).
 */
#define	TXDMA_RECLAIM_PENDING_DEFAULT		64
/* NOTE(review): presumably a divisor marking the ring "nearly full" — verify */
#define	TX_FULL_MARK				3
43 
44 /*
45  * Transmit load balancing definitions.
46  */
47 #define	NXGE_TX_LB_TCPUDP			0	/* default policy */
48 #define	NXGE_TX_LB_HASH				1	/* from the hint data */
49 #define	NXGE_TX_LB_DEST_MAC			2	/* Dest. MAC */
50 
51 /*
52  * Descriptor ring empty:
53  *		(1) head index is equal to tail index.
54  *		(2) wrapped around bits are the same.
55  * Descriptor ring full:
56  *		(1) head index is equal to tail index.
57  *		(2) wrapped around bits are different.
58  *
59  */
/*
 * Ring-state tests per the comment above: equal head/tail indices
 * mean empty when the wrap bits match, full when they differ.
 * All macro arguments are parenthesized so comparison/arithmetic
 * expressions may be passed without precedence surprises.
 */
#define	TXDMA_RING_EMPTY(head, head_wrap, tail, tail_wrap)	\
	(((head) == (tail) && (head_wrap) == (tail_wrap)) ? B_TRUE : B_FALSE)

#define	TXDMA_RING_FULL(head, head_wrap, tail, tail_wrap)	\
	(((head) == (tail) && (head_wrap) != (tail_wrap)) ? B_TRUE : B_FALSE)
65 
/*
 * Advance a descriptor index by `entries` slots, wrapping modulo the
 * (power-of-two) ring size via wrap_mask.  Arguments are parenthesized
 * so expressions such as `i + 1` expand with the intended precedence.
 */
#define	TXDMA_DESC_NEXT_INDEX(index, entries, wrap_mask) \
			(((index) + (entries)) & (wrap_mask))
68 
/* Default deficit round-robin weight for a TX DMA channel */
#define	TXDMA_DRR_WEIGHT_DEFAULT	0x001f
70 
/*
 * Transmit dispatch mode: route packets through the TX serializer
 * or call the start routine directly.  (NOTE(review): exact
 * semantics live in the .c file — confirm there.)
 *
 * The trailing comma after the final enumerator was removed: it is
 * invalid C89 and draws lint/pedantic warnings.
 */
typedef enum {
	NXGE_USE_SERIAL	= 0,
	NXGE_USE_START
} nxge_tx_mode_t;
75 
/*
 * Per-descriptor transmit message state: one outbound mblk plus the
 * DMA resources used to map its buffers.  One of these exists per
 * entry of a channel's tx_msg_ring.
 */
typedef struct _tx_msg_t {
	nxge_os_block_mv_t 	flags;		/* DMA, BCOPY, DVMA (?) */
	nxge_os_dma_common_t	buf_dma;	/* premapped buffer blocks */
	nxge_os_dma_handle_t	buf_dma_handle; /* premapped buffer handle */
	nxge_os_dma_handle_t 	dma_handle;	/* DMA handle for normal send */
	nxge_os_dma_handle_t 	dvma_handle;	/* Fast DVMA  handle */

	p_mblk_t 		tx_message;	/* mblk being transmitted */
	uint32_t 		tx_msg_size;	/* message size in bytes */
	size_t			bytes_used;	/* bytes consumed in the buffer */
	int			head;		/* NOTE(review): head/tail indices; */
	int			tail;		/* exact use not visible here — confirm */
} tx_msg_t, *p_tx_msg_t;
89 
/*
 * TX  Statistics.
 *
 * Per-channel transmit counters (presumably exported via kstat —
 * confirm against the .c file).  opackets/obytes/oerrors follow the
 * standard MIB-II naming; the remaining fields count driver events
 * and hardware error conditions, with the raw hardware error log
 * kept in `errlog`.
 */
typedef struct _nxge_tx_ring_stats_t {
	uint64_t	opackets;	/* packets transmitted */
	uint64_t	obytes;		/* bytes transmitted */
	uint64_t	oerrors;	/* total output errors */

	uint32_t	tx_inits;	/* channel (re)initializations */
	uint32_t	tx_no_buf;	/* no transmit buffer available */

	/*
	 * Hardware-reported error conditions.  NOTE(review): names
	 * mirror TDC error-status bits; see the hardware header for
	 * the authoritative meanings.
	 */
	uint32_t		mbox_err;	/* mailbox errors */
	uint32_t		pkt_size_err;	/* packet size errors */
	uint32_t 		tx_ring_oflow;	/* ring overflow */
	uint32_t 		pre_buf_par_err; /* prefetch buffer parity err */
	uint32_t 		nack_pref;	/* NACK on prefetch */
	uint32_t 		nack_pkt_rd;	/* NACK on packet read */
	uint32_t 		conf_part_err;	/* configuration partition err */
	uint32_t 		pkt_part_err;	/* packet partition error */

	/* Driver-path events. */
	uint32_t		tx_starts;	/* transmit attempts */
	uint32_t		tx_nocanput;	/* upstream flow-control drops */
	uint32_t		tx_msgdup_fail;	/* msg duplication failures */
	uint32_t		tx_allocb_fail;	/* allocb() failures */
	uint32_t		tx_no_desc;	/* out of TX descriptors */
	uint32_t		tx_dma_bind_fail; /* DMA bind failures */
	uint32_t		tx_uflo;	/* transmit underflow */

	uint32_t		tx_hdr_pkts;	/* sent via header buffer copy */
	uint32_t		tx_ddi_pkts;	/* sent via DDI DMA binding */
	uint32_t		tx_dvma_pkts;	/* sent via fast DVMA */

	uint32_t		tx_max_pend;	/* high-water of pending descs */
	uint32_t		tx_jumbo_pkts;	/* jumbo packets transmitted */

	txdma_ring_errlog_t	errlog;		/* raw hardware error log */
} nxge_tx_ring_stats_t, *p_nxge_tx_ring_stats_t;
126 
/*
 * Software state for one transmit DMA channel (TDC): the descriptor
 * ring memory, shadow copies of the channel's hardware registers,
 * head/tail ring bookkeeping, serializer hooks, DVMA state and a
 * pointer to the channel's statistics.
 */
typedef struct _tx_ring_t {
	nxge_os_dma_common_t	tdc_desc;	/* descriptor ring DMA memory */
	struct _nxge_t		*nxgep;		/* back pointer to instance */
	p_tx_msg_t 		tx_msg_ring;	/* per-descriptor msg state */
	uint32_t		tnblocks;
	/* Shadow copies of per-channel hardware registers. */
	tx_rng_cfig_t		tx_ring_cfig;	/* ring configuration */
	tx_ring_hdl_t		tx_ring_hdl;	/* ring head */
	tx_ring_kick_t		tx_ring_kick;	/* ring kick (tail) */
	tx_cs_t			tx_cs;		/* control/status */
	tx_dma_ent_msk_t	tx_evmask;	/* event mask */
	txdma_mbh_t		tx_mbox_mbh;	/* mailbox address, high */
	txdma_mbl_t		tx_mbox_mbl;	/* mailbox address, low */
	log_page_vld_t		page_valid;	/* logical page valid bits */
	log_page_mask_t		page_mask_1;
	log_page_mask_t		page_mask_2;
	log_page_value_t	page_value_1;
	log_page_value_t	page_value_2;
	log_page_relo_t		page_reloc_1;
	log_page_relo_t		page_reloc_2;
	log_page_hdl_t		page_hdl;
	txc_dma_max_burst_t	max_burst;	/* TXC max DMA burst */
	boolean_t		cfg_set;	/* hw config written? */
/* Values for tx_ring_offline below. */
#define	NXGE_TX_RING_ONLINE	0x00
#define	NXGE_TX_RING_OFFLINING	0x01
#define	NXGE_TX_RING_OFFLINED	0x02
	uint32_t		tx_ring_offline; /* online/offlining state */
	boolean_t		tx_ring_busy;

	nxge_os_mutex_t		lock;		/* protects ring state */
	uint16_t 		index;
	uint16_t		tdc;		/* TX DMA channel number */
	struct nxge_tdc_cfg	*tdc_p;		/* channel configuration */
	uint_t 			tx_ring_size;	/* number of descriptors */
	uint32_t 		num_chunks;

	/* Software head/tail tracking (see ring empty/full macros). */
	uint_t 			tx_wrap_mask;
	uint_t 			rd_index;	/* reclaim (read) index */
	uint_t 			wr_index;	/* producer (write) index */
	boolean_t		wr_index_wrap;	/* write index wrap bit */
	uint_t 			head_index;	/* hw head index shadow */
	boolean_t		head_wrap;	/* head wrap bit */
	tx_ring_hdl_t		ring_head;
	tx_ring_kick_t		ring_kick_tail;
	txdma_mailbox_t		tx_mbox;

	uint_t 			descs_pending;	/* descs awaiting reclaim */
	boolean_t 		queueing;	/* currently queueing mblks */

	/* Serializer queue of pending mblks. */
	nxge_os_mutex_t		sq_lock;	/* protects head/tail below */
	nxge_serialize_t 	*serial;
	p_mblk_t 		head;
	p_mblk_t 		tail;

	uint16_t		ldg_group_id;	/* logical device group */
	p_nxge_tx_ring_stats_t tdc_stats;	/* channel statistics */

	/* Fast DVMA handle ring. */
	nxge_os_mutex_t 	dvma_lock;
	uint_t 			dvma_wr_index;
	uint_t 			dvma_rd_index;
	uint_t 			dvma_pending;
	uint_t 			dvma_available;
	uint_t 			dvma_wrap_mask;

	nxge_os_dma_handle_t 	*dvma_ring;

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/* Hypervisor-mapped buffer/control I/O addresses (sun4v NIU). */
	uint64_t		hv_tx_buf_base_ioaddr_pp;
	uint64_t		hv_tx_buf_ioaddr_size;
	uint64_t		hv_tx_cntl_base_ioaddr_pp;
	uint64_t		hv_tx_cntl_ioaddr_size;
	boolean_t		hv_set;		/* hv mappings established? */
#endif
} tx_ring_t, *p_tx_ring_t;
200 
201 
/*
 * Transmit Mailbox: per-channel DMA memory block the hardware uses
 * to report channel state, plus shadows of the mailbox address
 * registers.
 */
typedef struct _tx_mbox_t {
	nxge_os_mutex_t 	lock;		/* protects mailbox state */
	uint16_t		index;
	struct _nxge_t		*nxgep;		/* back pointer to instance */
	uint16_t		tdc;		/* TX DMA channel number */
	nxge_os_dma_common_t	tx_mbox;	/* mailbox DMA memory */
	txdma_mbl_t		tx_mbox_l;	/* mailbox address, low */
	txdma_mbh_t		tx_mbox_h;	/* mailbox address, high */
} tx_mbox_t, *p_tx_mbox_t;
212 
/*
 * Collection of all TX descriptor rings for one device instance.
 */
typedef struct _tx_rings_t {
	p_tx_ring_t 		*rings;		/* array of ndmas ring ptrs */
	boolean_t		txdesc_allocated; /* descriptor memory ready? */
	uint32_t		ndmas;		/* number of TX DMA channels */
	nxge_os_dma_common_t	tdc_dma;
	nxge_os_dma_common_t	tdc_mbox;
} tx_rings_t, *p_tx_rings_t;
220 
221 
222 #if defined(_KERNEL) || (defined(COSIM) && !defined(IODIAG))
223 
/*
 * Collection of TX buffer rings (kernel/COSIM builds only; the
 * element type _tx_buf_ring_t is declared elsewhere).
 */
typedef struct _tx_buf_rings_t {
	struct _tx_buf_ring_t 	*txbuf_rings;	/* array of buffer rings */
	boolean_t		txbuf_allocated; /* buffer memory ready? */
} tx_buf_rings_t, *p_tx_buf_rings_t;
228 
229 #endif
230 
/*
 * Collection of per-channel transmit mailboxes for one instance.
 */
typedef struct _tx_mbox_areas_t {
	p_tx_mbox_t 		*txmbox_areas_p; /* array of mailbox ptrs */
	boolean_t		txmbox_allocated; /* mailbox memory ready? */
} tx_mbox_areas_t, *p_tx_mbox_areas_t;
235 
/*
 * Transmit logical-page parameters for one channel.
 */
typedef struct _tx_param_t {
	nxge_logical_page_t tx_logical_pages[NXGE_MAX_LOGICAL_PAGES];
} tx_param_t, *p_tx_param_t;
239 
/*
 * Collection of transmit parameter blocks.
 */
typedef struct _tx_params {
	struct _tx_param_t 	*tx_param_p;	/* array of parameter blocks */
} tx_params_t, *p_tx_params_t;
243 
/*
 * Global register state shared per chip; initialized through the
 * function-zero control registers.
 */
typedef struct _txdma_globals {
	boolean_t		mode32;		/* 32-bit addressing mode? */
} txdma_globals_t, *p_txdma_globals;
252 
253 
254 #if	defined(SOLARIS) && (defined(_KERNEL) || \
255 	(defined(COSIM) && !defined(IODIAG)))
256 
/*
 * Transmit prototypes.
 *
 * All functions take the per-instance soft state (p_nxge_t) as
 * their first argument.  NOTE(review): the bare uint16_t arguments
 * appear to be TX DMA channel numbers (cf. the `tdc` fields above)
 * — confirm against nxge_txdma.c.
 */

/* Instance-wide channel bring-up / teardown. */
nxge_status_t nxge_init_txdma_channels(p_nxge_t);
void nxge_uninit_txdma_channels(p_nxge_t);

/* Single-channel bring-up / teardown. */
nxge_status_t nxge_init_txdma_channel(p_nxge_t, int);
void nxge_uninit_txdma_channel(p_nxge_t, int);

/* Per-channel hardware setup: DMA memory, reset, masks, enable. */
void nxge_setup_dma_common(p_nxge_dma_common_t, p_nxge_dma_common_t,
		uint32_t, uint32_t);
nxge_status_t nxge_reset_txdma_channel(p_nxge_t, uint16_t,
	uint64_t);
nxge_status_t nxge_init_txdma_channel_event_mask(p_nxge_t,
	uint16_t, p_tx_dma_ent_msk_t);
nxge_status_t nxge_init_txdma_channel_cntl_stat(p_nxge_t,
	uint16_t, uint64_t);
nxge_status_t nxge_enable_txdma_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);

/* Packet-path helpers: header reservation, mblk counting, reclaim. */
p_mblk_t nxge_tx_pkt_header_reserve(p_mblk_t, uint8_t *);
int nxge_tx_pkt_nmblocks(p_mblk_t, int *);
boolean_t nxge_txdma_reclaim(p_nxge_t, p_tx_ring_t, int);

/* Build the internal transmit packet header for an outgoing mblk. */
void nxge_fill_tx_hdr(p_mblk_t, boolean_t, boolean_t,
	int, uint8_t, p_tx_pkt_hdr_all_t, t_uscalar_t, t_uscalar_t);

/* Channel start/stop/fixup control. */
nxge_status_t nxge_txdma_hw_mode(p_nxge_t, boolean_t);
void nxge_hw_start_tx(p_nxge_t);
void nxge_txdma_stop(p_nxge_t);
void nxge_txdma_stop_start(p_nxge_t);
void nxge_fixup_txdma_rings(p_nxge_t);
void nxge_txdma_hw_kick(p_nxge_t);
void nxge_txdma_fix_channel(p_nxge_t, uint16_t);
void nxge_txdma_fixup_channel(p_nxge_t, p_tx_ring_t,
	uint16_t);
void nxge_txdma_hw_kick_channel(p_nxge_t, p_tx_ring_t,
	uint16_t);

/* Debug register dumps. */
void nxge_txdma_regs_dump(p_nxge_t, int);
void nxge_txdma_regs_dump_channels(p_nxge_t);

/* Hang detection and recovery. */
void nxge_check_tx_hang(p_nxge_t);
void nxge_fixup_hung_txdma_rings(p_nxge_t);

void nxge_reclaim_rings(p_nxge_t);
int nxge_txdma_channel_hung(p_nxge_t,
	p_tx_ring_t tx_ring_p, uint16_t);
int nxge_txdma_hung(p_nxge_t);

/* Error-injection hooks (testing). */
int nxge_txdma_stop_inj_err(p_nxge_t, int);
void nxge_txdma_inject_err(p_nxge_t, uint32_t, uint8_t);

/* Memory-pool allocation for transmit buffers. */
extern nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
extern nxge_status_t nxge_alloc_txb(p_nxge_t nxgep, int channel);
extern void nxge_free_txb(p_nxge_t nxgep, int channel);
312 
313 #endif
314 
315 #ifdef	__cplusplus
316 }
317 #endif
318 
319 #endif	/* _SYS_NXGE_NXGE_TXDMA_H */
320