xref: /titanic_50/usr/src/uts/common/io/hxge/hxge_txdma.h (revision b5d3ab78446c645a1150b57b7a58b535229ee742)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #ifndef	_SYS_HXGE_HXGE_TXDMA_H
27 #define	_SYS_HXGE_HXGE_TXDMA_H
28 
29 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30 
31 #ifdef	__cplusplus
32 extern "C" {
33 #endif
34 
35 #include <hxge_txdma_hw.h>
36 #include <hpi_txdma.h>
37 
/*
 * Default number of pending (unreclaimed) TX descriptors allowed to
 * accumulate before the send path forces a reclaim of completed
 * descriptors.
 */
#define	TXDMA_RECLAIM_PENDING_DEFAULT		64
/*
 * Divisor used to derive the "nearly full" ring threshold.
 * NOTE(review): exact semantics are implemented in hxge_txdma.c, not
 * visible here -- confirm before relying on this description.
 */
#define	TX_FULL_MARK				3
40 
/*
 * Transmit load balancing definitions: policy selecting which TX DMA
 * channel an outgoing packet is assigned to.
 * NOTE(review): the hashing itself is implemented elsewhere in the
 * driver -- confirm the exact inputs against that code.
 */
#define	HXGE_TX_LB_TCPUDP	0	/* default policy */
#define	HXGE_TX_LB_HASH		1	/* from the hint data */
#define	HXGE_TX_LB_DEST_MAC	2	/* Dest. MAC */
47 
48 /*
49  * Descriptor ring empty:
50  *		(1) head index is equal to tail index.
51  *		(2) wrapped around bits are the same.
52  * Descriptor ring full:
53  *		(1) head index is equal to tail index.
54  *		(2) wrapped around bits are different.
55  *
56  */
/*
 * Descriptor ring is empty when head equals tail AND both wrap bits
 * match (head has caught up with tail within the same pass around the
 * ring).  Evaluates to B_TRUE/B_FALSE.
 *
 * Arguments are fully parenthesized so compound expressions (e.g.
 * bit-masked values) expand with the intended precedence.
 */
#define	TXDMA_RING_EMPTY(head, head_wrap, tail, tail_wrap)	\
	(((head) == (tail) && (head_wrap) == (tail_wrap)) ? B_TRUE : B_FALSE)
59 
/*
 * Descriptor ring is full when head equals tail but the wrap bits
 * differ (tail has lapped head by exactly one full pass around the
 * ring).  Evaluates to B_TRUE/B_FALSE.
 *
 * Arguments are fully parenthesized so compound expressions (e.g.
 * bit-masked values) expand with the intended precedence.
 */
#define	TXDMA_RING_FULL(head, head_wrap, tail, tail_wrap)	\
	(((head) == (tail) && (head_wrap) != (tail_wrap)) ? B_TRUE : B_FALSE)
62 
/*
 * Advance a descriptor index by `entries' positions, wrapping with
 * `wrap_mask'.  NOTE(review): the mask presumably spans both the index
 * and its wrap bit (callers pass tx_wrap_mask) -- confirm in the users.
 *
 * Arguments are fully parenthesized: the original expansion
 * (index + entries) & wrap_mask broke for compound `index'/`entries'
 * arguments (e.g. a bitwise-OR argument binds looser than `+').
 */
#define	TXDMA_DESC_NEXT_INDEX(index, entries, wrap_mask) \
			(((index) + (entries)) & (wrap_mask))
65 
/*
 * Per-descriptor transmit buffer bookkeeping.  Tracks the mblk being
 * sent and the DMA resources that must be released when the
 * corresponding descriptor is reclaimed.
 * NOTE(review): field roles below are inferred from names/types;
 * confirm against the send/reclaim code in hxge_txdma.c.
 */
typedef struct _tx_msg_t {
	hxge_os_block_mv_t	flags;		/* DMA, BCOPY, DVMA (?) */
	hxge_os_dma_common_t	buf_dma;	/* premapped buffer blocks */
	hxge_os_dma_handle_t	buf_dma_handle;	/* premapped buffer handle */
	hxge_os_dma_handle_t	dma_handle;	/* DMA handle for normal send */
	hxge_os_dma_handle_t	dvma_handle;	/* Fast DVMA  handle */

	p_mblk_t		tx_message;	/* mblk chain being sent */
	uint32_t		tx_msg_size;	/* message size (bytes) */
	size_t			bytes_used;	/* bytes consumed in buffer */
	int			head;
	int			tail;
} tx_msg_t, *p_tx_msg_t;
79 
/*
 * TX statistics, kept per transmit DMA channel (ring).
 * NOTE(review): individual counter meanings are inferred from their
 * names; confirm against the code that updates them before relying on
 * these groupings.
 */
typedef struct _hxge_tx_ring_stats_t {
	/* MIB-style traffic counters. */
	uint64_t		opackets;
	uint64_t		obytes;
	uint64_t		obytes_with_pad;
	uint64_t		oerrors;

	uint32_t		tx_inits;
	uint32_t		tx_no_buf;

	/* Hardware-reported per-channel error events. */
	uint32_t		peu_resp_err;
	uint32_t		pkt_size_hdr_err;
	uint32_t		runt_pkt_drop_err;
	uint32_t		pkt_size_err;
	uint32_t		tx_rng_oflow;
	uint32_t		pref_par_err;
	uint32_t		tdr_pref_cpl_to;
	uint32_t		pkt_cpl_to;
	uint32_t		invalid_sop;
	uint32_t		unexpected_sop;

	uint64_t		count_hdr_size_err;
	uint64_t		count_runt;
	uint64_t		count_abort;

	/* Software (driver send-path) counters. */
	uint32_t		tx_starts;
	uint32_t		tx_no_desc;
	uint32_t		tx_dma_bind_fail;
	uint32_t		tx_hdr_pkts;
	uint32_t		tx_ddi_pkts;
	uint32_t		tx_jumbo_pkts;
	uint32_t		tx_max_pend;
	uint32_t		tx_marks;
	tdc_pref_par_log_t	errlog;		/* saved parity error log */
} hxge_tx_ring_stats_t, *p_hxge_tx_ring_stats_t;
117 
/*
 * TDC system-level (device-wide, not per-channel) error statistics for
 * the transmit reorder table/buffer.
 * NOTE(review): "sec"/"ded" presumably mean single-bit (corrected) and
 * double-bit (detected) ECC errors -- confirm against the hardware spec.
 */
typedef struct _hxge_tdc_sys_stats {
	uint32_t	reord_tbl_par_err;	/* reorder table parity error */
	uint32_t	reord_buf_ded_err;	/* reorder buffer double-bit err */
	uint32_t	reord_buf_sec_err;	/* reorder buffer single-bit err */
} hxge_tdc_sys_stats_t, *p_hxge_tdc_sys_stats_t;
123 
/*
 * Software state for one transmit descriptor ring (one TX DMA channel):
 * descriptor DMA area, the shadow tx_msg_t array, software copies of
 * channel registers, ring indices with their wrap bits (see the
 * TXDMA_RING_EMPTY/FULL macros), and reclaim/statistics bookkeeping.
 * NOTE(review): per-field comments are inferred from names/types;
 * confirm against hxge_txdma.c before relying on them.
 */
typedef struct _tx_ring_t {
	hxge_os_dma_common_t	tdc_desc;	/* descriptor ring DMA memory */
	struct _hxge_t		*hxgep;		/* back pointer to instance */
	p_tx_msg_t		tx_msg_ring;	/* per-descriptor bookkeeping */
	uint32_t		tnblocks;
	tdc_tdr_cfg_t		tx_ring_cfig;	/* shadow: ring config reg */
	tdc_tdr_kick_t		tx_ring_kick;	/* shadow: kick (tail) reg */
	tdc_tdr_cfg_t		tx_cs;		/* shadow: control/status */
	tdc_int_mask_t		tx_evmask;	/* shadow: interrupt mask */
	tdc_mbh_t		tx_mbox_mbh;	/* shadow: mailbox addr high */
	tdc_mbl_t		tx_mbox_mbl;	/* shadow: mailbox addr low */

	tdc_page_handle_t	page_hdl;	/* logical page handle */

	hxge_os_mutex_t		lock;		/* protects this ring's state */
	uint16_t		index;
	uint16_t		tdc;		/* TX DMA channel number */
	struct hxge_tdc_cfg	*tdc_p;		/* channel configuration */
	uint_t			tx_ring_size;	/* number of descriptors */
	uint32_t		num_chunks;

	uint_t			tx_wrap_mask;	/* for TXDMA_DESC_NEXT_INDEX */
	uint_t			rd_index;	/* reclaim (consumer) index */
	uint_t			wr_index;	/* fill (producer) index */
	boolean_t		wr_index_wrap;	/* wrap bit for wr_index */
	uint_t			head_index;	/* hardware head index */
	boolean_t		head_wrap;	/* wrap bit for head_index */
	tdc_tdr_head_t		ring_head;	/* shadow: head register */
	tdc_tdr_kick_t		ring_kick_tail;	/* shadow: tail kick register */
	txdma_mailbox_t		tx_mbox;	/* mailbox image */

	uint_t			descs_pending;	/* descriptors not reclaimed */
	boolean_t		queueing;	/* B_TRUE while flow-blocked */

	p_mblk_t		head;		/* queued mblk chain: first */
	p_mblk_t		tail;		/* queued mblk chain: last */

	p_hxge_tx_ring_stats_t	tdc_stats;	/* per-ring statistics */

	/* Fast DVMA handle ring bookkeeping. */
	uint_t			dvma_wr_index;
	uint_t			dvma_rd_index;
	uint_t			dvma_pending;
	uint_t			dvma_available;
	uint_t			dvma_wrap_mask;

	hxge_os_dma_handle_t	*dvma_ring;

	mac_resource_handle_t	tx_mac_resource_handle;
} tx_ring_t, *p_tx_ring_t;
173 
174 
/*
 * Transmit Mailbox: per-channel mailbox state -- the DMA memory area
 * plus software copies of the mailbox address registers (low/high).
 */
typedef struct _tx_mbox_t {
	hxge_os_mutex_t		lock;		/* protects mailbox state */
	uint16_t		index;
	struct _hxge_t		*hxgep;		/* back pointer to instance */
	uint16_t		tdc;		/* TX DMA channel number */
	hxge_os_dma_common_t	tx_mbox;	/* mailbox DMA memory */
	tdc_mbl_t		tx_mbox_l;	/* shadow: mailbox addr low */
	tdc_mbh_t		tx_mbox_h;	/* shadow: mailbox addr high */
} tx_mbox_t, *p_tx_mbox_t;
185 
/*
 * Collection of all transmit rings for a device instance.
 */
typedef struct _tx_rings_t {
	p_tx_ring_t		*rings;		/* array of ndmas ring ptrs */
	boolean_t		txdesc_allocated;	/* descriptor memory done */
	uint32_t		ndmas;		/* number of TX DMA channels */
	uint32_t		dma_to_reenable;
	hxge_os_dma_common_t	tdc_dma;
	hxge_os_dma_common_t	tdc_mbox;
} tx_rings_t, *p_tx_rings_t;
194 
/*
 * Collection of all transmit mailbox areas for a device instance.
 */
typedef struct _tx_mbox_areas_t {
	p_tx_mbox_t		*txmbox_areas_p;	/* per-channel mailboxes */
	boolean_t		txmbox_allocated;	/* allocation completed */
} tx_mbox_areas_t, *p_tx_mbox_areas_t;
199 
200 /*
201  * Transmit prototypes.
202  */
203 hxge_status_t hxge_init_txdma_channels(p_hxge_t hxgep);
204 void hxge_uninit_txdma_channels(p_hxge_t hxgep);
205 void hxge_setup_dma_common(p_hxge_dma_common_t, p_hxge_dma_common_t,
206 	uint32_t, uint32_t);
207 hxge_status_t hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel,
208 	uint64_t reg_data);
209 hxge_status_t hxge_init_txdma_channel_event_mask(p_hxge_t hxgep,
210 	uint16_t channel, tdc_int_mask_t *mask_p);
211 hxge_status_t hxge_enable_txdma_channel(p_hxge_t hxgep, uint16_t channel,
212 	p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p);
213 
214 p_mblk_t hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads);
215 	int hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p);
216 boolean_t hxge_txdma_reclaim(p_hxge_t hxgep,
217 	p_tx_ring_t tx_ring_p, int nmblks);
218 
219 void hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum,
220 	int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp);
221 
222 hxge_status_t hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable);
223 void hxge_txdma_stop(p_hxge_t hxgep);
224 void hxge_fixup_txdma_rings(p_hxge_t hxgep);
225 void hxge_txdma_hw_kick(p_hxge_t hxgep);
226 void hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel);
227 void hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
228 	uint16_t channel);
229 void hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
230 	uint16_t channel);
231 
232 void hxge_check_tx_hang(p_hxge_t hxgep);
233 void hxge_fixup_hung_txdma_rings(p_hxge_t hxgep);
234 void hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel);
235 void hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
236 	uint16_t channel);
237 
238 void hxge_reclaim_rings(p_hxge_t hxgep);
239 int hxge_txdma_channel_hung(p_hxge_t hxgep,
240 	p_tx_ring_t tx_ring_p, uint16_t channel);
241 int hxge_txdma_hung(p_hxge_t hxgep);
242 int hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel);
243 hxge_status_t hxge_txdma_handle_sys_errors(p_hxge_t hxgep);
244 
245 #ifdef	__cplusplus
246 }
247 #endif
248 
249 #endif /* _SYS_HXGE_HXGE_TXDMA_H */
250