xref: /titanic_41/usr/src/uts/common/sys/nxge/nxge_rxdma.h (revision 03494a9880d80f834bec10a1e8f0a2f8f7c97bf4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #ifndef	_SYS_NXGE_NXGE_RXDMA_H
27 #define	_SYS_NXGE_NXGE_RXDMA_H
28 
29 #ifdef	__cplusplus
30 extern "C" {
31 #endif
32 
33 #include <sys/nxge/nxge_rxdma_hw.h>
34 #include <npi_rxdma.h>
35 
/* RXDMA clock divider default: 7500 ticks (== 25 usec per the original note). */
#define	RXDMA_CK_DIV_DEFAULT		7500 	/* 25 usec */
/*
 * Hardware RDC designer: 8 cache lines during Atlas bringup.
 */
#define	RXDMA_RED_LESS_BYTES		(8 * 64) /* 8 cache line */
#define	RXDMA_RED_LESS_ENTRIES		(RXDMA_RED_LESS_BYTES/8) /* 8 bytes per entry */
#define	RXDMA_RED_WINDOW_DEFAULT	0
#define	RXDMA_RED_THRES_DEFAULT		0

/* RCR interrupt defaults: packet threshold (PTHRES) and timeout (TO). */
#define	RXDMA_RCR_PTHRES_DEFAULT	0x20
#define	RXDMA_RCR_TO_DEFAULT		0x8
47 
/*
 * hardware workarounds: kick 16 (was 8 before)
 */
#define	NXGE_RXDMA_POST_BATCH		16

/*
 * Address of buffer block <index> within buffer area <a>.
 * NOTE(review): the original expansion ((a & (index * bsize)) had
 * unbalanced parentheses and could not have compiled at any use site,
 * so this macro is effectively unused.  The parentheses are fixed and
 * the arguments are now individually parenthesized; the '&' operation
 * is preserved, but '+' (base plus offset) looks like the intended
 * operator for an address computation -- confirm before first use.
 */
#define	RXBUF_START_ADDR(a, index, bsize)	((a) & ((index) * (bsize)))
/* Distance in bytes from buffer base <a> to address <start>. */
#define	RXBUF_OFFSET_FROM_START(a, start)	((start) - (a))
#define	RXBUF_64B_ALIGNED		64	/* buffer alignment, bytes */

/* Extra bytes reserved per receive buffer (software use -- see */
/* nxge_rxbuf_off_hdr_t below; exact layout unverified from this file). */
#define	NXGE_RXBUF_EXTRA		34
/*
 * Receive buffer thresholds and buffer types
 *
 * The threshold enum below expresses, in eighths of the posted buffers,
 * how many buffers may be consumed before the driver switches from
 * loaning buffers upstream to bcopy'ing packet data.
 */
#define	NXGE_RX_BCOPY_SCALE	8	/* use 1/8 as lowest granularity */
typedef enum  {
	NXGE_RX_COPY_ALL = 0,		/* do bcopy on every packet	 */
	NXGE_RX_COPY_1,			/* bcopy on 1/8 of buffer posted */
	NXGE_RX_COPY_2,			/* bcopy on 2/8 of buffer posted */
	NXGE_RX_COPY_3,			/* bcopy on 3/8 of buffer posted */
	NXGE_RX_COPY_4,			/* bcopy on 4/8 of buffer posted */
	NXGE_RX_COPY_5,			/* bcopy on 5/8 of buffer posted */
	NXGE_RX_COPY_6,			/* bcopy on 6/8 of buffer posted */
	NXGE_RX_COPY_7,			/* bcopy on 7/8 of buffer posted */
	NXGE_RX_COPY_NONE		/* don't do bcopy at all	 */
} nxge_rxbuf_threshold_t;
73 
/*
 * Receive buffer size classes; values alias the RCR packet buffer size
 * codes (RCR_PKTBUFSZ_*) from nxge_rxdma_hw.h.
 */
typedef enum  {
	NXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0,  /* bcopy buffer size 0 (small) */
	NXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1,  /* bcopy buffer size 1 (medium) */
	NXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2	  /* bcopy buffer size 2 (large) */
} nxge_rxbuf_type_t;
79 
/*
 * Snapshot of RDC error-log state captured on an error event.
 * Field types come from the hardware definitions in nxge_rxdma_hw.h.
 */
typedef	struct _rdc_errlog {
	rdmc_par_err_log_t	pre_par;	/* prefetch parity error log */
	rdmc_par_err_log_t	sha_par;	/* shadow parity error log */
	uint8_t			compl_err_type;	/* completion error type code */
} rdc_errlog_t;
85 
/*
 * Receive  Statistics.
 *
 * Per-RDC (receive DMA channel) counters.  The first group uses the
 * conventional Solaris MIB/kstat names (ipackets, ibytes, ...).
 */
typedef struct _nxge_rx_ring_stats_t {
	uint64_t	ipackets;	/* packets received */
	uint64_t	ibytes;		/* bytes received */
	uint32_t	ierrors;	/* input errors */
	uint32_t	multircv;	/* multicast packets received */
	uint32_t	brdcstrcv;	/* broadcast packets received */
	uint32_t	norcvbuf;	/* drops for lack of receive buffer */

	/* Driver-internal event counters. */
	uint32_t	rx_inits;
	uint32_t	rx_jumbo_pkts;
	uint32_t	rx_multi_pkts;
	uint32_t	rx_mtu_pkts;
	uint32_t	rx_no_buf;

	/*
	 * Receive buffer management statistics.
	 */
	uint32_t	rx_new_pages;
	uint32_t	rx_new_mtu_pgs;
	uint32_t	rx_new_nxt_pgs;
	uint32_t	rx_reused_pgs;
	uint32_t	rx_mtu_drops;
	uint32_t	rx_nxt_drops;

	/*
	 * Error event stats.
	 *
	 * NOTE(review): these appear to mirror the RX DMA control/status
	 * and event-mask error bits (dcf_err, rcr_sha_par, rbr_pre_par,
	 * rcrincon, ...) -- confirm against nxge_rxdma_hw.h.
	 */
	uint32_t	rx_rbr_tmout;
	uint32_t	pkt_too_long_err;
	uint32_t	l2_err;
	uint32_t	l4_cksum_err;
	uint32_t	fflp_soft_err;
	uint32_t	zcp_soft_err;
	uint32_t	rcr_unknown_err;
	uint32_t	dcf_err;
	uint32_t 	rbr_tmout;
	uint32_t 	rsp_cnt_err;
	uint32_t 	byte_en_err;
	uint32_t 	byte_en_bus;
	uint32_t 	rsp_dat_err;
	uint32_t 	rcr_ack_err;
	uint32_t 	dc_fifo_err;
	uint32_t 	rcr_sha_par;	/* RCR shadow parity errors */
	uint32_t 	rbr_pre_par;	/* RBR prefetch parity errors */
	uint32_t 	port_drop_pkt;
	uint32_t 	wred_drop;	/* WRED (RED) drops */
	uint32_t 	rbr_pre_empty;
	uint32_t 	rcr_shadow_full;
	uint32_t 	config_err;
	uint32_t 	rcrincon;	/* RCR inconsistency */
	uint32_t 	rcrfull;	/* RCR full */
	uint32_t 	rbr_empty;
	uint32_t 	rbrfull;	/* RBR full */
	uint32_t 	rbrlogpage;
	uint32_t 	cfiglogpage;
	uint32_t 	rcrto;		/* RCR timeout interrupts */
	uint32_t 	rcrthres;	/* RCR threshold interrupts */
	uint32_t 	mex;
	rdc_errlog_t	errlog;		/* last captured error-log state */
} nxge_rx_ring_stats_t, *p_nxge_rx_ring_stats_t;
149 
/*
 * System-level (not per-channel) RDC error counters; handled via
 * nxge_rxdma_handle_sys_errors().
 */
typedef struct _nxge_rdc_sys_stats {
	uint32_t	pre_par;	/* prefetch parity errors */
	uint32_t	sha_par;	/* shadow parity errors */
	uint32_t	id_mismatch;
	uint32_t	ipp_eop_err;	/* IPP end-of-packet errors */
	uint32_t	zcp_eop_err;	/* ZCP end-of-packet errors */
} nxge_rdc_sys_stats_t, *p_nxge_rdc_sys_stats_t;
157 
/*
 * Software reserved buffer offset
 *
 * Small software header kept at a reserved offset in each receive
 * buffer; currently records only the buffer's index.
 */
typedef struct _nxge_rxbuf_off_hdr_t {
	uint32_t		index;
} nxge_rxbuf_off_hdr_t, *p_nxge_rxbuf_off_hdr_t;
164 
165 
/*
 * Software state for one receive buffer block: its DMA memory, the
 * ring it belongs to, reference counting for buffers loaned upstream
 * (freeb is the esballoc free routine), and bcopy bookkeeping.
 */
typedef struct _rx_msg_t {
	nxge_os_dma_common_t	buf_dma;	/* DMA memory for this buffer */
	nxge_os_mutex_t 	lock;
	struct _nxge_t		*nxgep;		/* owning instance (back ptr) */
	struct _rx_rbr_ring_t	*rx_rbr_p;	/* owning RBR ring (back ptr) */
	boolean_t 		spare_in_use;
	boolean_t 		free;
	uint32_t 		ref_cnt;	/* outstanding references */
#ifdef RXBUFF_USE_SEPARATE_UP_CNTR
	uint32_t 		pass_up_cnt;
	boolean_t 		release;
#endif
	nxge_os_frtn_t 		freeb;		/* free routine for loaned mblks */
	size_t 			bytes_arrived;
	size_t 			bytes_expected;
	size_t 			block_size;	/* size of this buffer block */
	uint32_t		block_index;
	uint32_t 		pkt_buf_size;
	uint32_t 		pkt_buf_size_code;
	uint32_t 		max_pkt_bufs;
	uint32_t		cur_usage_cnt;
	uint32_t		max_usage_cnt;
	uchar_t			*buffer;	/* kernel VA of the buffer */
	uint32_t 		pri;
	uint32_t 		shifted_addr;
	boolean_t		use_buf_pool;
	p_mblk_t 		rx_mblk_p;
	boolean_t		rx_use_bcopy;	/* bcopy instead of loaning */
} rx_msg_t, *p_rx_msg_t;
195 
/* Bundle of the DDI and NPI handles used to access RX DMA resources. */
typedef struct _rx_dma_handle_t {
	nxge_os_dma_handle_t	dma_handle;	/* DMA handle	*/
	nxge_os_acc_handle_t	acc_handle;	/* DMA memory handle */
	npi_handle_t		npi_handle;	/* NPI register-access handle */
} rx_dma_handle_t, *p_rx_dma_handle_t;
201 
202 
/* Receive Completion Ring */
typedef struct _rx_rcr_ring_t {
	nxge_os_dma_common_t	rcr_desc;	/* RCR descriptor memory */

	struct _nxge_t		*nxgep;		/* owning instance (back ptr) */

	p_nxge_rx_ring_stats_t	rdc_stats;	/* per-channel statistics */

	int			poll_flag; /* 1 if polling mode */

	rcrcfig_a_t		rcr_cfga;	/* shadow of RCR config A reg */
	rcrcfig_b_t		rcr_cfgb;	/* shadow of RCR config B reg */

	nxge_os_mutex_t 	lock;
	uint16_t		index;
	uint16_t		rdc;		/* RX DMA channel number */
	boolean_t		full_hdr_flag;	 /* 1: 18 bytes header */
	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
	uint32_t 		comp_size;	 /* # of RCR entries */
	uint64_t		rcr_addr;	/* ring DMA address */
	uint_t 			comp_wrap_mask;	/* index wrap mask */
	uint_t 			comp_rd_index;	/* completion read index */
	uint_t 			comp_wt_index;	/* completion write index */

	/* First/last entry pointers; _pp variants are the portion-of- */
	/* physical (unshifted) counterparts -- confirm naming in .c file. */
	p_rcr_entry_t		rcr_desc_first_p;
	p_rcr_entry_t		rcr_desc_first_pp;
	p_rcr_entry_t		rcr_desc_last_p;
	p_rcr_entry_t		rcr_desc_last_pp;

	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
	p_rcr_entry_t		rcr_desc_rd_head_pp;

	uint64_t		rcr_tail_pp;
	uint64_t		rcr_head_pp;
	struct _rx_rbr_ring_t	*rx_rbr_p;	/* paired buffer block ring */
	uint32_t		intr_timeout;	/* RCR timeout setting */
	uint32_t		intr_threshold;	/* RCR packet threshold */
	uint64_t		max_receive_pkts;
	mac_ring_handle_t	rcr_mac_handle;	/* GLDv3 mac ring handle */
	uint64_t		rcr_gen_num;	/* mac ring generation number */
	uint32_t		rcvd_pkt_bytes; /* Received bytes of a packet */
	p_nxge_ldv_t		ldvp;		/* logical device vector */
	p_nxge_ldg_t		ldgp;		/* logical device group */
} rx_rcr_ring_t, *p_rx_rcr_ring_t;
247 
248 
249 
/* Buffer index information */
typedef struct _rxbuf_index_info_t {
	uint32_t buf_index;	/* index of this buffer */
	uint32_t start_index;
	uint32_t buf_size;	/* size of the buffer, bytes */
	uint64_t dvma_addr;	/* DVMA (device) address */
	uint64_t kaddr;		/* kernel virtual address */
} rxbuf_index_info_t, *p_rxbuf_index_info_t;
258 
/*
 * Ring-wide buffer lookup information: search hints, block-size mask,
 * iteration bound, and the per-buffer index table above.
 * (Original comment here duplicated "Buffer index information" from the
 * previous struct.)
 */
typedef struct _rxring_info_t {
	uint32_t hint[3];	/* per-size-class search hints */
	uint32_t block_size_mask;
	uint16_t max_iterations;	/* search iteration bound */
	rxbuf_index_info_t buffer[NXGE_DMA_BLOCK];
} rxring_info_t, *p_rxring_info_t;
267 
268 
/* Lifecycle state of a receive buffer block ring. */
typedef enum {
	RBR_POSTING = 1,	/* We may post rx buffers. */
	RBR_UNMAPPING,		/* We are in the process of unmapping. */
	RBR_UNMAPPED		/* The ring is unmapped. */
} rbr_state_t;
274 
275 
/* Receive Buffer Block Ring */
typedef struct _rx_rbr_ring_t {
	nxge_os_dma_common_t	rbr_desc;	/* RBR descriptor memory */
	p_rx_msg_t 		*rx_msg_ring;	/* per-block software state */
	p_nxge_dma_common_t 	*dma_bufp;	/* buffer DMA areas */
	rbr_cfig_a_t		rbr_cfga;	/* shadow of RBR config A reg */
	rbr_cfig_b_t		rbr_cfgb;	/* shadow of RBR config B reg */
	rbr_kick_t		rbr_kick;	/* shadow of RBR kick reg */
	/* Logical page (address translation) register shadows. */
	log_page_vld_t		page_valid;
	log_page_mask_t		page_mask_1;
	log_page_mask_t		page_mask_2;
	log_page_value_t	page_value_1;
	log_page_value_t	page_value_2;
	log_page_relo_t		page_reloc_1;
	log_page_relo_t		page_reloc_2;
	log_page_hdl_t		page_hdl;

	boolean_t		cfg_set;	/* config has been written */

	nxge_os_mutex_t		lock;
	nxge_os_mutex_t		post_lock;	/* serializes buffer posting */
	uint16_t		index;
	struct _nxge_t		*nxgep;		/* owning instance (back ptr) */
	uint16_t		rdc;		/* RX DMA channel number */
	uint16_t		rdc_grp_id;	/* RDC group this channel is in */
	uint_t 			rbr_max_size;
	uint64_t		rbr_addr;	/* ring DMA address */
	uint_t 			rbr_wrap_mask;	/* index wrap mask */
	uint_t 			rbb_max;	/* max buffer blocks */
	uint_t 			rbb_added;	/* buffer blocks added so far */
	uint_t			block_size;
	uint_t			num_blocks;
	uint_t			tnblocks;	/* total blocks over all areas */
	/* Three packet buffer size classes (0/1/2 == small/medium/large), */
	/* each with driver value, byte count, and NPI encoding. */
	uint_t			pkt_buf_size0;
	uint_t			pkt_buf_size0_bytes;
	uint_t			npi_pkt_buf_size0;
	uint_t			pkt_buf_size1;
	uint_t			pkt_buf_size1_bytes;
	uint_t			npi_pkt_buf_size1;
	uint_t			pkt_buf_size2;
	uint_t			pkt_buf_size2_bytes;
	uint_t			npi_pkt_buf_size2;

	uint32_t		*rbr_desc_vp;	/* ring kernel VA */

	p_rx_rcr_ring_t		rx_rcr_p;	/* paired completion ring */

	uint_t 			rbr_wr_index;	/* software write index */
	uint_t 			rbr_rd_index;	/* software read index */

	rxring_info_t  *ring_info;	/* buffer lookup table */
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/* sun4v hypervisor logical-page workaround: I/O addresses and */
	/* sizes of the buffer and control areas registered with the HV. */
	uint64_t		hv_rx_buf_base_ioaddr_pp;
	uint64_t		hv_rx_buf_ioaddr_size;
	uint64_t		hv_rx_cntl_base_ioaddr_pp;
	uint64_t		hv_rx_cntl_ioaddr_size;
	boolean_t		hv_set;
#endif
	uint_t 			rbr_consumed;	/* buffers consumed by HW */
	uint_t 			rbr_threshold_hi; /* bcopy high watermark */
	uint_t 			rbr_threshold_lo; /* bcopy low watermark */
	nxge_rxbuf_type_t	rbr_bufsize_type;
	boolean_t		rbr_use_bcopy;

	/*
	 * <rbr_ref_cnt> is a count of those receive buffers which
	 * have been loaned to the kernel.  We will not free this
	 * ring until the reference count reaches zero (0).
	 */
	uint32_t		rbr_ref_cnt;
	rbr_state_t		rbr_state; /* POSTING, etc */
	/*
	 * Receive buffer allocation types:
	 *   ddi_dma_mem_alloc(), contig_mem_alloc(), kmem_alloc()
	 */
	buf_alloc_type_t	rbr_alloc_type;
} rx_rbr_ring_t, *p_rx_rbr_ring_t;
353 
/* Receive Mailbox */
typedef struct _rx_mbox_t {
	nxge_os_dma_common_t	rx_mbox;	/* mailbox DMA memory */
	rxdma_cfig1_t		rx_cfg1;	/* shadow of RXDMA config 1 reg */
	rxdma_cfig2_t		rx_cfg2;	/* shadow of RXDMA config 2 reg */
	uint64_t		mbox_addr;	/* mailbox DMA address */
	boolean_t		cfg_set;	/* config has been written */

	nxge_os_mutex_t 	lock;
	uint16_t		index;
	struct _nxge_t		*nxgep;		/* owning instance (back ptr) */
	uint16_t		rdc;		/* RX DMA channel number */
} rx_mbox_t, *p_rx_mbox_t;
367 
368 
/* Collection of all RBR rings for an instance (one per DMA channel). */
typedef struct _rx_rbr_rings_t {
	p_rx_rbr_ring_t 	*rbr_rings;	/* array of ring pointers */
	uint32_t		ndmas;		/* number of DMA channels */
	boolean_t		rxbuf_allocated; /* buffers allocated flag */
} rx_rbr_rings_t, *p_rx_rbr_rings_t;
374 
/* Collection of all RCR rings for an instance (one per DMA channel). */
typedef struct _rx_rcr_rings_t {
	p_rx_rcr_ring_t 	*rcr_rings;	/* array of ring pointers */
	uint32_t		ndmas;		/* number of DMA channels */
	boolean_t		cntl_buf_allocated; /* control mem allocated */
} rx_rcr_rings_t, *p_rx_rcr_rings_t;
380 
/* Collection of all receive mailboxes for an instance. */
typedef struct _rx_mbox_areas_t {
	p_rx_mbox_t 		*rxmbox_areas;	/* array of mailbox pointers */
	uint32_t		ndmas;		/* number of DMA channels */
	boolean_t		mbox_allocated;	/* mailboxes allocated flag */
} rx_mbox_areas_t, *p_rx_mbox_areas_t;
386 
/*
 * Global (per-chip) RXDMA settings, initialized from the function-zero
 * control registers.
 */

typedef struct _rxdma_globals {
	boolean_t		mode32;		/* 32-bit addressing mode */
	uint16_t		rxdma_ck_div_cnt; /* clock divider count */
						/* (cf. RXDMA_CK_DIV_DEFAULT) */
	uint16_t		rxdma_red_ran_init; /* RED random init value */
	uint32_t		rxdma_eing_timeout;
} rxdma_globals_t, *p_rxdma_globals;
399 
400 
/*
 * Receive DMA Prototypes.
 */
/* Channel init/teardown (all channels and single channel). */
nxge_status_t nxge_init_rxdma_channels(p_nxge_t);
void nxge_uninit_rxdma_channels(p_nxge_t);

nxge_status_t nxge_init_rxdma_channel(p_nxge_t, int);
void nxge_uninit_rxdma_channel(p_nxge_t, int);

/* Per-channel control: flush, reset, control/status, enable, masks. */
nxge_status_t nxge_init_rxdma_channel_rcrflush(p_nxge_t, uint8_t);
nxge_status_t nxge_reset_rxdma_channel(p_nxge_t, uint16_t);
nxge_status_t nxge_init_rxdma_channel_cntl_stat(p_nxge_t,
	uint16_t, p_rx_dma_ctl_stat_t);
nxge_status_t nxge_enable_rxdma_channel(p_nxge_t,
	uint16_t, p_rx_rbr_ring_t, p_rx_rcr_ring_t,
	p_rx_mbox_t);
nxge_status_t nxge_init_rxdma_channel_event_mask(p_nxge_t,
		uint16_t, p_rx_dma_ent_msk_t);

/* Hardware start/fixup and diagnostics. */
nxge_status_t nxge_rxdma_hw_mode(p_nxge_t, boolean_t);
void nxge_hw_start_rx(p_nxge_t);
void nxge_fixup_rxdma_rings(p_nxge_t);
nxge_status_t nxge_dump_rxdma_channel(p_nxge_t, uint8_t);

void nxge_rxdma_fix_channel(p_nxge_t, uint16_t);
void nxge_rxdma_fixup_channel(p_nxge_t, uint16_t, int);
int nxge_rxdma_get_ring_index(p_nxge_t, uint16_t);

/* GLDv3 polling-mode entry points. */
mblk_t *nxge_rx_poll(void *, int);
int nxge_enable_poll(void *);
int nxge_disable_poll(void *);

/* Error handling and fault injection. */
void nxge_rxdma_regs_dump_channels(p_nxge_t);
nxge_status_t nxge_rxdma_handle_sys_errors(p_nxge_t);
void nxge_rxdma_inject_err(p_nxge_t, uint32_t, uint8_t);

/* Receive buffer allocation. */
extern nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
extern nxge_status_t nxge_alloc_rxb(p_nxge_t nxgep, int channel);
extern void nxge_free_rxb(p_nxge_t nxgep, int channel);

int nxge_get_rxring_index(p_nxge_t, int, int);
442 
443 #ifdef	__cplusplus
444 }
445 #endif
446 
447 #endif	/* _SYS_NXGE_NXGE_RXDMA_H */
448