xref: /titanic_44/usr/src/uts/common/io/hxge/hxge_rxdma.c (revision ca35ed50b0189df73b8ea3eb509c9d582312df7c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <hxge_impl.h>
27 #include <hxge_rxdma.h>
28 
29 /*
30  * Number of blocks to accumulate before re-enabling DMA
31  * when we get RBR empty.
32  */
33 #define	HXGE_RBR_EMPTY_THRESHOLD	64
34 
35 /*
36  * Globals: tunable parameters (/etc/system or adb)
37  *
38  */
39 extern uint32_t hxge_rbr_size;
40 extern uint32_t hxge_rcr_size;
41 extern uint32_t hxge_rbr_spare_size;
42 extern uint32_t hxge_mblks_pending;
43 
44 /*
45  * Tunable to reduce the amount of time spent in the
46  * ISR doing Rx Processing.
47  */
48 extern uint32_t hxge_max_rx_pkts;
49 
50 /*
51  * Tunables to manage the receive buffer blocks.
52  *
53  * hxge_rx_threshold_hi: copy all buffers.
54  * hxge_rx_buf_size_type: receive buffer block size type.
55  * hxge_rx_threshold_lo: copy only up to tunable block size type.
56  */
57 extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
58 extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
59 extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;
60 
61 /*
62  * Static local functions.
63  */
64 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep);
65 static void hxge_unmap_rxdma(p_hxge_t hxgep);
66 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep);
67 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep);
68 static void hxge_rxdma_hw_stop(p_hxge_t hxgep);
69 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
70     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
71     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
72     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
73     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
74 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
75 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
76 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,
77     uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p,
78     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
79     p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
80 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
81 	p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
82 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep,
83 	uint16_t channel, p_hxge_dma_common_t *dma_buf_p,
84 	p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks);
85 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
86 	p_rx_rbr_ring_t rbr_p);
87 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
88 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
89 	int n_init_kick);
90 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel);
91 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
92 	p_rx_rcr_ring_t	*rcr_p, rdc_stat_t cs);
93 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
94 	p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p,
95 	mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry);
96 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep,
97 	uint16_t channel);
98 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t);
99 static void hxge_freeb(p_rx_msg_t);
100 static void hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex,
101     p_hxge_ldv_t ldvp, rdc_stat_t cs);
102 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index,
103 	p_hxge_ldv_t ldvp, rdc_stat_t cs);
104 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep,
105 	p_rx_rbr_ring_t rx_dmap);
106 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep,
107 	uint16_t channel);
108 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep);
109 static void hxge_rbr_empty_restore(p_hxge_t hxgep,
110 	p_rx_rbr_ring_t rx_rbr_p);
111 
112 hxge_status_t
113 hxge_init_rxdma_channels(p_hxge_t hxgep)
114 {
115 	hxge_status_t		status = HXGE_OK;
116 	block_reset_t		reset_reg;
117 
118 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels"));
119 
120 	/* Reset RDC block from PEU to clear any previous state */
121 	reset_reg.value = 0;
122 	reset_reg.bits.rdc_rst = 1;
123 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
124 	HXGE_DELAY(1000);
125 
126 	status = hxge_map_rxdma(hxgep);
127 	if (status != HXGE_OK) {
128 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
129 		    "<== hxge_init_rxdma: status 0x%x", status));
130 		return (status);
131 	}
132 
133 	status = hxge_rxdma_hw_start_common(hxgep);
134 	if (status != HXGE_OK) {
135 		hxge_unmap_rxdma(hxgep);
136 		return (status);
137 	}
138 	status = hxge_rxdma_hw_start(hxgep);
139 	if (status != HXGE_OK) {
140 		hxge_unmap_rxdma(hxgep);
141 	}
142 
143 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
144 	    "<== hxge_init_rxdma_channels: status 0x%x", status));
145 	return (status);
146 }
147 
148 void
149 hxge_uninit_rxdma_channels(p_hxge_t hxgep)
150 {
151 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels"));
152 
153 	hxge_rxdma_hw_stop(hxgep);
154 	hxge_unmap_rxdma(hxgep);
155 
156 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uninit_rxdma_channels"));
157 }
158 
159 hxge_status_t
160 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel,
161     rdc_stat_t *cs_p)
162 {
163 	hpi_handle_t	handle;
164 	hpi_status_t	rs = HPI_SUCCESS;
165 	hxge_status_t	status = HXGE_OK;
166 
167 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
168 	    "==> hxge_init_rxdma_channel_cntl_stat"));
169 
170 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
171 	rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p);
172 
173 	if (rs != HPI_SUCCESS) {
174 		status = HXGE_ERROR | rs;
175 	}
176 	return (status);
177 }
178 
179 
180 hxge_status_t
181 hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
182     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
183     int n_init_kick)
184 {
185 	hpi_handle_t		handle;
186 	rdc_desc_cfg_t 		rdc_desc;
187 	rdc_rcr_cfg_b_t		*cfgb_p;
188 	hpi_status_t		rs = HPI_SUCCESS;
189 
190 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel"));
191 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
192 
193 	/*
194 	 * Use configuration data composed at init time. Write to hardware the
195 	 * receive ring configurations.
196 	 */
197 	rdc_desc.mbox_enable = 1;
198 	rdc_desc.mbox_addr = mbox_p->mbox_addr;
199 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
200 	    "==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
201 	    mbox_p->mbox_addr, rdc_desc.mbox_addr));
202 
203 	rdc_desc.rbr_len = rbr_p->rbb_max;
204 	rdc_desc.rbr_addr = rbr_p->rbr_addr;
205 
206 	switch (hxgep->rx_bksize_code) {
207 	case RBR_BKSIZE_4K:
208 		rdc_desc.page_size = SIZE_4KB;
209 		break;
210 	case RBR_BKSIZE_8K:
211 		rdc_desc.page_size = SIZE_8KB;
212 		break;
213 	}
214 
215 	rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0;
216 	rdc_desc.valid0 = 1;
217 
218 	rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1;
219 	rdc_desc.valid1 = 1;
220 
221 	rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2;
222 	rdc_desc.valid2 = 1;
223 
224 	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
225 	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
226 
227 	rdc_desc.rcr_len = rcr_p->comp_size;
228 	rdc_desc.rcr_addr = rcr_p->rcr_addr;
229 
230 	cfgb_p = &(rcr_p->rcr_cfgb);
231 	rdc_desc.rcr_threshold = cfgb_p->bits.pthres;
232 	rdc_desc.rcr_timeout = cfgb_p->bits.timeout;
233 	rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout;
234 
235 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
236 	    "rbr_len qlen %d pagesize code %d rcr_len %d",
237 	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
238 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
239 	    "size 0 %d size 1 %d size 2 %d",
240 	    rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1,
241 	    rbr_p->hpi_pkt_buf_size2));
242 
243 	rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
244 	if (rs != HPI_SUCCESS) {
245 		return (HXGE_ERROR | rs);
246 	}
247 
248 	/*
249 	 * Enable the timeout and threshold.
250 	 */
251 	rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
252 	    rdc_desc.rcr_threshold);
253 	if (rs != HPI_SUCCESS) {
254 		return (HXGE_ERROR | rs);
255 	}
256 
257 	rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
258 	    rdc_desc.rcr_timeout);
259 	if (rs != HPI_SUCCESS) {
260 		return (HXGE_ERROR | rs);
261 	}
262 
263 	/* Enable the DMA */
264 	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
265 	if (rs != HPI_SUCCESS) {
266 		return (HXGE_ERROR | rs);
267 	}
268 
269 	/* Kick the DMA engine */
270 	hpi_rxdma_rdc_rbr_kick(handle, channel, n_init_kick);
271 
272 	/* Clear the rbr empty bit */
273 	(void) hpi_rxdma_channel_rbr_empty_clear(handle, channel);
274 
275 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel"));
276 
277 	return (HXGE_OK);
278 }
279 
280 static hxge_status_t
281 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel)
282 {
283 	hpi_handle_t handle;
284 	hpi_status_t rs = HPI_SUCCESS;
285 
286 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel"));
287 
288 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
289 
290 	/* disable the DMA */
291 	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
292 	if (rs != HPI_SUCCESS) {
293 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
294 		    "<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
295 		return (HXGE_ERROR | rs);
296 	}
297 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel"));
298 	return (HXGE_OK);
299 }
300 
301 hxge_status_t
302 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel)
303 {
304 	hpi_handle_t	handle;
305 	hxge_status_t	status = HXGE_OK;
306 
307 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
308 	    "==> hxge_rxdma_channel_rcrflush"));
309 
310 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
311 	hpi_rxdma_rdc_rcr_flush(handle, channel);
312 
313 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
314 	    "<== hxge_rxdma_channel_rcrflush"));
315 	return (status);
316 
317 }
318 
319 #define	MID_INDEX(l, r) ((r + l + 1) >> 1)
320 
321 #define	TO_LEFT -1
322 #define	TO_RIGHT 1
323 #define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
324 #define	BOTH_LEFT (TO_LEFT + TO_LEFT)
325 #define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
326 #define	NO_HINT 0xffffffff
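/*
 * The binary search in hxge_rxbuf_pp_to_vp() classifies the target
 * address against the [dvma_addr, dvma_addr + chunk_size) range of the
 * anchor chunk by summing a base_side and an end_side direction:
 *
 *	within the chunk	TO_RIGHT + TO_LEFT  = IN_MIDDLE
 *	above the chunk		TO_RIGHT + TO_RIGHT = BOTH_RIGHT
 *	below the chunk		TO_LEFT  + TO_LEFT  = BOTH_LEFT
 */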
327 
328 /*ARGSUSED*/
329 hxge_status_t
330 hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
331     uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
332     uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
333 {
334 	int			bufsize;
335 	uint64_t		pktbuf_pp;
336 	uint64_t		dvma_addr;
337 	rxring_info_t		*ring_info;
338 	int			base_side, end_side;
339 	int			r_index, l_index, anchor_index;
340 	int			found, search_done;
341 	uint32_t		offset, chunk_size, block_size, page_size_mask;
342 	uint32_t		chunk_index, block_index, total_index;
343 	int			max_iterations, iteration;
344 	rxbuf_index_info_t	*bufinfo;
345 
346 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp"));
347 
348 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
349 	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
350 	    pkt_buf_addr_pp, pktbufsz_type));
351 
352 #if defined(__i386)
353 	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
354 #else
355 	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
356 #endif
357 
358 	switch (pktbufsz_type) {
359 	case 0:
360 		bufsize = rbr_p->pkt_buf_size0;
361 		break;
362 	case 1:
363 		bufsize = rbr_p->pkt_buf_size1;
364 		break;
365 	case 2:
366 		bufsize = rbr_p->pkt_buf_size2;
367 		break;
368 	case RCR_SINGLE_BLOCK:
369 		bufsize = 0;
370 		anchor_index = 0;
371 		break;
372 	default:
373 		return (HXGE_ERROR);
374 	}
375 
376 	if (rbr_p->num_blocks == 1) {
377 		anchor_index = 0;
378 		ring_info = rbr_p->ring_info;
379 		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
380 
381 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
382 		    "==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
383 		    "buf_pp $%p btype %d anchor_index %d bufinfo $%p",
384 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));
385 
386 		goto found_index;
387 	}
388 
389 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
390 	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
391 	    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
392 
393 	ring_info = rbr_p->ring_info;
394 	found = B_FALSE;
395 	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
396 	iteration = 0;
397 	max_iterations = ring_info->max_iterations;
398 
399 	/*
400 	 * First check if this block has been seen recently. This is indicated
401 	 * by a hint which is initialized when the first buffer of the block is
402 	 * seen. The hint is reset when the last buffer of the block has been
403 	 * processed. As three block sizes are supported, three hints are kept.
404 	 * The idea behind the hints is that once the hardware uses a block
405 	 * for a buffer of that size, it will use that block exclusively for
406 	 * that size until the block is exhausted. It is assumed that only a
407 	 * single block is in use for a given buffer size at any given
408 	 * time.
409 	 */
410 	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
411 		anchor_index = ring_info->hint[pktbufsz_type];
412 		dvma_addr = bufinfo[anchor_index].dvma_addr;
413 		chunk_size = bufinfo[anchor_index].buf_size;
414 		if ((pktbuf_pp >= dvma_addr) &&
415 		    (pktbuf_pp < (dvma_addr + chunk_size))) {
416 			found = B_TRUE;
417 			/*
418 			 * Check if this is the last buffer in the block. If
419 			 * so, reset the hint for this size.
420 			 */
421 
422 			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
423 				ring_info->hint[pktbufsz_type] = NO_HINT;
424 		}
425 	}
426 
427 	if (found == B_FALSE) {
428 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
429 		    "==> hxge_rxbuf_pp_to_vp: (!found)"
430 		    "buf_pp $%p btype %d anchor_index %d",
431 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
432 
433 		/*
434 		 * This is the first buffer of a block of this size, so the
435 		 * whole information array must be searched. The search uses
436 		 * a binary search algorithm and assumes that the array is
437 		 * already sorted in increasing order: info[0]
438 		 * < info[1] < info[2] ... < info[n-1], where n is the size of
439 		 * the information array.
440 		 */
441 		r_index = rbr_p->num_blocks - 1;
442 		l_index = 0;
443 		search_done = B_FALSE;
444 		anchor_index = MID_INDEX(r_index, l_index);
445 		while (search_done == B_FALSE) {
446 			if ((r_index == l_index) ||
447 			    (iteration >= max_iterations))
448 				search_done = B_TRUE;
449 
450 			end_side = TO_RIGHT;	/* to the right */
451 			base_side = TO_LEFT;	/* to the left */
452 			/* read the DVMA address information and sort it */
453 			dvma_addr = bufinfo[anchor_index].dvma_addr;
454 			chunk_size = bufinfo[anchor_index].buf_size;
455 
456 			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
457 			    "==> hxge_rxbuf_pp_to_vp: (searching)"
458 			    "buf_pp $%p btype %d "
459 			    "anchor_index %d chunk_size %d dvmaaddr $%p",
460 			    pkt_buf_addr_pp, pktbufsz_type, anchor_index,
461 			    chunk_size, dvma_addr));
462 
463 			if (pktbuf_pp >= dvma_addr)
464 				base_side = TO_RIGHT;	/* to the right */
465 			if (pktbuf_pp < (dvma_addr + chunk_size))
466 				end_side = TO_LEFT;	/* to the left */
467 
468 			switch (base_side + end_side) {
469 			case IN_MIDDLE:
470 				/* found */
471 				found = B_TRUE;
472 				search_done = B_TRUE;
473 				if ((pktbuf_pp + bufsize) <
474 				    (dvma_addr + chunk_size))
475 					ring_info->hint[pktbufsz_type] =
476 					    bufinfo[anchor_index].buf_index;
477 				break;
478 			case BOTH_RIGHT:
479 				/* not found: go to the right */
480 				l_index = anchor_index + 1;
481 				anchor_index = MID_INDEX(r_index, l_index);
482 				break;
483 
484 			case BOTH_LEFT:
485 				/* not found: go to the left */
486 				r_index = anchor_index - 1;
487 				anchor_index = MID_INDEX(r_index, l_index);
488 				break;
489 			default:	/* should not come here */
490 				return (HXGE_ERROR);
491 			}
492 			iteration++;
493 		}
494 
495 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
496 		    "==> hxge_rxbuf_pp_to_vp: (search done)"
497 		    "buf_pp $%p btype %d anchor_index %d",
498 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
499 	}
500 
501 	if (found == B_FALSE) {
502 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
503 		    "==> hxge_rxbuf_pp_to_vp: (search failed)"
504 		    "buf_pp $%p btype %d anchor_index %d",
505 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
506 		return (HXGE_ERROR);
507 	}
508 
509 found_index:
510 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
511 	    "==> hxge_rxbuf_pp_to_vp: (FOUND1)"
512 	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
513 	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index));
514 
515 	/* index of the first block in this chunk */
516 	chunk_index = bufinfo[anchor_index].start_index;
517 	dvma_addr = bufinfo[anchor_index].dvma_addr;
518 	page_size_mask = ring_info->block_size_mask;
519 
520 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
521 	    "==> hxge_rxbuf_pp_to_vp: (FOUND3, get chunk) "
522 	    "buf_pp $%p btype %d bufsize %d "
523 	    "anchor_index %d chunk_index %d dvma $%p",
524 	    pkt_buf_addr_pp, pktbufsz_type, bufsize,
525 	    anchor_index, chunk_index, dvma_addr));
526 
527 	offset = pktbuf_pp - dvma_addr;	/* offset within the chunk */
528 	block_size = rbr_p->block_size;	/* System block (page) size */
529 
530 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
531 	    "==> hxge_rxbuf_pp_to_vp: (FOUND4, get chunk) "
532 	    "buf_pp $%p btype %d bufsize %d "
533 	    "anchor_index %d chunk_index %d dvma $%p "
534 	    "offset %d block_size %d",
535 	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index,
536 	    chunk_index, dvma_addr, offset, block_size));
537 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index"));
538 
539 	block_index = (offset / block_size);	/* index within chunk */
540 	total_index = chunk_index + block_index;
541 
542 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
543 	    "==> hxge_rxbuf_pp_to_vp: "
544 	    "total_index %d dvma_addr $%p "
545 	    "offset %d block_size %d "
546 	    "block_index %d ",
547 	    total_index, dvma_addr, offset, block_size, block_index));
548 
549 #if defined(__i386)
550 	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
551 	    (uint32_t)offset);
552 #else
553 	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
554 	    offset);
555 #endif
556 
557 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
558 	    "==> hxge_rxbuf_pp_to_vp: "
559 	    "total_index %d dvma_addr $%p "
560 	    "offset %d block_size %d "
561 	    "block_index %d "
562 	    "*pkt_buf_addr_p $%p",
563 	    total_index, dvma_addr, offset, block_size,
564 	    block_index, *pkt_buf_addr_p));
565 
566 	*msg_index = total_index;
567 	*bufoffset = (offset & page_size_mask);
568 
569 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
570 	    "==> hxge_rxbuf_pp_to_vp: get msg index: "
571 	    "msg_index %d bufoffset_index %d",
572 	    *msg_index, *bufoffset));
573 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp"));
574 
575 	return (HXGE_OK);
576 }
577 
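/*
 * A minimal usage sketch (not part of the original driver; compiled out
 * by the hypothetical HXGE_EXAMPLES guard): translating the packet
 * buffer address reported in an RCR entry into a kernel virtual
 * address, a buffer offset, and an rx_msg ring index.
 * hxge_receive_packet() below performs this same sequence.
 */
#ifdef HXGE_EXAMPLES
static void
example_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
    uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp)
{
	uint64_t	*kaddr_p;
	uint32_t	buf_offset, msg_index;

	if (hxge_rxbuf_pp_to_vp(hxgep, rbr_p, pktbufsz_type,
	    pkt_buf_addr_pp, &kaddr_p, &buf_offset, &msg_index) == HXGE_OK) {
		/* kaddr_p now points into the matching DMA buffer chunk */
	}
}
#endif	/* HXGE_EXAMPLES */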
578 
579 /*
580  * Comparison function used by the hxge_ksort()
581  * sort routine below.
582  */
583 static int
584 hxge_sort_compare(const void *p1, const void *p2)
585 {
586 
587 	rxbuf_index_info_t *a, *b;
588 
589 	a = (rxbuf_index_info_t *)p1;
590 	b = (rxbuf_index_info_t *)p2;
591 
592 	if (a->dvma_addr > b->dvma_addr)
593 		return (1);
594 	if (a->dvma_addr < b->dvma_addr)
595 		return (-1);
596 	return (0);
597 }
598 
599 /*
600  * Grabbed this sort implementation from common/syscall/avl.c
601  *
602  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
603  * v = Ptr to array/vector of objs
604  * n = # objs in the array
605  * s = size of each obj (must be a multiple of the word size)
606  * f = ptr to function to compare two objs
607  *	returns (-1 = less than, 0 = equal, 1 = greater than)
608  */
609 void
610 hxge_ksort(caddr_t v, int n, int s, int (*f) ())
611 {
612 	int		g, i, j, ii;
613 	unsigned int	*p1, *p2;
614 	unsigned int	tmp;
615 
616 	/* No work to do */
617 	if (v == NULL || n <= 1)
618 		return;
619 	/* Sanity check on arguments */
620 	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
621 	ASSERT(s > 0);
622 
623 	for (g = n / 2; g > 0; g /= 2) {
624 		for (i = g; i < n; i++) {
625 			for (j = i - g; j >= 0 &&
626 			    (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
627 				p1 = (unsigned *)(v + j * s);
628 				p2 = (unsigned *)(v + (j + g) * s);
629 				for (ii = 0; ii < s / 4; ii++) {
630 					tmp = *p1;
631 					*p1++ = *p2;
632 					*p2++ = tmp;
633 				}
634 			}
635 		}
636 	}
637 }
638 
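/*
 * A minimal usage sketch for hxge_ksort() (not part of the original
 * driver; compiled out by the hypothetical HXGE_EXAMPLES guard).  The
 * element size must be a multiple of a word, and the compare callback
 * must return exactly -1, 0, or 1.
 */
#ifdef HXGE_EXAMPLES
static int
example_int_compare(const void *p1, const void *p2)
{
	int a = *(const int *)p1;
	int b = *(const int *)p2;

	if (a > b)
		return (1);
	if (a < b)
		return (-1);
	return (0);
}

static void
example_ksort(void)
{
	int v[] = { 40, 10, 30, 20 };

	/* After this call, v[] holds { 10, 20, 30, 40 } */
	hxge_ksort((caddr_t)v, 4, sizeof (int), example_int_compare);
}
#endif	/* HXGE_EXAMPLES */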
639 /*
640  * Initialize data structures required for rxdma
641  * buffer dvma->vmem address lookup
642  */
643 /*ARGSUSED*/
644 static hxge_status_t
645 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
646 {
647 	int		index;
648 	rxring_info_t	*ring_info;
649 	int		max_iteration = 0, max_index = 0;
650 
651 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init"));
652 
653 	ring_info = rbrp->ring_info;
654 	ring_info->hint[0] = NO_HINT;
655 	ring_info->hint[1] = NO_HINT;
656 	ring_info->hint[2] = NO_HINT;
657 	max_index = rbrp->num_blocks;
658 
659 	/* read the DVMA address information and sort it */
660 	/* do init of the information array */
661 
662 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
663 	    " hxge_rxbuf_index_info_init Sort ptrs"));
664 
665 	/* sort the array */
666 	hxge_ksort((void *) ring_info->buffer, max_index,
667 	    sizeof (rxbuf_index_info_t), hxge_sort_compare);
668 
669 	for (index = 0; index < max_index; index++) {
670 		HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
671 		    " hxge_rxbuf_index_info_init: sorted chunk %d "
672 		    " ioaddr $%p kaddr $%p size %x",
673 		    index, ring_info->buffer[index].dvma_addr,
674 		    ring_info->buffer[index].kaddr,
675 		    ring_info->buffer[index].buf_size));
676 	}
677 
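	/*
	 * Compute a depth bound for the binary search in
	 * hxge_rxbuf_pp_to_vp(): the loop below finds the smallest power
	 * of two that exceeds max_index and adds one.  For example, with
	 * max_index = 8 the loop exits at max_iteration = 4
	 * (1 << 4 = 16 > 8), so max_iterations becomes 5.
	 */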
678 	max_iteration = 0;
679 	while (max_index >= (1ULL << max_iteration))
680 		max_iteration++;
681 	ring_info->max_iterations = max_iteration + 1;
682 
683 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
684 	    " hxge_rxbuf_index_info_init Find max iter %d",
685 	    ring_info->max_iterations));
686 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init"));
687 
688 	return (HXGE_OK);
689 }
690 
691 /*ARGSUSED*/
692 void
693 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p)
694 {
695 #ifdef	HXGE_DEBUG
696 
697 	uint32_t bptr;
698 	uint64_t pp;
699 
700 	bptr = entry_p->bits.pkt_buf_addr;
701 
702 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
703 	    "\trcr entry $%p "
704 	    "\trcr entry 0x%0llx "
705 	    "\trcr entry 0x%08x "
706 	    "\trcr entry 0x%08x "
707 	    "\tvalue 0x%0llx\n"
708 	    "\tmulti = %d\n"
709 	    "\tpkt_type = 0x%x\n"
710 	    "\terror = 0x%04x\n"
711 	    "\tl2_len = %d\n"
712 	    "\tpktbufsize = %d\n"
713 	    "\tpkt_buf_addr = $%p\n"
714 	    "\tpkt_buf_addr (<< 6) = $%p\n",
715 	    entry_p,
716 	    *(int64_t *)entry_p,
717 	    *(int32_t *)entry_p,
718 	    *(int32_t *)((char *)entry_p + 32),
719 	    entry_p->value,
720 	    entry_p->bits.multi,
721 	    entry_p->bits.pkt_type,
722 	    entry_p->bits.error,
723 	    entry_p->bits.l2_len,
724 	    entry_p->bits.pktbufsz,
725 	    bptr,
726 	    entry_p->bits.pkt_buf_addr_l));
727 
728 	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
729 	    RCR_PKT_BUF_ADDR_SHIFT;
730 
731 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
732 	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
733 #endif
734 }
735 
736 /*ARGSUSED*/
737 void
738 hxge_rxdma_stop(p_hxge_t hxgep)
739 {
740 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop"));
741 
742 	(void) hxge_rx_vmac_disable(hxgep);
743 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
744 
745 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop"));
746 }
747 
748 void
749 hxge_rxdma_stop_reinit(p_hxge_t hxgep)
750 {
751 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit"));
752 
753 	(void) hxge_rxdma_stop(hxgep);
754 	(void) hxge_uninit_rxdma_channels(hxgep);
755 	(void) hxge_init_rxdma_channels(hxgep);
756 
757 	(void) hxge_rx_vmac_enable(hxgep);
758 
759 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit"));
760 }
761 
762 hxge_status_t
763 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
764 {
765 	int			i, ndmas;
766 	uint16_t		channel;
767 	p_rx_rbr_rings_t	rx_rbr_rings;
768 	p_rx_rbr_ring_t		*rbr_rings;
769 	hpi_handle_t		handle;
770 	hpi_status_t		rs = HPI_SUCCESS;
771 	hxge_status_t		status = HXGE_OK;
772 
773 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
774 	    "==> hxge_rxdma_hw_mode: mode %d", enable));
775 
776 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
777 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
778 		    "<== hxge_rxdma_mode: not initialized"));
779 		return (HXGE_ERROR);
780 	}
781 
782 	rx_rbr_rings = hxgep->rx_rbr_rings;
783 	if (rx_rbr_rings == NULL) {
784 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
785 		    "<== hxge_rxdma_mode: NULL ring pointer"));
786 		return (HXGE_ERROR);
787 	}
788 
789 	if (rx_rbr_rings->rbr_rings == NULL) {
790 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
791 		    "<== hxge_rxdma_mode: NULL rbr rings pointer"));
792 		return (HXGE_ERROR);
793 	}
794 
795 	ndmas = rx_rbr_rings->ndmas;
796 	if (!ndmas) {
797 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
798 		    "<== hxge_rxdma_mode: no channel"));
799 		return (HXGE_ERROR);
800 	}
801 
802 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
803 	    "==> hxge_rxdma_mode (ndmas %d)", ndmas));
804 
805 	rbr_rings = rx_rbr_rings->rbr_rings;
806 
807 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
808 
809 	for (i = 0; i < ndmas; i++) {
810 		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
811 			continue;
812 		}
813 		channel = rbr_rings[i]->rdc;
814 		if (enable) {
815 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
816 			    "==> hxge_rxdma_hw_mode: channel %d (enable)",
817 			    channel));
818 			rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
819 		} else {
820 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
821 			    "==> hxge_rxdma_hw_mode: channel %d (disable)",
822 			    channel));
823 			rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
824 		}
825 	}
826 
827 	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
828 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
829 	    "<== hxge_rxdma_hw_mode: status 0x%x", status));
830 
831 	return (status);
832 }
833 
834 int
835 hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel)
836 {
837 	int			i, ndmas;
838 	uint16_t		rdc;
839 	p_rx_rbr_rings_t 	rx_rbr_rings;
840 	p_rx_rbr_ring_t		*rbr_rings;
841 
842 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
843 	    "==> hxge_rxdma_get_ring_index: channel %d", channel));
844 
845 	rx_rbr_rings = hxgep->rx_rbr_rings;
846 	if (rx_rbr_rings == NULL) {
847 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
848 		    "<== hxge_rxdma_get_ring_index: NULL ring pointer"));
849 		return (-1);
850 	}
851 
852 	ndmas = rx_rbr_rings->ndmas;
853 	if (!ndmas) {
854 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
855 		    "<== hxge_rxdma_get_ring_index: no channel"));
856 		return (-1);
857 	}
858 
859 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
860 	    "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas));
861 
862 	rbr_rings = rx_rbr_rings->rbr_rings;
863 	for (i = 0; i < ndmas; i++) {
864 		rdc = rbr_rings[i]->rdc;
865 		if (channel == rdc) {
866 			HXGE_DEBUG_MSG((hxgep, RX_CTL,
867 			    "==> hxge_rxdma_get_ring_index: "
868 			    "channel %d (index %d) "
869 			    "ring $%p", channel, i, rbr_rings[i]));
870 
871 			return (i);
872 		}
873 	}
874 
875 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
876 	    "<== hxge_rxdma_get_ring_index: not found"));
877 
878 	return (-1);
879 }
880 
881 /*
882  * Static functions start here.
883  */
884 static p_rx_msg_t
885 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p)
886 {
887 	p_rx_msg_t		hxge_mp = NULL;
888 	p_hxge_dma_common_t	dmamsg_p;
889 	uchar_t			*buffer;
890 
891 	hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
892 	if (hxge_mp == NULL) {
893 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
894 		    "Allocation of a rx msg failed."));
895 		goto hxge_allocb_exit;
896 	}
897 
898 	hxge_mp->use_buf_pool = B_FALSE;
899 	if (dmabuf_p) {
900 		hxge_mp->use_buf_pool = B_TRUE;
901 
902 		dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma;
903 		*dmamsg_p = *dmabuf_p;
904 		dmamsg_p->nblocks = 1;
905 		dmamsg_p->block_size = size;
906 		dmamsg_p->alength = size;
907 		buffer = (uchar_t *)dmabuf_p->kaddrp;
908 
909 		dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size);
910 		dmabuf_p->ioaddr_pp = (void *)
911 		    ((char *)dmabuf_p->ioaddr_pp + size);
912 
913 		dmabuf_p->alength -= size;
914 		dmabuf_p->offset += size;
915 		dmabuf_p->dma_cookie.dmac_laddress += size;
916 		dmabuf_p->dma_cookie.dmac_size -= size;
917 	} else {
918 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
919 		if (buffer == NULL) {
920 			HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
921 			    "Allocation of a receive page failed."));
922 			goto hxge_allocb_fail1;
923 		}
924 	}
925 
926 	hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb);
927 	if (hxge_mp->rx_mblk_p == NULL) {
928 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed."));
929 		goto hxge_allocb_fail2;
930 	}
931 	hxge_mp->buffer = buffer;
932 	hxge_mp->block_size = size;
933 	hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb;
934 	hxge_mp->freeb.free_arg = (caddr_t)hxge_mp;
935 	hxge_mp->ref_cnt = 1;
936 	hxge_mp->free = B_TRUE;
937 	hxge_mp->rx_use_bcopy = B_FALSE;
938 
939 	atomic_inc_32(&hxge_mblks_pending);
940 
941 	goto hxge_allocb_exit;
942 
943 hxge_allocb_fail2:
944 	if (!hxge_mp->use_buf_pool) {
945 		KMEM_FREE(buffer, size);
946 	}
947 hxge_allocb_fail1:
948 	KMEM_FREE(hxge_mp, sizeof (rx_msg_t));
949 	hxge_mp = NULL;
950 
951 hxge_allocb_exit:
952 	return (hxge_mp);
953 }
954 
955 p_mblk_t
956 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
957 {
958 	p_mblk_t mp;
959 
960 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb"));
961 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p "
962 	    "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size));
963 
964 	mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb);
965 	if (mp == NULL) {
966 		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
967 		goto hxge_dupb_exit;
968 	}
969 
970 	atomic_inc_32(&hxge_mp->ref_cnt);
971 
972 hxge_dupb_exit:
973 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
974 	return (mp);
975 }
976 
977 p_mblk_t
978 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
979 {
980 	p_mblk_t	mp;
981 	uchar_t		*dp;
982 
983 	mp = allocb(size + HXGE_RXBUF_EXTRA, 0);
984 	if (mp == NULL) {
985 		HXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
986 		goto hxge_dupb_bcopy_exit;
987 	}
988 	dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA;
989 	bcopy((void *) &hxge_mp->buffer[offset], dp, size);
990 	mp->b_wptr = dp + size;
991 
992 hxge_dupb_bcopy_exit:
993 
994 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
995 
996 	return (mp);
997 }
998 
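/*
 * Note: hxge_dupb() loans the driver's receive buffer up the stack
 * (zero copy), while hxge_dupb_bcopy() copies the payload into a fresh
 * allocb() block so the driver buffer can be reposted immediately.
 * hxge_receive_packet() selects the bcopy path when rbr_consumed
 * crosses rbr_threshold_hi, i.e. when too many buffers are already
 * loaned out.
 */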
999 void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p,
1000     p_rx_msg_t rx_msg_p);
1001 
1002 void
1003 hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
1004 {
1005 
1006 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page"));
1007 
1008 	/* Reuse this buffer */
1009 	rx_msg_p->free = B_FALSE;
1010 	rx_msg_p->cur_usage_cnt = 0;
1011 	rx_msg_p->max_usage_cnt = 0;
1012 	rx_msg_p->pkt_buf_size = 0;
1013 
1014 	if (rx_rbr_p->rbr_use_bcopy) {
1015 		rx_msg_p->rx_use_bcopy = B_FALSE;
1016 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
1017 	}
1018 
1019 	/*
1020 	 * Get the rbr header pointer and its offset index.
1021 	 */
1022 	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
1023 	    rx_rbr_p->rbr_wrap_mask);
1024 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
1025 
1026 	/*
1027 	 * Accumulate some buffers in the ring before re-enabling the
1028 	 * DMA channel, if rbr empty was signaled.
1029 	 */
1030 	hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1);
1031 	if (rx_rbr_p->rbr_is_empty &&
1032 	    rx_rbr_p->rbr_consumed < rx_rbr_p->rbb_max / 16) {
1033 		hxge_rbr_empty_restore(hxgep, rx_rbr_p);
1034 	}
1035 
1036 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1037 	    "<== hxge_post_page (channel %d post_next_index %d)",
1038 	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
1039 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page"));
1040 }
1041 
1042 void
1043 hxge_freeb(p_rx_msg_t rx_msg_p)
1044 {
1045 	size_t		size;
1046 	uchar_t		*buffer = NULL;
1047 	int		ref_cnt;
1048 	boolean_t	free_state = B_FALSE;
1049 	rx_rbr_ring_t	*ring = rx_msg_p->rx_rbr_p;
1050 
1051 	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb"));
1052 	HXGE_DEBUG_MSG((NULL, MEM2_CTL,
1053 	    "hxge_freeb:rx_msg_p = $%p (block pending %d)",
1054 	    rx_msg_p, hxge_mblks_pending));
1055 
1056 	if (ring == NULL)
1057 		return;
1058 
1059 	/*
1060 	 * This is to prevent posting activities while we are recovering
1061 	 * from fatal errors. This should not be a performance drag since
1062 	 * ref_cnt != 0 most times.
1063 	 */
1064 	if (ring->rbr_state == RBR_POSTING)
1065 		MUTEX_ENTER(&ring->post_lock);
1066 
1067 	/*
1068 	 * First we need to get the free state, then
1069 	 * atomic decrement the reference count to prevent
1070 	 * the race condition with the interrupt thread that
1071 	 * is processing a loaned up buffer block.
1072 	 */
1073 	free_state = rx_msg_p->free;
1074 	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
1075 	if (!ref_cnt) {
1076 		atomic_dec_32(&hxge_mblks_pending);
1077 
1078 		buffer = rx_msg_p->buffer;
1079 		size = rx_msg_p->block_size;
1080 
1081 		HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: "
1082 		    "will free: rx_msg_p = $%p (block pending %d)",
1083 		    rx_msg_p, hxge_mblks_pending));
1084 
1085 		if (!rx_msg_p->use_buf_pool) {
1086 			KMEM_FREE(buffer, size);
1087 		}
1088 
1089 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1090 		/*
1091 		 * Decrement the receive buffer ring's reference
1092 		 * count, too.
1093 		 */
1094 		atomic_dec_32(&ring->rbr_ref_cnt);
1095 
1096 		/*
1097 		 * Free the receive buffer ring, iff
1098 		 * 1. all the receive buffers have been freed
1099 		 * 2. and we are in the proper state (that is, the
1100 		 *    ring has been unmapped: RBR_UNMAPPED).
1101 		 */
1102 		if (ring->rbr_ref_cnt == 0 &&
1103 		    ring->rbr_state == RBR_UNMAPPED) {
1104 			KMEM_FREE(ring, sizeof (*ring));
1105 			/* post_lock has been destroyed already */
1106 			return;
1107 		}
1108 	}
1109 
1110 	/*
1111 	 * Repost buffer.
1112 	 */
1113 	if (free_state && (ref_cnt == 1)) {
1114 		HXGE_DEBUG_MSG((NULL, RX_CTL,
1115 		    "hxge_freeb: post page $%p:", rx_msg_p));
1116 		if (ring->rbr_state == RBR_POSTING)
1117 			hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p);
1118 	}
1119 
1120 	if (ring->rbr_state == RBR_POSTING)
1121 		MUTEX_EXIT(&ring->post_lock);
1122 
1123 	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb"));
1124 }
1125 
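/*
 * A sketch of the loaned-buffer life cycle that hxge_freeb() completes
 * (not part of the original driver; compiled out by the hypothetical
 * HXGE_EXAMPLES guard).  hxge_allocb() creates a block with ref_cnt = 1
 * and registers hxge_freeb as its desballoc(9F) free routine; every
 * hxge_dupb() loan bumps ref_cnt, and each freeb(9F) of a loaned mblk
 * re-enters hxge_freeb(), which reposts the buffer once the last loan
 * is returned and the free flag is set.
 */
#ifdef HXGE_EXAMPLES
static void
example_loan_and_return(p_rx_msg_t rx_msg_p)
{
	p_mblk_t mp;

	/* Loan the buffer up the stack: ref_cnt 1 -> 2 */
	mp = hxge_dupb(rx_msg_p, 0, rx_msg_p->block_size);
	if (mp != NULL) {
		/* The stack's freeb() re-enters hxge_freeb: ref_cnt 2 -> 1 */
		freeb(mp);
	}
}
#endif	/* HXGE_EXAMPLES */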
1126 uint_t
1127 hxge_rx_intr(caddr_t arg1, caddr_t arg2)
1128 {
1129 	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
1130 	p_hxge_t		hxgep = (p_hxge_t)arg2;
1131 	p_hxge_ldg_t		ldgp;
1132 	uint8_t			channel;
1133 	hpi_handle_t		handle;
1134 	rdc_stat_t		cs;
1135 	uint_t			serviced = DDI_INTR_UNCLAIMED;
1136 
1137 	if (ldvp == NULL) {
1138 		HXGE_DEBUG_MSG((NULL, RX_INT_CTL,
1139 		    "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1140 		return (DDI_INTR_UNCLAIMED);
1141 	}
1142 
1143 	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
1144 		hxgep = ldvp->hxgep;
1145 	}
1146 
1147 	/*
1148 	 * If the interface is not started, just swallow the interrupt
1149 	 * for the logical device and don't rearm it.
1150 	 */
1151 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED)
1152 		return (DDI_INTR_CLAIMED);
1153 
1154 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1155 	    "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1156 
1157 	/*
1158 	 * This interrupt handler is for a specific receive dma channel.
1159 	 */
1160 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1161 
1162 	/*
1163 	 * Get the control and status for this channel.
1164 	 */
1165 	channel = ldvp->channel;
1166 	ldgp = ldvp->ldgp;
1167 	RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
1168 
1169 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_intr:channel %d "
1170 	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
1171 	    channel, cs.value, cs.bits.rcr_to, cs.bits.rcr_thres));
1172 
1173 	hxge_rx_pkts_vring(hxgep, ldvp->vdma_index, ldvp, cs);
1174 	serviced = DDI_INTR_CLAIMED;
1175 
1176 	/* error events. */
1177 	if (cs.value & RDC_STAT_ERROR) {
1178 		(void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
1179 	}
1180 
1181 hxge_intr_exit:
1182 	/*
1183 	 * Enable the mailbox update interrupt if we want to use the mailbox.
1184 	 * We probably don't need it, as it only saves us one PIO read.
1185 	 * Also write 1 to rcrthres and rcrto to clear these two edge triggered
1186 	 * bits.
1187 	 */
1188 	cs.value &= RDC_STAT_WR1C;
1189 	cs.bits.mex = 1;
1190 	cs.bits.ptrread = 0;
1191 	cs.bits.pktread = 0;
1192 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1193 
1194 	/*
1195 	 * Rearm this logical group if this is a single device group.
1196 	 */
1197 	if (ldgp->nldvs == 1) {
1198 		ld_intr_mgmt_t mgm;
1199 
1200 		mgm.value = 0;
1201 		mgm.bits.arm = 1;
1202 		mgm.bits.timer = ldgp->ldg_timer;
1203 		HXGE_REG_WR32(handle,
1204 		    LD_INTR_MGMT + LDSV_OFFSET(ldgp->ldg), mgm.value);
1205 	}
1206 
1207 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1208 	    "<== hxge_rx_intr: serviced %d", serviced));
1209 
1210 	return (serviced);
1211 }
1212 
1213 static void
1214 hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
1215     rdc_stat_t cs)
1216 {
1217 	p_mblk_t		mp;
1218 	p_rx_rcr_ring_t		rcrp;
1219 
1220 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts_vring"));
1221 	if ((mp = hxge_rx_pkts(hxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
1222 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1223 		    "<== hxge_rx_pkts_vring: no mp"));
1224 		return;
1225 	}
1226 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts_vring: $%p", mp));
1227 
1228 #ifdef  HXGE_DEBUG
1229 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1230 	    "==> hxge_rx_pkts_vring:calling mac_rx (NEMO) "
1231 	    "LEN %d mp $%p mp->b_next $%p rcrp $%p",
1232 	    (mp->b_wptr - mp->b_rptr), mp, mp->b_next, rcrp));
1233 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1234 	    "==> hxge_rx_pkts_vring: dump packets "
1235 	    "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
1236 	    mp, mp->b_rptr, mp->b_wptr,
1237 	    hxge_dump_packet((char *)mp->b_rptr, 64)));
1238 
1239 	if (mp->b_cont) {
1240 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1241 		    "==> hxge_rx_pkts_vring: dump b_cont packets "
1242 		    "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
1243 		    mp->b_cont, mp->b_cont->b_rptr, mp->b_cont->b_wptr,
1244 		    hxge_dump_packet((char *)mp->b_cont->b_rptr,
1245 		    mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
1246 		}
1247 	if (mp->b_next) {
1248 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1249 		    "==> hxge_rx_pkts_vring: dump next packets "
1250 		    "(b_rptr $%p): %s",
1251 		    mp->b_next->b_rptr,
1252 		    hxge_dump_packet((char *)mp->b_next->b_rptr, 64)));
1253 	}
1254 #endif
1255 
1256 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1257 	    "==> hxge_rx_pkts_vring: send packet to stack"));
1258 	mac_rx(hxgep->mach, NULL, mp);
1259 
1260 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_pkts_vring"));
1261 }
1262 
1263 /*ARGSUSED*/
1264 mblk_t *
1265 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
1266     p_rx_rcr_ring_t *rcrp, rdc_stat_t cs)
1267 {
1268 	hpi_handle_t		handle;
1269 	uint8_t			channel;
1270 	p_rx_rcr_rings_t	rx_rcr_rings;
1271 	p_rx_rcr_ring_t		rcr_p;
1272 	uint32_t		comp_rd_index;
1273 	p_rcr_entry_t		rcr_desc_rd_head_p;
1274 	p_rcr_entry_t		rcr_desc_rd_head_pp;
1275 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
1276 	uint16_t		qlen, nrcr_read, npkt_read;
1277 	uint32_t		qlen_hw, qlen_sw;
1278 	uint32_t		invalid_rcr_entry;
1279 	boolean_t		multi;
1280 	rdc_rcr_cfg_b_t		rcr_cfg_b;
1281 	p_rx_mbox_t		rx_mboxp;
1282 	p_rxdma_mailbox_t	mboxp;
1283 	uint64_t		rcr_head_index, rcr_tail_index;
1284 	uint64_t		rcr_tail;
1285 	uint64_t		value;
1286 	rdc_rcr_tail_t		rcr_tail_reg;
1287 	p_hxge_rx_ring_stats_t	rdc_stats;
1288 
1289 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d "
1290 	    "channel %d", vindex, ldvp->channel));
1291 
1292 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1293 		return (NULL);
1294 	}
1295 
1296 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1297 	rx_rcr_rings = hxgep->rx_rcr_rings;
1298 	rcr_p = rx_rcr_rings->rcr_rings[vindex];
1299 	channel = rcr_p->rdc;
1300 	if (channel != ldvp->channel) {
1301 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
1302 		    "channel %d, and rcr channel %d not matched.",
1303 		    vindex, ldvp->channel, channel));
1304 		return (NULL);
1305 	}
1306 
1307 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1308 	    "==> hxge_rx_pkts: START: rcr channel %d "
1309 	    "head_p $%p head_pp $%p  index %d ",
1310 	    channel, rcr_p->rcr_desc_rd_head_p,
1311 	    rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));
1312 
1313 	rx_mboxp = hxgep->rx_mbox_areas_p->rxmbox_areas[channel];
1314 	mboxp = (p_rxdma_mailbox_t)rx_mboxp->rx_mbox.kaddrp;
1315 
1316 	(void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
1317 	RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value);
1318 	rcr_tail = rcr_tail_reg.bits.tail;
1319 
1320 	if (!qlen) {
1321 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1322 		    "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
1323 		    channel, qlen));
1324 		return (NULL);
1325 	}
1326 
1327 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d "
1328 	    "qlen %d", channel, qlen));
1329 
1330 	comp_rd_index = rcr_p->comp_rd_index;
1331 
1332 	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
1333 	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
1334 	nrcr_read = npkt_read = 0;
1335 
1336 	/*
1337 	 * Number of packets queued (a jumbo or multi-buffer packet is counted
1338 	 * as only one packet, though it may take up more than one completion
1339 	 * entry).
1340 	 */
1341 	qlen_hw = (qlen < hxge_max_rx_pkts) ? qlen : hxge_max_rx_pkts;
1342 	head_mp = NULL;
1343 	tail_mp = &head_mp;
1344 	nmp = mp_cont = NULL;
1345 	multi = B_FALSE;
1346 
1347 	rcr_head_index = rcr_p->rcr_desc_rd_head_p - rcr_p->rcr_desc_first_p;
1348 	rcr_tail_index = rcr_tail - rcr_p->rcr_tail_begin;
1349 
1350 	if (rcr_tail_index >= rcr_head_index) {
1351 		qlen_sw = rcr_tail_index - rcr_head_index;
1352 	} else {
1353 		/* rcr_tail has wrapped around */
1354 		qlen_sw = (rcr_p->comp_size - rcr_head_index) + rcr_tail_index;
1355 	}
1356 
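	/*
	 * Example of the wrap case above: with comp_size = 512,
	 * rcr_head_index = 500 and rcr_tail_index = 10, the tail has
	 * wrapped and qlen_sw = (512 - 500) + 10 = 22 entries are ready.
	 */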
1357 	if (qlen_hw > qlen_sw) {
1358 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1359 		    "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
1360 		    channel, qlen_hw, qlen_sw));
1361 		qlen_hw = qlen_sw;
1362 	}
1363 
1364 	while (qlen_hw) {
1365 #ifdef HXGE_DEBUG
1366 		hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
1367 #endif
1368 		/*
1369 		 * Process one completion ring entry.
1370 		 */
1371 		invalid_rcr_entry = 0;
1372 		hxge_receive_packet(hxgep,
1373 		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont,
1374 		    &invalid_rcr_entry);
1375 		if (invalid_rcr_entry != 0) {
1376 			rdc_stats = rcr_p->rdc_stats;
1377 			rdc_stats->rcr_invalids++;
1378 			HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1379 			    "Channel %d could only read 0x%x packets, "
1380 			    "but 0x%x pending\n", channel, npkt_read, qlen_hw));
1381 			break;
1382 		}
1383 
1384 		/*
1385 		 * message chaining modes (nemo msg chaining)
1386 		 */
1387 		if (nmp) {
1388 			nmp->b_next = NULL;
1389 			if (!multi && !mp_cont) { /* frame fits a partition */
1390 				*tail_mp = nmp;
1391 				tail_mp = &nmp->b_next;
1392 				nmp = NULL;
1393 			} else if (multi && !mp_cont) { /* first segment */
1394 				*tail_mp = nmp;
1395 				tail_mp = &nmp->b_cont;
1396 			} else if (multi && mp_cont) {	/* mid of multi segs */
1397 				*tail_mp = mp_cont;
1398 				tail_mp = &mp_cont->b_cont;
1399 			} else if (!multi && mp_cont) { /* last segment */
1400 				*tail_mp = mp_cont;
1401 				tail_mp = &nmp->b_next;
1402 				nmp = NULL;
1403 			}
1404 		}
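		/*
		 * The chain built above links complete packets through
		 * b_next (head_mp is the head of that chain), while the
		 * buffers making up a single multi-entry packet are
		 * linked through b_cont.
		 */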
1405 
1406 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1407 		    "==> hxge_rx_pkts: loop: rcr channel %d "
1408 		    "before updating: multi %d "
1409 		    "nrcr_read %d "
1410 		    "npk read %d "
1411 		    "head_pp $%p  index %d ",
1412 		    channel, multi,
1413 		    nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index));
1414 
1415 		if (!multi) {
1416 			qlen_hw--;
1417 			npkt_read++;
1418 		}
1419 
1420 		/*
1421 		 * Update the next read entry.
1422 		 */
1423 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
1424 		    rcr_p->comp_wrap_mask);
1425 
1426 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
1427 		    rcr_p->rcr_desc_first_p, rcr_p->rcr_desc_last_p);
1428 
1429 		nrcr_read++;
1430 
1431 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1432 		    "<== hxge_rx_pkts: (SAM, process one packet) "
1433 		    "nrcr_read %d", nrcr_read));
1434 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1435 		    "==> hxge_rx_pkts: loop: rcr channel %d "
1436 		    "multi %d nrcr_read %d npk read %d head_pp $%p  index %d ",
1437 		    channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp,
1438 		    comp_rd_index));
1439 	}
1440 
1441 	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
1442 	rcr_p->comp_rd_index = comp_rd_index;
1443 	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
1444 
1445 	/* Adjust the mailbox queue length for a hardware bug workaround */
1446 	mboxp->rcrstat_a.bits.qlen -= npkt_read;
1447 
1448 	if ((hxgep->intr_timeout != rcr_p->intr_timeout) ||
1449 	    (hxgep->intr_threshold != rcr_p->intr_threshold)) {
1450 		rcr_p->intr_timeout = hxgep->intr_timeout;
1451 		rcr_p->intr_threshold = hxgep->intr_threshold;
1452 		rcr_cfg_b.value = 0x0ULL;
1453 		if (rcr_p->intr_timeout)
1454 			rcr_cfg_b.bits.entout = 1;
1455 		rcr_cfg_b.bits.timeout = rcr_p->intr_timeout;
1456 		rcr_cfg_b.bits.pthres = rcr_p->intr_threshold;
1457 		RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B,
1458 		    channel, rcr_cfg_b.value);
1459 	}
1460 
1461 	cs.bits.pktread = npkt_read;
1462 	cs.bits.ptrread = nrcr_read;
1463 	value = cs.value;
1464 	cs.value &= 0xffffffffULL;
1465 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1466 
1467 	cs.value = value & ~0xffffffffULL;
1468 	cs.bits.pktread = 0;
1469 	cs.bits.ptrread = 0;
1470 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1471 
1472 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1473 	    "==> hxge_rx_pkts: EXIT: rcr channel %d "
1474 	    "head_pp $%p  index %016llx ",
1475 	    channel, rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));
1476 
1477 	/*
1478 	 * Update RCR buffer pointer read and number of packets read.
1479 	 */
1480 
1481 	*rcrp = rcr_p;
1482 
1483 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));
1484 
1485 	return (head_mp);
1486 }
1487 
1488 #define	RCR_ENTRY_PATTERN	0x5a5a6b6b7c7c8d8dULL
1489 #define	NO_PORT_BIT		0x20
1490 
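/*
 * Each RCR entry is stamped with RCR_ENTRY_PATTERN after it has been
 * consumed (see below).  An entry that still reads back as the pattern,
 * or as zero, has not yet been written by the hardware, so the queue
 * length register has run ahead of the descriptor writes and the entry
 * is flagged as invalid for the hardware bug workaround in
 * hxge_rx_pkts().
 */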
1491 /*ARGSUSED*/
1492 void
1493 hxge_receive_packet(p_hxge_t hxgep,
1494     p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
1495     boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont,
1496     uint32_t *invalid_rcr_entry)
1497 {
1498 	p_mblk_t		nmp = NULL;
1499 	uint64_t		multi;
1500 	uint8_t			channel;
1501 
1502 	boolean_t first_entry = B_TRUE;
1503 	boolean_t buffer_free = B_FALSE;
1504 	boolean_t error_send_up = B_FALSE;
1505 	uint8_t error_type;
1506 	uint16_t l2_len;
1507 	uint16_t skip_len;
1508 	uint8_t pktbufsz_type;
1509 	uint64_t rcr_entry;
1510 	uint64_t *pkt_buf_addr_pp;
1511 	uint64_t *pkt_buf_addr_p;
1512 	uint32_t buf_offset;
1513 	uint32_t bsize;
1514 	uint32_t msg_index;
1515 	p_rx_rbr_ring_t rx_rbr_p;
1516 	p_rx_msg_t *rx_msg_ring_p;
1517 	p_rx_msg_t rx_msg_p;
1518 
1519 	uint16_t sw_offset_bytes = 0, hdr_size = 0;
1520 	hxge_status_t status = HXGE_OK;
1521 	boolean_t is_valid = B_FALSE;
1522 	p_hxge_rx_ring_stats_t rdc_stats;
1523 	uint32_t bytes_read;
1524 	uint8_t header = 0;
1525 
1526 	channel = rcr_p->rdc;
1527 
1528 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet"));
1529 
1530 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
1531 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
1532 
1533 	/* Verify the content of the rcr_entry for a hardware bug workaround */
1534 	if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) {
1535 		*invalid_rcr_entry = 1;
1536 		HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet "
1537 		    "Channel %d invalid RCR entry 0x%llx found, returning\n",
1538 		    channel, (long long) rcr_entry));
1539 		return;
1540 	}
1541 	*((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN;
1542 
1543 	multi = (rcr_entry & RCR_MULTI_MASK);
1544 
1545 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
1546 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
1547 
1548 	/*
1549 	 * Hardware does not strip the CRC due to bug ID 11451, where
1550 	 * the hardware mishandles minimum-size packets.
1551 	 */
1552 	l2_len -= ETHERFCSL;
1553 
1554 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
1555 	    RCR_PKTBUFSZ_SHIFT);
1556 #if defined(__i386)
1557 	pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
1558 	    RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
1559 #else
1560 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
1561 	    RCR_PKT_BUF_ADDR_SHIFT);
1562 #endif
1563 
1564 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1565 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1566 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
1567 	    "error_type 0x%x pktbufsz_type %d ",
1568 	    rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len,
1569 	    multi, error_type, pktbufsz_type));
1570 
1571 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1572 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1573 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
1574 	    "error_type 0x%x ", rcr_desc_rd_head_p,
1575 	    rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type));
1576 
1577 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1578 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1579 	    "full pkt_buf_addr_pp $%p l2_len %d",
1580 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1581 
1582 	/* get the stats ptr */
1583 	rdc_stats = rcr_p->rdc_stats;
1584 
1585 	if (!l2_len) {
1586 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1587 		    "<== hxge_receive_packet: failed: l2 length is 0."));
1588 		return;
1589 	}
1590 
1591 	/* shift 6 bits to get the full io address */
1592 #if defined(__i386)
1593 	pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
1594 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
1595 #else
1596 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
1597 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
1598 #endif
1599 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1600 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1601 	    "full pkt_buf_addr_pp $%p l2_len %d",
1602 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1603 
1604 	rx_rbr_p = rcr_p->rx_rbr_p;
1605 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
1606 
1607 	if (first_entry) {
1608 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
1609 		    RXDMA_HDR_SIZE_DEFAULT);
1610 
1611 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1612 		    "==> hxge_receive_packet: first entry 0x%016llx "
1613 		    "pkt_buf_addr_pp $%p l2_len %d hdr %d",
1614 		    rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size));
1615 	}
1616 
1617 	MUTEX_ENTER(&rcr_p->lock);
1618 	MUTEX_ENTER(&rx_rbr_p->lock);
1619 
1620 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1621 	    "==> (rbr 1) hxge_receive_packet: entry 0x%0llx "
1622 	    "full pkt_buf_addr_pp $%p l2_len %d",
1623 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1624 
1625 	/*
1626 	 * Packet buffer address in the completion entry points to the starting
1627 	 * buffer address (offset 0). Use the starting buffer address to locate
1628 	 * the corresponding kernel address.
1629 	 */
1630 	status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p,
1631 	    pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
1632 	    &buf_offset, &msg_index);
1633 
1634 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1635 	    "==> (rbr 2) hxge_receive_packet: entry 0x%0llx "
1636 	    "full pkt_buf_addr_pp $%p l2_len %d",
1637 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1638 
1639 	if (status != HXGE_OK) {
1640 		MUTEX_EXIT(&rx_rbr_p->lock);
1641 		MUTEX_EXIT(&rcr_p->lock);
1642 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1643 		    "<== hxge_receive_packet: found vaddr failed %d", status));
1644 		return;
1645 	}
1646 
1647 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1648 	    "==> (rbr 3) hxge_receive_packet: entry 0x%0llx "
1649 	    "full pkt_buf_addr_pp $%p l2_len %d",
1650 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1651 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1652 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1653 	    "full pkt_buf_addr_pp $%p l2_len %d",
1654 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1655 
1656 	if (msg_index >= rx_rbr_p->tnblocks) {
1657 		MUTEX_EXIT(&rx_rbr_p->lock);
1658 		MUTEX_EXIT(&rcr_p->lock);
1659 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1660 		    "==> hxge_receive_packet: FATAL msg_index (%d) "
1661 		    "should be smaller than tnblocks (%d)\n",
1662 		    msg_index, rx_rbr_p->tnblocks));
1663 		return;
1664 	}
1665 
1666 	rx_msg_p = rx_msg_ring_p[msg_index];
1667 
1668 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1669 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1670 	    "full pkt_buf_addr_pp $%p l2_len %d",
1671 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1672 
1673 	switch (pktbufsz_type) {
1674 	case RCR_PKTBUFSZ_0:
1675 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
1676 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1677 		    "==> hxge_receive_packet: 0 buf %d", bsize));
1678 		break;
1679 	case RCR_PKTBUFSZ_1:
1680 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
1681 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1682 		    "==> hxge_receive_packet: 1 buf %d", bsize));
1683 		break;
1684 	case RCR_PKTBUFSZ_2:
1685 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
1686 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1687 		    "==> hxge_receive_packet: 2 buf %d", bsize));
1688 		break;
1689 	case RCR_SINGLE_BLOCK:
1690 		bsize = rx_msg_p->block_size;
1691 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1692 		    "==> hxge_receive_packet: single %d", bsize));
1693 
1694 		break;
1695 	default:
1696 		MUTEX_EXIT(&rx_rbr_p->lock);
1697 		MUTEX_EXIT(&rcr_p->lock);
1698 		return;
1699 	}
1700 
1701 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
1702 	    (buf_offset + sw_offset_bytes), (hdr_size + l2_len),
1703 	    DDI_DMA_SYNC_FORCPU);
1704 
1705 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1706 	    "==> hxge_receive_packet: after first dump:usage count"));
1707 
1708 	if (rx_msg_p->cur_usage_cnt == 0) {
1709 		if (rx_rbr_p->rbr_use_bcopy) {
1710 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
1711 			if (rx_rbr_p->rbr_consumed >
1712 			    rx_rbr_p->rbr_threshold_hi) {
1713 				rx_msg_p->rx_use_bcopy = B_TRUE;
1714 			}
1715 		}
1716 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1717 		    "==> hxge_receive_packet: buf %d (new block) ", bsize));
1718 
1719 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
1720 		rx_msg_p->pkt_buf_size = bsize;
1721 		rx_msg_p->cur_usage_cnt = 1;
1722 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
1723 			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1724 			    "==> hxge_receive_packet: buf %d (single block) ",
1725 			    bsize));
1726 			/*
1727 			 * Buffer can be reused once the free function is
1728 			 * called.
1729 			 */
1730 			rx_msg_p->max_usage_cnt = 1;
1731 			buffer_free = B_TRUE;
1732 		} else {
1733 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize;
1734 			if (rx_msg_p->max_usage_cnt == 1) {
1735 				buffer_free = B_TRUE;
1736 			}
1737 		}
1738 	} else {
1739 		rx_msg_p->cur_usage_cnt++;
1740 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
1741 			buffer_free = B_TRUE;
1742 		}
1743 	}
1744 
1745 	if (rx_msg_p->rx_use_bcopy) {
1746 		rdc_stats->pkt_drop++;
1747 		atomic_inc_32(&rx_msg_p->ref_cnt);
1748 		if (buffer_free == B_TRUE) {
1749 			rx_msg_p->free = B_TRUE;
1750 		}
1751 
1752 		MUTEX_EXIT(&rx_rbr_p->lock);
1753 		MUTEX_EXIT(&rcr_p->lock);
1754 		hxge_freeb(rx_msg_p);
1755 		return;
1756 	}
1757 
1758 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1759 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
1760 	    msg_index, l2_len,
1761 	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
1762 
1763 	if (error_type) {
1764 		rdc_stats->ierrors++;
1765 		/* Update error stats */
1766 		rdc_stats->errlog.compl_err_type = error_type;
1767 		HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR);
1768 
1769 		if (error_type & RCR_CTRL_FIFO_DED) {
1770 			rdc_stats->ctrl_fifo_ecc_err++;
1771 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1772 			    " hxge_receive_packet: "
1773 			    " channel %d RCR ctrl_fifo_ded error", channel));
1774 		} else if (error_type & RCR_DATA_FIFO_DED) {
1775 			rdc_stats->data_fifo_ecc_err++;
1776 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1777 			    " hxge_receive_packet: channel %d"
1778 			    " RCR data_fifo_ded error", channel));
1779 		}
1780 
1781 		/*
1782 		 * Update and repost buffer block if max usage count is
1783 		 * reached.
1784 		 */
1785 		if (error_send_up == B_FALSE) {
1786 			atomic_inc_32(&rx_msg_p->ref_cnt);
1787 			if (buffer_free == B_TRUE) {
1788 				rx_msg_p->free = B_TRUE;
1789 			}
1790 
1791 			MUTEX_EXIT(&rx_rbr_p->lock);
1792 			MUTEX_EXIT(&rcr_p->lock);
1793 			hxge_freeb(rx_msg_p);
1794 			return;
1795 		}
1796 	}
1797 
1798 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1799 	    "==> hxge_receive_packet: DMA sync second "));
1800 
1801 	bytes_read = rcr_p->rcvd_pkt_bytes;
1802 	skip_len = sw_offset_bytes + hdr_size;
1803 
1804 	if (first_entry) {
1805 		header = rx_msg_p->buffer[buf_offset];
1806 	}
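
	/*
	 * Illustrative sketch, not part of the driver: layout of the
	 * first buffer of a packet, with hypothetical values
	 * sw_offset_bytes = 0 and hdr_size = 2:
	 *
	 *	| sw offset | pkt hdr | L2 frame (l2_len bytes) |
	 *	\____________________/
	 *	       skip_len
	 *
	 * skip_len = sw_offset_bytes + hdr_size, and b_rptr is advanced
	 * past it below so the mblk handed upstream begins at the L2
	 * frame.
	 */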
1807 
1808 	if (!rx_msg_p->rx_use_bcopy) {
1809 		/*
1810 		 * For buffers loaned up to the stack, the driver reference
1811 		 * count is incremented first, and then the free state is set.
1812 		 */
1813 		if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
1814 			if (first_entry) {
1815 				nmp->b_rptr = &nmp->b_rptr[skip_len];
1816 				if (l2_len < bsize - skip_len) {
1817 					nmp->b_wptr = &nmp->b_rptr[l2_len];
1818 				} else {
1819 					nmp->b_wptr = &nmp->b_rptr[bsize
1820 					    - skip_len];
1821 				}
1822 			} else {
1823 				if (l2_len - bytes_read < bsize) {
1824 					nmp->b_wptr =
1825 					    &nmp->b_rptr[l2_len - bytes_read];
1826 				} else {
1827 					nmp->b_wptr = &nmp->b_rptr[bsize];
1828 				}
1829 			}
1830 		}
1831 	} else {
1832 		if (first_entry) {
1833 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
1834 			    l2_len < bsize - skip_len ?
1835 			    l2_len : bsize - skip_len);
1836 		} else {
1837 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset,
1838 			    l2_len - bytes_read < bsize ?
1839 			    l2_len - bytes_read : bsize);
1840 		}
1841 	}
1842 
1843 	if (nmp != NULL) {
1844 		if (first_entry)
1845 			bytes_read  = nmp->b_wptr - nmp->b_rptr;
1846 		else
1847 			bytes_read += nmp->b_wptr - nmp->b_rptr;
1848 
1849 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1850 		    "==> hxge_receive_packet after dupb: "
1851 		    "rbr consumed %d "
1852 		    "pktbufsz_type %d "
1853 		    "nmp $%p rptr $%p wptr $%p "
1854 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
1855 		    rx_rbr_p->rbr_consumed,
1856 		    pktbufsz_type,
1857 		    nmp, nmp->b_rptr, nmp->b_wptr,
1858 		    buf_offset, bsize, l2_len, skip_len));
1859 	} else {
1860 		cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)");
1861 
1862 		atomic_inc_32(&rx_msg_p->ref_cnt);
1863 		if (buffer_free == B_TRUE) {
1864 			rx_msg_p->free = B_TRUE;
1865 		}
1866 
1867 		MUTEX_EXIT(&rx_rbr_p->lock);
1868 		MUTEX_EXIT(&rcr_p->lock);
1869 		hxge_freeb(rx_msg_p);
1870 		return;
1871 	}
1872 
1873 	if (buffer_free == B_TRUE) {
1874 		rx_msg_p->free = B_TRUE;
1875 	}
1876 
1877 	/*
1878 	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
1879 	 * packet is not fragmented and no error bit is set, then L4 checksum
1880 	 * is OK.
1881 	 */
1882 	is_valid = (nmp != NULL);
1883 	if (first_entry) {
1884 		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
1885 		if (l2_len > (STD_FRAME_SIZE - ETHERFCSL))
1886 			rdc_stats->jumbo_pkts++;
1887 		rdc_stats->ibytes += skip_len + l2_len < bsize ?
1888 		    l2_len : bsize;
1889 	} else {
1890 		/*
1891 		 * Add the current portion of the packet to the kstats.
1892 		 * The current portion of the packet is calculated by using
1893 		 * length of the packet and the previously received portion.
1894 		 */
1895 		rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ?
1896 		    l2_len - rcr_p->rcvd_pkt_bytes : bsize;
1897 	}
1898 
1899 	rcr_p->rcvd_pkt_bytes = bytes_read;
1900 
1901 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
1902 		atomic_inc_32(&rx_msg_p->ref_cnt);
1903 		MUTEX_EXIT(&rx_rbr_p->lock);
1904 		MUTEX_EXIT(&rcr_p->lock);
1905 		hxge_freeb(rx_msg_p);
1906 	} else {
1907 		MUTEX_EXIT(&rx_rbr_p->lock);
1908 		MUTEX_EXIT(&rcr_p->lock);
1909 	}
1910 
1911 	if (is_valid) {
1912 		nmp->b_cont = NULL;
1913 		if (first_entry) {
1914 			*mp = nmp;
1915 			*mp_cont = NULL;
1916 		} else {
1917 			*mp_cont = nmp;
1918 		}
1919 	}
1920 
1921 	/*
1922 	 * Update stats and hardware checksumming.
1923 	 */
1924 	if (is_valid && !multi) {
1925 		if (!(header & NO_PORT_BIT) && !error_type) {
1926 			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
1927 			    HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
1928 
1929 			HXGE_DEBUG_MSG((hxgep, RX_CTL,
1930 			    "==> hxge_receive_packet: Full tcp/udp cksum "
1931 			    "is_valid 0x%x multi %d error %d",
1932 			    is_valid, multi, error_type));
1933 		}
1934 	}
1935 
1936 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1937 	    "==> hxge_receive_packet: *mp 0x%016llx", *mp));
1938 
1939 	*multi_p = (multi == RCR_MULTI_MASK);
1940 
1941 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: "
1942 	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
1943 	    *multi_p, nmp, *mp, *mp_cont));
1944 }
1945 
1946 static void
1947 hxge_rx_rbr_empty_recover(p_hxge_t hxgep, uint8_t channel)
1948 {
1949 	hpi_handle_t	handle;
1950 	p_rx_rcr_ring_t	rcrp;
1951 	p_rx_rbr_ring_t	rbrp;
1952 
1953 	rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
1954 	rbrp = rcrp->rx_rbr_p;
1955 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1956 
1957 	/*
1958 	 * Wait for the channel to be quiet
1959 	 */
1960 	(void) hpi_rxdma_cfg_rdc_wait_for_qst(handle, channel);
1961 
1962 	/*
1963 	 * Let hxge_post_page() accumulate some buffers before re-enabling
1964 	 * the DMA channel.
1965 	 */
1966 	MUTEX_ENTER(&rbrp->post_lock);
1967 	if (rbrp->rbr_consumed < rbrp->rbb_max / 32) {
1968 		hxge_rbr_empty_restore(hxgep, rbrp);
1969 	} else {
1970 		rbrp->rbr_is_empty = B_TRUE;
1971 	}
1972 	MUTEX_EXIT(&rbrp->post_lock);
1973 }
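
/*
 * Illustrative sketch, not part of the driver: the restore decision in
 * hxge_rx_rbr_empty_recover() above. Assuming a hypothetical ring of
 * rbb_max = 2048 blocks:
 *
 *	rbb_max / 32 = 64
 *
 * so the channel is re-enabled immediately only if fewer than 64 blocks
 * are still consumed (not yet reposted); otherwise rbr_is_empty stays
 * set and the post-page path restores the channel later, once enough
 * buffers have been returned.
 */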
1974 
1975 /*ARGSUSED*/
1976 static hxge_status_t
1977 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
1978     rdc_stat_t cs)
1979 {
1980 	p_hxge_rx_ring_stats_t	rdc_stats;
1981 	hpi_handle_t		handle;
1982 	boolean_t		rxchan_fatal = B_FALSE;
1983 	uint8_t			channel;
1984 	hxge_status_t		status = HXGE_OK;
1985 	uint64_t		cs_val;
1986 
1987 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts"));
1988 
1989 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1990 	channel = ldvp->channel;
1991 
1992 	/* Clear the interrupts */
1993 	cs.bits.pktread = 0;
1994 	cs.bits.ptrread = 0;
1995 	cs_val = cs.value & RDC_STAT_WR1C;
1996 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val);
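
	/*
	 * Illustrative note, not part of the driver: RDC_STAT is a
	 * write-1-to-clear register. Masking with RDC_STAT_WR1C keeps
	 * only the W1C bits, so that writing cs_val back acknowledges
	 * exactly the events just read:
	 *
	 *	cs_val = cs.value & RDC_STAT_WR1C;
	 *	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val);
	 *
	 * Each 1 bit written clears the corresponding event; 0 bits
	 * leave other, possibly newer, events pending.
	 */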
1997 
1998 	rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index];
1999 
2000 	if (cs.bits.rbr_cpl_to) {
2001 		rdc_stats->rbr_tmout++;
2002 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2003 		    HXGE_FM_EREPORT_RDMC_RBR_CPL_TO);
2004 		rxchan_fatal = B_TRUE;
2005 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2006 		    "==> hxge_rx_err_evnts(channel %d): "
2007 		    "fatal error: rx_rbr_timeout", channel));
2008 	}
2009 
2010 	if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) {
2011 		(void) hpi_rxdma_ring_perr_stat_get(handle,
2012 		    &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par);
2013 	}
2014 
2015 	if (cs.bits.rcr_shadow_par_err) {
2016 		rdc_stats->rcr_sha_par++;
2017 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2018 		    HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
2019 		rxchan_fatal = B_TRUE;
2020 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2021 		    "==> hxge_rx_err_evnts(channel %d): "
2022 		    "fatal error: rcr_shadow_par_err", channel));
2023 	}
2024 
2025 	if (cs.bits.rbr_prefetch_par_err) {
2026 		rdc_stats->rbr_pre_par++;
2027 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2028 		    HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
2029 		rxchan_fatal = B_TRUE;
2030 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2031 		    "==> hxge_rx_err_evnts(channel %d): "
2032 		    "fatal error: rbr_prefetch_par_err", channel));
2033 	}
2034 
2035 	if (cs.bits.rbr_pre_empty) {
2036 		rdc_stats->rbr_pre_empty++;
2037 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2038 		    HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY);
2039 		rxchan_fatal = B_TRUE;
2040 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2041 		    "==> hxge_rx_err_evnts(channel %d): "
2042 		    "fatal error: rbr_pre_empty", channel));
2043 	}
2044 
2045 	if (cs.bits.peu_resp_err) {
2046 		rdc_stats->peu_resp_err++;
2047 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2048 		    HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR);
2049 		rxchan_fatal = B_TRUE;
2050 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2051 		    "==> hxge_rx_err_evnts(channel %d): "
2052 		    "fatal error: peu_resp_err", channel));
2053 	}
2054 
2055 	if (cs.bits.rcr_thres) {
2056 		rdc_stats->rcr_thres++;
2057 	}
2058 
2059 	if (cs.bits.rcr_to) {
2060 		rdc_stats->rcr_to++;
2061 	}
2062 
2063 	if (cs.bits.rcr_shadow_full) {
2064 		rdc_stats->rcr_shadow_full++;
2065 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2066 		    HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL);
2067 		rxchan_fatal = B_TRUE;
2068 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2069 		    "==> hxge_rx_err_evnts(channel %d): "
2070 		    "fatal error: rcr_shadow_full", channel));
2071 	}
2072 
2073 	if (cs.bits.rcr_full) {
2074 		rdc_stats->rcrfull++;
2075 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2076 		    HXGE_FM_EREPORT_RDMC_RCRFULL);
2077 		rxchan_fatal = B_TRUE;
2078 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2079 		    "==> hxge_rx_err_evnts(channel %d): "
2080 		    "fatal error: rcrfull error", channel));
2081 	}
2082 
2083 	if (cs.bits.rbr_empty) {
2084 		rdc_stats->rbr_empty++;
2085 		hxge_rx_rbr_empty_recover(hxgep, channel);
2086 	}
2087 
2088 	if (cs.bits.rbr_full) {
2089 		rdc_stats->rbrfull++;
2090 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2091 		    HXGE_FM_EREPORT_RDMC_RBRFULL);
2092 		rxchan_fatal = B_TRUE;
2093 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2094 		    "==> hxge_rx_err_evnts(channel %d): "
2095 		    "fatal error: rbr_full error", channel));
2096 	}
2097 
2098 	if (rxchan_fatal) {
2099 		p_rx_rcr_ring_t	rcrp;
2100 		p_rx_rbr_ring_t rbrp;
2101 
2102 		rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
2103 		rbrp = rcrp->rx_rbr_p;
2104 
2105 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2106 		    " hxge_rx_err_evnts: fatal error on Channel #%d\n",
2107 		    channel));
2108 		MUTEX_ENTER(&rbrp->post_lock);
2109 		/* This function needs to be inside the post_lock */
2110 		status = hxge_rxdma_fatal_err_recover(hxgep, channel);
2111 		MUTEX_EXIT(&rbrp->post_lock);
2112 		if (status == HXGE_OK) {
2113 			FM_SERVICE_RESTORED(hxgep);
2114 		}
2115 	}
2116 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts"));
2117 
2118 	return (status);
2119 }
2120 
2121 static hxge_status_t
2122 hxge_map_rxdma(p_hxge_t hxgep)
2123 {
2124 	int			i, ndmas;
2125 	uint16_t		channel;
2126 	p_rx_rbr_rings_t	rx_rbr_rings;
2127 	p_rx_rbr_ring_t		*rbr_rings;
2128 	p_rx_rcr_rings_t	rx_rcr_rings;
2129 	p_rx_rcr_ring_t		*rcr_rings;
2130 	p_rx_mbox_areas_t	rx_mbox_areas_p;
2131 	p_rx_mbox_t		*rx_mbox_p;
2132 	p_hxge_dma_pool_t	dma_buf_poolp;
2133 	p_hxge_dma_common_t	*dma_buf_p;
2134 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
2135 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
2136 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
2137 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
2138 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
2139 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
2140 	uint32_t		*num_chunks;
2141 	hxge_status_t		status = HXGE_OK;
2142 
2143 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma"));
2144 
2145 	dma_buf_poolp = hxgep->rx_buf_pool_p;
2146 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
2147 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
2148 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
2149 
2150 	if (!dma_buf_poolp->buf_allocated ||
2151 	    !dma_rbr_cntl_poolp->buf_allocated ||
2152 	    !dma_rcr_cntl_poolp->buf_allocated ||
2153 	    !dma_mbox_cntl_poolp->buf_allocated) {
2154 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2155 		    "<== hxge_map_rxdma: buf not allocated"));
2156 		return (HXGE_ERROR);
2157 	}
2158 
2159 	ndmas = dma_buf_poolp->ndmas;
2160 	if (!ndmas) {
2161 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2162 		    "<== hxge_map_rxdma: no dma allocated"));
2163 		return (HXGE_ERROR);
2164 	}
2165 
2166 	num_chunks = dma_buf_poolp->num_chunks;
2167 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2168 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
2169 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
2170 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
2171 
2172 	rx_rbr_rings = (p_rx_rbr_rings_t)
2173 	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2174 	rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC(
2175 	    sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
2176 
2177 	rx_rcr_rings = (p_rx_rcr_rings_t)
2178 	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2179 	rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC(
2180 	    sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
2181 
2182 	rx_mbox_areas_p = (p_rx_mbox_areas_t)
2183 	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2184 	rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC(
2185 	    sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
2186 
2187 	/*
2188 	 * Timeout should be set based on the system clock divider.
2189 	 * The following timeout value of 1 assumes that one granularity
2190 	 * unit (1000 clock cycles) is about 3 microseconds at 300MHz.
2191 	 */
2192 
2193 	hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
2194 	hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
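
	/*
	 * Illustrative sketch, not part of the driver: converting the
	 * timeout to wall-clock time under the assumption stated above
	 * (one granularity unit = 1000 clock cycles at 300MHz):
	 *
	 *	timeout_us = intr_timeout * 1000 / 300;
	 *
	 * i.e. roughly 3.3 microseconds per unit, so a hypothetical
	 * intr_timeout of 8 would fire about 27 microseconds after the
	 * first unclaimed completion.
	 */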
2195 
2196 	/*
2197 	 * Map descriptors from the buffer pools for each DMA channel.
2198 	 */
2199 	for (i = 0; i < ndmas; i++) {
2200 		/*
2201 		 * Set up and prepare buffer blocks, descriptors and mailbox.
2202 		 */
2203 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2204 		status = hxge_map_rxdma_channel(hxgep, channel,
2205 		    (p_hxge_dma_common_t *)&dma_buf_p[i],
2206 		    (p_rx_rbr_ring_t *)&rbr_rings[i],
2207 		    num_chunks[i],
2208 		    (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i],
2209 		    (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i],
2210 		    (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i],
2211 		    (p_rx_rcr_ring_t *)&rcr_rings[i],
2212 		    (p_rx_mbox_t *)&rx_mbox_p[i]);
2213 		if (status != HXGE_OK) {
2214 			goto hxge_map_rxdma_fail1;
2215 		}
2216 		rbr_rings[i]->index = (uint16_t)i;
2217 		rcr_rings[i]->index = (uint16_t)i;
2218 		rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i];
2219 	}
2220 
2221 	rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
2222 	rx_rbr_rings->rbr_rings = rbr_rings;
2223 	hxgep->rx_rbr_rings = rx_rbr_rings;
2224 	rx_rcr_rings->rcr_rings = rcr_rings;
2225 	hxgep->rx_rcr_rings = rx_rcr_rings;
2226 
2227 	rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
2228 	hxgep->rx_mbox_areas_p = rx_mbox_areas_p;
2229 
2230 	goto hxge_map_rxdma_exit;
2231 
2232 hxge_map_rxdma_fail1:
2233 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2234 	    "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)",
2235 	    status, channel, i));
2236 	i--;
2237 	for (; i >= 0; i--) {
2238 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2239 		hxge_unmap_rxdma_channel(hxgep, channel,
2240 		    rbr_rings[i], rcr_rings[i], rx_mbox_p[i]);
2241 	}
2242 
2243 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2244 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2245 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2246 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2247 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2248 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2249 
2250 hxge_map_rxdma_exit:
2251 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2252 	    "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel));
2253 
2254 	return (status);
2255 }
2256 
2257 static void
2258 hxge_unmap_rxdma(p_hxge_t hxgep)
2259 {
2260 	int			i, ndmas;
2261 	uint16_t		channel;
2262 	p_rx_rbr_rings_t	rx_rbr_rings;
2263 	p_rx_rbr_ring_t		*rbr_rings;
2264 	p_rx_rcr_rings_t	rx_rcr_rings;
2265 	p_rx_rcr_ring_t		*rcr_rings;
2266 	p_rx_mbox_areas_t	rx_mbox_areas_p;
2267 	p_rx_mbox_t		*rx_mbox_p;
2268 	p_hxge_dma_pool_t	dma_buf_poolp;
2269 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
2270 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
2271 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
2272 	p_hxge_dma_common_t	*dma_buf_p;
2273 
2274 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma"));
2275 
2276 	dma_buf_poolp = hxgep->rx_buf_pool_p;
2277 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
2278 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
2279 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
2280 
2281 	if (!dma_buf_poolp->buf_allocated ||
2282 	    !dma_rbr_cntl_poolp->buf_allocated ||
2283 	    !dma_rcr_cntl_poolp->buf_allocated ||
2284 	    !dma_mbox_cntl_poolp->buf_allocated) {
2285 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2286 		    "<== hxge_unmap_rxdma: buffers not allocated"));
2287 		return;
2288 	}
2289 
2290 	rx_rbr_rings = hxgep->rx_rbr_rings;
2291 	rx_rcr_rings = hxgep->rx_rcr_rings;
2292 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
2293 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2294 		    "<== hxge_unmap_rxdma: NULL pointers"));
2295 		return;
2296 	}
2297 
2298 	ndmas = rx_rbr_rings->ndmas;
2299 	if (!ndmas) {
2300 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2301 		    "<== hxge_unmap_rxdma: no channel"));
2302 		return;
2303 	}
2304 
2305 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2306 	    "==> hxge_unmap_rxdma (ndmas %d)", ndmas));
2307 
2308 	rbr_rings = rx_rbr_rings->rbr_rings;
2309 	rcr_rings = rx_rcr_rings->rcr_rings;
2310 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
2311 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
2312 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2313 
2314 	for (i = 0; i < ndmas; i++) {
2315 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2316 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2317 		    "==> hxge_unmap_rxdma (ndmas %d) channel %d",
2318 		    ndmas, channel));
2319 		(void) hxge_unmap_rxdma_channel(hxgep, channel,
2320 		    (p_rx_rbr_ring_t)rbr_rings[i],
2321 		    (p_rx_rcr_ring_t)rcr_rings[i],
2322 		    (p_rx_mbox_t)rx_mbox_p[i]);
2323 	}
2324 
2325 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2326 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2327 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2328 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2329 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2330 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2331 
2332 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma"));
2333 }
2334 
2335 hxge_status_t
2336 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2337     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
2338     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
2339     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
2340     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2341 {
2342 	int status = HXGE_OK;
2343 
2344 	/*
2345 	 * Set up and prepare buffer blocks, descriptors and mailbox.
2346 	 */
2347 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2348 	    "==> hxge_map_rxdma_channel (channel %d)", channel));
2349 
2350 	/*
2351 	 * Receive buffer blocks
2352 	 */
2353 	status = hxge_map_rxdma_channel_buf_ring(hxgep, channel,
2354 	    dma_buf_p, rbr_p, num_chunks);
2355 	if (status != HXGE_OK) {
2356 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2357 		    "==> hxge_map_rxdma_channel (channel %d): "
2358 		    "map buffer failed 0x%x", channel, status));
2359 		goto hxge_map_rxdma_channel_exit;
2360 	}
2361 
2362 	/*
2363 	 * Receive block ring, completion ring and mailbox.
2364 	 */
2365 	status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel,
2366 	    dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p,
2367 	    rbr_p, rcr_p, rx_mbox_p);
2368 	if (status != HXGE_OK) {
2369 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2370 		    "==> hxge_map_rxdma_channel (channel %d): "
2371 		    "map config failed 0x%x", channel, status));
2372 		goto hxge_map_rxdma_channel_fail2;
2373 	}
2374 	goto hxge_map_rxdma_channel_exit;
2375 
2376 hxge_map_rxdma_channel_fail3:
2377 	/* Free rbr, rcr */
2378 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2379 	    "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)",
2380 	    status, channel));
2381 	hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p);
2382 
2383 hxge_map_rxdma_channel_fail2:
2384 	/* Free buffer blocks */
2385 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2386 	    "==> hxge_map_rxdma_channel: free rx buffers"
2387 	    "(hxgep 0x%x status 0x%x channel %d)",
2388 	    hxgep, status, channel));
2389 	hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p);
2390 
2391 	status = HXGE_ERROR;
2392 
2393 hxge_map_rxdma_channel_exit:
2394 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2395 	    "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)",
2396 	    hxgep, status, channel));
2397 
2398 	return (status);
2399 }
2400 
2401 /*ARGSUSED*/
2402 static void
2403 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2404     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2405 {
2406 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2407 	    "==> hxge_unmap_rxdma_channel (channel %d)", channel));
2408 
2409 	/*
2410 	 * unmap receive block ring, completion ring and mailbox.
2411 	 */
2412 	(void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p);
2413 
2414 	/* unmap buffer blocks */
2415 	(void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p);
2416 
2417 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel"));
2418 }
2419 
2420 /*ARGSUSED*/
2421 static hxge_status_t
2422 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
2423     p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p,
2424     p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p,
2425     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2426 {
2427 	p_rx_rbr_ring_t 	rbrp;
2428 	p_rx_rcr_ring_t 	rcrp;
2429 	p_rx_mbox_t 		mboxp;
2430 	p_hxge_dma_common_t 	cntl_dmap;
2431 	p_hxge_dma_common_t 	dmap;
2432 	p_rx_msg_t 		*rx_msg_ring;
2433 	p_rx_msg_t 		rx_msg_p;
2434 	rdc_rbr_cfg_a_t		*rcfga_p;
2435 	rdc_rbr_cfg_b_t		*rcfgb_p;
2436 	rdc_rcr_cfg_a_t		*cfga_p;
2437 	rdc_rcr_cfg_b_t		*cfgb_p;
2438 	rdc_rx_cfg1_t		*cfig1_p;
2439 	rdc_rx_cfg2_t		*cfig2_p;
2440 	rdc_rbr_kick_t		*kick_p;
2441 	uint32_t		dmaaddrp;
2442 	uint32_t		*rbr_vaddrp;
2443 	uint32_t		bkaddr;
2444 	hxge_status_t		status = HXGE_OK;
2445 	int			i;
2446 	uint32_t 		hxge_port_rcr_size;
2447 
2448 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2449 	    "==> hxge_map_rxdma_channel_cfg_ring"));
2450 
2451 	cntl_dmap = *dma_rbr_cntl_p;
2452 
2453 	/*
2454 	 * Map in the receive block ring
2455 	 */
2456 	rbrp = *rbr_p;
2457 	dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc;
2458 	hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
2459 
2460 	/*
2461 	 * Zero out buffer block ring descriptors.
2462 	 */
2463 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2464 
2465 	rcfga_p = &(rbrp->rbr_cfga);
2466 	rcfgb_p = &(rbrp->rbr_cfgb);
2467 	kick_p = &(rbrp->rbr_kick);
2468 	rcfga_p->value = 0;
2469 	rcfgb_p->value = 0;
2470 	kick_p->value = 0;
2471 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
2472 	rcfga_p->value = (rbrp->rbr_addr &
2473 	    (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));
2474 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
2475 
2476 	/* XXXX: how to choose packet buffer sizes */
2477 	rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0;
2478 	rcfgb_p->bits.vld0 = 1;
2479 	rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1;
2480 	rcfgb_p->bits.vld1 = 1;
2481 	rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2;
2482 	rcfgb_p->bits.vld2 = 1;
2483 	rcfgb_p->bits.bksize = hxgep->rx_bksize_code;
2484 
2485 	/*
2486 	 * For each buffer block, enter receive block address to the ring.
2487 	 */
2488 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
2489 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
2490 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2491 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2492 	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
2493 
2494 	rx_msg_ring = rbrp->rx_msg_ring;
2495 	for (i = 0; i < rbrp->tnblocks; i++) {
2496 		rx_msg_p = rx_msg_ring[i];
2497 		rx_msg_p->hxgep = hxgep;
2498 		rx_msg_p->rx_rbr_p = rbrp;
2499 		bkaddr = (uint32_t)
2500 		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2501 		    RBR_BKADDR_SHIFT));
2502 		rx_msg_p->free = B_FALSE;
2503 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
2504 
2505 		*rbr_vaddrp++ = bkaddr;
2506 	}
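
	/*
	 * Illustrative sketch, not part of the driver: each RBR entry is
	 * the block's DMA address shifted right by RBR_BKADDR_SHIFT and
	 * truncated to 32 bits. Assuming a hypothetical shift of 12
	 * (4K-aligned blocks), a cookie of 0x123456000 packs as:
	 *
	 *	bkaddr = (uint32_t)(0x123456000ULL >> 12);	i.e. 0x123456
	 *
	 * which is why the descriptor size passed to
	 * hxge_setup_dma_common() above is 4 bytes per entry.
	 */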
2507 
2508 	kick_p->bits.bkadd = rbrp->rbb_max;
2509 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
2510 
2511 	rbrp->rbr_rd_index = 0;
2512 
2513 	rbrp->rbr_consumed = 0;
2514 	rbrp->rbr_use_bcopy = B_TRUE;
2515 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
2516 
2517 	/*
2518 	 * Do bcopy on packets greater than bcopy size once the lo threshold is
2519 	 * reached. This lo threshold should be less than the hi threshold.
2520 	 *
2521 	 * Do bcopy on every packet once the hi threshold is reached.
2522 	 */
2523 	if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) {
2524 		/* default it to use hi */
2525 		hxge_rx_threshold_lo = hxge_rx_threshold_hi;
2526 	}
2527 	if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) {
2528 		hxge_rx_buf_size_type = HXGE_RBR_TYPE2;
2529 	}
2530 	rbrp->rbr_bufsize_type = hxge_rx_buf_size_type;
2531 
2532 	switch (hxge_rx_threshold_hi) {
2533 	default:
2534 	case HXGE_RX_COPY_NONE:
2535 		/* Do not do bcopy at all */
2536 		rbrp->rbr_use_bcopy = B_FALSE;
2537 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
2538 		break;
2539 
2540 	case HXGE_RX_COPY_1:
2541 	case HXGE_RX_COPY_2:
2542 	case HXGE_RX_COPY_3:
2543 	case HXGE_RX_COPY_4:
2544 	case HXGE_RX_COPY_5:
2545 	case HXGE_RX_COPY_6:
2546 	case HXGE_RX_COPY_7:
2547 		rbrp->rbr_threshold_hi =
2548 		    rbrp->rbb_max * (hxge_rx_threshold_hi) /
2549 		    HXGE_RX_BCOPY_SCALE;
2550 		break;
2551 
2552 	case HXGE_RX_COPY_ALL:
2553 		rbrp->rbr_threshold_hi = 0;
2554 		break;
2555 	}
2556 
2557 	switch (hxge_rx_threshold_lo) {
2558 	default:
2559 	case HXGE_RX_COPY_NONE:
2560 		/* Do not do bcopy at all */
2561 		if (rbrp->rbr_use_bcopy) {
2562 			rbrp->rbr_use_bcopy = B_FALSE;
2563 		}
2564 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
2565 		break;
2566 
2567 	case HXGE_RX_COPY_1:
2568 	case HXGE_RX_COPY_2:
2569 	case HXGE_RX_COPY_3:
2570 	case HXGE_RX_COPY_4:
2571 	case HXGE_RX_COPY_5:
2572 	case HXGE_RX_COPY_6:
2573 	case HXGE_RX_COPY_7:
2574 		rbrp->rbr_threshold_lo =
2575 		    rbrp->rbb_max * (hxge_rx_threshold_lo) /
2576 		    HXGE_RX_BCOPY_SCALE;
2577 		break;
2578 
2579 	case HXGE_RX_COPY_ALL:
2580 		rbrp->rbr_threshold_lo = 0;
2581 		break;
2582 	}
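
	/*
	 * Illustrative sketch, not part of the driver: the threshold
	 * arithmetic above, assuming HXGE_RX_BCOPY_SCALE is 8 and a
	 * hypothetical ring of rbb_max = 2048 with
	 * hxge_rx_threshold_hi = HXGE_RX_COPY_6:
	 *
	 *	rbr_threshold_hi = 2048 * 6 / 8;	i.e. 1536 blocks
	 *
	 * With more than 1536 blocks outstanding every packet is
	 * bcopied; between the lo and hi thresholds only packets larger
	 * than the configured buffer size type are copied.
	 */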
2583 
2584 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
2585 	    "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d "
2586 	    "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d "
2587 	    "rbb_threshold_lo %d",
2588 	    dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type,
2589 	    rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo));
2590 
2591 	/* Map in the receive completion ring */
2592 	rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
2593 	rcrp->rdc = dma_channel;
2594 	rcrp->hxgep = hxgep;
2595 
2596 	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
2597 	rcrp->comp_size = hxge_port_rcr_size;
2598 	rcrp->comp_wrap_mask = hxge_port_rcr_size - 1;
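
	/*
	 * Illustrative sketch, not part of the driver: the wrap mask
	 * relies on the completion ring size being a power of two.
	 * For a hypothetical comp_size of 256:
	 *
	 *	comp_wrap_mask = 256 - 1;			i.e. 0xff
	 *	next = (comp_rd_index + 1) & comp_wrap_mask;	255 wraps to 0
	 *
	 * replacing a modulo with a single AND.
	 */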
2599 
2600 	rcrp->max_receive_pkts = hxge_max_rx_pkts;
2601 
2602 	cntl_dmap = *dma_rcr_cntl_p;
2603 
2604 	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
2605 	hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
2606 	    sizeof (rcr_entry_t));
2607 	rcrp->comp_rd_index = 0;
2608 	rcrp->comp_wt_index = 0;
2609 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
2610 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
2611 #if defined(__i386)
2612 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
2613 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
2614 #else
2615 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
2616 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
2617 #endif
2618 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
2619 	    (hxge_port_rcr_size - 1);
2620 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
2621 	    (hxge_port_rcr_size - 1);
2622 
2623 	rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
2624 	rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;
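
	/*
	 * Illustrative sketch, not part of the driver: the computation
	 * above keeps the low 19 bits of the ring's I/O address and
	 * shifts right by 3 (presumably sizeof (rcr_entry_t) == 8) to
	 * turn a byte address into an entry index. For a hypothetical
	 * I/O address of 0x76543210:
	 *
	 *	(0x76543210 & 0x7ffff) >> 3 = 0x43210 >> 3 = 0x8642
	 */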
2625 
2626 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2627 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2628 	    "rbr_vaddrp $%p rcr_desc_rd_head_p $%p "
2629 	    "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p "
2630 	    "rcr_desc_rd_last_pp $%p ",
2631 	    dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p,
2632 	    rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p,
2633 	    rcrp->rcr_desc_last_pp));
2634 
2635 	/*
2636 	 * Zero out receive completion ring descriptors.
2637 	 */
2638 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2639 	rcrp->intr_timeout = hxgep->intr_timeout;
2640 	rcrp->intr_threshold = hxgep->intr_threshold;
2641 	rcrp->full_hdr_flag = B_FALSE;
2642 	rcrp->sw_priv_hdr_len = 0;
2643 
2644 	cfga_p = &(rcrp->rcr_cfga);
2645 	cfgb_p = &(rcrp->rcr_cfgb);
2646 	cfga_p->value = 0;
2647 	cfgb_p->value = 0;
2648 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
2649 
2650 	cfga_p->value = (rcrp->rcr_addr &
2651 	    (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));
2652 
2653 	cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF);
2654 
2655 	/*
2656 	 * Timeout should be set based on the system clock divider. The
2657 	 * following timeout value of 1 assumes that one granularity unit
2658 	 * (1000 clock cycles) is about 3 microseconds at 300MHz.
2659 	 */
2660 	cfgb_p->bits.pthres = rcrp->intr_threshold;
2661 	cfgb_p->bits.timeout = rcrp->intr_timeout;
2662 	cfgb_p->bits.entout = 1;
2663 
2664 	/* Map in the mailbox */
2665 	cntl_dmap = *dma_mbox_cntl_p;
2666 	mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
2667 	dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox;
2668 	hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
2669 	cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1;
2670 	cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2;
2671 	cfig1_p->value = cfig2_p->value = 0;
2672 
2673 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
2674 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2675 	    "==> hxge_map_rxdma_channel_cfg_ring: "
2676 	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
2677 	    dma_channel, cfig1_p->value, cfig2_p->value,
2678 	    mboxp->mbox_addr));
2679 
2680 	dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff);
2681 	cfig1_p->bits.mbaddr_h = dmaaddrp;
2682 
2683 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
2684 	    RXDMA_CFIG2_MBADDR_L_MASK);
2686 
2687 	cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
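
	/*
	 * Illustrative sketch, not part of the driver: the mailbox DMA
	 * address is split across two registers. The 12 bits above bit
	 * 32 go into cfg1.mbaddr_h; the masked low bits, shifted by
	 * RXDMA_CFIG2_MBADDR_L_SHIFT, go into cfg2.mbaddr_l. For a
	 * hypothetical 64-bit cookie of 0x350001000:
	 *
	 *	mbaddr_h = (0x350001000ULL >> 32) & 0xfff;	i.e. 0x3
	 *
	 * and mbaddr_l holds the aligned low portion, 0x50001000.
	 */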
2688 
2689 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2690 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d dmaaddrp $%p "
2691 	    "cfg1 0x%016llx cfig2 0x%016llx",
2692 	    dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value));
2693 
2694 	cfig2_p->bits.full_hdr = rcrp->full_hdr_flag;
2695 	cfig2_p->bits.offset = rcrp->sw_priv_hdr_len;
2696 
2697 	rbrp->rx_rcr_p = rcrp;
2698 	rcrp->rx_rbr_p = rbrp;
2699 	*rcr_p = rcrp;
2700 	*rx_mbox_p = mboxp;
2701 
2702 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2703 	    "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
2704 	return (status);
2705 }
2706 
2707 /*ARGSUSED*/
2708 static void
2709 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
2710     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2711 {
2712 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2713 	    "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc));
2714 
2715 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
2716 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
2717 
2718 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2719 	    "<== hxge_unmap_rxdma_channel_cfg_ring"));
2720 }
2721 
2722 static hxge_status_t
2723 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
2724     p_hxge_dma_common_t *dma_buf_p,
2725     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
2726 {
2727 	p_rx_rbr_ring_t		rbrp;
2728 	p_hxge_dma_common_t	dma_bufp, tmp_bufp;
2729 	p_rx_msg_t		*rx_msg_ring;
2730 	p_rx_msg_t		rx_msg_p;
2731 	p_mblk_t		mblk_p;
2732 
2733 	rxring_info_t *ring_info;
2734 	hxge_status_t status = HXGE_OK;
2735 	int i, j, index;
2736 	uint32_t size, bsize, nblocks, nmsgs;
2737 
2738 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2739 	    "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel));
2740 
2741 	dma_bufp = tmp_bufp = *dma_buf_p;
2742 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2743 	    " hxge_map_rxdma_channel_buf_ring: channel %d to map %d "
2744 	    "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp));
2745 
2746 	nmsgs = 0;
2747 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2748 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2749 		    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2750 		    "bufp 0x%016llx nblocks %d nmsgs %d",
2751 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2752 		nmsgs += tmp_bufp->nblocks;
2753 	}
2754 	if (!nmsgs) {
2755 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2756 		    "<== hxge_map_rxdma_channel_buf_ring: channel %d "
2757 		    "no msg blocks", channel));
2758 		status = HXGE_ERROR;
2759 		goto hxge_map_rxdma_channel_buf_ring_exit;
2760 	}
2761 	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);
2762 
2763 	size = nmsgs * sizeof (p_rx_msg_t);
2764 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2765 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
2766 	    KM_SLEEP);
2767 
2768 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
2769 	    (void *) hxgep->interrupt_cookie);
2770 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
2771 	    (void *) hxgep->interrupt_cookie);
2772 
2773 	rbrp->rdc = channel;
2774 	rbrp->num_blocks = num_chunks;
2775 	rbrp->tnblocks = nmsgs;
2776 	rbrp->rbb_max = nmsgs;
2777 	rbrp->rbr_max_size = nmsgs;
2778 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
2779 
2780 	/*
2781 	 * Buffer sizes suggested by the NIU architect: 256, 512 and 2K.
2782 	 */
2783 
2784 	switch (hxgep->rx_bksize_code) {
2785 	case RBR_BKSIZE_4K:
2786 		rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
2787 		rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
2788 		rbrp->hpi_pkt_buf_size0 = SIZE_256B;
2789 		break;
2790 	case RBR_BKSIZE_8K:
2791 		/* Use 512 to avoid possible rcr_full condition */
2792 		rbrp->pkt_buf_size0 = RBR_BUFSZ0_512B;
2793 		rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_512_BYTES;
2794 		rbrp->hpi_pkt_buf_size0 = SIZE_512B;
2795 		break;
2796 	}
2797 
2798 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
2799 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
2800 	rbrp->hpi_pkt_buf_size1 = SIZE_1KB;
2801 
2802 	rbrp->block_size = hxgep->rx_default_block_size;
2803 
2804 	if (!hxgep->param_arr[param_accept_jumbo].value) {
2805 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
2806 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
2807 		rbrp->hpi_pkt_buf_size2 = SIZE_2KB;
2808 	} else {
2809 		rbrp->hpi_pkt_buf_size2 = SIZE_4KB;
2810 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
2811 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
2812 	}
2813 
2814 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2815 	    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2816 	    "actual rbr max %d rbb_max %d nmsgs %d "
2817 	    "rbrp->block_size %d default_block_size %d "
2818 	    "(config hxge_rbr_size %d hxge_rbr_spare_size %d)",
2819 	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
2820 	    rbrp->block_size, hxgep->rx_default_block_size,
2821 	    hxge_rbr_size, hxge_rbr_spare_size));
2822 
2823 	/*
2824 	 * Map in buffers from the buffer pool.
2825 	 * Note that num_blocks is the number of chunks. On SPARC, there is likely
2826 	 * only one chunk. For x86, there will be many chunks.
2827 	 * Loop over chunks.
2828 	 */
2829 	index = 0;
2830 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
2831 		bsize = dma_bufp->block_size;
2832 		nblocks = dma_bufp->nblocks;
2833 #if defined(__i386)
2834 		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
2835 #else
2836 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
2837 #endif
2838 		ring_info->buffer[i].buf_index = i;
2839 		ring_info->buffer[i].buf_size = dma_bufp->alength;
2840 		ring_info->buffer[i].start_index = index;
2841 #if defined(__i386)
2842 		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
2843 #else
2844 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
2845 #endif
2846 
2847 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2848 		    " hxge_map_rxdma_channel_buf_ring: map channel %d "
2849 		    "chunk %d nblocks %d chunk_size %x block_size 0x%x "
2850 		    "dma_bufp $%p dvma_addr $%p", channel, i,
2851 		    dma_bufp->nblocks,
2852 		    ring_info->buffer[i].buf_size, bsize, dma_bufp,
2853 		    ring_info->buffer[i].dvma_addr));
2854 
2855 		/* loop over blocks within a chunk */
2856 		for (j = 0; j < nblocks; j++) {
2857 			if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO,
2858 			    dma_bufp)) == NULL) {
2859 				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2860 				    "allocb failed (index %d i %d j %d)",
2861 				    index, i, j));
2862 				goto hxge_map_rxdma_channel_buf_ring_fail1;
2863 			}
2864 			rx_msg_ring[index] = rx_msg_p;
2865 			rx_msg_p->block_index = index;
2866 			rx_msg_p->shifted_addr = (uint32_t)
2867 			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2868 			    RBR_BKADDR_SHIFT));
2869 			/*
2870 			 * Too much output
2871 			 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2872 			 *	"index %d j %d rx_msg_p $%p mblk %p",
2873 			 *	index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
2874 			 */
2875 			mblk_p = rx_msg_p->rx_mblk_p;
2876 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
2877 
2878 			rbrp->rbr_ref_cnt++;
2879 			index++;
2880 			rx_msg_p->buf_dma.dma_channel = channel;
2881 		}
2882 	}
2883 	if (i < rbrp->num_blocks) {
2884 		goto hxge_map_rxdma_channel_buf_ring_fail1;
2885 	}
2886 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2887 	    "hxge_map_rxdma_channel_buf_ring: done buf init "
2888 	    "channel %d msg block entries %d", channel, index));
2889 	ring_info->block_size_mask = bsize - 1;
2890 	rbrp->rx_msg_ring = rx_msg_ring;
2891 	rbrp->dma_bufp = dma_buf_p;
2892 	rbrp->ring_info = ring_info;
2893 
2894 	status = hxge_rxbuf_index_info_init(hxgep, rbrp);
2895 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: "
2896 	    "channel %d done buf info init", channel));
2897 
2898 	/*
2899 	 * Finally, permit hxge_freeb() to call hxge_post_page().
2900 	 */
2901 	rbrp->rbr_state = RBR_POSTING;
2902 
2903 	*rbr_p = rbrp;
2904 
2905 	goto hxge_map_rxdma_channel_buf_ring_exit;
2906 
2907 hxge_map_rxdma_channel_buf_ring_fail1:
2908 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2909 	    " hxge_map_rxdma_channel_buf_ring: failed channel %d (status 0x%x)",
2910 	    channel, status));
2911 
2912 	index--;
2913 	for (; index >= 0; index--) {
2914 		rx_msg_p = rx_msg_ring[index];
2915 		if (rx_msg_p != NULL) {
2916 			freeb(rx_msg_p->rx_mblk_p);
2917 			rx_msg_ring[index] = NULL;
2918 		}
2919 	}
2920 
2921 hxge_map_rxdma_channel_buf_ring_fail:
2922 	MUTEX_DESTROY(&rbrp->post_lock);
2923 	MUTEX_DESTROY(&rbrp->lock);
2924 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
2925 	KMEM_FREE(rx_msg_ring, size);
2926 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
2927 
2928 	status = HXGE_ERROR;
2929 
2930 hxge_map_rxdma_channel_buf_ring_exit:
2931 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2932 	    "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status));
2933 
2934 	return (status);
2935 }
2936 
2937 /*ARGSUSED*/
2938 static void
2939 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
2940     p_rx_rbr_ring_t rbr_p)
2941 {
2942 	p_rx_msg_t	*rx_msg_ring;
2943 	p_rx_msg_t	rx_msg_p;
2944 	rxring_info_t	*ring_info;
2945 	int		i;
2946 	uint32_t	size;
2947 
2948 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2949 	    "==> hxge_unmap_rxdma_channel_buf_ring"));
2950 	if (rbr_p == NULL) {
2951 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2952 		    "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
2953 		return;
2954 	}
2955 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2956 	    "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc));
2957 
2958 	rx_msg_ring = rbr_p->rx_msg_ring;
2959 	ring_info = rbr_p->ring_info;
2960 
2961 	if (rx_msg_ring == NULL || ring_info == NULL) {
2962 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2963 		    "<== hxge_unmap_rxdma_channel_buf_ring: "
2964 		    "rx_msg_ring $%p ring_info $%p", rx_msg_ring, ring_info));
2965 		return;
2966 	}
2967 
2968 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
2969 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2970 	    " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
2971 	    "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks,
2972 	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));
2973 
2974 	for (i = 0; i < rbr_p->tnblocks; i++) {
2975 		rx_msg_p = rx_msg_ring[i];
2976 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2977 		    " hxge_unmap_rxdma_channel_buf_ring: "
2978 		    "rx_msg_p $%p", rx_msg_p));
2979 		if (rx_msg_p != NULL) {
2980 			freeb(rx_msg_p->rx_mblk_p);
2981 			rx_msg_ring[i] = NULL;
2982 		}
2983 	}
2984 
2985 	/*
2986 	 * We may no longer use the mutex <post_lock>. By setting
2987 	 * <rbr_state> to anything but POSTING, we prevent
2988 	 * hxge_post_page() from accessing a dead mutex.
2989 	 */
2990 	rbr_p->rbr_state = RBR_UNMAPPING;
2991 	MUTEX_DESTROY(&rbr_p->post_lock);
2992 
2993 	MUTEX_DESTROY(&rbr_p->lock);
2994 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
2995 	KMEM_FREE(rx_msg_ring, size);
2996 
2997 	if (rbr_p->rbr_ref_cnt == 0) {
2998 		/* This is the normal state of affairs. */
2999 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
3000 	} else {
3001 		/*
3002 		 * Some of our buffers are still being used.
3003 		 * Therefore, tell hxge_freeb() this ring is
3004 		 * unmapped, so it may free <rbr_p> for us.
3005 		 */
3006 		rbr_p->rbr_state = RBR_UNMAPPED;
3007 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3008 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
3009 		    rbr_p->rbr_ref_cnt,
3010 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
3011 	}
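
	/*
	 * Illustrative sketch, not part of the driver: the deferred
	 * teardown described above, as the free path would observe it
	 * (simplified):
	 *
	 *	if (--ring->rbr_ref_cnt == 0 &&
	 *	    ring->rbr_state == RBR_UNMAPPED)
	 *		KMEM_FREE(ring, sizeof (*ring));
	 *
	 * so the last loaned-up buffer returned through hxge_freeb()
	 * releases <rbr_p> on this function's behalf.
	 */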
3012 
3013 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3014 	    "<== hxge_unmap_rxdma_channel_buf_ring"));
3015 }
3016 
3017 static hxge_status_t
3018 hxge_rxdma_hw_start_common(p_hxge_t hxgep)
3019 {
3020 	hxge_status_t status = HXGE_OK;
3021 
3022 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
3023 
3024 	/*
3025 	 * Load the sharable parameters by writing to the function zero control
3026 	 * registers. These FZC registers should be initialized only once for
3027 	 * the entire chip.
3028 	 */
3029 	(void) hxge_init_fzc_rx_common(hxgep);
3030 
3031 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_start_common"));
3032 
3033 	return (status);
3034 }
3035 
3036 static hxge_status_t
3037 hxge_rxdma_hw_start(p_hxge_t hxgep)
3038 {
3039 	int			i, ndmas;
3040 	uint16_t		channel;
3041 	p_rx_rbr_rings_t	rx_rbr_rings;
3042 	p_rx_rbr_ring_t		*rbr_rings;
3043 	p_rx_rcr_rings_t	rx_rcr_rings;
3044 	p_rx_rcr_ring_t		*rcr_rings;
3045 	p_rx_mbox_areas_t	rx_mbox_areas_p;
3046 	p_rx_mbox_t		*rx_mbox_p;
3047 	hxge_status_t		status = HXGE_OK;
3048 
3049 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start"));
3050 
3051 	rx_rbr_rings = hxgep->rx_rbr_rings;
3052 	rx_rcr_rings = hxgep->rx_rcr_rings;
3053 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
3054 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3055 		    "<== hxge_rxdma_hw_start: NULL ring pointers"));
3056 		return (HXGE_ERROR);
3057 	}
3058 
3059 	ndmas = rx_rbr_rings->ndmas;
3060 	if (ndmas == 0) {
3061 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3062 		    "<== hxge_rxdma_hw_start: no dma channel allocated"));
3063 		return (HXGE_ERROR);
3064 	}
3065 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3066 	    "==> hxge_rxdma_hw_start (ndmas %d)", ndmas));
3067 
3068 	/*
3069 	 * Scrub the RDC Rx DMA Prefetch Buffer Command.
3070 	 */
3071 	for (i = 0; i < 128; i++) {
3072 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i);
3073 	}
3074 
3075 	/*
3076 	 * Scrub Rx DMA Shadow Tail Command.
3077 	 */
3078 	for (i = 0; i < 64; i++) {
3079 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i);
3080 	}
3081 
3082 	/*
3083 	 * Scrub Rx DMA Control Fifo Command.
3084 	 */
3085 	for (i = 0; i < 512; i++) {
3086 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i);
3087 	}
3088 
3089 	/*
3090 	 * Scrub Rx DMA Data Fifo Command.
3091 	 */
3092 	for (i = 0; i < 1536; i++) {
3093 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i);
3094 	}
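
	/*
	 * Illustrative note, not part of the driver: each write above
	 * deposits an entry index into the corresponding command
	 * register, initializing that RAM entry; the loop bounds (128,
	 * 64, 512 and 1536) are assumed to match the respective RAM
	 * depths.
	 */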
3095 
3096 	/*
3097 	 * Reset the FIFO Error Stat.
3098 	 */
3099 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF);
3100 
3101 	/* Set the error mask to receive interrupts */
3102 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
3103 
3104 	rbr_rings = rx_rbr_rings->rbr_rings;
3105 	rcr_rings = rx_rcr_rings->rcr_rings;
3106 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
3107 	if (rx_mbox_areas_p) {
3108 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
3109 	}
3110 
3111 	for (i = 0; i < ndmas; i++) {
3112 		channel = rbr_rings[i]->rdc;
3113 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3114 		    "==> hxge_rxdma_hw_start (ndmas %d) channel %d",
3115 		    ndmas, channel));
3116 		status = hxge_rxdma_start_channel(hxgep, channel,
3117 		    (p_rx_rbr_ring_t)rbr_rings[i],
3118 		    (p_rx_rcr_ring_t)rcr_rings[i],
3119 		    (p_rx_mbox_t)rx_mbox_p[i], rbr_rings[i]->rbb_max);
3120 		if (status != HXGE_OK) {
3121 			goto hxge_rxdma_hw_start_fail1;
3122 		}
3123 	}
3124 
3125 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: "
3126 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
3127 	    rx_rbr_rings, rx_rcr_rings));
3128 	goto hxge_rxdma_hw_start_exit;
3129 
3130 hxge_rxdma_hw_start_fail1:
3131 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3132 	    "==> hxge_rxdma_hw_start: disable "
3133 	    "(status 0x%x channel %d i %d)", status, channel, i));
3134 	for (; i >= 0; i--) {
3135 		channel = rbr_rings[i]->rdc;
3136 		(void) hxge_rxdma_stop_channel(hxgep, channel);
3137 	}
3138 
3139 hxge_rxdma_hw_start_exit:
3140 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3141 	    "<== hxge_rxdma_hw_start: (status 0x%x)", status));
3142 	return (status);
3143 }
3144 
3145 static void
3146 hxge_rxdma_hw_stop(p_hxge_t hxgep)
3147 {
3148 	int			i, ndmas;
3149 	uint16_t		channel;
3150 	p_rx_rbr_rings_t	rx_rbr_rings;
3151 	p_rx_rbr_ring_t		*rbr_rings;
3152 	p_rx_rcr_rings_t	rx_rcr_rings;
3153 
3154 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop"));
3155 
3156 	rx_rbr_rings = hxgep->rx_rbr_rings;
3157 	rx_rcr_rings = hxgep->rx_rcr_rings;
3158 
3159 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
3160 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3161 		    "<== hxge_rxdma_hw_stop: NULL ring pointers"));
3162 		return;
3163 	}
3164 
3165 	ndmas = rx_rbr_rings->ndmas;
3166 	if (!ndmas) {
3167 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3168 		    "<== hxge_rxdma_hw_stop: no dma channel allocated"));
3169 		return;
3170 	}
3171 
3172 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3173 	    "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas));
3174 
3175 	rbr_rings = rx_rbr_rings->rbr_rings;
3176 	for (i = 0; i < ndmas; i++) {
3177 		channel = rbr_rings[i]->rdc;
3178 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3179 		    "==> hxge_rxdma_hw_stop (ndmas %d) channel %d",
3180 		    ndmas, channel));
3181 		(void) hxge_rxdma_stop_channel(hxgep, channel);
3182 	}
3183 
3184 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: "
3185 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
3186 	    rx_rbr_rings, rx_rcr_rings));
3187 
3188 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop"));
3189 }
3190 
3191 static hxge_status_t
3192 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
3193     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
3194     int n_init_kick)
3195 {
3196 	hpi_handle_t		handle;
3197 	hpi_status_t		rs = HPI_SUCCESS;
3198 	rdc_stat_t		cs;
3199 	rdc_int_mask_t		ent_mask;
3200 	hxge_status_t		status = HXGE_OK;
3201 
3202 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel"));
3203 
3204 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3205 
3206 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: "
3207 	    "hpi handle addr $%p acc $%p",
3208 	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3209 
3210 	/* Reset RXDMA channel */
3211 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3212 	if (rs != HPI_SUCCESS) {
3213 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3214 		    "==> hxge_rxdma_start_channel: "
3215 		    "reset rxdma failed (0x%08x channel %d)",
3216 		    rs, channel));
3217 		return (HXGE_ERROR | rs);
3218 	}
3219 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3220 	    "==> hxge_rxdma_start_channel: reset done: channel %d", channel));
3221 
3222 	/*
3223 	 * Initialize the RXDMA channel specific FZC control configurations.
3224 	 * These FZC registers are pertaining to each RX channel (logical
3225 	 * pages).
3226 	 */
3227 	status = hxge_init_fzc_rxdma_channel(hxgep,
3228 	    channel, rbr_p, rcr_p, mbox_p);
3229 	if (status != HXGE_OK) {
3230 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3231 		    "==> hxge_rxdma_start_channel: "
3232 		    "init fzc rxdma failed (0x%08x channel %d)",
3233 		    status, channel));
3234 		return (status);
3235 	}
3236 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3237 	    "==> hxge_rxdma_start_channel: fzc done"));
3238 
3239 	/*
3240 	 * Zero out the shadow and prefetch RAM.
3241 	 */
3242 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3243 	    "==> hxge_rxdma_start_channel: ram done"));
3244 
3245 	/* Set up the interrupt event masks. */
3246 	ent_mask.value = 0;
3247 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3248 	if (rs != HPI_SUCCESS) {
3249 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3250 		    "==> hxge_rxdma_start_channel: "
3251 		    "init rxdma event masks failed (0x%08x channel %d)",
3252 		    rs, channel));
3253 		return (HXGE_ERROR | rs);
3254 	}
3255 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3256 	    "event done: channel %d (mask 0x%016llx)",
3257 	    channel, ent_mask.value));
3258 
3259 	/*
3260 	 * Load RXDMA descriptors, buffers, mailbox, initialise the receive DMA
3261 	 * channels and enable each DMA channel.
3262 	 */
3263 	status = hxge_enable_rxdma_channel(hxgep,
3264 	    channel, rbr_p, rcr_p, mbox_p, n_init_kick);
3265 	if (status != HXGE_OK) {
3266 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3267 		    " hxge_rxdma_start_channel: "
3268 		    " init enable rxdma failed (0x%08x channel %d)",
3269 		    status, channel));
3270 		return (status);
3271 	}
3272 
3273 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3274 	    "enable done: channel %d", channel));
3275 
3276 	/*
3277 	 * Initialize the receive DMA control and status register
3278 	 * Note that rdc_stat HAS to be set after RBR and RCR rings are set
3279 	 */
3280 	cs.value = 0;
3281 	cs.bits.mex = 1;
3282 	cs.bits.rcr_thres = 1;
3283 	cs.bits.rcr_to = 1;
3284 	cs.bits.rbr_empty = 1;
3285 	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3286 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3287 	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
3288 	if (status != HXGE_OK) {
3289 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3290 		    "==> hxge_rxdma_start_channel: "
3291 		    "init rxdma control register failed (0x%08x channel %d",
3292 		    status, channel));
3293 		return (status);
3294 	}
3295 
3296 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3297 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
3298 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3299 	    "==> hxge_rxdma_start_channel: enable done"));
3300 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel"));
3301 
3302 	return (HXGE_OK);
3303 }
3304 
3305 static hxge_status_t
3306 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel)
3307 {
3308 	hpi_handle_t		handle;
3309 	hpi_status_t		rs = HPI_SUCCESS;
3310 	rdc_stat_t		cs;
3311 	rdc_int_mask_t		ent_mask;
3312 	hxge_status_t		status = HXGE_OK;
3313 
3314 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel"));
3315 
3316 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3317 
3318 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: "
3319 	    "hpi handle addr $%p acc $%p",
3320 	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3321 
3322 	/* Reset RXDMA channel */
3323 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3324 	if (rs != HPI_SUCCESS) {
3325 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3326 		    " hxge_rxdma_stop_channel: "
3327 		    " reset rxdma failed (0x%08x channel %d)",
3328 		    rs, channel));
3329 		return (HXGE_ERROR | rs);
3330 	}
3331 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3332 	    "==> hxge_rxdma_stop_channel: reset done"));
3333 
3334 	/* Set up the interrupt event masks. */
3335 	ent_mask.value = RDC_INT_MASK_ALL;
3336 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3337 	if (rs != HPI_SUCCESS) {
3338 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3339 		    "==> hxge_rxdma_stop_channel: "
3340 		    "set rxdma event masks failed (0x%08x channel %d)",
3341 		    rs, channel));
3342 		return (HXGE_ERROR | rs);
3343 	}
3344 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3345 	    "==> hxge_rxdma_stop_channel: event done"));
3346 
3347 	/* Initialize the receive DMA control and status register */
3348 	cs.value = 0;
3349 	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3350 
3351 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control "
3352 	    " to default (all 0s) 0x%08x", cs.value));
3353 
3354 	if (status != HXGE_OK) {
3355 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3356 		    " hxge_rxdma_stop_channel: init rxdma"
3357 		    " control register failed (0x%08x channel %d",
3358 		    status, channel));
3359 		return (status);
3360 	}
3361 
3362 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3363 	    "==> hxge_rxdma_stop_channel: control done"));
3364 
3365 	/* disable dma channel */
3366 	status = hxge_disable_rxdma_channel(hxgep, channel);
3367 
3368 	if (status != HXGE_OK) {
3369 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3370 		    " hxge_rxdma_stop_channel: "
3371 		    " init enable rxdma failed (0x%08x channel %d)",
3372 		    status, channel));
3373 		return (status);
3374 	}
3375 
3376 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3377 	    "==> hxge_rxdma_stop_channel: disable done"));
3378 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel"));
3379 
3380 	return (HXGE_OK);
3381 }
3382 
3383 hxge_status_t
3384 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep)
3385 {
3386 	hpi_handle_t		handle;
3387 	p_hxge_rdc_sys_stats_t	statsp;
3388 	rdc_fifo_err_stat_t	stat;
3389 	hxge_status_t		status = HXGE_OK;
3390 
3391 	handle = hxgep->hpi_handle;
3392 	statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats;
3393 
3394 	/* Clear the int_dbg register in case it is an injected err */
3395 	HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0);
3396 
3397 	/* Get the error status and clear the register */
3398 	HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
3399 	HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value);
3400 
3401 	if (stat.bits.rx_ctrl_fifo_sec) {
3402 		statsp->ctrl_fifo_sec++;
3403 		if (statsp->ctrl_fifo_sec == 1)
3404 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3405 			    "==> hxge_rxdma_handle_sys_errors: "
3406 			    "rx_ctrl_fifo_sec"));
3407 	}
3408 
3409 	if (stat.bits.rx_ctrl_fifo_ded) {
3410 		/* Global fatal error encountered */
3411 		statsp->ctrl_fifo_ded++;
3412 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
3413 		    HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED);
3414 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3415 		    "==> hxge_rxdma_handle_sys_errors: "
3416 		    "fatal error: rx_ctrl_fifo_ded error"));
3417 	}
3418 
3419 	if (stat.bits.rx_data_fifo_sec) {
3420 		statsp->data_fifo_sec++;
3421 		if (statsp->data_fifo_sec == 1)
3422 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3423 			    "==> hxge_rxdma_handle_sys_errors: "
3424 			    "rx_data_fifo_sec"));
3425 	}
3426 
3427 	if (stat.bits.rx_data_fifo_ded) {
3428 		/* Global fatal error encountered */
3429 		statsp->data_fifo_ded++;
3430 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
3431 		    HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED);
3432 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3433 		    "==> hxge_rxdma_handle_sys_errors: "
3434 		    "fatal error: rx_data_fifo_ded error"));
3435 	}
3436 
3437 	if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) {
3438 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3439 		    " hxge_rxdma_handle_sys_errors: fatal error\n"));
3440 		status = hxge_rx_port_fatal_err_recover(hxgep);
3441 		if (status == HXGE_OK) {
3442 			FM_SERVICE_RESTORED(hxgep);
3443 		}
3444 	}
3445 
	return (status);
3447 }
3448 
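/*
 * Note on the read-then-write-back sequence in
 * hxge_rxdma_handle_sys_errors(): it assumes RDC_FIFO_ERR_STAT is a
 * write-1-to-clear register, so writing back the value just read
 * acknowledges exactly the bits that were latched and cannot clear an
 * error that latches after the read.  Condensed (stat is the
 * rdc_fifo_err_stat_t declared above):
 *
 *	HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
 *	HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value);
 */
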
3449 static hxge_status_t
3450 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
3451 {
3452 	hpi_handle_t		handle;
3453 	hpi_status_t 		rs = HPI_SUCCESS;
3454 	hxge_status_t 		status = HXGE_OK;
3455 	p_rx_rbr_ring_t		rbrp;
3456 	p_rx_rcr_ring_t		rcrp;
3457 	p_rx_mbox_t		mboxp;
3458 	rdc_int_mask_t		ent_mask;
3459 	p_hxge_dma_common_t	dmap;
3460 	int			ring_idx;
3461 	p_rx_msg_t		rx_msg_p;
3462 	int			i;
3463 	uint32_t		hxge_port_rcr_size;
3464 	uint64_t		tmp;
3465 	int			n_init_kick = 0;
3466 
3467 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover"));
3468 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3469 	    "Recovering from RxDMAChannel#%d error...", channel));
3470 
	/*
	 * Stop the DMA channel and wait for the stop-done bit.
	 * If the stop-done bit does not get set, flag an error.
	 */
3475 
3476 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3477 
3478 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop..."));
3479 
3480 	ring_idx = hxge_rxdma_get_ring_index(hxgep, channel);
3481 	rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx];
3482 	rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx];
3483 
3484 	MUTEX_ENTER(&rcrp->lock);
3485 	MUTEX_ENTER(&rbrp->lock);
3486 
3487 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel..."));
3488 
3489 	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
3490 	if (rs != HPI_SUCCESS) {
3491 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hpi_rxdma_cfg_rdc_disable failed (channel %d)", channel));
3493 		goto fail;
3494 	}
3495 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt..."));
3496 
3497 	/* Disable interrupt */
3498 	ent_mask.value = RDC_INT_MASK_ALL;
3499 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3500 	if (rs != HPI_SUCCESS) {
3501 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3502 		    "Set rxdma event masks failed (channel %d)", channel));
3503 	}
3504 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset..."));
3505 
3506 	/* Reset RXDMA channel */
3507 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3508 	if (rs != HPI_SUCCESS) {
3509 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3510 		    "Reset rxdma failed (channel %d)", channel));
3511 		goto fail;
3512 	}
3513 	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
3514 	mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
3515 
3516 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3517 	rbrp->rbr_rd_index = 0;
3518 
3519 	rcrp->comp_rd_index = 0;
3520 	rcrp->comp_wt_index = 0;
3521 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3522 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
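	/*
	 * On 32-bit x86 kernels the 64-bit DMA I/O address is narrowed
	 * through uint32_t to the ILP32 pointer size before the cast;
	 * 64-bit kernels cast the address directly.
	 */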
3523 #if defined(__i386)
3524 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3525 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3526 #else
3527 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3528 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3529 #endif
3530 
3531 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3532 	    (hxge_port_rcr_size - 1);
3533 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3534 	    (hxge_port_rcr_size - 1);
3535 
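	/*
	 * rcr_tail_begin reduces the ring's I/O address to an entry index:
	 * keep the low 19 bits of the address, then scale the byte offset
	 * down to 8-byte RCR entries.
	 */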
3536 	rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
3537 	rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;
3538 
3539 	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
3540 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
3541 
3542 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n",
3543 	    rbrp->rbr_max_size));
3544 
3545 	/* Count the number of buffers owned by the hardware at this moment */
3546 	for (i = 0; i < rbrp->rbr_max_size; i++) {
3547 		rx_msg_p = rbrp->rx_msg_ring[i];
3548 		if (rx_msg_p->ref_cnt == 1) {
3549 			n_init_kick++;
3550 		}
3551 	}
3552 
3553 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start..."));
3554 
	/*
	 * This is error recovery: some buffers are owned by the hardware
	 * and the rest are owned by the apps. Initially kick in only those
	 * owned by the hardware; the apps will post theirs back eventually.
	 */
3561 	status = hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp,
3562 	    n_init_kick);
3563 	if (status != HXGE_OK) {
3564 		goto fail;
3565 	}
3566 
	/*
	 * The DMA channel may disable itself automatically.
	 * The following (a read of RDC_RX_CFG1 whose result is discarded,
	 * followed by an explicit re-enable) is a workaround.
	 */
3571 	HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp);
3572 	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
3573 	if (rs != HPI_SUCCESS) {
3574 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hpi_rxdma_cfg_rdc_enable failed (channel %d)", channel));
3576 	}
3577 
3578 	MUTEX_EXIT(&rbrp->lock);
3579 	MUTEX_EXIT(&rcrp->lock);
3580 
3581 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3582 	    "Recovery Successful, RxDMAChannel#%d Restored", channel));
3583 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover"));
3584 
3585 	return (HXGE_OK);
3586 
3587 fail:
3588 	MUTEX_EXIT(&rbrp->lock);
3589 	MUTEX_EXIT(&rcrp->lock);
3590 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
3591 
3592 	return (HXGE_ERROR | rs);
3593 }
3594 
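/*
 * The n_init_kick scan in hxge_rxdma_fatal_err_recover() relies on the
 * buffer ownership convention: an rx_msg_t whose ref_cnt is exactly 1 is
 * referenced only by the driver and may be re-posted immediately, while
 * buffers loaned upstream hold extra references and are re-posted when
 * they are freed.  A hypothetical helper expressing the same scan (not
 * part of this driver):
 *
 *	static int
 *	hxge_count_hw_owned(p_rx_rbr_ring_t rbrp)
 *	{
 *		int i, n = 0;
 *
 *		for (i = 0; i < rbrp->rbr_max_size; i++) {
 *			if (rbrp->rx_msg_ring[i]->ref_cnt == 1)
 *				n++;
 *		}
 *		return (n);
 *	}
 */
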
3595 static hxge_status_t
3596 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
3597 {
3598 	hxge_status_t		status = HXGE_OK;
3599 	p_hxge_dma_common_t	*dma_buf_p;
3600 	uint16_t		channel;
3601 	int			ndmas;
3602 	int			i;
3603 	block_reset_t		reset_reg;
	p_rx_rcr_ring_t		rcrp;
	p_rx_rbr_ring_t		rbrp;
3606 
3607 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover"));
3608 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ..."));
3609 
3610 	/* Reset RDC block from PEU for this fatal error */
3611 	reset_reg.value = 0;
3612 	reset_reg.bits.rdc_rst = 1;
3613 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
3614 
3615 	/* Disable RxMAC */
3616 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n"));
3617 	if (hxge_rx_vmac_disable(hxgep) != HXGE_OK)
3618 		goto fail;
3619 
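	/* Give the preceding RxMAC disable time to take effect. */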
3620 	HXGE_DELAY(1000);
3621 
3622 	/* Restore any common settings after PEU reset */
3623 	if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK)
3624 		goto fail;
3625 
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Recover all RxDMA channels..."));
3627 
3628 	ndmas = hxgep->rx_buf_pool_p->ndmas;
3629 	dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p;
3630 
3631 	for (i = 0; i < ndmas; i++) {
3632 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
3633 		rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
3634 		rbrp = rcrp->rx_rbr_p;
3635 
3636 		MUTEX_ENTER(&rbrp->post_lock);
3637 		/* This function needs to be inside the post_lock */
3638 		if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) {
3639 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3640 			    "Could not recover channel %d", channel));
3641 		}
3642 		MUTEX_EXIT(&rbrp->post_lock);
3643 	}
3644 
3645 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC..."));
3646 
3647 	/* Reset RxMAC */
3648 	if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) {
3649 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3650 		    "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
3651 		goto fail;
3652 	}
3653 
3654 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC..."));
3655 
3656 	/* Re-Initialize RxMAC */
3657 	if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) {
3658 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_rx_port_fatal_err_recover: Failed to init RxMAC"));
3660 		goto fail;
3661 	}
3662 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC..."));
3663 
3664 	/* Re-enable RxMAC */
3665 	if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) {
3666 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3667 		    "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC"));
3668 		goto fail;
3669 	}
3670 
3671 	/* Reset the error mask since PEU reset cleared it */
3672 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
3673 
3674 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3675 	    "Recovery Successful, RxPort Restored"));
3676 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover"));
3677 
3678 	return (HXGE_OK);
3679 fail:
3680 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
3681 	return (status);
3682 }
3683 
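/*
 * Per-channel recovery in hxge_rx_port_fatal_err_recover() runs with the
 * ring's post_lock held, presumably so no buffers are posted to the RBR
 * while the channel is being reset.  Condensed pattern from the loop
 * above:
 *
 *	MUTEX_ENTER(&rbrp->post_lock);
 *	(void) hxge_rxdma_fatal_err_recover(hxgep, channel);
 *	MUTEX_EXIT(&rbrp->post_lock);
 */
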
3684 static void
3685 hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p)
3686 {
3687 	hpi_status_t		hpi_status;
3688 	hxge_status_t		status;
3689 	int			i;
3690 	p_hxge_rx_ring_stats_t	rdc_stats;
3691 
3692 	rdc_stats = &hxgep->statsp->rdc_stats[rx_rbr_p->rdc];
3693 	rdc_stats->rbr_empty_restore++;
3694 	rx_rbr_p->rbr_is_empty = B_FALSE;
3695 
	/*
	 * Complete the processing for the RBR Empty by:
	 *	1) Kicking back HXGE_RBR_EMPTY_THRESHOLD buffers.
	 *	2) Disabling the RX VMAC.
	 *	3) Re-enabling the affected DMA channel.
	 *	4) Re-enabling the RX VMAC.
	 */
3704 
	/*
	 * Disable the RX VMAC by setting the frame length to 0; this
	 * works around a hardware bug that is triggered when the VMAC
	 * is disabled directly.
	 */
3710 	MUTEX_ENTER(hxgep->genlock);
3711 	(void) hpi_vmac_rx_set_framesize(
3712 	    HXGE_DEV_HPI_HANDLE(hxgep), (uint16_t)0);
3713 
3714 	hpi_status = hpi_rxdma_cfg_rdc_enable(
3715 	    HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc);
3716 	if (hpi_status != HPI_SUCCESS) {
3717 		rdc_stats->rbr_empty_fail++;
3718 
3719 		/* Assume we are already inside the post_lock */
3720 		status = hxge_rxdma_fatal_err_recover(hxgep, rx_rbr_p->rdc);
3721 		if (status != HXGE_OK) {
3722 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "hxge(%d): channel(%d) recovery from RBR empty failed.",
3724 			    hxgep->instance, rx_rbr_p->rdc));
3725 		}
3726 	}
3727 
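	/*
	 * Read RDC_STAT across the four DMA channels many times, discarding
	 * the values; the uncached register reads appear to serve only as a
	 * settling delay before the VMAC is re-enabled.
	 */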
3728 	for (i = 0; i < 1024; i++) {
3729 		uint64_t value;
3730 		RXDMA_REG_READ64(HXGE_DEV_HPI_HANDLE(hxgep),
3731 		    RDC_STAT, i & 3, &value);
3732 	}
3733 
3734 	/*
3735 	 * Re-enable the RX VMAC.
3736 	 */
3737 	(void) hpi_vmac_rx_set_framesize(HXGE_DEV_HPI_HANDLE(hxgep),
3738 	    (uint16_t)hxgep->vmac.maxframesize);
3739 	MUTEX_EXIT(hxgep->genlock);
3740 }
3741
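
/*
 * The VMAC quiesce/resume bracket used in hxge_rbr_empty_restore() is a
 * sketch worth noting on its own, under the same assumption stated above
 * (an RX frame size of 0 stops the VMAC from accepting frames, avoiding
 * the hardware bug in the direct disable path):
 *
 *	MUTEX_ENTER(hxgep->genlock);
 *	(void) hpi_vmac_rx_set_framesize(HXGE_DEV_HPI_HANDLE(hxgep),
 *	    (uint16_t)0);
 *	... reprogram the affected DMA channel ...
 *	(void) hpi_vmac_rx_set_framesize(HXGE_DEV_HPI_HANDLE(hxgep),
 *	    (uint16_t)hxgep->vmac.maxframesize);
 *	MUTEX_EXIT(hxgep->genlock);
 */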